Bhaskar2611 committed on
Commit 97de22a · verified · 1 Parent(s): 419af44

Update app.py

Files changed (1)
app.py +50 -44
app.py CHANGED
@@ -3,35 +3,6 @@
 # from langchain.chat_models import ChatOpenAI
 # from langchain import LLMChain, PromptTemplate
 # from langchain.memory import ConversationBufferMemory
-
-# OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
-
-# template = """You are a helpful assistant to answer all user queries.
-# {chat_history}
-# User: {user_message}
-# Chatbot:"""
-
-# prompt = PromptTemplate(
-#     input_variables=["chat_history", "user_message"], template=template
-# )
-
-# memory = ConversationBufferMemory(memory_key="chat_history")
-
-# llm_chain = LLMChain(
-#     llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
-#     prompt=prompt,
-#     verbose=True,
-#     memory=memory,
-# )
-
-# def get_text_response(user_message,history):
-#     response = llm_chain.predict(user_message = user_message)
-#     return response
-
-# demo = gr.ChatInterface(get_text_response)
-
-# if __name__ == "__main__":
-#     demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 import os
 import gradio as gr
 from langchain.chat_models import ChatOpenAI
@@ -39,40 +10,75 @@ from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
 
-# Get API key from environment variable
-OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
 
-# Define the template for the chatbot's response
 template = """You are a helpful assistant to answer all user queries.
 {chat_history}
 User: {user_message}
 Chatbot:"""
 
-# Define the prompt template
 prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"],
-    template=template
+    input_variables=["chat_history", "user_message"], template=template
 )
 
-# Initialize conversation memory
 memory = ConversationBufferMemory(memory_key="chat_history")
 
-# Define the LLM chain with the ChatOpenAI model and conversation memory
 llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
+    llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
     prompt=prompt,
    verbose=True,
     memory=memory,
 )
 
-# Function to get chatbot response
-def get_text_response(user_message, history):
-    response = llm_chain.predict(user_message=user_message)
+def get_text_response(user_message,history):
+    response = llm_chain.predict(user_message = user_message)
     return response
 
-# Create a Gradio chat interface
-demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+demo = gr.ChatInterface(get_text_response)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
+# import os
+# import gradio as gr
+# from langchain.chat_models import ChatOpenAI
+# from langchain.prompts import PromptTemplate
+# from langchain.chains import LLMChain
+# from langchain.memory import ConversationBufferMemory
+
+# # Get API key from environment variable
+# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+
+# # Define the template for the chatbot's response
+# template = """You are a helpful assistant to answer all user queries.
+# {chat_history}
+# User: {user_message}
+# Chatbot:"""
+
+# # Define the prompt template
+# prompt = PromptTemplate(
+#     input_variables=["chat_history", "user_message"],
+#     template=template
+# )
+
+# # Initialize conversation memory
+# memory = ConversationBufferMemory(memory_key="chat_history")
+
+# # Define the LLM chain with the ChatOpenAI model and conversation memory
+# llm_chain = LLMChain(
+#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
+#     prompt=prompt,
+#     verbose=True,
+#     memory=memory,
+# )
+
+# # Function to get chatbot response
+# def get_text_response(user_message, history):
+#     response = llm_chain.predict(user_message=user_message)
+#     return response
+
+# # Create a Gradio chat interface
+# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+
+# if __name__ == "__main__":
+#     demo.launch()
 
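Note on the committed version: ChatOpenAI expects temperature as a float, and later langchain releases favor the model keyword over model_name (as the removed inline comment above itself notes), so the string '0.5' may trigger validation warnings or errors depending on the installed version. A minimal sketch of a stricter construction, assuming langchain is installed and OPENAI_API_KEY is set in the environment; this is an illustrative variant, not the committed code:

# Sketch only, not part of the commit: temperature passed as a float and
# the newer `model` keyword (an alias for `model_name` in langchain).
# Assumes OPENAI_API_KEY is available in the environment.
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")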