Bhaskar2611 committed on
Commit
d5a8216
·
verified ·
1 Parent(s): b307974

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -11
app.py CHANGED
@@ -1,21 +1,40 @@
1
  import os
2
- import openai
3
  import gradio as gr
 
 
 
 
4
 
5
# Set OpenAI API Key
# Read the key from the environment and hand it to the legacy openai SDK.
# NOTE(review): os.getenv returns None when the variable is unset — the SDK
# would then fail at request time, not here; confirm deployment sets it.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
def get_text_response(user_message, history):
    """Send *user_message* to gpt-3.5-turbo and return the assistant's reply.

    `history` is accepted to satisfy the Gradio callback signature but is
    not used — each call is a fresh, stateless exchange.
    """
    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_message},
    ]
    # Legacy (pre-1.0) openai SDK call style.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=chat_messages,
    )
    return completion['choices'][0]['message']['content']
19
 
20
# Create a Gradio chat interface
# NOTE(review): get_text_response takes (user_message, history) but a plain
# gr.Interface with a single "text" input calls the function with one
# argument — verify this doesn't raise TypeError on submit; gr.ChatInterface
# may have been intended.
demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
 
1
  import os
 
2
  import gradio as gr
3
+ from langchain.chat_models import ChatOpenAI
4
+ from langchain.prompts import PromptTemplate
5
+ from langchain.chains import LLMChain
6
+ from langchain.memory import ConversationBufferMemory
7
 
8
# Set OpenAI API Key
# ChatOpenAI reads OPENAI_API_KEY from the environment itself; this binding
# only surfaces the value (it may be None if the variable is unset).
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Define the template for the chatbot's response.
# {chat_history} is filled in by the chain's memory; {user_message} by the caller.
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""

# Define the prompt template
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template
)

# Initialize conversation memory
# NOTE(review): this memory object is module-level and therefore shared by
# every chat session served by this process — concurrent users would see
# each other's history; confirm single-user use is intended.
memory = ConversationBufferMemory(memory_key="chat_history")

# Define the LLM chain with the ChatOpenAI model and conversation memory
llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
    prompt=prompt,
    verbose=True,  # logs prompts to stdout on every call
    memory=memory,
)
# Function to get chatbot response
def get_text_response(user_message, history):
    """Run the LLM chain on *user_message* and return the model's reply.

    `history` is required by the Gradio callback signature but unused here:
    conversation state is tracked by the chain's ConversationBufferMemory.
    """
    return llm_chain.predict(user_message=user_message)
 
 
 
 
 
 
 
38
 
39
# Create a Gradio chat interface.
# Bug fix: get_text_response takes (user_message, history), but
# gr.Interface(inputs="text") invokes the callback with a single argument,
# raising TypeError on every submit. gr.ChatInterface supplies exactly
# (message, history) and renders a proper chat UI, matching both the
# function's signature and the comment's stated intent.
demo = gr.ChatInterface(fn=get_text_response)