Update app.py
app.py CHANGED
@@ -193,13 +193,65 @@
 # if __name__ == "__main__":
 #     demo.launch()
 
+# import os
+# import gradio as gr
+# from langchain_openai import ChatOpenAI
+# from langchain.prompts import PromptTemplate
+# from langchain.memory import ConversationBufferMemory
+# from langchain.schema import AIMessage, HumanMessage
+# from langchain import Runnable  # Using Runnable instead of RunnableSequence
+
+# # Set OpenAI API Key
+# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+
+# # Define the template for the chatbot's response
+# template = """You are a helpful assistant to answer all user queries.
+# {chat_history}
+# User: {user_message}
+# Chatbot:"""
+
+# # Define the prompt template
+# prompt = PromptTemplate(
+#     input_variables=["chat_history", "user_message"],
+#     template=template
+# )
+
+# # Initialize conversation memory (following migration guide)
+# memory = ConversationBufferMemory(return_messages=True)  # Use return_messages=True for updated usage
+
+# # Define the LLM (language model)
+# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
+
+# # Create the Runnable instead of RunnableSequence
+# llm_runnable = Runnable(lambda inputs: prompt.format(**inputs)) | llm
+
+# # Function to get chatbot response
+# def get_text_response(user_message, history):
+#     # Prepare the conversation history
+#     chat_history = [HumanMessage(content=user_message)]
+
+#     # Pass the prompt and history to the language model sequence
+#     response = llm_runnable.invoke({"chat_history": history, "user_message": user_message})
+
+#     return response
+
+# # Create a Gradio chat interface
+# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+
+# if __name__ == "__main__":
+#     demo.launch()
+
 import os
+import subprocess
 import gradio as gr
+
+# Install necessary packages
+subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
+
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
+from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
-from langchain.schema import AIMessage, HumanMessage
-from langchain import Runnable  # Using Runnable instead of RunnableSequence
 
 # Set OpenAI API Key
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
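
Note: the block commented out above most likely failed because "from langchain import Runnable" is not a valid import; Runnable lives in langchain_core.runnables, and the idiomatic way to get the same composition is the LCEL pipe operator. A minimal sketch of that equivalent, assuming langchain-openai and a recent langchain-core are installed (not part of this commit; the template mirrors the one in this diff):

# LCEL sketch: prompt | llm builds a RunnableSequence, so no separate
# Runnable import is needed.
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "You are a helpful assistant to answer all user queries.\n"
    "{chat_history}\n"
    "User: {user_message}\n"
    "Chatbot:"
)

chain = prompt | ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")

# .invoke() returns an AIMessage; .content holds the reply text.
reply = chain.invoke({"chat_history": "", "user_message": "Hello!"})
print(reply.content)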
@@ -216,23 +268,22 @@ prompt = PromptTemplate(
     template=template
 )
 
-# Initialize conversation memory (following migration guide)
-memory = ConversationBufferMemory(return_messages=True)  # Use return_messages=True for updated usage
-
-# Define the LLM (language model)
-llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
+# Initialize conversation memory
+memory = ConversationBufferMemory(memory_key="chat_history")
 
-# Create the Runnable instead of RunnableSequence
-llm_runnable = Runnable(lambda inputs: prompt.format(**inputs)) | llm
+# Define the LLM chain with the ChatOpenAI model and conversation memory
+llm_chain = LLMChain(
+    llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
+    prompt=prompt,
+    verbose=True,
+    memory=memory,
+)
 
 # Function to get chatbot response
 def get_text_response(user_message, history):
     # Prepare the conversation history
-    chat_history = [HumanMessage(content=user_message)]
-
-    # Pass the prompt and history to the language model sequence
-    response = llm_runnable.invoke({"chat_history": history, "user_message": user_message})
-
+    chat_history = history + [f"User: {user_message}"]
+    response = llm_chain.predict(user_message=user_message, chat_history=chat_history)
     return response
 
 # Create a Gradio chat interface
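
Note: because the memory is created with memory_key="chat_history", LLMChain loads the running history into the {chat_history} prompt slot on every call, so passing chat_history explicitly from get_text_response is redundant (the memory-supplied value typically overrides explicit inputs when the chain prepares its variables). A minimal sketch of the memory-driven call, using the same components as the diff (requires OPENAI_API_KEY in the environment):

# Memory-driven LLMChain sketch: only the new message is passed;
# the memory fills {chat_history} and records each turn automatically.
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template="{chat_history}\nUser: {user_message}\nChatbot:",
)

memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),
    prompt=prompt,
    memory=memory,
)

print(llm_chain.predict(user_message="Hello!"))
print(llm_chain.predict(user_message="What did I just say?"))

Newer LangChain releases deprecate LLMChain and ConversationBufferMemory in favor of LCEL pipelines with message history, but the pattern above still runs on the versions this commit installs.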
@@ -244,3 +295,4 @@ if __name__ == "__main__":
 
 
 
+
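
Note: get_text_response(user_message, history) has the two-argument signature Gradio's chat components call with, while the commented-out gr.Interface(fn=get_text_response, inputs="text", outputs="text") supplies only one input. A minimal sketch wiring the same function to gr.ChatInterface, assuming a Gradio version that ships ChatInterface; the echo stub is hypothetical and stands in for the llm_chain call:

# Gradio ChatInterface sketch: ChatInterface calls fn(message, history),
# matching get_text_response's signature directly.
import gradio as gr

def get_text_response(user_message, history):
    # Stubbed reply; swap in llm_chain.predict(user_message=user_message)
    # for the real app.
    return f"Echo: {user_message}"

demo = gr.ChatInterface(fn=get_text_response)

if __name__ == "__main__":
    demo.launch()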