Update app.py

app.py CHANGED
@@ -241,6 +241,57 @@
 # if __name__ == "__main__":
 # demo.launch()
 
+# import os
+# import subprocess
+# import gradio as gr
+
+# # Install necessary packages
+# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
+
+# from langchain_openai import ChatOpenAI
+# from langchain.prompts import PromptTemplate
+# from langchain.chains import LLMChain
+# from langchain.memory import ConversationBufferMemory
+
+# # Set OpenAI API Key
+# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+
+# # Define the template for the chatbot's response
+# template = """You are a helpful assistant to answer all user queries.
+# {chat_history}
+# User: {user_message}
+# Chatbot:"""
+
+# # Define the prompt template
+# prompt = PromptTemplate(
+#     input_variables=["chat_history", "user_message"],
+#     template=template
+# )
+
+# # Initialize conversation memory
+# memory = ConversationBufferMemory(memory_key="chat_history")
+
+# # Define the LLM chain with the ChatOpenAI model and conversation memory
+# llm_chain = LLMChain(
+#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
+#     prompt=prompt,
+#     verbose=True,
+#     memory=memory,
+# )
+
+# # Function to get chatbot response
+# def get_text_response(user_message, history):
+#     # Prepare the conversation history
+#     chat_history = history + [f"User: {user_message}"]
+#     response = llm_chain.predict(user_message=user_message, chat_history=chat_history)
+#     return response
+
+# # Create a Gradio chat interface
+# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+
+# if __name__ == "__main__":
+# demo.launch()
+
 import os
 import subprocess
 import gradio as gr
@@ -250,8 +301,8 @@ subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
 
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
+from langchain_core.runnables import RunnableSequence  # RunnableSequence lives in langchain_core, not langchain.chains
 
 # Set OpenAI API Key
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
@@ -271,23 +322,23 @@ prompt = PromptTemplate(
 # Initialize conversation memory
 memory = ConversationBufferMemory(memory_key="chat_history")
 
-# Define the LLM chain with the ChatOpenAI model and conversation memory
-llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
-    prompt=prompt,
-    verbose=True,
-    memory=memory,
-)
+# Define the runnable sequence: the prompt piped into the chat model
+chatbot_runnable = RunnableSequence(prompt | ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"))
 
 # Function to get chatbot response
-def get_text_response(user_message, history):
+def get_text_response(user_message, history=None):
+    # Ensure history is a list
+    if history is None:
+        history = []
+
     # Prepare the conversation history
     chat_history = history + [f"User: {user_message}"]
-    response = llm_chain.predict(user_message=user_message, chat_history=chat_history)
-    return response
+    response = chatbot_runnable.invoke({"chat_history": "\n".join(chat_history), "user_message": user_message}).content  # .content unwraps the AIMessage to plain text
+
+    return response, chat_history + [f"Chatbot: {response}"]  # reply plus updated history for the state output
 
 # Create a Gradio chat interface
-demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+demo = gr.Interface(fn=get_text_response, inputs=["text", "state"], outputs=["text", "state"])  # pair the "state" input with a "state" output so history persists
 
 if __name__ == "__main__":
     demo.launch()
@@ -296,3 +347,4 @@ if __name__ == "__main__":
 
 
 
+
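A quick usage sketch of the reworked flow (illustrative, not part of the commit): with the diff above applied and OPENAI_API_KEY set in the environment, get_text_response can be exercised directly, threading the returned history back in the same way Gradio's "state" component does between turns. The example prompts below are assumptions.

# Illustrative only: assumes app.py above is importable and OPENAI_API_KEY is set.
reply, state = get_text_response("What is LangChain?")              # first turn: no prior state
reply, state = get_text_response("Answer in one sentence.", state)  # second turn: reuse the returned state

print(reply)
print(state)  # e.g. ["User: What is LangChain?", "Chatbot: ...", "User: Answer in one sentence.", "Chatbot: ..."]

Design note: prompt | ChatOpenAI(...) already produces a RunnableSequence, so the explicit RunnableSequence(...) wrapper is redundant though harmless; it is kept here to match the commit's intent.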