Update app.py
app.py
CHANGED
@@ -145,13 +145,61 @@
 # if __name__ == "__main__":
 # demo.launch()
 
+# import os
+# import gradio as gr
+# from langchain_openai import ChatOpenAI
+# from langchain.prompts import PromptTemplate
+# from langchain.memory import ConversationBufferMemory
+# from langchain.schema import AIMessage, HumanMessage
+# from langchain.chains import RunnableSequence
+
+# # Set OpenAI API Key
+# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+
+# # Define the template for the chatbot's response
+# template = """You are a helpful assistant to answer all user queries.
+# {chat_history}
+# User: {user_message}
+# Chatbot:"""
+
+# # Define the prompt template
+# prompt = PromptTemplate(
+#     input_variables=["chat_history", "user_message"],
+#     template=template
+# )
+
+# # Initialize conversation memory (following migration guide)
+# memory = ConversationBufferMemory(return_messages=True)  # Use return_messages=True for updated usage
+
+# # Define the LLM (language model)
+# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
+
+# # Create the RunnableSequence instead of LLMChain
+# llm_sequence = prompt | llm  # This pipelines the prompt into the language model
+
+# # Function to get chatbot response
+# def get_text_response(user_message, history):
+#     # Prepare the conversation history
+#     chat_history = [HumanMessage(content=user_message)]
+
+#     # Pass the prompt and history to the language model sequence
+#     response = llm_sequence.invoke({"chat_history": history, "user_message": user_message})
+
+#     return response
+
+# # Create a Gradio chat interface
+# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+
+# if __name__ == "__main__":
+# demo.launch()
+
 import os
 import gradio as gr
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferMemory
 from langchain.schema import AIMessage, HumanMessage
-from langchain.chains import RunnableSequence
+from langchain import Runnable  # Using Runnable instead of RunnableSequence
 
 # Set OpenAI API Key
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
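Note on the hunk above: the commented-out block keeps the previous LLMChain-era version for reference, and the removed active import would not have worked either, since RunnableSequence lives in langchain_core.runnables, not langchain.chains. The import is also unnecessary: the | operator in prompt | llm already builds a RunnableSequence. A minimal sketch of that composition, assuming langchain-openai is installed and OPENAI_API_KEY is set in the environment:

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template="You are a helpful assistant to answer all user queries.\n"
             "{chat_history}\nUser: {user_message}\nChatbot:",
)
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")

chain = prompt | llm  # the pipe operator itself produces a RunnableSequence
reply = chain.invoke({"chat_history": "", "user_message": "Hello!"})
print(reply.content)  # ChatOpenAI returns an AIMessage, not a plain string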
@@ -174,8 +222,8 @@ memory = ConversationBufferMemory(return_messages=True) # Use return_messages=T
 # Define the LLM (language model)
 llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
 
-# Create the RunnableSequence instead of LLMChain
-llm_sequence = prompt | llm  # This pipelines the prompt into the language model
+# Create the Runnable instead of RunnableSequence
+llm_runnable = Runnable(lambda inputs: prompt.format(**inputs)) | llm
 
 # Function to get chatbot response
 def get_text_response(user_message, history):
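Note on the hunk above: this is the change most likely to fail at runtime. Runnable is an abstract base class, it is not exported from the top-level langchain package (so the import added in the first hunk raises ImportError), and it cannot be instantiated around a lambda. The stock wrapper for a plain function is RunnableLambda from langchain_core.runnables; a sketch of a working equivalent, reusing the file's prompt and llm:

from langchain_core.runnables import RunnableLambda

# Wrap the formatting step, then pipe the resulting string into the model.
llm_runnable = RunnableLambda(lambda inputs: prompt.format(**inputs)) | llm

Since PromptTemplate is itself a Runnable, the plain prompt | llm from the commented-out version does the same job without the lambda.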
@@ -183,7 +231,7 @@ def get_text_response(user_message, history):
     chat_history = [HumanMessage(content=user_message)]
 
     # Pass the prompt and history to the language model sequence
-    response = llm_sequence.invoke({"chat_history": history, "user_message": user_message})
+    response = llm_runnable.invoke({"chat_history": history, "user_message": user_message})
 
     return response
 
@@ -195,3 +243,4 @@ if __name__ == "__main__":
 
 
 
+
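Note on get_text_response as committed: it builds chat_history = [HumanMessage(content=user_message)] but never uses it, passes Gradio's raw history list straight into the prompt, and returns the model's AIMessage object rather than its text; the memory object is likewise initialized but never read. Its two-argument (user_message, history) signature matches gr.ChatInterface rather than gr.Interface. An end-to-end sketch under the same stack (hypothetical wiring, since the live file's Gradio section falls outside the hunks shown, and the tuple-style chat history format is assumed):

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template="You are a helpful assistant to answer all user queries.\n"
             "{chat_history}\nUser: {user_message}\nChatbot:",
)
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")  # reads OPENAI_API_KEY from the environment
chain = prompt | llm

def get_text_response(user_message, history):
    # Flatten Gradio's [[user, bot], ...] pairs into the prompt text.
    chat_history = "\n".join(f"User: {u}\nChatbot: {a}" for u, a in history)
    response = chain.invoke({"chat_history": chat_history, "user_message": user_message})
    return response.content  # return the text, not the AIMessage object

# ChatInterface calls fn(message, history), matching the signature above.
demo = gr.ChatInterface(get_text_response)

if __name__ == "__main__":
    demo.launch()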