Update app.py
app.py CHANGED
@@ -100,12 +100,58 @@
 # demo.launch()
 
 
+# import os
+# import gradio as gr
+# from langchain_openai import ChatOpenAI
+# from langchain.prompts import PromptTemplate
+# from langchain.memory import ConversationBufferMemory
+# from langchain.chains import LLMChain
+
+# # Set OpenAI API Key
+# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+
+# # Define the template for the chatbot's response
+# template = """You are a helpful assistant to answer all user queries.
+# {chat_history}
+# User: {user_message}
+# Chatbot:"""
+
+# # Define the prompt template
+# prompt = PromptTemplate(
+#     input_variables=["chat_history", "user_message"],
+#     template=template
+# )
+
+# # Initialize conversation memory
+# memory = ConversationBufferMemory(memory_key="chat_history")
+
+# # Define the LLM (language model) and chain
+# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
+# llm_chain = LLMChain(
+#     llm=llm,
+#     prompt=prompt,
+#     verbose=True,
+#     memory=memory,
+# )
+
+# # Function to get chatbot response
+# def get_text_response(user_message, history):
+#     response = llm_chain.predict(user_message=user_message)
+#     return response
+
+# # Create a Gradio chat interface
+# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
+
+# if __name__ == "__main__":
+#     demo.launch()
+
 import os
 import gradio as gr
 from langchain_openai import ChatOpenAI
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferMemory
-from langchain.chains import LLMChain
+from langchain.schema import AIMessage, HumanMessage
+from langchain_core.runnables import RunnableSequence
 
 # Set OpenAI API Key
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
@@ -122,21 +168,23 @@ prompt = PromptTemplate(
 
 
     template=template
 )
 
-# Initialize conversation memory
-memory = ConversationBufferMemory(memory_key="chat_history")
+# Initialize conversation memory (following migration guide)
+memory = ConversationBufferMemory(return_messages=True)  # Use return_messages=True for updated usage
 
-# Define the LLM (language model) and chain
+# Define the LLM (language model)
 llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
-llm_chain = LLMChain(
-    llm=llm,
-    prompt=prompt,
-    verbose=True,
-    memory=memory,
-)
+
+# Create the RunnableSequence instead of LLMChain
+llm_sequence = prompt | llm  # This pipes the prompt into the language model
 
 # Function to get chatbot response
 def get_text_response(user_message, history):
-    response = llm_chain.predict(user_message=user_message)
+    # Wrap the incoming message as a HumanMessage (currently unused below)
+    chat_history = [HumanMessage(content=user_message)]
+
+    # Fill the prompt with the history and new message, then run the LLM
+    response = llm_sequence.invoke({"chat_history": history, "user_message": user_message}).content  # .content extracts the reply text
+
     return response
 
 # Create a Gradio chat interface
@@ -146,3 +194,4 @@ if __name__ == "__main__":
 
 
     demo.launch()
 
 
+
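
For reference, the prompt | llm pipe that this commit adopts in place of LLMChain can be exercised outside the Space. A minimal sketch, assuming langchain-openai is installed and OPENAI_API_KEY is set in the environment; the format_history helper and the sample history are illustrative, not part of the committed file:

import os
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate

# Same template shape as app.py: prior transcript plus the new user message.
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template,
)

llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")

# The pipe operator builds a RunnableSequence: invoke() fills the prompt,
# and the rendered prompt is forwarded to the chat model.
chain = prompt | llm

def format_history(history):
    # Gradio-style history is a list of (user, bot) pairs; flatten it into
    # the plain-text transcript the template expects.
    return "\n".join(f"User: {u}\nChatbot: {b}" for u, b in history)

history = [("Hi", "Hello! How can I help?")]
reply = chain.invoke({
    "chat_history": format_history(history),
    "user_message": "What did I just ask you?",
})
print(reply.content)  # ChatOpenAI returns an AIMessage; .content holds the text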
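
The commit also switches ConversationBufferMemory to return_messages=True, although the new get_text_response does not read from the buffer yet. A small illustrative check of what that flag changes, using the classic langchain memory API:

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(return_messages=True)
memory.save_context({"input": "Hi"}, {"output": "Hello! How can I help?"})

# With return_messages=True the buffer comes back as message objects
# (HumanMessage/AIMessage) rather than one pre-formatted string.
print(memory.load_memory_variables({}))
# roughly: {'history': [HumanMessage(content='Hi'), AIMessage(content='Hello! How can I help?')]}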