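# ===========================================================================
# Earlier iterations of this Space's app are preserved below, commented out.
# Each attempt is labeled with a short note; the live code is at the bottom.
#
# Attempt 1: legacy LLMChain + ConversationBufferMemory, importing ChatOpenAI
# from langchain.chat_models (since deprecated in favor of langchain_openai).
# ===========================================================================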
# import os
# import gradio as gr
# from langchain.chat_models import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM chain with the ChatOpenAI model and conversation memory
# llm_chain = LLMChain(
#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     response = llm_chain.predict(user_message=user_message)
#     return response
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
# if __name__ == "__main__":
#     demo.launch()
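# ---------------------------------------------------------------------------
# Bootstrap experiments: delegating dependency installation and startup to a
# run.sh script via subprocess.
# ---------------------------------------------------------------------------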
# import subprocess
# # Run the Bash script that installs dependencies and runs the app
# subprocess.run(['./run.sh'])
# # Rest of your application code can go here
# import subprocess
# import os
# # Ensure the run.sh script has executable permissions
# # subprocess.run(['chmod', '+x', './run.sh'])
# # Run the Bash script that installs dependencies and runs the app
# # subprocess.run(['./run.sh'])
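# ---------------------------------------------------------------------------
# Attempt 2: switch to langchain_openai and an LCEL pipe (prompt | llm).
# Likely failure points: RunnableSequence is not exported by langchain.chains,
# the pipe is called like a function instead of via .invoke(), and the result
# is indexed as response['text'] although the model returns a message object.
# ---------------------------------------------------------------------------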
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.chains import RunnableSequence
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM (language model)
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# # Define the chain using RunnableSequence (replace LLMChain)
# llm_chain = prompt | llm  # Chaining the prompt and the LLM
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     inputs = {"chat_history": history, "user_message": user_message}
#     response = llm_chain(inputs)
#     return response['text']
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
# if __name__ == "__main__":
#     demo.launch()
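# ---------------------------------------------------------------------------
# Attempt 3: back to LLMChain, now with the langchain_openai import.
# ---------------------------------------------------------------------------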
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.chains import LLMChain
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM (language model) and chain
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# llm_chain = LLMChain(
#     llm=llm,
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     response = llm_chain.predict(user_message=user_message)
#     return response
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
# if __name__ == "__main__":
#     demo.launch()
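# ---------------------------------------------------------------------------
# Attempt 4: LCEL pipe with .invoke(). Returns the raw AIMessage object to
# Gradio instead of a string, and the memory object is never wired in.
# ---------------------------------------------------------------------------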
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.schema import AIMessage, HumanMessage
# from langchain.chains import RunnableSequence
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory (following migration guide)
# memory = ConversationBufferMemory(return_messages=True)  # Use return_messages=True for updated usage
# # Define the LLM (language model)
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# # Create the RunnableSequence instead of LLMChain
# llm_sequence = prompt | llm  # This pipelines the prompt into the language model
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     # Prepare the conversation history
#     chat_history = [HumanMessage(content=user_message)]
#     # Pass the prompt and history to the language model sequence
#     response = llm_sequence.invoke({"chat_history": history, "user_message": user_message})
#     return response
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
# if __name__ == "__main__":
#     demo.launch()
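# ---------------------------------------------------------------------------
# Attempt 5: wrap a lambda in Runnable. `from langchain import Runnable` is
# not a valid import, so this presumably failed with an ImportError.
# ---------------------------------------------------------------------------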
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.schema import AIMessage, HumanMessage
# from langchain import Runnable  # Using Runnable instead of RunnableSequence
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory (following migration guide)
# memory = ConversationBufferMemory(return_messages=True)  # Use return_messages=True for updated usage
# # Define the LLM (language model)
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# # Create the Runnable instead of RunnableSequence
# llm_runnable = Runnable(lambda inputs: prompt.format(**inputs)) | llm
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     # Prepare the conversation history
#     chat_history = [HumanMessage(content=user_message)]
#     # Pass the prompt and history to the language model sequence
#     response = llm_runnable.invoke({"chat_history": history, "user_message": user_message})
#     return response
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
# if __name__ == "__main__":
#     demo.launch()
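# ---------------------------------------------------------------------------
# Attempt 6: install packages at runtime and pass chat_history to predict()
# explicitly; that is redundant here, since the memory already supplies (and
# likely overrides) the chat_history key.
# ---------------------------------------------------------------------------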
# import os
# import subprocess
# import gradio as gr
# # Install necessary packages
# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM chain with the ChatOpenAI model and conversation memory
# llm_chain = LLMChain(
#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     # Prepare the conversation history
#     chat_history = history + [f"User: {user_message}"]
#     response = llm_chain.predict(user_message=user_message, chat_history=chat_history)
#     return response
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")
# if __name__ == "__main__":
#     demo.launch()
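# ---------------------------------------------------------------------------
# Attempt 7: wrap the pipe in RunnableSequence(...). The import path
# langchain.chains is wrong for Runnable/RunnableSequence (they live in
# langchain_core.runnables), so this presumably failed to import.
# ---------------------------------------------------------------------------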
# import os
# import subprocess
# import gradio as gr
# # Install necessary packages
# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.chains import Runnable, RunnableSequence
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the runnable sequence
# chatbot_runnable = RunnableSequence(prompt | ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"))
# # Function to get chatbot response
# def get_text_response(user_message, history=None):
#     # Ensure history is a list
#     if history is None:
#         history = []
#     # Prepare the conversation history
#     chat_history = history + [f"User: {user_message}"]
#     response = chatbot_runnable.invoke({"chat_history": "\n".join(chat_history), "user_message": user_message})
#     return response
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs=["text", "state"], outputs="text")
# if __name__ == "__main__":
#     demo.launch()
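# ---------------------------------------------------------------------------
# Attempt 8: call the ChatOpenAI instance directly with a dict and index the
# result like a raw OpenAI response; ChatOpenAI returns message objects, not
# a {'choices': ...} dict, so the indexing fails.
# ---------------------------------------------------------------------------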
# import os
# import subprocess
# import gradio as gr
# # Install necessary packages
# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Function to get chatbot response
# def get_text_response(user_message, history=None):
#     # Ensure history is a list
#     if history is None:
#         history = []
#     # Prepare the conversation history
#     chat_history = history + [f"User: {user_message}"]
#     llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
#     response = llm({"chat_history": "\n".join(chat_history), "user_message": user_message})
#     return response['choices'][0]['message']['content']
# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs=["text", "state"], outputs="text")
# if __name__ == "__main__":
#     demo.launch()
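# ---------------------------------------------------------------------------
# Attempt 9: same call pattern as attempt 8, but also returning the updated
# history through a Gradio "state" output.
# ---------------------------------------------------------------------------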
# import os
# import subprocess
# import gradio as gr
# # Install necessary packages
# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Function to get chatbot response
# def get_text_response(user_message, history=None):
#     # Ensure history is a list
#     if history is None:
#         history = []
#     # Prepare the conversation history
#     chat_history = history + [f"User: {user_message}"]
#     llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
#     response = llm({"chat_history": "\n".join(chat_history), "user_message": user_message})
#     # Return the response and updated history
#     return response['choices'][0]['message']['content'], chat_history
# # Create a Gradio chat interface
# demo = gr.Interface(
#     fn=get_text_response,
#     inputs=["text", "state"],
#     outputs=["text", "state"],
# )
# if __name__ == "__main__":
#     demo.launch()
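# ---------------------------------------------------------------------------
# Attempt 10: format the prompt to a string and use llm.invoke(), but still
# index the result like a dict; invoke() returns an AIMessage whose text is
# in .content.
# ---------------------------------------------------------------------------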
# import os
# import subprocess
# import gradio as gr
# # Install necessary packages
# subprocess.check_call(["pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Function to get chatbot response
# def get_text_response(user_message, history=None):
#     # Ensure history is a list
#     if history is None:
#         history = []
#     # Prepare the conversation history
#     chat_history = history + [f"User: {user_message}"]
#     # Create the full prompt string
#     full_prompt = prompt.format(chat_history="\n".join(chat_history), user_message=user_message)
#     llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
#     # Use the invoke method instead of __call__
#     response = llm.invoke(full_prompt)
#     # Return the response and updated history
#     return response['choices'][0]['message']['content'], chat_history
# # Create a Gradio chat interface
# demo = gr.Interface(
#     fn=get_text_response,
#     inputs=["text", "state"],
#     outputs=["text", "state"],
# )
# if __name__ == "__main__":
#     demo.launch()
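# ===========================================================================
# Live version: format the prompt manually, call ChatOpenAI.invoke(), and
# let gr.ChatInterface manage the conversation history.
# ===========================================================================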
import os
import gradio as gr
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate

# OpenAI API key: ChatOpenAI reads OPENAI_API_KEY from the environment.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Define the template for the chatbot's response
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""

# Define the prompt template
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template
)

# Define the LLM (language model) once at module level
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")

# Function to get chatbot response
def get_text_response(user_message, history):
    # gr.ChatInterface passes history as a list of [user, assistant] pairs.
    lines = []
    for user_turn, bot_turn in history or []:
        lines.append(f"User: {user_turn}")
        lines.append(f"Chatbot: {bot_turn}")
    # Create the full prompt string
    full_prompt = prompt.format(chat_history="\n".join(lines), user_message=user_message)
    # invoke() returns an AIMessage; its text is in .content, not a dict
    response = llm.invoke(full_prompt)
    # ChatInterface manages the history itself, so return only the reply string
    return response.content

# Create a Gradio chat interface (ChatInterface keeps the conversation state)
demo = gr.ChatInterface(get_text_response)

if __name__ == "__main__":
    demo.launch()
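
# ---------------------------------------------------------------------------
# Optional follow-up (untested sketch, not part of the running app): the same
# bot expressed as an LCEL chain. Piping prompt | llm | StrOutputParser makes
# the chain return a plain string, so no manual .content access is needed.
# The name get_text_response_lcel is just for illustration.
# ---------------------------------------------------------------------------
# from langchain_core.output_parsers import StrOutputParser
#
# chain = prompt | llm | StrOutputParser()
#
# def get_text_response_lcel(user_message, history):
#     lines = []
#     for user_turn, bot_turn in history or []:
#         lines.append(f"User: {user_turn}")
#         lines.append(f"Chatbot: {bot_turn}")
#     # .invoke() runs prompt formatting, the model call, and output parsing.
#     return chain.invoke({"chat_history": "\n".join(lines),
#                          "user_message": user_message})
#
# demo = gr.ChatInterface(get_text_response_lcel)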