# import os
# import gradio as gr
# from langchain.chat_models import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM chain with the ChatOpenAI model and conversation memory
# llm_chain = LLMChain(
#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     response = llm_chain.predict(user_message=user_message)
#     return response
# # Create a Gradio chat interface (ChatInterface passes (message, history) to fn)
# demo = gr.ChatInterface(fn=get_text_response)
# if __name__ == "__main__":
#     demo.launch()
# import subprocess
# # Ensure the run.sh script has executable permissions
# subprocess.run(['chmod', '+x', './run.sh'])
# # Run the Bash script that installs dependencies and runs the app
# subprocess.run(['./run.sh'])
# # Rest of your application code can go here
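# # If the run.sh route is ever revisited: a script along these lines is assumed to
# # sit next to app.py (hypothetical sketch, not part of the current app):
# #   #!/bin/bash
# #   pip install -U langchain-openai langchain-community gradio
# #   python app.py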
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory (not wired into the pipeline below)
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM (language model)
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# # Chain the prompt into the LLM; the | operator builds a RunnableSequence
# # implicitly, so nothing needs to be imported for it (LLMChain replacement)
# llm_chain = prompt | llm
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     inputs = {"chat_history": history, "user_message": user_message}
#     # Runnables are invoked, not called; a chat model returns an AIMessage
#     response = llm_chain.invoke(inputs)
#     return response.content
# # Create a Gradio chat interface (ChatInterface passes (message, history) to fn)
# demo = gr.ChatInterface(fn=get_text_response)
# if __name__ == "__main__":
#     demo.launch()
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.chains import LLMChain
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")
# # Define the LLM (language model) and chain
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# llm_chain = LLMChain(
#     llm=llm,
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     response = llm_chain.predict(user_message=user_message)
#     return response
# # Create a Gradio chat interface (ChatInterface passes (message, history) to fn)
# demo = gr.ChatInterface(fn=get_text_response)
# if __name__ == "__main__":
#     demo.launch()
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.schema import AIMessage, HumanMessage
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory (following migration guide)
# memory = ConversationBufferMemory(return_messages=True)  # return_messages=True for updated usage
# # Define the LLM (language model)
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# # Pipe the prompt into the model; | builds a RunnableSequence implicitly,
# # so there is nothing to import from langchain.chains for this
# llm_sequence = prompt | llm
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     # Pass the Gradio history and the new message straight into the sequence
#     response = llm_sequence.invoke({"chat_history": history, "user_message": user_message})
#     return response.content  # the chat model returns an AIMessage
# # Create a Gradio chat interface (ChatInterface passes (message, history) to fn)
# demo = gr.ChatInterface(fn=get_text_response)
# if __name__ == "__main__":
#     demo.launch()
# import os
# import gradio as gr
# from langchain_openai import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.schema import AIMessage, HumanMessage
# from langchain_core.runnables import RunnableLambda  # the supported wrapper for plain functions
# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""
# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"],
#     template=template
# )
# # Initialize conversation memory (following migration guide)
# memory = ConversationBufferMemory(return_messages=True)  # return_messages=True for updated usage
# # Define the LLM (language model)
# llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")
# # Wrap the formatting step in a RunnableLambda and pipe it into the model
# llm_runnable = RunnableLambda(lambda inputs: prompt.format(**inputs)) | llm
# # Function to get chatbot response
# def get_text_response(user_message, history):
#     # Pass the Gradio history and the new message to the runnable pipeline
#     response = llm_runnable.invoke({"chat_history": history, "user_message": user_message})
#     return response.content  # the chat model returns an AIMessage
# # Create a Gradio chat interface (ChatInterface passes (message, history) to fn)
# demo = gr.ChatInterface(fn=get_text_response)
# if __name__ == "__main__":
#     demo.launch()
import os
import sys
import subprocess

# Install/upgrade the packages this app needs before importing them
subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "langchain-openai", "gradio", "langchain-community"])

import gradio as gr
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory

# Set OpenAI API Key (ChatOpenAI also reads OPENAI_API_KEY from the environment)
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Define the template for the chatbot's response
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""

# Define the prompt template
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template
)
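# Example (hypothetical values): prompt.format(chat_history="Human: Hi\nAI: Hello!",
# user_message="Thanks") renders the full template string that is sent to the model.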

# Initialize conversation memory
memory = ConversationBufferMemory(memory_key="chat_history")
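# Illustrative sketch of what the memory does: it records each turn and returns the
# transcript under its memory_key so LLMChain can fill the {chat_history} slot, e.g.
#   memory.save_context({"user_message": "Hi"}, {"text": "Hello!"})
#   memory.load_memory_variables({})  # -> {"chat_history": "Human: Hi\nAI: Hello!"}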

# Define the LLM chain with the ChatOpenAI model and conversation memory
llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
    prompt=prompt,
    verbose=True,
    memory=memory,
)
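# Because the chain owns the memory, each predict() call loads {chat_history} from the
# buffer and saves the new user/assistant turn back into it automatically.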

# Function to get chatbot response
def get_text_response(user_message, history):
    # The chain's memory supplies chat_history; passing it by hand would clash with it.
    # Gradio's history argument is unused because the memory tracks the conversation.
    response = llm_chain.predict(user_message=user_message)
    return response

# Create a Gradio chat interface; ChatInterface calls fn with (message, history)
demo = gr.ChatInterface(fn=get_text_response)

if __name__ == "__main__":
    demo.launch()
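
# For reference, a minimal sketch of the same chatbot on the non-deprecated LCEL APIs
# (LLMChain and ConversationBufferMemory are deprecated in recent LangChain releases).
# Untested sketch, kept commented out; names like `store` and `get_history` are illustrative.
# from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# from langchain_core.chat_history import InMemoryChatMessageHistory
# from langchain_core.runnables.history import RunnableWithMessageHistory
#
# lcel_prompt = ChatPromptTemplate.from_messages([
#     ("system", "You are a helpful assistant to answer all user queries."),
#     MessagesPlaceholder(variable_name="chat_history"),
#     ("human", "{user_message}"),
# ])
# store = {}  # session_id -> message history
# def get_history(session_id):
#     if session_id not in store:
#         store[session_id] = InMemoryChatMessageHistory()
#     return store[session_id]
# chat_chain = RunnableWithMessageHistory(
#     lcel_prompt | ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),
#     get_history,
#     input_messages_key="user_message",
#     history_messages_key="chat_history",
# )
# # chat_chain.invoke({"user_message": "Hi"},
# #                   config={"configurable": {"session_id": "default"}}).content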