import os
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
# Read the OpenAI API key from the environment
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# Define the template for the chatbot's response
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""
# Define the prompt template
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"],
    template=template
)
# Initialize conversation memory
memory = ConversationBufferMemory(memory_key="chat_history")
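# Note: ConversationBufferMemory stores the running transcript under the
# "chat_history" key, matching the {chat_history} variable in the template,
# so previous turns are injected into every new prompt automatically.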
# Define the LLM chain with the ChatOpenAI model and conversation memory
llm_chain = LLMChain(
    llm=ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0.5, model="gpt-3.5-turbo"),
    prompt=prompt,
    verbose=True,
    memory=memory,
)
# Return the chatbot's reply; `history` is supplied by Gradio's chat interface,
# but conversation state is tracked by `memory` above, so it is unused here
def get_text_response(user_message, history):
    response = llm_chain.predict(user_message=user_message)
    return response
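# Sketch of how memory accumulates across direct calls (hypothetical messages,
# assuming a valid OPENAI_API_KEY is available):
#   get_text_response("Hi, I'm Ada", [])      # first turn is recorded in memory
#   get_text_response("What's my name?", [])  # prompt now includes the first turn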
# Create a Gradio chat interface; gr.ChatInterface expects fn(message, history),
# which matches get_text_response (plain gr.Interface would pass only one input)
demo = gr.ChatInterface(fn=get_text_response)
if __name__ == "__main__":
    demo.launch()
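# To run locally (assuming this file is saved as app.py and the key is set):
#   export OPENAI_API_KEY=sk-...
#   python app.py
# Gradio serves the chat UI at http://127.0.0.1:7860 by default.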