# app.py — Gradio + LangGraph chat demo (Hugging Face Space by DexterSptizu)
# Commit f767699 (verified): "Create app.py", 2.18 kB
import os
import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
def create_chat_app(api_key):
    """Build and compile a LangGraph chat application backed by GPT-4o-mini.

    Args:
        api_key: OpenAI API key used to authenticate the chat model.

    Returns:
        A compiled LangGraph app whose state is ``MessagesState`` and whose
        conversation history is persisted in an in-memory checkpointer,
        keyed by the ``thread_id`` supplied at invoke time.
    """
    # Initialize the chat model.
    # NOTE: the original source had a stray "[1]" citation artifact here
    # (`ChatOpenAI(...)[1]`) which would index into the model object and raise.
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        api_key=api_key,
        temperature=0,
    )

    # Define the graph over the standard message-list state.
    workflow = StateGraph(state_schema=MessagesState)

    def call_model(state: MessagesState):
        # Forward the accumulated message history to the model and append
        # its reply to the state.
        response = llm.invoke(state["messages"])
        return {"messages": response}

    # Single-node graph: START -> "model".
    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

    # In-memory checkpointer gives per-thread conversation memory.
    memory = MemorySaver()
    return workflow.compile(checkpointer=memory)
def chat(message, history, api_key, thread_id):
    """Gradio chat callback: run one user turn through the LangGraph app.

    Args:
        message: The user's latest message text.
        history: Gradio-managed chat history (unused; LangGraph keeps its
            own history via the checkpointer).
        api_key: OpenAI API key entered by the user.
        thread_id: Conversation key selecting which stored history to use.

    Returns:
        The assistant's reply text, or a human-readable error string.
    """
    if not api_key:
        return "Please enter your OpenAI API key first."
    try:
        # NOTE(review): the app (and its MemorySaver) is rebuilt on every
        # turn, so cross-turn memory only survives within this process if
        # the checkpointer were shared; consider caching the compiled app.
        app = create_chat_app(api_key)

        # Route this turn to the per-thread conversation history.
        # (Original source had stray "[1]" artifacts on the next two
        # statements, which would raise KeyError at runtime.)
        config = {"configurable": {"thread_id": thread_id}}
        input_messages = [HumanMessage(message)]
        output = app.invoke({"messages": input_messages}, config)

        # The final message in the state is the model's reply.
        return output["messages"][-1].content
    except Exception as e:
        # Surface failures (bad key, network, etc.) in the chat window
        # instead of crashing the UI.
        return f"Error: {str(e)}"
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# LangChain Chat with Message History")
    with gr.Row():
        api_key = gr.Textbox(
            label="OpenAI API Key",
            placeholder="Enter your OpenAI API key",
            type="password",
        )
        thread_id = gr.Textbox(
            label="Thread ID",
            value="default_thread",
            placeholder="Enter a unique thread ID",
        )
    # Pass the textboxes as additional_inputs so `chat` receives their
    # *current* values. The original used a lambda reading `api_key.value`,
    # which only yields the components' initial values (empty key), never
    # what the user typed. It also had a stray "[2]" citation artifact
    # indexing into the ChatInterface.
    chatbot = gr.ChatInterface(
        fn=chat,
        additional_inputs=[api_key, thread_id],
        title="Chat with GPT-4o-mini",
    )

# Launch the application
demo.launch()