|
# Requires: pip install gradio langchain-openai langgraph
import gradio as gr

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
|
|
def create_chat_app(api_key):
    # Chat model; temperature=0 keeps replies deterministic.
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        api_key=api_key,
        temperature=0,
    )

    # Single-node graph over MessagesState: each turn appends the new
    # messages to the running history and calls the model once.
    workflow = StateGraph(state_schema=MessagesState)

    def call_model(state: MessagesState):
        response = llm.invoke(state["messages"])
        return {"messages": response}

    workflow.add_node("model", call_model)
    workflow.add_edge(START, "model")

    # MemorySaver checkpoints the conversation in memory, keyed by thread_id.
    memory = MemorySaver()
    return workflow.compile(checkpointer=memory)
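

# Compiled apps are cached per API key so that one MemorySaver instance, and
# the message history it checkpoints, survives across chat turns; rebuilding
# the graph on every message would start each turn with an empty history.
_apps = {}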
|
|
|
def chat(message, history, api_key, thread_id):
    if not api_key:
        return "Please enter your OpenAI API key first."

    try:
        if api_key not in _apps:
            _apps[api_key] = create_chat_app(api_key)
        app = _apps[api_key]

        # thread_id selects which saved conversation the checkpointer resumes.
        config = {"configurable": {"thread_id": thread_id}}

        input_messages = [HumanMessage(message)]
        output = app.invoke({"messages": input_messages}, config)

        # The final message in the returned state is the model's reply.
        return output["messages"][-1].content
    except Exception as e:
        return f"Error: {e}"
|
|
with gr.Blocks() as demo:
    gr.Markdown("# LangChain Chat with Message History")

    with gr.Row():
        api_key = gr.Textbox(
            label="OpenAI API Key",
            placeholder="Enter your OpenAI API key",
            type="password",
        )
        thread_id = gr.Textbox(
            label="Thread ID",
            value="default_thread",
            placeholder="Enter a unique thread ID",
        )

    # Wire the textboxes in through additional_inputs; reading api_key.value in
    # a lambda would capture only each field's initial value, never what the
    # user actually typed.
    chatbot = gr.ChatInterface(
        fn=chat,
        additional_inputs=[api_key, thread_id],
        title="Chat with GPT-4o-mini",
    )
|
demo.launch() |
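
# Usage note: with the same API key, re-entering an existing thread_id resumes
# that saved conversation, while a new thread_id starts a fresh history,
# because MemorySaver keys its checkpoints by thread_id. Passing share=True
# to demo.launch() would also expose the app at a temporary public URL.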