ai-puppy committed on
Commit
2473fee
·
1 Parent(s): 9fb199b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -8
app.py CHANGED
@@ -1,12 +1,25 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
8
 
9
 
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
@@ -15,8 +28,44 @@ def respond(
15
  temperature,
16
  top_p,
17
  ):
18
- # Simple modification: always respond with "haha"
19
- yield "haha"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
 
22
  """
@@ -25,7 +74,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
25
  demo = gr.ChatInterface(
26
  respond,
27
  additional_inputs=[
28
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
29
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
30
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
31
  gr.Slider(
 
import os

import gradio as gr
from dotenv import find_dotenv, load_dotenv
from langchain.chat_models import init_chat_model
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langgraph.prebuilt import create_react_agent
from langsmith import traceable
# Name of the OpenAI model backing the chat agent.
_MODEL_NAME = "gpt-4.1-nano-2025-04-14"

# Pull environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv(find_dotenv())

# Initialize the chat model.
# NOTE(review): assumes OPENAI_API_KEY is set in the environment — confirm.
openai_model = init_chat_model(
    model=_MODEL_NAME,
    api_key=os.getenv("OPENAI_API_KEY"),
)

# Build a ReAct-style agent around the model; the toolset is empty for
# now and can be extended later.
chat_agent = create_react_agent(openai_model, tools=[])
@traceable
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Chat handler for gr.ChatInterface: stream the agent's reply.

    Args:
        message: The current user message.
        history: Prior turns as (user_msg, assistant_msg) pairs.
        system_message: System prompt prepended to the conversation.
        max_tokens, temperature, top_p: Sampling controls from the UI.
            NOTE(review): these are currently not forwarded to the agent.

    Yields:
        Progressively longer response text while the agent streams, or an
        error string if the agent call fails.
    """
    try:
        # Seed the conversation with the system prompt.
        messages = [SystemMessage(content=system_message)]

        # Replay prior turns. BUGFIX: assistant turns must be AIMessage,
        # not SystemMessage — otherwise the model interprets its own past
        # replies as system instructions.
        for user_msg, assistant_msg in history:
            if user_msg:
                messages.append(HumanMessage(content=user_msg))
            if assistant_msg:
                messages.append(AIMessage(content=assistant_msg))

        # The current user message goes last.
        messages.append(HumanMessage(content=message))

        input_data = {"messages": messages}

        # stream_mode="values" emits the full message state at each step;
        # yield the latest message's content whenever it grows so the UI
        # shows incremental progress.
        response_text = ""
        for chunk in chat_agent.stream(input_data, stream_mode="values"):
            if "messages" in chunk and chunk["messages"]:
                latest_message = chunk["messages"][-1]
                if hasattr(latest_message, 'content'):
                    current_content = latest_message.content
                    if current_content and len(current_content) > len(response_text):
                        response_text = current_content
                        yield response_text

        # Fallback when streaming produced no content at all.
        if not response_text:
            yield "I'm sorry, I couldn't process your message. Please check your OpenAI API key."

    except Exception as e:
        # Surface the failure in the chat UI instead of crashing the app.
        yield f"Error: {str(e)}. Please make sure your OpenAI API key is set correctly."
 
70
 
71
  """
 
74
  demo = gr.ChatInterface(
75
  respond,
76
  additional_inputs=[
77
+ gr.Textbox(value="You are a helpful AI assistant. Be friendly, informative, and concise in your responses.", label="System message"),
78
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
79
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
80
  gr.Slider(