Spaces: Sleeping
EtienneB committed · Commit 1669f2b · 1 Parent(s): 9349849
updated new app.py and agent.py
to keep app.py as original as possible.
- __pycache__/agent.cpython-313.pyc +0 -0
- __pycache__/tools.cpython-313.pyc +0 -0
- agent.py +78 -0
- app.py +14 -3
- requirements.txt +2 -1
- tools.py +1 -5
__pycache__/agent.cpython-313.pyc
CHANGED
Binary files a/__pycache__/agent.cpython-313.pyc and b/__pycache__/agent.cpython-313.pyc differ
__pycache__/tools.cpython-313.pyc
CHANGED
Binary files a/__pycache__/tools.cpython-313.pyc and b/__pycache__/tools.cpython-313.pyc differ
agent.py
CHANGED
@@ -0,0 +1,78 @@
+import os
+
+from dotenv import load_dotenv
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_core.tools import tool
+from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
+from langgraph.graph import START, MessagesState, StateGraph
+from langgraph.prebuilt import ToolNode, tools_condition
+
+from tools import (absolute, add, compound_interest, convert_temperature,
+                   divide, exponential, factorial, floor_divide,
+                   get_current_time_in_timezone, greatest_common_divisor,
+                   is_prime, least_common_multiple, logarithm, modulus,
+                   multiply, percentage_calculator, power,
+                   roman_calculator_converter, square_root, subtract,
+                   web_search)
+
+# Load Constants
+load_dotenv()
+HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+
+
+tools = [
+    multiply, add, subtract, power, divide, modulus,
+    square_root, floor_divide, absolute, logarithm,
+    exponential, web_search, roman_calculator_converter,
+    get_current_time_in_timezone, compound_interest,
+    convert_temperature, factorial, greatest_common_divisor,
+    is_prime, least_common_multiple, percentage_calculator
+]
+
+def build_graph():
+    """Build the graph"""
+
+    # First create the HuggingFaceEndpoint
+    llm_endpoint = HuggingFaceEndpoint(
+        repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
+        huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
+        temperature=0.1,  # Lower temperature for more consistent responses
+        max_new_tokens=1024,
+        timeout=30,
+    )
+
+    # Then wrap it with ChatHuggingFace to get chat model functionality
+    llm = ChatHuggingFace(llm=llm_endpoint)
+
+    # Bind tools to LLM
+    llm_with_tools = llm.bind_tools(tools)
+
+    # Node
+    def assistant(state: MessagesState):
+        """Assistant node"""
+        return {"messages": [llm_with_tools.invoke(state["messages"])]}
+
+
+    builder = StateGraph(MessagesState)
+    builder.add_node("assistant", assistant)
+    builder.add_node("tools", ToolNode(tools))
+    builder.add_conditional_edges(
+        "assistant",
+        tools_condition,
+    )
+    builder.add_edge(START, "assistant")
+    builder.add_edge("tools", "assistant")
+
+    # Compile graph
+    return builder.compile()
+
+# test
+if __name__ == "__main__":
+    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
+    # Build the graph
+    graph = build_graph()
+    # Run the graph
+    messages = [HumanMessage(content=question)]
+    messages = graph.invoke({"messages": messages})
+    for m in messages["messages"]:
+        m.pretty_print()
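Note on the new agent.py: SystemMessage and tool are imported but never used, so the graph sends the conversation to the model with no system prompt. A minimal, hypothetical sketch (not part of this commit) of wiring a system prompt into the assistant node inside build_graph(), reusing the names already defined there; it assumes the intended reply format is something like "FINAL ANSWER: ...", which would also explain the fixed-prefix slicing app.py does below:

    # Hypothetical sketch, not in this commit: pin the answer format via a system prompt.
    SYSTEM_PROMPT = SystemMessage(
        content="Use the tools when helpful and begin your final reply with 'FINAL ANSWER: '."
    )

    def assistant(state: MessagesState):
        """Assistant node with a system prompt prepended (sketch)."""
        return {"messages": [llm_with_tools.invoke([SYSTEM_PROMPT] + state["messages"])]}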
app.py
CHANGED
@@ -4,6 +4,10 @@ import os
 import gradio as gr
 import pandas as pd
 import requests
+# Additional libraries
+from langchain_core.messages import HumanMessage
+
+from agent import build_graph
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -14,11 +18,18 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
+        self.graph = build_graph()
+
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-
-
-
+        # Wrap the question in a HumanMessage from langchain_core
+        messages = [HumanMessage(content=question)]
+        messages = self.graph.invoke({"messages": messages})
+        answer = messages['messages'][-1].content
+        return answer[14:]
+
+
+
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
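The new BasicAgent.__call__ returns answer[14:], i.e. it drops the first 14 characters of the model's final message. That presumably strips a fixed prefix such as "FINAL ANSWER: " (exactly 14 characters), which is an assumption about the reply format rather than something the graph guarantees. A more defensive sketch, under that same assumption, strips the prefix by name:

    # Sketch only (hypothetical helper, not in this commit): strip an expected
    # "FINAL ANSWER: " prefix instead of relying on a fixed 14-character offset.
    def extract_answer(raw: str, prefix: str = "FINAL ANSWER: ") -> str:
        """Return the text after the prefix if present, otherwise the raw text."""
        return raw[len(prefix):] if raw.startswith(prefix) else raw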
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 # UI and OAuth
-gradio
+gradio
 requests
 pandas
 
@@ -8,6 +8,7 @@ langchain
 langchain-core
 langchain-community
 langgraph
+langchain-huggingface
 
 # Hugging Face integration
 huggingface_hub
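The added langchain-huggingface requirement backs the `from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint` line in agent.py. A quick, optional sanity check after installing the updated requirements (pip install -r requirements.txt) might look like:

    # Optional check, not part of the repo: confirm the new dependency and the
    # token agent.py expects are both available in the current environment.
    import os
    from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint  # noqa: F401

    assert os.getenv("HUGGINGFACEHUB_API_TOKEN"), "HUGGINGFACEHUB_API_TOKEN is not set"
    print("langchain-huggingface imports OK")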
tools.py
CHANGED
@@ -1,6 +1,5 @@
 import datetime
 import math
-import re
 from typing import Union
 
 import pytz
@@ -350,9 +349,7 @@ def get_current_time_in_timezone(timezone: str) -> str:
         return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
-
-
-@tool
+@tool
 def factorial(n: int) -> Union[int, str]:
     """Calculates the factorial of a non-negative integer.
 
@@ -551,4 +548,3 @@ def convert_temperature(value: Union[int, float], from_unit: str, to_unit: str)
         return round(result, 2)
     except Exception as e:
         return f"Error converting temperature: {str(e)}"
-