Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,19 +1,43 @@
|
|
1 |
import os
|
|
|
2 |
import gradio as gr
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
|
|
|
|
6 |
|
7 |
# rulo additionally
|
8 |
-
from langchain_core.messages import HumanMessage
|
9 |
-
from agent import build_graph
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
-
# Rulo
|
12 |
|
13 |
# (Keep Constants as is)
|
14 |
# --- Constants ---
|
15 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
# --- Basic Agent Definition ---
|
18 |
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
|
19 |
|
@@ -22,17 +46,85 @@ class BasicAgent:
|
|
22 |
def __init__(self):
    """Compile the LangGraph pipeline this agent will run."""
    # self.graph = build_graph("huggingface")
    self.graph = build_graph()
    print("BasicAgent initialized.")
|
26 |
|
27 |
def __call__(self, question: str) -> str:
    """Run the compiled graph on *question* and return the answer text.

    The system prompt asks the model to reply as ``FINAL ANSWER: <answer>``;
    that prefix is stripped before returning.
    """
    print(f"Agent received question (first 50 chars): {question[:50]}...")
    # Wrap the question in a HumanMessage from langchain_core
    messages = [HumanMessage(content=question)]
    # Keep the graph result in its own name instead of rebinding `messages`.
    result = self.graph.invoke({"messages": messages})
    answer = result['messages'][-1].content
    # Strip the expected "FINAL ANSWER: " prefix robustly. The original
    # `answer[14:]` sliced 14 characters unconditionally, which corrupted
    # any answer that did not carry the prefix.
    return answer.removeprefix("FINAL ANSWER: ")
|
35 |
-
|
36 |
|
37 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
38 |
"""
|
|
|
1 |
import os
|
2 |
+
from dotenv import load_dotenv
|
3 |
import gradio as gr
|
4 |
import requests
|
5 |
import inspect
|
6 |
import pandas as pd
|
7 |
+
import re as re
|
8 |
+
|
9 |
|
10 |
# rulo additionally
|
11 |
+
#from langchain_core.messages import HumanMessage
|
12 |
+
#from agent import build_graph
|
13 |
+
from langgraph.prebuilt import create_react_agent
|
14 |
+
from langchain_core.prompts import ChatPromptTemplate
|
15 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
16 |
+
from langchain_community.tools.tavily_search import TavilySearchResults
|
17 |
+
from langchain_community.tools import DuckDuckGoSearchResults
|
18 |
+
from langgraph.graph import START, StateGraph, MessagesState
|
19 |
+
from langchain_core.messages import SystemMessage, HumanMessage
|
20 |
+
from langgraph.prebuilt import tools_condition
|
21 |
+
from langgraph.prebuilt import ToolNode
|
22 |
+
from langchain_core.tools import tool
|
23 |
+
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
|
24 |
+
from langchain_community.document_loaders import WikipediaLoader
|
25 |
+
from langchain_community.document_loaders import ArxivLoader
|
26 |
+
|
27 |
+
load_dotenv()
|
28 |
|
|
|
29 |
|
30 |
# (Keep Constants as is)
|
31 |
# --- Constants ---
|
32 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
33 |
|
34 |
+
# Load the system prompt from the file. It instructs the model on the
# expected answer format (presumably "FINAL ANSWER: ..." — see the [14:]
# strip in the agents; TODO confirm against system_prompt.txt).
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    _system_prompt_text = f.read()

# Wrap the raw text in a SystemMessage. A private name is used for the
# intermediate string so `system_prompt` is not rebound across two types;
# the public name `system_prompt` keeps its final value for existing users.
system_prompt = SystemMessage(content=_system_prompt_text)
|
40 |
+
|
41 |
# --- Basic Agent Definition ---
|
42 |
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
|
43 |
|
|
|
46 |
def __init__(self):
    """No-op setup: the graph-based agent path is currently disabled."""
    # self.graph = build_graph("huggingface")
    # self.graph = build_graph()
    print("BasicAgent initialized.")
|
50 |
|
51 |
def __call__(self, question: str) -> str:
    """Ignore *question* and return a canned placeholder answer.

    The graph-based path (HumanMessage -> self.graph.invoke) is disabled;
    AssignmentAgent carries the live implementation.
    """
    print(f"Agent received question (first 50 chars): {question[:50]}...")
    fixed_answer = "This is a default answer."
    print(f"Agent returning fixed answer: {fixed_answer}")
    return fixed_answer
|
61 |
+
|
62 |
+
class AssignmentAgent:
    """ReAct agent backed by Gemini, equipped with web and Wikipedia search."""

    def __init__(self, system_prompt):
        """Build the LLM, the tool set, and the prebuilt ReAct agent.

        Args:
            system_prompt: Prompt passed to ``create_react_agent`` that
                steers the model's answer format.
        """
        # if "ANTHROPIC_API_KEY" not in os.environ:
        #     raise Exception("No Anthropic API key in environment")

        # Earlier experiment, kept for reference:
        # llm = ChatAnthropic(
        #     model="claude-3-5-sonnet-20240620",
        #     temperature=0,
        #     max_tokens=1024,
        #     timeout=None,
        #     max_retries=2,
        # )

        llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0,
            max_retries=2,
        )
        # llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0)

        web_search = DuckDuckGoSearchResults()

        @tool
        def wiki_search(query: str) -> str:
            """Search Wikipedia for a query and return maximum 2 results.

            Args:
                query: The search query.
            """
            search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
            formatted_search_docs = "\n\n---\n\n".join(
                f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
                for doc in search_docs
            )
            # Return the string itself, matching the -> str annotation.
            # The previous {"wiki_results": ...} dict contradicted the
            # signature and was stringified by the tool runtime anyway.
            return formatted_search_docs

        tools = [
            web_search,
            wiki_search,
        ]

        agent = create_react_agent(llm, tools, prompt=system_prompt)
        self.llm = llm
        # NOTE(review): the original stored the `tool` decorator here, not the
        # tool instances — kept byte-identical for backward compatibility,
        # but it looks like a mistake; `self.tools` below holds the real list.
        self.tool = tool
        self.tools = tools
        self.agent = agent
        print("AssignmentAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the ReAct agent on *question* and return the final answer text."""
        print(f"AssignmentAgent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from Langchain_core
        messages = [HumanMessage(content=question)]
        result = self.agent.invoke({"messages": messages})
        # pattern = re.compile(".*<answer>(.*)</answer>")  # older extraction idea
        answer = result['messages'][-1].content
        # Strip the "FINAL ANSWER: " prefix the system prompt requests instead
        # of blindly slicing 14 characters (`answer[14:]`), which corrupted
        # answers that lacked the prefix.
        return answer.removeprefix("FINAL ANSWER: ")
|
127 |
+
|
128 |
|
129 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
130 |
"""
|