Upload 2 files
- app.py
- requirements.txt

app.py
ADDED
import os
import gradio as gr
import requests
import pandas as pd
import time
from pathlib import Path
from typing import Dict, Any, List, Optional, TypedDict, Annotated
import operator

# LangChain and LangGraph imports
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_openai import AzureChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_core.tools import tool
from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver

# Existing utility imports
from youtube_transcript_api import YouTubeTranscriptApi, NoTranscriptFound
from bs4 import BeautifulSoup
import pdfplumber
import docx
import speech_recognition as sr
import base64
import tempfile
import re
from io import BytesIO, StringIO
from dotenv import load_dotenv

load_dotenv()

# ------------------------------
# Configuration
# ------------------------------
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

api_key = os.getenv("AZURE_OPENAI_API_KEY")
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
azure_api_version = os.getenv("AZURE_OPENAI_API_VERSION")
azure_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")
azure_model_name = os.getenv("AZURE_OPENAI_MODEL_NAME")

# Initialize Azure OpenAI LLM
llm = AzureChatOpenAI(
    deployment_name=azure_deployment_name,
    model_name=azure_model_name,
    temperature=0.0,
    top_p=0.1,
    azure_endpoint=azure_endpoint,
    api_key=api_key,
    api_version=azure_api_version,
)

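# All five AZURE_OPENAI_* settings above must be present (via .env locally or
# Space secrets on Hugging Face); AzureChatOpenAI raises at module import when
# the endpoint or key is missing, which would surface as the Space failing to
# start with a "Runtime error".
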
# ------------------------------
# State Definition
# ------------------------------
class AgentState(TypedDict):
    messages: Annotated[List[Any], operator.add]
    question: str
    task_id: str
    file_name: str
    file_type: Optional[str]
    file_url: Optional[str]
    final_answer: Optional[str]
    agent_used: Optional[str]
    reasoning: Optional[str]

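# The Annotated[..., operator.add] reducer makes LangGraph concatenate the
# "messages" lists returned by nodes onto the running list; every other field
# is a plain value that the latest node update overwrites.
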
# ------------------------------
# Tool Functions
# ------------------------------

def transcribe_audio(content: bytes) -> str:
    """Transcribe audio from bytes to text."""
    try:
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as mp3_tmp:
            mp3_tmp.write(content)
            mp3_path = mp3_tmp.name

        wav_path = mp3_path.replace(".mp3", ".wav")

        try:
            from pydub import AudioSegment
            audio = AudioSegment.from_mp3(mp3_path)
            audio.export(wav_path, format="wav")
            audio_file = wav_path
        except ImportError:
            audio_file = mp3_path

        recognizer = sr.Recognizer()
        with sr.AudioFile(audio_file) as source:
            audio = recognizer.record(source)
        transcript = recognizer.recognize_google(audio)

        for path in [mp3_path, wav_path]:
            if os.path.exists(path):
                os.remove(path)

        return f"Audio Transcript: {transcript}"

    except Exception as e:
        print(f"Audio transcription error: {e}")
        return "Could not transcribe audio"

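# Caveat: the pydub conversion requires ffmpeg to be installed on the host.
# Without pydub, the raw .mp3 is handed to sr.AudioFile, which only accepts
# WAV/AIFF/FLAC, so that fallback path ends in the error message above.
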
@tool
def parse_file_tool(file_url: str, file_name: str) -> str:
    """Parse various file types and extract content."""
    try:
        if len(file_name) > 0:
            file_type = Path(file_name).suffix.lower()
            file_type = file_type.split("?")[0]
        else:
            file_type = None

        if file_type:
            resp = requests.get(file_url, timeout=30)
            resp.raise_for_status()
            content = resp.content

            # Excel Files
            if file_type in [".xlsx", ".xls"]:
                try:
                    df = pd.read_excel(BytesIO(content))
                    return f"Excel Content:\n{df.head(10).to_string(index=False)}"
                except Exception as e:
                    return f"Excel parsing error: {str(e)}"

            # CSV Files
            elif file_type == ".csv":
                try:
                    df = pd.read_csv(BytesIO(content))
                    return f"CSV Content:\n{df.head(10).to_string(index=False)}"
                except Exception as e:
                    return f"CSV parsing error: {str(e)}"

            # Text Files
            elif file_type == ".txt":
                text = content.decode(errors='ignore')
                return f"Text Content:\n{text[:5000]}"

            # PDF Files
            elif file_type == ".pdf":
                try:
                    with pdfplumber.open(BytesIO(content)) as pdf:
                        text = "\n".join(page.extract_text() or "" for page in pdf.pages[:5])
                    return f"PDF Content:\n{text[:5000]}"
                except Exception as e:
                    return f"PDF parsing error: {str(e)}"

            # DOCX Files
            elif file_type == ".docx":
                try:
                    d = docx.Document(BytesIO(content))
                    text = "\n".join(p.text for p in d.paragraphs[:100])
                    return f"DOCX Content:\n{text[:5000]}"
                except Exception as e:
                    return f"DOCX parsing error: {str(e)}"

            # MP3 Files
            elif file_type == ".mp3":
                return transcribe_audio(content)

            # Python Files
            elif file_type == ".py":
                text = content.decode(errors='ignore')
                return f"Python Code:\n{text[:5000]}"

            else:
                return f"Unsupported file type: {file_type}"
        else:
            return "No file type provided or file URL is invalid."
    except Exception as e:
        print(f"[parse_file_tool] ERROR: {e}")
        return f"File parsing failed: {str(e)}"

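# Output is deliberately truncated (first 10 rows of tabular files, first 5
# PDF pages, first 100 DOCX paragraphs, 5000 characters of text) so the
# extracted content stays small enough to fit in the LLM prompt.
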
@tool
def youtube_transcript_tool(url: str) -> str:
    """Extract transcript from YouTube video."""
    try:
        # Handle both youtube.com/watch?v=<id> and youtu.be/<id> URLs
        if "v=" in url:
            video_id = url.split("v=")[-1].split("&")[0]
        else:
            video_id = url.rstrip("/").split("/")[-1].split("?")[0]
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        return " ".join([e['text'] for e in transcript])
    except NoTranscriptFound:
        return "No transcript available for this video"
    except Exception as e:
        return f"Error retrieving transcript: {str(e)}"

def scrape_text_from_url(url: str, max_chars=4000) -> str:
    """Fetch and clean main text from a webpage."""
    try:
        resp = requests.get(url, timeout=10)
        soup = BeautifulSoup(resp.text, 'html.parser')
        text = ' '.join(soup.stripped_strings)
        return text[:max_chars]
    except Exception as e:
        return f"Could not scrape {url}: {e}"

@tool
def web_search_tool(question: str) -> str:
    """Perform web search using DuckDuckGo and scrape results."""
    try:
        from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
        ddg_spec = DuckDuckGoSearchToolSpec()
        results = ddg_spec.duckduckgo_full_search(question) or []

        if not isinstance(results, list):
            return "No search results found."

        max_results = 10
        min_chars = 400
        max_chars = 4000

        for entry in results[:max_results]:
            href = entry.get("href", "")
            if not href:
                continue

            text = scrape_text_from_url(href, max_chars=max_chars)
            if text.startswith("Could not scrape") or len(text) < min_chars:
                continue

            return (
                f"Here is content scraped from {href}:\n\n"
                f"{text}\n\n"
                "Based on this, please answer the original question."
            )

        # Fallback to search result metadata
        if not results:
            return "No search results found."

        summary_lines = []
        for idx, entry in enumerate(results[:max_results], start=1):
            title = entry.get("title") or "Untitled result"
            snippet = (entry.get("body") or "").replace("\n", " ")[:160]
            href = entry.get("href")
            summary_lines.append(f"{idx}. {title} – {snippet} ({href})")

        return (
            "I could not successfully scrape any of the top pages. "
            "Here are the top DuckDuckGo results:\n\n"
            + "\n".join(summary_lines)
            + "\n\nPlease answer the original question using this list."
        )
    except Exception as e:
        return f"Web search failed: {str(e)}"

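# Dependency note: this tool imports llama_index.tools.duckduckgo, provided by
# the llama-index-tools-duckduckgo package. requirements.txt only lists
# duckduckgo-search, so unless that extra package is installed the import
# fails and every call returns "Web search failed: ...".
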
@tool
def image_processing_tool(file_url: str, question: str) -> str:
    """Process image and answer questions about it using Azure Vision."""
    try:
        print(f"Processing image from URL: {file_url}")
        resp = requests.get(file_url, timeout=30)
        resp.raise_for_status()
        raw = resp.content

        mime = resp.headers.get("Content-Type", "image/png")
        img_b64 = base64.b64encode(raw).decode()
        data_uri = f"data:{mime};base64,{img_b64}"

        print("Image downloaded and encoded successfully.")

        from openai import AzureOpenAI
        vision_client = AzureOpenAI(
            api_key=api_key,
            api_version=azure_api_version,
            azure_endpoint=azure_endpoint,
        )

        messages = [
            {"role": "system", "content": "You are a vision expert. Answer based only on the image content."},
            {"role": "user", "content": [
                {"type": "text", "text": question},
                {"type": "image_url", "image_url": {"url": data_uri}}
            ]},
        ]

        response = vision_client.chat.completions.create(
            model=azure_model_name,
            messages=messages,
            temperature=0.0,
            max_tokens=2000,
        )

        print(f"Vision API response received: {response.choices[0].message.content.strip()}")
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Vision API error: {e}"

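# Assumptions: the deployment behind AZURE_OPENAI_MODEL_NAME must be
# vision-capable (e.g. a gpt-4o family deployment). With the AzureOpenAI
# client, the model= argument names the *deployment*, so this call only
# resolves if the deployment shares the model's name.
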
# ------------------------------
# Agent Functions
# ------------------------------

# Prompt template and answer parsing (kept inline here; could be split out
# into a separate prompts.py module).
SCORER_TEMPLATE = """You are a general AI assistant.
Answer the question and finish with:
FINAL ANSWER: <your answer>

Formatting rules:
• numbers: digits only, no commas/units unless requested
• strings: no articles/abbreviations, digits in plain text
• for lists: same rules per element, comma-separated, no spaces
"""

def make_prompt(extra_instruction: str = "") -> ChatPromptTemplate:
    return ChatPromptTemplate.from_messages([
        ("system", SCORER_TEMPLATE + "\n" + extra_instruction),
        ("human", "{human_input}")
    ])

def extract_final_answer(text: str) -> str:
    # robust to quotes, stray whitespace, different capitalisation
    m = re.search(r"FINAL ANSWER:\s*(.+)", text, re.I | re.S)
    ans = m.group(1).strip() if m else text.strip()
    # strip surrounding quotes/backticks
    return re.sub(r'^[\'"`\s]+|[\'"`\s]+$', "", ans)

+
def router_agent(state: AgentState) -> AgentState:
|
326 |
+
"""Router agent that determines which specialized agent to use."""
|
327 |
+
question = state["question"]
|
328 |
+
file_name = state.get("file_name", "")
|
329 |
+
|
330 |
+
# Check for files
|
331 |
+
if file_name:
|
332 |
+
file_type = Path(file_name).suffix.lower().split("?")[0] if len(file_name)>0 else None
|
333 |
+
|
334 |
+
# Image files
|
335 |
+
if file_type in ['.jpg', '.jpeg', '.png', '.bmp', '.gif', '.webp']:
|
336 |
+
return {
|
337 |
+
**state,
|
338 |
+
"agent_used": "image_agent",
|
339 |
+
"reasoning": f"Image file detected: {file_name}"
|
340 |
+
}
|
341 |
+
# Other files
|
342 |
+
else:
|
343 |
+
return {
|
344 |
+
**state,
|
345 |
+
"agent_used": "file_agent",
|
346 |
+
"reasoning": f"File detected: {file_name} (type: {file_type})"
|
347 |
+
}
|
348 |
+
|
349 |
+
# Check for YouTube links
|
350 |
+
if "youtube.com" in question.lower() or "youtu.be" in question.lower():
|
351 |
+
return {
|
352 |
+
**state,
|
353 |
+
"agent_used": "youtube_agent",
|
354 |
+
"reasoning": "YouTube link detected in question"
|
355 |
+
}
|
356 |
+
|
357 |
+
# Check if question contains all needed information (self-contained)
|
358 |
+
self_contained_indicators = [
|
359 |
+
"reverse", "backward", "opposite", "calculate", "math", "add", "subtract",
|
360 |
+
"multiply", "divide", "cipher", "decode", "encode", "spell", "count"
|
361 |
+
]
|
362 |
+
|
363 |
+
if any(indicator in question.lower() for indicator in self_contained_indicators):
|
364 |
+
# Additional check: does it seem like it needs external info?
|
365 |
+
external_indicators = ["who is", "when did", "where is", "what year", "latest", "current"]
|
366 |
+
if not any(indicator in question.lower() for indicator in external_indicators):
|
367 |
+
return {
|
368 |
+
**state,
|
369 |
+
"agent_used": "reasoning_agent",
|
370 |
+
"reasoning": "Question appears self-contained, no external data needed"
|
371 |
+
}
|
372 |
+
|
373 |
+
# Default to web search
|
374 |
+
return {
|
375 |
+
**state,
|
376 |
+
"agent_used": "web_search_agent",
|
377 |
+
"reasoning": "Question requires external knowledge - using web search"
|
378 |
+
}
|
379 |
+
|
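# Routing examples (keyword heuristics):
#   "Reverse the string 'rewsna'"          -> reasoning_agent ("reverse", no external cue)
#   "Who is the president of France?"      -> web_search_agent (default branch)
#   any question with file_name="img.png"  -> image_agent
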
def reasoning_agent(state: AgentState) -> Dict[str, Any]:
    """Agent for self-contained reasoning tasks."""
    question = state["question"]

    extra_sys = """You are a reasoning expert. Answer questions that can be
solved with logic, mathematics, or text manipulation without external data."""
    prompt = make_prompt(extra_sys)

    human_block = question

    content = (prompt | llm).invoke({"human_input": human_block}).content
    final_answer = extract_final_answer(content)

    return {
        "final_answer": final_answer,
        # Return only the new message; the operator.add reducer appends it.
        "messages": [AIMessage(content=content)]
    }

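# file_agent, youtube_agent, web_search_agent and image_agent below follow the
# same pattern as reasoning_agent: gather context with a tool, build a prompt,
# call the LLM, extract the FINAL ANSWER line, and return a partial state
# update that LangGraph merges into AgentState.
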
def file_agent(state: AgentState) -> Dict[str, Any]:
    """Agent for processing various file types."""
    question = state["question"]
    file_url = state.get("file_url")
    file_name = state.get("file_name", "")

    if not file_url:
        return {
            "final_answer": "No file URL provided",
            "messages": [AIMessage(content="No file URL provided")]
        }

    # Parse the file
    file_content = parse_file_tool.invoke({"file_url": file_url, "file_name": file_name})

    extra_sys = """You are a file analysis expert. Based on the file content provided,
answer the user's question accurately and concisely."""
    prompt = make_prompt(extra_sys)

    human_block = f"Question: {question}\n\nFile Content:\n{file_content}"

    content = (prompt | llm).invoke({"human_input": human_block}).content

    final_answer = extract_final_answer(content)

    return {
        "final_answer": final_answer,
        "messages": [AIMessage(content=content)]
    }

def youtube_agent(state: AgentState) -> Dict[str, Any]:
    """Agent for processing YouTube video transcripts."""
    question = state["question"]

    # Extract YouTube URL from question
    youtube_pattern = r'(https?://(?:www\.)?(?:youtube\.com/watch\?v=|youtu\.be/)[\w-]+)'
    urls = re.findall(youtube_pattern, question)

    if not urls:
        return {
            "final_answer": "No YouTube URL found in question",
            "messages": [AIMessage(content="No YouTube URL found")]
        }

    # Get transcript
    transcript = youtube_transcript_tool.invoke({"url": urls[0]})

    extra_sys = """You are a YouTube content expert. Based on the video transcript provided,
answer the user's question accurately and concisely."""
    prompt = make_prompt(extra_sys)

    human_block = f"Question: {question}\n\nTranscript: {transcript}"

    content = (prompt | llm).invoke({"human_input": human_block}).content

    final_answer = extract_final_answer(content)

    return {
        "final_answer": final_answer,
        "messages": [AIMessage(content=content)]
    }

def web_search_agent(state: AgentState) -> Dict[str, Any]:
    """Agent for web search and information retrieval."""
    question = state["question"]

    # Perform web search
    search_results = web_search_tool.invoke({"question": question})

    extra_sys = """You are a web search expert. Based on the search results provided,
answer the user's question accurately and concisely."""
    prompt = make_prompt(extra_sys)

    human_block = f"Question: {question}\n\nSearch Results: {search_results}"

    content = (prompt | llm).invoke({"human_input": human_block}).content

    final_answer = extract_final_answer(content)

    return {
        "final_answer": final_answer,
        "messages": [AIMessage(content=content)]
    }

def image_agent(state: AgentState) -> Dict[str, Any]:
    """Agent for processing images."""
    question = state["question"]
    file_url = state.get("file_url")

    if not file_url:
        return {
            "final_answer": "No image URL provided",
            "messages": [AIMessage(content="No image URL provided")]
        }

    # Process the image
    image_analysis = image_processing_tool.invoke({"file_url": file_url, "question": question})

    extra_sys = """You are an image analysis expert. Based on the image analysis provided,
answer the user's question accurately and concisely."""
    prompt = make_prompt(extra_sys)

    human_block = f"Question: {question}\n\nImage Analysis: {image_analysis}"

    content = (prompt | llm).invoke({"human_input": human_block}).content

    final_answer = extract_final_answer(content)

    return {
        "final_answer": final_answer,
        "messages": [AIMessage(content=content)]
    }

# ------------------------------
# Conditional Logic
# ------------------------------

def route_to_agent(state: AgentState) -> str:
    """Route to the appropriate agent based on the router's decision."""
    agent_used = state.get("agent_used")

    if agent_used == "reasoning_agent":
        return "reasoning_agent"
    elif agent_used == "file_agent":
        return "file_agent"
    elif agent_used == "youtube_agent":
        return "youtube_agent"
    elif agent_used == "image_agent":
        return "image_agent"
    else:
        return "web_search_agent"

def should_end(state: AgentState) -> str:
    """Check if we have a final answer and should end."""
    if state.get("final_answer"):
        return END
    else:
        return "router"

# ------------------------------
# Graph Construction
# ------------------------------

def create_agent_graph():
    """Create and return the agent graph."""
    workflow = StateGraph(AgentState)

    # Add nodes
    workflow.add_node("router", router_agent)
    workflow.add_node("reasoning_agent", reasoning_agent)
    workflow.add_node("file_agent", file_agent)
    workflow.add_node("youtube_agent", youtube_agent)
    workflow.add_node("web_search_agent", web_search_agent)
    workflow.add_node("image_agent", image_agent)

    # Add edges
    workflow.add_edge(START, "router")
    workflow.add_conditional_edges("router", route_to_agent)

    # All agents go to end
    workflow.add_edge("reasoning_agent", END)
    workflow.add_edge("file_agent", END)
    workflow.add_edge("youtube_agent", END)
    workflow.add_edge("web_search_agent", END)
    workflow.add_edge("image_agent", END)

    # Compile the graph
    memory = MemorySaver()
    graph = workflow.compile(checkpointer=memory)

    return graph

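# Graph topology (sketch):
#
#   START -> router -+-> reasoning_agent  -+
#                    +-> file_agent        +
#                    +-> youtube_agent     +-> END
#                    +-> web_search_agent  +
#                    +-> image_agent      -+
#
# should_end is currently unused: every specialist node has a direct edge to
# END, so each question makes exactly one router hop and one agent hop.
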
# ------------------------------
# Main Agent Class
# ------------------------------

class LangGraphAgent:
    def __init__(self):
        """Initialize the LangGraph agent."""
        self.graph = create_agent_graph()
        self.api_url = DEFAULT_API_URL

    def __call__(self, question: str, task_id: str, file_name: str, file_type: str = None) -> str:
        """
        Main method to process a question and return an answer.

        Args:
            question (str): The question to answer
            task_id (str): Task ID for file retrieval
            file_name (str): Name of the file associated with the question
            file_type (str): Type of the file (e.g., .pdf, .docx, etc.)
        Returns:
            str: The answer to the question
        """
        try:
            # Guard against a null file_name from the API
            file_name = file_name or ""

            # Prepare initial state
            initial_state = {
                "messages": [HumanMessage(content=question)],
                "question": question,
                "task_id": task_id,
                "file_name": file_name,
                "file_type": Path(file_name).suffix.lower().split("?")[0] if len(file_name) > 0 else None,
                "file_url": f"{self.api_url}/files/{task_id}" if len(file_name) > 0 else None,
                "final_answer": None,
                "agent_used": None,
                "reasoning": None
            }

            print(f"Processing question: {question}")
            if len(file_name) > 0:
                print(f"File detected: {file_name} (type: {file_type})")

            # Run the graph
            config = {"configurable": {"thread_id": task_id}}
            result = self.graph.invoke(initial_state, config=config)

            final_answer = result.get("final_answer", "No answer generated")
            agent_used = result.get("agent_used", "unknown")
            reasoning = result.get("reasoning", "")

            print(f"Agent used: {agent_used}")
            print(f"Reasoning: {reasoning}")
            print(f"Final answer: {final_answer}")
            print("=" * 80)

            return final_answer

        except Exception as e:
            print(f"Error in LangGraphAgent.__call__: {e}")
            return f"Error processing question: {str(e)}"

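# Standalone usage (sketch; assumes the Azure env vars are configured):
#   agent = LangGraphAgent()
#   print(agent("What is the opposite of 'left'?", task_id="local-test", file_name=""))
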
# ------------------------------
# Gradio Interface Functions
# ------------------------------

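# Gradio auto-injects the logged-in user's profile into any parameter typed
# gr.OAuthProfile when a gr.LoginButton is present, which is why
# run_button.click() below passes no explicit inputs.
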
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the LangGraphAgent on them, submits all answers,
    and displays the results.
    """
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent
    try:
        agent = LangGraphAgent()
        print("LangGraphAgent instantiated successfully.")
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name", "") or ""

        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue

        try:
            file_type = Path(file_name).suffix.lower().split("?")[0] if len(file_name) > 0 else None

            # Call the agent
            submitted_answer = agent(question_text, task_id, file_name, file_type)

            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df

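# Submission payload shape, as built above:
#   {"username": "<hf-username>",
#    "agent_code": "https://huggingface.co/spaces/<space_id>/tree/main",
#    "answers": [{"task_id": "...", "submitted_answer": "..."}, ...]}
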
# ------------------------------
# Gradio Interface
# ------------------------------

# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the submit button, it can take quite some time (this is the time the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long-running submit action, you could cache the answers and submit them in a separate step, or answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )

if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)
requirements.txt
ADDED
gradio
requests
torch==2.7.0
wikipedia==1.4.0
openpyxl==3.1.5
python-docx==1.1.2
youtube_transcript_api==1.0.3
SpeechRecognition==3.14.3
pdfplumber==0.11.6
# note: do not add the legacy "docx" package here; it shadows the python-docx
# package that app.py actually imports as docx
beautifulsoup4
python-dotenv
gradio[oauth]
langchain==0.3.25
langchain-openai==0.3.18
langgraph==0.4.7
langchain-community==0.3.24
duckduckgo-search
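# Possible gaps (judging from imports in app.py): web_search_tool imports
# llama_index.tools.duckduckgo, which appears to need the
# llama-index-tools-duckduckgo package, and transcribe_audio optionally uses
# pydub (plus a system ffmpeg); neither is pinned here. pandas and openai are
# pulled in transitively by gradio and langchain-openai.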