Commit: path
app.py
CHANGED
@@ -348,6 +348,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         return f"An unexpected error occurred fetching questions: {e}", None
 
     # 3. Run your Agent
+
     results_log = []
     answers_payload = []
     print(f"Running agent on {len(questions_data)} questions...")
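
For context, results_log and answers_payload are filled by the question loop that follows this hunk. A rough sketch of that loop, assuming the usual Agents Course template shape, where agent is a callable and each item in questions_data carries "task_id" and "question" keys (all of these are assumptions, not shown in this diff):

# Sketch only: names and error handling are assumed, not taken from this commit.
for item in questions_data:
    task_id = item.get("task_id")
    question_text = item.get("question")
    if not task_id or question_text is None:
        continue  # skip malformed entries
    try:
        submitted_answer = agent(question_text)
    except Exception as e:
        submitted_answer = f"AGENT ERROR: {e}"
    answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
    results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})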
tools.py
CHANGED
@@ -10,75 +10,103 @@ from langchain.schema import HumanMessage
 import regex as re
 import time
 
-
+DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+
+def _download_file_for_task(task_id: str, ext: str) -> str:
     """
-
-
-
-    If the result is a DuckDuckGo 202 Ratelimit error, retry up to 5 times with a 5 second sleep between attempts.
+    Helper: attempt to GET the remote file for a given task_id.
+    Saves under ./hf_files/{task_id}.{ext}. Returns the local path if successful,
+    or an empty string if no file / download failed.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        "web_search_query": None,
-        "web_search_result": result_text
-    }
+    os.makedirs("hf_files", exist_ok=True)
+    local_path = os.path.join("hf_files", f"{task_id}.{ext}")
+    url = f"{DEFAULT_API_URL}/files/{task_id}"
+
+    try:
+        resp = requests.get(url, timeout=10)
+        if resp.status_code == 200 and resp.content:
+            with open(local_path, "wb") as f:
+                f.write(resp.content)
+            return local_path
+    except Exception:
+        pass
+
+    # If we get here, either 404 or download error
+    return ""
+
 
 def ocr_image_tool(state: AgentState) -> AgentState:
     """
-    Expects
-
+    Expects state["ocr_path"] to be either:
+      • A real local image path (e.g. "./hf_files/abc.png"), or
+      • A Task ID string like "abc123", in which case we GET /files/abc123.
+    Returns:
+      { "ocr_path": None, "ocr_result": "<OCRed text or error string>" }
     """
-
-
-    if not path:
+    path_or_id = state.get("ocr_path", "")
+    if not path_or_id:
         return {}
+
+    # 1) If local file exists, use it. Otherwise, treat "path_or_id" as task_id and download.
+    if os.path.exists(path_or_id):
+        local_img = path_or_id
+    else:
+        # Assume it's a task_id; try to download a PNG or JPG
+        # (We don’t know extension, so try common ones in order)
+        local_img = ""
+        for ext in ("png", "jpg", "jpeg"):
+            candidate = _download_file_for_task(path_or_id, ext)
+            if candidate:
+                local_img = candidate
+                break
+
+    if not local_img or not os.path.exists(local_img):
+        return {
+            "ocr_path": None,
+            "ocr_result": "Error: No image file found (neither local nor downloadable)."
+        }
+
+    # 2) Run OCR
     try:
-        img = Image.open(
-        text = pytesseract.image_to_string(img)
-        text = text.strip() or "(no visible text)"
+        img = Image.open(local_img)
+        text = pytesseract.image_to_string(img).strip() or "(no visible text)"
     except Exception as e:
         text = f"Error during OCR: {e}"
-
+
     return {
         "ocr_path": None,
         "ocr_result": text
     }
 
+
 def parse_excel_tool(state: AgentState) -> AgentState:
     """
-
-
+    Expects state["excel_path"] to be either:
+      • A real local .xlsx path, or
+      • A Task ID string (e.g. "abc123"), in which case we GET /files/abc123.xlsx.
     Returns:
       {
         "excel_path": None,
         "excel_sheet_name": None,
-        "excel_result": "<
+        "excel_result": "<stringified records or Markdown table>"
       }
-    If neither a real file nor a table block is found, returns an error message.
     """
-
+    path_or_id = state.get("excel_path", "")
     sheet = state.get("excel_sheet_name", "")
-    if not
+    if not path_or_id:
         return {}
 
-    # 1)
-    if os.path.exists(
+    # 1) If local .xlsx exists, use it. Otherwise, try downloading.
+    if os.path.exists(path_or_id):
+        local_xlsx = path_or_id
+    else:
+        local_xlsx = _download_file_for_task(path_or_id, "xlsx")
+
+    # 2) If we finally have a real file, read it
+    if local_xlsx and os.path.exists(local_xlsx):
         try:
-            xls = pd.ExcelFile(
+            xls = pd.ExcelFile(local_xlsx)
             if sheet and sheet in xls.sheet_names:
                 df = pd.read_excel(xls, sheet_name=sheet)
             else:
@@ -91,10 +119,10 @@ def parse_excel_tool(state: AgentState) -> AgentState:
                 "excel_result": text
             }
         except Exception as e:
-
-
+            print(f">>> parse_excel_tool: Error reading Excel file {local_xlsx}: {e}")
+            # Fall back to scanning for Markdown below
 
-    #
+    # 3) Fallback: scan any HumanMessage for a Markdown‐style table
     messages = state.get("messages", [])
     table_lines = []
     collecting = False
@@ -102,12 +130,10 @@ def parse_excel_tool(state: AgentState) -> AgentState:
     for msg in messages:
         if isinstance(msg, HumanMessage):
             for line in msg.content.splitlines():
-                # Start collecting when we see the first table header row
                 if re.match(r"^\s*\|\s*[-A-Za-z0-9]", line):
                     collecting = True
                 if collecting:
                     if not re.match(r"^\s*\|", line):
-                        # stop when the block ends (blank line or non‐table line)
                         collecting = False
                         break
                     table_lines.append(line)
@@ -121,7 +147,7 @@ def parse_excel_tool(state: AgentState) -> AgentState:
         "excel_result": "Error: No Excel file found and no Markdown table detected in prompt."
     }
 
-    #
+    # 4) Strip out separator rows and return the table block
     clean_rows = [row for row in table_lines if not re.match(r"^\s*\|\s*-+", row)]
     table_block = "\n".join(clean_rows).strip()
 
@@ -154,40 +180,60 @@ from state import AgentState
 
 def audio_transcriber_tool(state: AgentState) -> AgentState:
     """
-    LangGraph tool for transcribing audio via OpenAI’s
-    Expects: state["audio_path"] to be
+    LangGraph tool for transcribing audio via OpenAI’s Whisper API.
+    Expects: state["audio_path"] to be either:
+      • A local file path (e.g. "./hf_files/abc.mp3"), OR
+      • A Task ID (e.g. "abc123"), in which case we try downloading
+        GET {DEFAULT_API_URL}/files/{task_id} with .mp3, .wav, .m4a extensions.
+
     Returns:
       {
         "audio_path": None,
-        "transcript": "<
+        "transcript": "<text or error message>"
       }
-    If no valid audio_path is provided, returns {}.
     """
-
-
-    if not path or not os.path.exists(path):
+    path_or_id = state.get("audio_path", "")
+    if not path_or_id:
         return {}
 
+    # 1) If local file exists, use it. Otherwise, treat path_or_id as task_id and try downloads:
+    if os.path.exists(path_or_id):
+        local_audio = path_or_id
+    else:
+        local_audio = ""
+        for ext in ("mp3", "wav", "m4a"):
+            candidate = _download_file_for_task(path_or_id, ext)
+            if candidate:
+                local_audio = candidate
+                break
+
+    if not local_audio or not os.path.exists(local_audio):
+        # Neither a real file nor a downloadable attachment
+        return {
+            "audio_path": None,
+            "transcript": "Error: No audio file found (neither local nor downloadable)."
+        }
+
+    # 2) Send to OpenAI Whisper
     try:
         openai.api_key = os.getenv("OPENAI_API_KEY")
         if not openai.api_key:
             raise RuntimeError("OPENAI_API_KEY is not set in environment.")
 
-        with open(
+        with open(local_audio, "rb") as audio_file:
             # For OpenAI Python library v0.27.0+:
             response = openai.Audio.transcribe("whisper-1", audio_file)
-            # If
+            # If you’re on an older library:
             # response = openai.Audio.create_transcription(file=audio_file, model="whisper-1")
 
-        text = response
+        text = response.get("text", "").strip()
     except Exception as e:
         text = f"Error during transcription: {e}"
-
+
     return {
         "audio_path": None,
         "transcript": text
     }
-
 # tools.py
 
 import re
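
All three tools now share one convention: if the value in state is not an existing local path, it is treated as a task ID and fetched from DEFAULT_API_URL/files/{task_id} by _download_file_for_task, then cached under ./hf_files/. A minimal usage sketch, assuming tools.py is importable as tools, that AgentState is dict-like enough for a plain dict to work here, and that "abc123" and ./hf_files/report.xlsx are placeholders:

# Sketch only: "abc123" and the .xlsx path are placeholders, not real attachments.
from tools import ocr_image_tool, parse_excel_tool, audio_transcriber_tool

state = {"ocr_path": "abc123"}
print(ocr_image_tool(state)["ocr_result"])          # downloads hf_files/abc123.png (or .jpg/.jpeg), then OCRs it

state = {"excel_path": "./hf_files/report.xlsx", "excel_sheet_name": "Sheet1"}
print(parse_excel_tool(state)["excel_result"])      # an existing local file skips the download entirely

state = {"audio_path": "abc123"}
print(audio_transcriber_tool(state)["transcript"])  # tries abc123.mp3 / .wav / .m4a, then Whisper

Each tool returns a partial state update that clears its input key and fills its result key, which is why the examples read results from the returned dict rather than from state.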
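
The transcription call in the diff, openai.Audio.transcribe("whisper-1", audio_file), is the pre-1.0 openai-python interface, as the inline comments note. For reference only, a rough equivalent on the 1.x client (not what this Space uses) looks like:

# Sketch for openai>=1.0 only; the committed code targets the 0.x-style openai.Audio API.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
local_audio = "./hf_files/abc123.mp3"  # placeholder path
with open(local_audio, "rb") as audio_file:
    transcription = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
text = transcription.text.strip()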