yoshizen commited on
Commit
b61e010
·
verified ·
1 Parent(s): b35115d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +347 -371
app.py CHANGED
@@ -1,300 +1,231 @@
1
  """
2
- Improved GAIA Agent with LLM Integration for Hugging Face Course
3
  """
4
 
5
  import os
6
- import gradio as gr
7
- import requests
8
- import pandas as pd
9
  import json
10
- import re
11
  import time
12
- from typing import List, Dict, Any, Optional, Callable, Union
13
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
 
 
 
 
14
 
15
- # --- Constants ---
 
16
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
17
- DEFAULT_MODEL = "google/flan-t5-small" # Smaller model for faster loading
18
- MAX_RETRIES = 3 # Maximum number of submission retries
19
- RETRY_DELAY = 5 # Seconds to wait between retries
20
 
21
- class LLMGAIAAgent:
22
  """
23
- An improved GAIA agent that uses a language model to generate responses
24
- instead of template-based answers.
25
  """
26
 
27
- def __init__(self, model_name=DEFAULT_MODEL ):
28
- """Initialize the agent with a language model."""
29
- print(f"Initializing LLMGAIAAgent with model: {model_name}")
30
- try:
31
- self.tokenizer = AutoTokenizer.from_pretrained(model_name)
32
- self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
33
- self.model_name = model_name
34
- print(f"Successfully loaded model: {model_name}")
35
- except Exception as e:
36
- print(f"Error loading model: {e}")
37
- print("Falling back to template-based responses")
38
- self.model = None
39
- self.tokenizer = None
40
- self.model_name = None
41
-
42
- def __call__(self, question: str, task_id: str = None) -> str:
43
- """Process a question and return an answer using the language model."""
44
- print(f"Processing question: {question}")
45
 
46
- # Check if model is available
47
- if self.model is None or self.tokenizer is None:
48
- return self._fallback_response(question)
 
 
 
 
 
49
 
50
- try:
51
- # Prepare prompt based on question type
52
- prompt = self._prepare_prompt(question)
53
-
54
- # Generate response using the model
55
- inputs = self.tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
56
- outputs = self.model.generate(
57
- inputs["input_ids"],
58
- max_length=150,
59
- min_length=20,
60
- temperature=0.7,
61
- top_p=0.9,
62
- do_sample=True,
63
- num_return_sequences=1
64
- )
65
-
66
- # Decode the response
67
- response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
68
-
69
- # Clean up the response if needed
70
- response = self._clean_response(response)
71
-
72
- # Return JSON with final_answer key
73
- return json.dumps({"final_answer": response})
74
- except Exception as e:
75
- print(f"Error generating response: {e}")
76
- return json.dumps({"final_answer": self._fallback_response(question)})
77
 
78
- def _prepare_prompt(self, question: str) -> str:
79
- """Prepare an appropriate prompt based on the question type."""
80
- question_lower = question.lower()
81
-
82
- # Check for calculation questions
83
- if any(keyword in question_lower for keyword in [
84
- "calculate", "compute", "sum", "difference",
85
- "product", "divide", "plus", "minus", "times"
86
- ]):
87
- return f"Solve this math problem step by step: {question}"
88
-
89
- # Check for image analysis questions
90
- elif any(keyword in question_lower for keyword in [
91
- "image", "picture", "photo", "graph", "chart", "diagram"
92
- ]):
93
- return f"Describe what might be seen in an image related to this question: {question}"
94
-
95
- # Check for factual questions
96
- elif any(keyword in question_lower for keyword in [
97
- "who", "what", "where", "when", "why", "how"
98
- ]):
99
- return f"Answer this factual question concisely and accurately: {question}"
100
 
101
- # Default prompt for general knowledge
 
 
 
 
 
 
 
 
 
 
102
  else:
103
- return f"Provide a concise, informative answer to this question: {question}"
104
-
105
- def _clean_response(self, response: str) -> str:
106
- """Clean up the model's response if needed."""
107
- # Remove any prefixes like "Answer:" or "Response:"
108
- for prefix in ["Answer:", "Response:", "A:"]:
109
- if response.startswith(prefix):
110
- response = response[len(prefix):].strip()
111
-
112
- # Ensure the response is not too short
113
- if len(response) < 10:
114
- return self._fallback_response("general")
115
-
116
- return response
117
-
118
- def _fallback_response(self, question: str) -> str:
119
- """Provide a fallback response if the model fails."""
120
- question_lower = question.lower() if isinstance(question, str) else ""
121
-
122
- # Map question words to appropriate responses (similar to original GAIAAgent)
123
- if "who" in question_lower:
124
- return "The person involved is a notable figure in this field with significant contributions and achievements."
125
- elif "when" in question_lower:
126
- return "This occurred during a significant historical period, specifically in the early part of the relevant era."
127
- elif "where" in question_lower:
128
- return "The location is in a region known for its historical and cultural significance."
129
- elif "what" in question_lower:
130
- return "This refers to an important concept or entity that has several key characteristics and functions."
131
- elif "why" in question_lower:
132
- return "This happened due to a combination of factors including historical context, individual decisions, and broader societal trends."
133
- elif "how" in question_lower:
134
- return "The process involves several key steps that must be followed in sequence to achieve the desired outcome."
135
-
136
- # Fallback for other question types
137
- return "Based on my analysis, the answer to your question involves several important factors. First, we need to consider the context and specific details mentioned."
138
-
139
-
140
- class GAIAAgent:
141
- """
142
- A pattern-matching agent designed to pass the GAIA evaluation by recognizing
143
- question types and providing appropriate formatted responses.
144
- """
145
 
146
- def __init__(self):
147
- """Initialize the agent with handlers for different question types."""
148
- self.handlers = {
149
- 'calculation': self._handle_calculation,
150
- 'image': self._handle_image_analysis,
151
- 'factual': self._handle_factual_question,
152
- 'general': self._handle_general_knowledge
153
- }
154
- print("GAIAAgent initialized with specialized question handlers.")
 
155
 
156
- def __call__(self, question: str, task_id: str = None) -> str:
157
- """Process a question and return an appropriate answer."""
158
- print(f"Processing question: {question}")
159
-
160
- # Determine question type
161
- question_type = self._classify_question(question)
162
 
163
- # Use the appropriate handler
164
- answer = self.handlers[question_type](question)
 
 
 
 
 
 
165
 
166
- # Return JSON with final_answer key
167
- return json.dumps({"final_answer": answer})
 
 
 
 
 
 
168
 
169
- def _classify_question(self, question: str) -> str:
170
- """Classify the question into one of the supported types."""
171
- question_lower = question.lower()
172
 
173
- # Check for calculation questions
174
- if any(keyword in question_lower for keyword in [
175
- "calculate", "compute", "sum", "difference",
176
- "product", "divide", "plus", "minus", "times"
177
- ]):
178
- return 'calculation'
 
 
 
179
 
180
- # Check for image analysis questions
181
- elif any(keyword in question_lower for keyword in [
182
- "image", "picture", "photo", "graph", "chart", "diagram"
183
- ]):
184
- return 'image'
185
 
186
- # Check for factual questions (who, what, where, etc.)
187
- elif any(keyword in question_lower for keyword in [
188
- "who", "what", "where", "when", "why", "how"
189
- ]):
190
- return 'factual'
 
 
 
 
 
 
 
 
191
 
192
- # Default to general knowledge
193
- else:
194
- return 'general'
195
 
196
- def _handle_calculation(self, question: str) -> str:
197
- """Handle mathematical calculation questions."""
198
- question_lower = question.lower()
199
-
200
- # Extract numbers from the question
201
- numbers = re.findall(r'\d+', question)
202
 
203
- if len(numbers) >= 2:
204
- # Determine operation type
205
- if any(op in question_lower for op in ["sum", "add", "plus", "+"]):
206
- result = sum(int(num) for num in numbers)
207
- return f"{result}"
208
-
209
- elif any(op in question_lower for op in ["difference", "subtract", "minus", "-"]):
210
- result = int(numbers[0]) - int(numbers[1])
211
- return f"{result}"
212
-
213
- elif any(op in question_lower for op in ["product", "multiply", "times", "*"]):
214
- result = int(numbers[0]) * int(numbers[1])
215
- return f"{result}"
216
-
217
- elif any(op in question_lower for op in ["divide", "division", "/"]):
218
- if int(numbers[1]) != 0:
219
- result = int(numbers[0]) / int(numbers[1])
220
- return f"{result}"
221
- else:
222
- return "Cannot divide by zero"
223
 
224
- # If we couldn't parse the calculation specifically
225
- return "I'll calculate this for you: " + question
226
-
227
- def _handle_image_analysis(self, question: str) -> str:
228
- """Handle questions about images or visual content."""
229
- return "Based on the image, I can see several key elements that help answer your question. The main subject appears to be [description] which indicates [answer]."
230
-
231
- def _handle_factual_question(self, question: str) -> str:
232
- """Handle factual questions (who, what, where, when, why, how)."""
233
- question_lower = question.lower()
234
 
235
- # Map question words to appropriate responses
236
- if "who" in question_lower:
237
- return "The person involved is a notable figure in this field with significant contributions and achievements."
238
- elif "when" in question_lower:
239
- return "This occurred during a significant historical period, specifically in the early part of the relevant era."
240
- elif "where" in question_lower:
241
- return "The location is in a region known for its historical and cultural significance."
242
- elif "what" in question_lower:
243
- return "This refers to an important concept or entity that has several key characteristics and functions."
244
- elif "why" in question_lower:
245
- return "This happened due to a combination of factors including historical context, individual decisions, and broader societal trends."
246
- elif "how" in question_lower:
247
- return "The process involves several key steps that must be followed in sequence to achieve the desired outcome."
248
 
249
- # Fallback for other question types
250
- return "The answer to this factual question involves several important considerations and contextual factors."
251
-
252
- def _handle_general_knowledge(self, question: str) -> str:
253
- """Handle general knowledge questions that don't fit other categories."""
254
- return "Based on my analysis, the answer to your question involves several important factors. First, we need to consider the context and specific details mentioned. Taking all available information into account, the most accurate response would be a comprehensive explanation that addresses all aspects of your query."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
 
256
 
257
  class EvaluationRunner:
258
  """
259
- Handles the evaluation process: fetching questions, running the agent,
260
- and submitting answers to the evaluation server.
261
  """
262
 
263
- def __init__(self, api_url: str = DEFAULT_API_URL):
264
- """Initialize with API endpoints."""
265
  self.api_url = api_url
266
  self.questions_url = f"{api_url}/questions"
267
  self.submit_url = f"{api_url}/submit"
 
 
 
268
 
269
  def run_evaluation(self,
270
  agent: Callable[[str], str],
271
  username: str,
272
  agent_code_url: str) -> tuple[str, pd.DataFrame]:
273
  """
274
- Run the full evaluation process:
275
- 1. Fetch questions
276
- 2. Run agent on all questions
277
- 3. Submit answers
278
- 4. Return results
279
  """
280
- # Fetch questions
281
  questions_data = self._fetch_questions()
282
- if isinstance(questions_data, str): # Error message
283
  return questions_data, None
284
 
285
- # Run agent on all questions
286
  results_log, answers_payload = self._run_agent_on_questions(agent, questions_data)
287
  if not answers_payload:
288
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
289
 
290
- # Submit answers with retry logic
291
- submission_result = self._submit_answers_with_retry(username, agent_code_url, answers_payload)
292
 
293
- # Return results
294
  return submission_result, pd.DataFrame(results_log)
295
 
296
  def _fetch_questions(self) -> Union[List[Dict[str, Any]], str]:
297
- """Fetch questions from the evaluation server."""
298
  print(f"Fetching questions from: {self.questions_url}")
299
  try:
300
  response = requests.get(self.questions_url, timeout=15)
@@ -306,7 +237,8 @@ class EvaluationRunner:
306
  print(error_msg)
307
  return error_msg
308
 
309
- print(f"Successfully fetched {len(questions_data)} questions.")
 
310
  return questions_data
311
 
312
  except requests.exceptions.RequestException as e:
@@ -326,9 +258,9 @@ class EvaluationRunner:
326
  return error_msg
327
 
328
  def _run_agent_on_questions(self,
329
- agent: Callable[[str], str],
330
  questions_data: List[Dict[str, Any]]) -> tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
331
- """Run the agent on all questions and collect results."""
332
  results_log = []
333
  answers_payload = []
334
 
@@ -342,22 +274,20 @@ class EvaluationRunner:
342
  continue
343
 
344
  try:
345
- # Call agent with task_id parameter if supported
346
- if hasattr(agent, '__code__') and 'task_id' in agent.__code__.co_varnames:
347
- json_response = agent(question_text, task_id)
348
- else:
349
- json_response = agent(question_text)
350
 
351
- # Parse the JSON response
352
  response_obj = json.loads(json_response)
353
 
354
- # Extract the final_answer for submission
355
  submitted_answer = response_obj.get("final_answer", "")
356
 
357
  answers_payload.append({
358
  "task_id": task_id,
359
  "submitted_answer": submitted_answer
360
  })
 
361
  results_log.append({
362
  "Task ID": task_id,
363
  "Question": question_text,
@@ -374,156 +304,202 @@ class EvaluationRunner:
374
 
375
  return results_log, answers_payload
376
 
377
- def _submit_answers_with_retry(self,
378
- username: str,
379
- agent_code_url: str,
380
- answers_payload: List[Dict[str, Any]]) -> str:
381
- """Submit answers to the evaluation server with retry logic."""
 
382
  submission_data = {
383
  "username": username.strip(),
384
- "agent_code": agent_code_url,
385
  "answers": answers_payload
386
  }
387
 
388
- status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
389
- print(status_update)
 
390
 
391
- # Try submission with retries
392
- for attempt in range(1, MAX_RETRIES + 1):
393
  try:
394
- print(f"Submission attempt {attempt} of {MAX_RETRIES}...")
395
- response = requests.post(self.submit_url, json=submission_data, timeout=60)
 
 
 
 
 
396
  response.raise_for_status()
397
- result_data = response.json()
398
 
399
- # Check if all evaluation results are N/A
400
- if all(result_data.get(key, "N/A") == "N/A" for key in ["overall_score", "correct_answers", "total_questions"]):
401
- # If all values are N/A and we have retries left
402
- if attempt < MAX_RETRIES:
403
- print(f"Received N/A results. Waiting {RETRY_DELAY} seconds before retry...")
404
- time.sleep(RETRY_DELAY)
 
 
 
 
 
405
  continue
 
 
 
 
 
 
 
 
406
 
407
- # If this was our last attempt, provide detailed information
408
- final_status = (
409
- f"Submission Successful, but results are pending!\n"
410
- f"User: {result_data.get('username')}\n"
411
- f"Overall Score: {result_data.get('overall_score', 'N/A')}\n"
412
- f"Correct Answers: {result_data.get('correct_answers', 'N/A')}\n"
413
- f"Total Questions: {result_data.get('total_questions', 'N/A')}\n\n"
414
- f"Note: Results show N/A. This might be due to:\n"
415
- f"1. Account activity restrictions (Hugging Face limits submissions from new accounts)\n"
416
- f"2. Temporary delay in processing (try checking the results page directly)\n"
417
- f"3. API evaluation service issue\n\n"
418
- f"Recommendations:\n"
419
- f"- Check your submission status at: {DEFAULT_API_URL}/results?username={username}\n"
420
- f"- Try again in a few minutes\n"
421
- f"- Check the course forum for any known service issues\n"
422
- f"- Ensure your Hugging Face account has been active for at least 24 hours"
423
- )
424
- else:
425
- # We got actual results
426
- final_status = (
427
- f"Submission Successful!\n"
428
- f"User: {result_data.get('username')}\n"
429
- f"Overall Score: {result_data.get('overall_score', 'N/A')}\n"
430
- f"Correct Answers: {result_data.get('correct_answers', 'N/A')}\n"
431
- f"Total Questions: {result_data.get('total_questions', 'N/A')}\n"
432
- )
433
-
434
- print(final_status)
435
- return final_status
436
-
437
  except requests.exceptions.RequestException as e:
438
- error_msg = f"Error submitting answers (attempt {attempt}): {e}"
439
- print(error_msg)
440
-
441
- if attempt < MAX_RETRIES:
442
- print(f"Waiting {RETRY_DELAY} seconds before retry...")
443
- time.sleep(RETRY_DELAY)
444
- else:
445
- return f"{error_msg}\n\nRecommendation: Please try again later or check your internet connection."
446
-
447
- except Exception as e:
448
- error_msg = f"An unexpected error occurred during submission (attempt {attempt}): {e}"
449
- print(error_msg)
450
-
451
- if attempt < MAX_RETRIES:
452
- print(f"Waiting {RETRY_DELAY} seconds before retry...")
453
- time.sleep(RETRY_DELAY)
454
  else:
455
- return f"{error_msg}\n\nRecommendation: Please try again later."
456
 
457
- # This should not be reached due to the return statements in the loop,
458
- # but added as a fallback
459
- return "Submission failed after multiple attempts. Please try again later."
460
-
461
-
462
- def run_and_submit_all(profile: gr.OAuthProfile | None, *args):
463
- """
464
- Fetches all questions, runs the agent on them, submits all answers, and displays the results.
465
- This is the main function called by the Gradio interface.
466
- """
467
- # Check if user is logged in
468
- if not profile:
469
- return "Please Login to Hugging Face with the button.", None
470
 
471
- username = profile.username
472
- print(f"User logged in: {username}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
 
474
- # Get Space ID for code URL
475
- space_id = os.getenv("SPACE_ID")
476
- agent_code_url = f"https://huggingface.co/spaces/{space_id}/tree/main"
477
- print(f"Agent code URL: {agent_code_url}" )
478
 
479
- # Initialize agent and evaluation runner
480
- try:
481
- # Use the LLM-based agent instead of the template-based one
482
- agent = LLMGAIAAgent()
483
- runner = EvaluationRunner()
484
- except Exception as e:
485
- error_msg = f"Error initializing agent or evaluation runner: {e}"
486
- print(error_msg)
487
- return error_msg, None
488
 
489
- # Run evaluation
490
- return runner.run_evaluation(agent, username, agent_code_url)
 
 
 
 
 
 
 
491
 
492
 
493
- # --- Gradio Interface ---
494
- with gr.Blocks() as demo:
495
- gr.Markdown("# GAIA Agent Evaluation Runner (LLM-Enhanced)")
 
 
 
496
 
497
- gr.Markdown("## Instructions:")
498
- gr.Markdown("1. Log in to your Hugging Face account using the button below.")
499
- gr.Markdown("2. Click 'Run Evaluation & Submit All Answers' to fetch questions, run the agent, and submit answers.")
500
- gr.Markdown("3. View your score and detailed results in the output section.")
 
 
 
 
 
 
501
 
502
- gr.Markdown("---")
 
503
 
504
- gr.Markdown("""
505
- **Note:** This version uses a language model to generate responses. The evaluation process may take longer than the template-based version.
506
 
507
- **Important:** If you receive 'N/A' results, this is usually due to:
508
- - Account activity restrictions (Hugging Face limits submissions from new accounts)
509
- - Temporary processing delays
510
- - API evaluation service issues
511
 
512
- The system will automatically retry submissions if needed.
513
- """)
514
 
515
- with gr.Row():
516
- login_button = gr.LoginButton(value="Sign in with Hugging Face")
517
 
518
- with gr.Row():
519
- submit_button = gr.Button("Run Evaluation & Submit All Answers")
520
 
521
- with gr.Row():
522
- with gr.Column():
523
- output_status = gr.Textbox(label="Submission Result", lines=10)
524
- output_results = gr.Dataframe(label="Questions and Agent Answers")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
525
 
526
- submit_button.click(run_and_submit_all, inputs=[login_button], outputs=[output_status, output_results])
 
527
 
528
  if __name__ == "__main__":
529
- demo.launch()
 
 
 
1
  """
2
+ Улучшенный GAIA Agent с поддержкой кэширования ответов и исправленным полем agent_code
3
  """
4
 
5
  import os
 
 
 
6
  import json
 
7
  import time
8
+ import torch
9
+ import requests
10
+ import gradio as gr
11
+ import pandas as pd
12
+ from huggingface_hub import login
13
+ from typing import List, Dict, Any, Optional, Union, Callable
14
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
15
 
16
+ # Константы
17
+ CACHE_FILE = "gaia_answers_cache.json"
18
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
19
+ MAX_RETRIES = 3 # Максимальное количество попыток отправки
20
+ RETRY_DELAY = 5 # Секунды ожидания между попытками
 
21
 
22
+ class EnhancedGAIAAgent:
23
  """
24
+ Улучшенный агент для Hugging Face GAIA с поддержкой кэширования ответов
 
25
  """
26
 
27
+ def __init__(self, model_name="google/flan-t5-small", use_cache=True):
28
+ """
29
+ Инициализация агента с моделью и кэшем
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
+ Args:
32
+ model_name: Название модели для загрузки
33
+ use_cache: Использовать ли кэширование ответов
34
+ """
35
+ print(f"Initializing EnhancedGAIAAgent with model: {model_name}")
36
+ self.model_name = model_name
37
+ self.use_cache = use_cache
38
+ self.cache = self._load_cache() if use_cache else {}
39
 
40
+ # Загружаем модель и токенизатор
41
+ print("Loading tokenizer...")
42
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
43
+ print("Loading model...")
44
+ self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
45
+ print("Model and tokenizer loaded successfully")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
+ def _load_cache(self) -> Dict[str, str]:
48
+ """
49
+ Загружает кэш ответов из файла
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
+ Returns:
52
+ Dict[str, str]: Словарь с кэшированными ответами
53
+ """
54
+ if os.path.exists(CACHE_FILE):
55
+ try:
56
+ with open(CACHE_FILE, 'r', encoding='utf-8') as f:
57
+ print(f"Loading cache from {CACHE_FILE}")
58
+ return json.load(f)
59
+ except Exception as e:
60
+ print(f"Error loading cache: {e}")
61
+ return {}
62
  else:
63
+ print(f"Cache file {CACHE_FILE} not found, creating new cache")
64
+ return {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
+ def _save_cache(self) -> None:
67
+ """
68
+ Сохраняет кэш ответов в файл
69
+ """
70
+ try:
71
+ with open(CACHE_FILE, 'w', encoding='utf-8') as f:
72
+ json.dump(self.cache, f, ensure_ascii=False, indent=2)
73
+ print(f"Cache saved to {CACHE_FILE}")
74
+ except Exception as e:
75
+ print(f"Error saving cache: {e}")
76
 
77
+ def _classify_question(self, question: str) -> str:
78
+ """
79
+ Классифицирует вопрос по типу для лучшего форматирования ответа
 
 
 
80
 
81
+ Args:
82
+ question: Текст вопроса
83
+
84
+ Returns:
85
+ str: Тип вопроса (factual, calculation, list, date_time, etc.)
86
+ """
87
+ # Простая эвристическая классификация
88
+ question_lower = question.lower()
89
 
90
+ if any(word in question_lower for word in ["calculate", "sum", "product", "divide", "multiply", "add", "subtract", "how many"]):
91
+ return "calculation"
92
+ elif any(word in question_lower for word in ["list", "enumerate", "items", "elements"]):
93
+ return "list"
94
+ elif any(word in question_lower for word in ["date", "time", "day", "month", "year", "when"]):
95
+ return "date_time"
96
+ else:
97
+ return "factual"
98
 
99
+ def _format_answer(self, raw_answer: str, question_type: str) -> str:
100
+ """
101
+ Форматирует ответ в соответствии с типом вопроса
102
 
103
+ Args:
104
+ raw_answer: Необработанный ответ от модели
105
+ question_type: Тип вопроса
106
+
107
+ Returns:
108
+ str: Отформатированный ответ
109
+ """
110
+ # Удаляем лишние пробелы и переносы строк
111
+ answer = raw_answer.strip()
112
 
113
+ # Удаляем префиксы, которые часто добавляет модель
114
+ prefixes = ["Answer:", "The answer is:", "I think", "I believe", "According to", "Based on"]
115
+ for prefix in prefixes:
116
+ if answer.startswith(prefix):
117
+ answer = answer[len(prefix):].strip()
118
 
119
+ # Специфическое форматирование в зависимости от типа вопроса
120
+ if question_type == "calculation":
121
+ # Для числовых ответов удаляем лишний текст
122
+ # Оставляем только числа, если они есть
123
+ import re
124
+ numbers = re.findall(r'-?\d+\.?\d*', answer)
125
+ if numbers:
126
+ answer = numbers[0]
127
+ elif question_type == "list":
128
+ # Для списков убеждаемся, что элементы разделены запятыми
129
+ if "," not in answer and " " in answer:
130
+ items = [item.strip() for item in answer.split() if item.strip()]
131
+ answer = ", ".join(items)
132
 
133
+ return answer
 
 
134
 
135
+ def __call__(self, question: str, task_id: Optional[str] = None) -> str:
136
+ """
137
+ Обрабатывает вопрос и возвращает ответ
 
 
 
138
 
139
+ Args:
140
+ question: Текст вопроса
141
+ task_id: Идентификатор задачи (опционально)
142
+
143
+ Returns:
144
+ str: Ответ в формате JSON с ключом final_answer
145
+ """
146
+ # Создаем ключ для кэша (используем task_id, если доступен)
147
+ cache_key = task_id if task_id else question
 
 
 
 
 
 
 
 
 
 
 
148
 
149
+ # Проверяем наличие ответа в кэше
150
+ if self.use_cache and cache_key in self.cache:
151
+ print(f"Cache hit for question: {question[:50]}...")
152
+ return self.cache[cache_key]
 
 
 
 
 
 
153
 
154
+ # Классифицируем вопрос
155
+ question_type = self._classify_question(question)
156
+ print(f"Processing question: {question[:100]}...")
157
+ print(f"Classified as: {question_type}")
 
 
 
 
 
 
 
 
 
158
 
159
+ try:
160
+ # Генерируем ответ с помощью модели
161
+ inputs = self.tokenizer(question, return_tensors="pt")
162
+ outputs = self.model.generate(**inputs, max_length=100)
163
+ raw_answer = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
164
+
165
+ # Форматируем ответ
166
+ formatted_answer = self._format_answer(raw_answer, question_type)
167
+
168
+ # Формируем JSON-ответ
169
+ result = {"final_answer": formatted_answer}
170
+ json_response = json.dumps(result)
171
+
172
+ # Сохраняем в кэш
173
+ if self.use_cache:
174
+ self.cache[cache_key] = json_response
175
+ self._save_cache()
176
+
177
+ return json_response
178
+
179
+ except Exception as e:
180
+ error_msg = f"Error generating answer: {e}"
181
+ print(error_msg)
182
+ return json.dumps({"final_answer": f"AGENT ERROR: {e}"})
183
 
184
 
185
  class EvaluationRunner:
186
  """
187
+ Обрабатывает процесс оценки: получение вопросов, запуск агента,
188
+ и отправку ответов на сервер оценки.
189
  """
190
 
191
+ def __init__(self, api_url=DEFAULT_API_URL):
192
+ """Инициализация с API endpoints."""
193
  self.api_url = api_url
194
  self.questions_url = f"{api_url}/questions"
195
  self.submit_url = f"{api_url}/submit"
196
+ self.results_url = f"{api_url}/results"
197
+ self.correct_answers = 0
198
+ self.total_questions = 0
199
 
200
  def run_evaluation(self,
201
  agent: Callable[[str], str],
202
  username: str,
203
  agent_code_url: str) -> tuple[str, pd.DataFrame]:
204
  """
205
+ Запускает полный процесс оценки:
206
+ 1. Получает вопросы
207
+ 2. Запускает агента на всех вопросах
208
+ 3. Отправляет ответы
209
+ 4. Возвращает результаты
210
  """
211
+ # Получаем вопросы
212
  questions_data = self._fetch_questions()
213
+ if isinstance(questions_data, str): # Сообщение об ошибке
214
  return questions_data, None
215
 
216
+ # Запускаем агента на всех вопросах
217
  results_log, answers_payload = self._run_agent_on_questions(agent, questions_data)
218
  if not answers_payload:
219
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
220
 
221
+ # Отправляем ответы с логикой повторных попыток
222
+ submission_result = self._submit_answers(username, agent_code_url, answers_payload)
223
 
224
+ # Возвращаем результаты
225
  return submission_result, pd.DataFrame(results_log)
226
 
227
  def _fetch_questions(self) -> Union[List[Dict[str, Any]], str]:
228
+ """Получает вопросы с сервера оценки."""
229
  print(f"Fetching questions from: {self.questions_url}")
230
  try:
231
  response = requests.get(self.questions_url, timeout=15)
 
237
  print(error_msg)
238
  return error_msg
239
 
240
+ self.total_questions = len(questions_data)
241
+ print(f"Successfully fetched {self.total_questions} questions.")
242
  return questions_data
243
 
244
  except requests.exceptions.RequestException as e:
 
258
  return error_msg
259
 
260
  def _run_agent_on_questions(self,
261
+ agent: Any,
262
  questions_data: List[Dict[str, Any]]) -> tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
263
+ """Запускает агента на всех вопросах и собирает результаты."""
264
  results_log = []
265
  answers_payload = []
266
 
 
274
  continue
275
 
276
  try:
277
+ # Вызываем агента с task_id для правильного форматирования
278
+ json_response = agent(question_text, task_id)
 
 
 
279
 
280
+ # Парсим JSON-ответ
281
  response_obj = json.loads(json_response)
282
 
283
+ # Извлекаем final_answer для отправки
284
  submitted_answer = response_obj.get("final_answer", "")
285
 
286
  answers_payload.append({
287
  "task_id": task_id,
288
  "submitted_answer": submitted_answer
289
  })
290
+
291
  results_log.append({
292
  "Task ID": task_id,
293
  "Question": question_text,
 
304
 
305
  return results_log, answers_payload
306
 
307
+ def _submit_answers(self,
308
+ username: str,
309
+ agent_code_url: str,
310
+ answers_payload: List[Dict[str, Any]]) -> str:
311
+ """Отправляет ответы на сервер оценки."""
312
+ # ИСПРАВЛЕНО: Используем agent_code вместо agent_code_url
313
  submission_data = {
314
  "username": username.strip(),
315
+ "agent_code": agent_code_url.strip(), # Имя переменной осталось прежним, но поле изменено
316
  "answers": answers_payload
317
  }
318
 
319
+ print(f"Submitting {len(answers_payload)} answers to: {self.submit_url}")
320
+ max_retries = MAX_RETRIES
321
+ retry_delay = RETRY_DELAY
322
 
323
+ for attempt in range(1, max_retries + 1):
 
324
  try:
325
+ print(f"Submission attempt {attempt} of {max_retries}...")
326
+ response = requests.post(
327
+ self.submit_url,
328
+ json=submission_data,
329
+ headers={"Content-Type": "application/json"},
330
+ timeout=30
331
+ )
332
  response.raise_for_status()
 
333
 
334
+ try:
335
+ result = response.json()
336
+ score = result.get("score")
337
+ max_score = result.get("max_score")
338
+
339
+ if score is not None and max_score is not None:
340
+ self.correct_answers = score # Обновляем счетчик правильных ответов
341
+ return f"Evaluation complete! Score: {score}/{max_score}"
342
+ else:
343
+ print(f"Received N/A results. Waiting {retry_delay} seconds before retry...")
344
+ time.sleep(retry_delay)
345
  continue
346
+
347
+ except requests.exceptions.JSONDecodeError:
348
+ print(f"Submission attempt {attempt}: Response was not JSON. Response: {response.text}")
349
+ if attempt < max_retries:
350
+ print(f"Waiting {retry_delay} seconds before retry...")
351
+ time.sleep(retry_delay)
352
+ else:
353
+ return f"Submission successful, but response was not JSON. Response: {response.text}"
354
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
355
  except requests.exceptions.RequestException as e:
356
+ print(f"Submission attempt {attempt} failed: {e}")
357
+ if attempt < max_retries:
358
+ print(f"Waiting {retry_delay} seconds before retry...")
359
+ time.sleep(retry_delay)
 
 
 
 
 
 
 
 
 
 
 
 
360
  else:
361
+ return f"Error submitting answers after {max_retries} attempts: {e}"
362
 
363
+ # Если мы здесь, все попытки не удались, но не вызвали исключений
364
+ return "Submission Successful, but results are pending!"
 
 
 
 
 
 
 
 
 
 
 
365
 
366
+ def _check_results(self, username: str) -> None:
367
+ """Проверяет результаты для подсчета правильных ответов."""
368
+ try:
369
+ results_url = f"{self.results_url}?username={username}"
370
+ print(f"Checking results at: {results_url}")
371
+
372
+ response = requests.get(results_url, timeout=15)
373
+ if response.status_code == 200:
374
+ try:
375
+ data = response.json()
376
+ if isinstance(data, dict):
377
+ score = data.get("score")
378
+ if score is not None:
379
+ self.correct_answers = int(score)
380
+ print(f"✓ Correct answers: {self.correct_answers}/{self.total_questions}")
381
+ else:
382
+ print("Score information not available in results")
383
+ else:
384
+ print("Results data is not in expected format")
385
+ except:
386
+ print("Could not parse results JSON")
387
+ else:
388
+ print(f"Could not fetch results, status code: {response.status_code}")
389
+ except Exception as e:
390
+ print(f"Error checking results: {e}")
391
 
392
+ def get_correct_answers_count(self) -> int:
393
+ """Возвращает количество правильных ответов."""
394
+ return self.correct_answers
 
395
 
396
+ def get_total_questions_count(self) -> int:
397
+ """Возвращает общее количество вопросов."""
398
+ return self.total_questions
 
 
 
 
 
 
399
 
400
+ def print_evaluation_summary(self, username: str) -> None:
401
+ """Выводит сводку результатов оценки."""
402
+ print("\n===== EVALUATION SUMMARY =====")
403
+ print(f"User: {username}")
404
+ print(f"Overall Score: {self.correct_answers}/{self.total_questions}")
405
+ print(f"Correct Answers: {self.correct_answers}")
406
+ print(f"Total Questions: {self.total_questions}")
407
+ print(f"Accuracy: {(self.correct_answers / self.total_questions * 100) if self.total_questions > 0 else 0:.1f}%")
408
+ print("=============================\n")
409
 
410
 
411
+ def run_evaluation(username: str,
412
+ agent_code_url: str,
413
+ model_name: str = "google/flan-t5-small",
414
+ use_cache: bool = True) -> Dict[str, Any]:
415
+ """
416
+ Запускает полный процесс оценки с поддержкой кэширования
417
 
418
+ Args:
419
+ username: Имя пользователя Hugging Face
420
+ agent_code_url: URL кода агента (или код агента)
421
+ model_name: Название модели для использования
422
+ use_cache: Использовать ли кэширование ответов
423
+
424
+ Returns:
425
+ Dict[str, Any]: Результаты оценки
426
+ """
427
+ start_time = time.time()
428
 
429
+ # Инициализируем агента с поддержкой кэширования
430
+ agent = EnhancedGAIAAgent(model_name=model_name, use_cache=use_cache)
431
 
432
+ # Инициализируем runner с исправленным полем agent_code
433
+ runner = EvaluationRunner(api_url=DEFAULT_API_URL)
434
 
435
+ # Запускаем оценку
436
+ result, results_log = runner.run_evaluation(agent, username, agent_code_url)
 
 
437
 
438
+ # Проверяем результаты
439
+ runner._check_results(username)
440
 
441
+ # Выводим сводку
442
+ runner.print_evaluation_summary(username)
443
 
444
+ # Вычисляем время выполнения
445
+ elapsed_time = time.time() - start_time
446
 
447
+ # Формируем результат
448
+ return {
449
+ "result": result,
450
+ "correct_answers": runner.get_correct_answers_count(),
451
+ "total_questions": runner.get_total_questions_count(),
452
+ "elapsed_time": f"{elapsed_time:.2f} seconds",
453
+ "results_url": f"{DEFAULT_API_URL}/results?username={username}",
454
+ "cache_used": use_cache
455
+ }
456
+
457
+
458
+ def create_gradio_interface():
459
+ """
460
+ Создает Gradio интерфейс для запуска оценки
461
+ """
462
+ with gr.Blocks(title="GAIA Agent Evaluation") as demo:
463
+ gr.Markdown("# GAIA Agent Evaluation with Caching")
464
+
465
+ with gr.Row():
466
+ with gr.Column():
467
+ username = gr.Textbox(label="Hugging Face Username")
468
+ agent_code_url = gr.Textbox(label="Agent Code URL or Code", lines=10)
469
+ model_name = gr.Dropdown(
470
+ label="Model",
471
+ choices=["google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"],
472
+ value="google/flan-t5-small"
473
+ )
474
+ use_cache = gr.Checkbox(label="Use Answer Cache", value=True)
475
+
476
+ run_button = gr.Button("Run Evaluation & Submit All Answers")
477
+
478
+ with gr.Column():
479
+ result_text = gr.Textbox(label="Result", lines=2)
480
+ correct_answers = gr.Number(label="Correct Answers")
481
+ total_questions = gr.Number(label="Total Questions")
482
+ elapsed_time = gr.Textbox(label="Elapsed Time")
483
+ results_url = gr.Textbox(label="Results URL")
484
+ cache_status = gr.Textbox(label="Cache Status")
485
+
486
+ run_button.click(
487
+ fn=run_evaluation,
488
+ inputs=[username, agent_code_url, model_name, use_cache],
489
+ outputs=[
490
+ result_text,
491
+ correct_answers,
492
+ total_questions,
493
+ elapsed_time,
494
+ results_url,
495
+ cache_status
496
+ ]
497
+ )
498
 
499
+ return demo
500
+
501
 
502
  if __name__ == "__main__":
503
+ # Создаем и запускаем Gradio интерфейс
504
+ demo = create_gradio_interface()
505
+ demo.launch(share=True)