Update app.py
app.py
CHANGED
@@ -8,12 +8,15 @@ import requests
 import pandas as pd
 import json
 import re
+import time
 from typing import List, Dict, Any, Optional, Callable, Union
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 DEFAULT_MODEL = "google/flan-t5-small"  # Smaller model for faster loading
+MAX_RETRIES = 3  # Maximum number of submission retries
+RETRY_DELAY = 5  # Seconds to wait between retries
 
 class LLMGAIAAgent:
     """
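For context on the imports and the DEFAULT_MODEL constant above, here is a rough sketch of the usual transformers seq2seq workflow they imply. The LLMGAIAAgent internals are not part of this hunk, so the helper name generate_answer and its parameters are illustrative only, not taken from app.py:

# Hypothetical sketch: loading and querying DEFAULT_MODEL with the imported
# AutoModelForSeq2SeqLM / AutoTokenizer classes. generate_answer is a made-up
# name for illustration; the real agent class is not shown in this diff.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

DEFAULT_MODEL = "google/flan-t5-small"  # Smaller model for faster loading

tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL)
model = AutoModelForSeq2SeqLM.from_pretrained(DEFAULT_MODEL)

def generate_answer(question: str, max_new_tokens: int = 64) -> str:
    """Run one question through the seq2seq model and decode the answer."""
    inputs = tokenizer(question, return_tensors="pt", truncation=True)
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)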
@@ -280,8 +283,8 @@ class EvaluationRunner:
         if not answers_payload:
             return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-        # Submit answers
-        submission_result = self.
+        # Submit answers with retry logic
+        submission_result = self._submit_answers_with_retry(username, agent_code_url, answers_payload)
 
         # Return results
         return submission_result, pd.DataFrame(results_log)
@@ -355,11 +358,11 @@ class EvaluationRunner:
 
         return results_log, answers_payload
 
-    def
- [3 removed lines; original content not shown]
-        """Submit answers to the evaluation server."""
+    def _submit_answers_with_retry(self,
+                                   username: str,
+                                   agent_code_url: str,
+                                   answers_payload: List[Dict[str, Any]]) -> str:
+        """Submit answers to the evaluation server with retry logic."""
         submission_data = {
             "username": username.strip(),
             "agent_code": agent_code_url,
@@ -369,30 +372,75 @@ class EvaluationRunner:
         status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
         print(status_update)
 
- [19 removed lines; original content not shown]
+        # Try submission with retries
+        for attempt in range(1, MAX_RETRIES + 1):
+            try:
+                print(f"Submission attempt {attempt} of {MAX_RETRIES}...")
+                response = requests.post(self.submit_url, json=submission_data, timeout=60)
+                response.raise_for_status()
+                result_data = response.json()
+
+                # Check if all evaluation results are N/A
+                if all(result_data.get(key, "N/A") == "N/A" for key in ["overall_score", "correct_answers", "total_questions"]):
+                    # If all values are N/A and we have retries left
+                    if attempt < MAX_RETRIES:
+                        print(f"Received N/A results. Waiting {RETRY_DELAY} seconds before retry...")
+                        time.sleep(RETRY_DELAY)
+                        continue
+
+                    # If this was our last attempt, provide detailed information
+                    final_status = (
+                        f"Submission Successful, but results are pending!\n"
+                        f"User: {result_data.get('username')}\n"
+                        f"Overall Score: {result_data.get('overall_score', 'N/A')}\n"
+                        f"Correct Answers: {result_data.get('correct_answers', 'N/A')}\n"
+                        f"Total Questions: {result_data.get('total_questions', 'N/A')}\n\n"
+                        f"Note: Results show N/A. This might be due to:\n"
+                        f"1. Account activity restrictions (Hugging Face limits submissions from new accounts)\n"
+                        f"2. Temporary delay in processing (try checking the results page directly)\n"
+                        f"3. API evaluation service issue\n\n"
+                        f"Recommendations:\n"
+                        f"- Check your submission status at: {DEFAULT_API_URL}/results?username={username}\n"
+                        f"- Try again in a few minutes\n"
+                        f"- Check the course forum for any known service issues\n"
+                        f"- Ensure your Hugging Face account has been active for at least 24 hours"
+                    )
+                else:
+                    # We got actual results
+                    final_status = (
+                        f"Submission Successful!\n"
+                        f"User: {result_data.get('username')}\n"
+                        f"Overall Score: {result_data.get('overall_score', 'N/A')}\n"
+                        f"Correct Answers: {result_data.get('correct_answers', 'N/A')}\n"
+                        f"Total Questions: {result_data.get('total_questions', 'N/A')}\n"
+                    )
+
+                print(final_status)
+                return final_status
+
+            except requests.exceptions.RequestException as e:
+                error_msg = f"Error submitting answers (attempt {attempt}): {e}"
+                print(error_msg)
+
+                if attempt < MAX_RETRIES:
+                    print(f"Waiting {RETRY_DELAY} seconds before retry...")
+                    time.sleep(RETRY_DELAY)
+                else:
+                    return f"{error_msg}\n\nRecommendation: Please try again later or check your internet connection."
 
- [4 removed lines; original content not shown]
+            except Exception as e:
+                error_msg = f"An unexpected error occurred during submission (attempt {attempt}): {e}"
+                print(error_msg)
+
+                if attempt < MAX_RETRIES:
+                    print(f"Waiting {RETRY_DELAY} seconds before retry...")
+                    time.sleep(RETRY_DELAY)
+                else:
+                    return f"{error_msg}\n\nRecommendation: Please try again later."
+
+        # This should not be reached due to the return statements in the loop,
+        # but added as a fallback
+        return "Submission failed after multiple attempts. Please try again later."
 
 
 def run_and_submit_all(profile: gr.OAuthProfile | None, *args):
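Stripped of the status messages, the logic added above is a bounded retry loop around requests.post. A minimal standalone sketch of that pattern, using the hypothetical helper name post_with_retry and generic parameters in place of the class attributes used in app.py:

# Condensed sketch of the retry pattern; names and defaults are illustrative.
import time
import requests

def post_with_retry(url, payload, max_retries=3, delay=5):
    """POST payload to url, retrying on network errors up to max_retries times."""
    last_error = None
    for attempt in range(1, max_retries + 1):
        try:
            response = requests.post(url, json=payload, timeout=60)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as exc:
            last_error = exc
            if attempt < max_retries:
                time.sleep(delay)  # back off before the next attempt
    raise RuntimeError(f"Submission failed after {max_retries} attempts") from last_error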
@@ -437,7 +485,16 @@ with gr.Blocks() as demo:
 
     gr.Markdown("---")
 
-    gr.Markdown("
+    gr.Markdown("""
+    **Note:** This version uses a language model to generate responses. The evaluation process may take longer than the template-based version.
+
+    **Important:** If you receive 'N/A' results, this is usually due to:
+    - Account activity restrictions (Hugging Face limits submissions from new accounts)
+    - Temporary processing delays
+    - API evaluation service issues
+
+    The system will automatically retry submissions if needed.
+    """)
 
     with gr.Row():
         login_button = gr.LoginButton(value="Sign in with Hugging Face")
@@ -447,7 +504,7 @@ with gr.Blocks() as demo:
 
     with gr.Row():
         with gr.Column():
-            output_status = gr.Textbox(label="Submission Result")
+            output_status = gr.Textbox(label="Submission Result", lines=10)
             output_results = gr.Dataframe(label="Questions and Agent Answers")
 
     submit_button.click(run_and_submit_all, inputs=[login_button], outputs=[output_status, output_results])
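The UI change gives the status box room for the multi-line messages produced by the retry logic. A minimal, self-contained Gradio sketch of the affected wiring, assuming a stub handler in place of run_and_submit_all and omitting the Hugging Face OAuth login used by the real app; the button label and stub data are illustrative only:

# Standalone sketch of the widget wiring touched by this commit.
import gradio as gr
import pandas as pd

def run_stub():
    # Placeholder standing in for run_and_submit_all.
    results = pd.DataFrame([{"Task ID": "demo", "Question": "2 + 2?", "Submitted Answer": "4"}])
    return "Submission Successful!\nOverall Score: N/A (stub run)", results

with gr.Blocks() as demo:
    gr.Markdown("---")
    submit_button = gr.Button("Run Evaluation & Submit All Answers")  # label is illustrative
    output_status = gr.Textbox(label="Submission Result", lines=10)   # lines=10 keeps long status text readable
    output_results = gr.Dataframe(label="Questions and Agent Answers")
    submit_button.click(run_stub, inputs=[], outputs=[output_status, output_results])

if __name__ == "__main__":
    demo.launch()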