Update app.py
app.py
CHANGED
@@ -5,185 +5,510 @@ import torch
The removed side of this hunk (old lines 5–189) is only partially legible in the rendered diff; the recoverable parts are summarized here. The previous version kept the same gradio/tqdm/transformers imports and DEFAULT_API_URL, but loaded a smaller checkpoint (MODEL_NAME = "google/flan-t5-…", truncated in the view) directly through transformers with device_map="auto". Its evaluation-runner class had __init__(api_url=DEFAULT_API_URL) setting questions_url, submit_url and a requests.Session(); run_evaluation(agent, username, agent_code, progress=tqdm), which called _fetch_questions(), looped over the questions building answers entries ({"task_id": …}) and a results table ({"Task ID", "Question", "Answer"}), and finished with submission_result = self._submit_answers(username, agent_code, answers); _fetch_questions(), a single GET on questions_url with timeout=60 and an Accept: application/json header returning response.json() or "Ошибка получения вопросов: …" on failure; and _submit_answers(), a single POST of {username, agent_code, answers} with timeout=120 returning response.json().get("message", "Ответы успешно отправлены") or "Ошибка отправки: …".

A module-level run_evaluation(username, agent_code, progress=gr.Progress()) wrapper with a tqdm-style ProgressWrapper drove a simpler gr.Blocks UI (an "🔐 Авторизация" column with a username Textbox and a "📊 Результаты" column with a result_output Textbox), wired through run_btn.click(..., concurrency_limit=1) and launched via demo.queue(...) on 0.0.0.0:7860. The new version of the region follows.
import gradio as gr
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from llama_index.core import Settings
from llama_index.core.tools import FunctionTool
from llama_index.core.agent import ReActAgent
from llama_index.llms.huggingface import HuggingFaceLLM
from typing import List, Dict, Any, Tuple, Optional
import json
import ast
import numpy as np
from PIL import Image, UnidentifiedImageError
import io
import base64
import logging
import time

# Logging setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("GAIA-Mastermind")

# Configuration
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MODEL_NAME = "google/flan-t5-xxl"
API_RETRIES = 3
API_TIMEOUT = 45

# === FIXED SYSTEM CORE ===
class GAIAThoughtProcessor:
    def __init__(self):
        # Optimized model loading
        self.llm = HuggingFaceLLM(
            model_name=MODEL_NAME,
            tokenizer_name=MODEL_NAME,
            context_window=2048,
            max_new_tokens=512,
            device_map="auto",
            model_kwargs={
                "torch_dtype": torch.float16,
                "load_in_4bit": True,
                "device_map": "auto"
            },
            generate_kwargs={"temperature": 0.01, "do_sample": False}
        )
        self.tools = self._create_gaia_tools()
        self.agent = ReActAgent.from_tools(
            self.tools,
            llm=self.llm,
            verbose=True,
            max_iterations=10,
            react_mode="plan_and_solve"
        )
        logger.info("⚙️ Инициализирован GAIAThoughtProcessor с %d инструментами", len(self.tools))

    def _create_gaia_tools(self) -> List[FunctionTool]:
        """Creates the tools required by the GAIA specification."""
        return [
            FunctionTool.from_defaults(
                fn=self._math_solver,
                name="math_solver",
                description="Вычисляет математические выражения. Ввод: строка с выражением (например, '2+2*3')"
            ),
            FunctionTool.from_defaults(
                fn=self._table_analyzer,
                name="table_analyzer",
                description="Анализирует табличные данные. Ввод: (table_data:str, query:str)"
            ),
            FunctionTool.from_defaults(
                fn=self._text_processor,
                name="text_processor",
                description="Операции с текстом: reverse, count_words, extract_numbers. Ввод: (text:str, operation:str)"
            ),
            FunctionTool.from_defaults(
                fn=self._image_processor,
                name="image_processor",
                description="Анализирует изображения. Ввод: base64 изображения или URL"
            )
        ]

    def _math_solver(self, expression: str) -> str:
        """Safely evaluates a mathematical expression."""
        try:
            # Sanitize the expression
            clean_expr = re.sub(r"[^0-9+\-*/().^√π]", "", expression)
            # Math constants and functions exposed to eval
            context = {
                "sqrt": np.sqrt,
                "log": np.log,
                "log10": np.log10,
                "pi": np.pi,
                "e": np.e,
                "sin": np.sin,
                "cos": np.cos,
                "tan": np.tan
            }
            return str(eval(clean_expr, {"__builtins__": None}, context))
        except Exception as e:
            logger.error("Math error: %s", e)
            return f"Math Error: {str(e)}"

    def _table_analyzer(self, table_data: str, query: str) -> str:
        """Analyzes tabular data, including free-form queries."""
        try:
            # Detect the table format
            if "\t" in table_data:
                df = pd.read_csv(io.StringIO(table_data), sep="\t")
            elif "," in table_data:
                df = pd.read_csv(io.StringIO(table_data))
            else:
                df = pd.read_fwf(io.StringIO(table_data))

            # Run the pandas query
            if "sum" in query.lower():
                return str(df.sum(numeric_only=True).to_dict())
            elif "mean" in query.lower():
                return str(df.mean(numeric_only=True).to_dict())
            elif "max" in query.lower():
                return str(df.max(numeric_only=True).to_dict())
            elif "min" in query.lower():
                return str(df.min(numeric_only=True).to_dict())
            elif "count" in query.lower():
                return str(df.count().to_dict())
            else:
                # Fall back to a user-supplied query
                try:
                    result = df.query(query)
                    return result.to_string()
                except:
                    return df.describe().to_string()
        except Exception as e:
            logger.error("Table error: %s", e)
            return f"Table Error: {str(e)}"

    def _text_processor(self, text: str, operation: str) -> str:
        """Text operations as required by the GAIA specification."""
        operation = operation.lower()
        if operation == "reverse":
            return text[::-1]
        elif operation == "count_words":
            return str(len(text.split()))
        elif operation == "extract_numbers":
            return ", ".join(re.findall(r"[-+]?\d*\.\d+|\d+", text))
        elif operation == "uppercase":
            return text.upper()
        elif operation == "lowercase":
            return text.lower()
        else:
            return f"Unsupported operation: {operation}"

    def _image_processor(self, image_input: str) -> str:
        """Processes an image given as a URL or base64 data."""
        try:
            # Handle a URL
            if image_input.startswith("http"):
                response = requests.get(image_input, timeout=30)
                response.raise_for_status()
                img_data = response.content
                img = Image.open(io.BytesIO(img_data))
            # Handle a base64 data URI
            elif image_input.startswith("data:image"):
                header, data = image_input.split(",", 1)
                img_data = base64.b64decode(data)
                img = Image.open(io.BytesIO(img_data))
            else:
                return "Invalid image format"

            # Describe the image
            description = (
                f"Format: {img.format}, Size: {img.size}, "
                f"Mode: {img.mode}, Colors: {len(set(img.getdata()))}"
            )
            return description
        except (UnidentifiedImageError, requests.exceptions.RequestException) as e:
            logger.error("Image processing error: %s", e)
            return f"Image Error: {str(e)}"
        except Exception as e:
            logger.exception("Unexpected image error")
            return f"Unexpected Error: {str(e)}"

    def process_question(self, question: str, task_id: str) -> str:
        """Processes a single question per the GAIA specification."""
        try:
            # Decompose the task
            decomposition_prompt = (
                f"Декомпозируй задачу GAIA ({task_id}) на шаги:\n{question}\n\n"
                "Шаги (разделены точкой с запятой):"
            )
            steps_response = self.llm.complete(decomposition_prompt)
            steps = [s.strip() for s in steps_response.text.split(";") if s.strip()]

            # Execute the steps
            results = []
            for step in steps:
                if step:
                    try:
                        result = self.agent.chat(step)
                        results.append(f"{step}: {result}")
                    except Exception as e:
                        results.append(f"{step}: ERROR - {str(e)}")

            # Synthesize the final answer
            synthesis_prompt = (
                f"Задача GAIA {task_id}:\n{question}\n\n"
                "Выполненные шаги:\n" + "\n".join(results) +
                "\n\nФинальный ответ в формате JSON:"
            )
            final_response = self.llm.complete(synthesis_prompt)

            # Extract the clean answer
            answer_match = re.search(r'\{.*\}', final_response.text, re.DOTALL)
            if answer_match:
                return answer_match.group(0)
            else:
                return json.dumps({
                    "final_answer": final_response.text.strip(),
                    "task_id": task_id,
                    "reasoning_steps": results
                })
        except Exception as e:
            logger.exception("Processing failed")
            return json.dumps({
                "task_id": task_id,
                "error": str(e),
                "final_answer": f"SYSTEM ERROR: {str(e)}"
            })
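As a rough smoke test of the tool layer: the helpers are plain string-in/string-out functions that never touch self.llm, so they can be exercised without loading flan-t5-xxl. The sketch below is only illustrative; it assumes this file is importable as app with its dependencies installed, and that re, numpy, pandas and requests are imported in the file's first four lines, which this hunk does not show.

# sketch only: assumes app.py is importable as `app` and its dependencies are installed
from app import GAIAThoughtProcessor
import json

# The tool helpers do not use self.llm, so unbound calls are enough for a quick check.
print(GAIAThoughtProcessor._math_solver(None, "2+2*3"))               # -> "8"
print(GAIAThoughtProcessor._text_processor(None, "GAIA", "reverse"))  # -> "AIAG"

# process_question() is meant to hand the runner a JSON string shaped roughly like this:
example = {"final_answer": "8", "task_id": "task_0001", "reasoning_steps": ["2+2*3: 8"]}
print(json.dumps(example, ensure_ascii=False))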
# === FIXED EVALUATION SYSTEM ===
class GAIAEvaluationRunner:
    def __init__(self, api_url: str = DEFAULT_API_URL):
        self.api_url = api_url
        self.questions_url = f"{api_url}/questions"
        self.submit_url = f"{api_url}/submit"
        self.session = requests.Session()
        self.session.headers.update({
            "Accept": "application/json",
            "User-Agent": "GAIA-Mastermind/1.0",
            "Content-Type": "application/json"
        })
        logger.info("🌐 Инициализирован GAIAEvaluationRunner для %s", api_url)

    def run_evaluation(self, agent, username: str, agent_code: str, progress=tqdm):
        # Fetch the questions
        questions, status = self._fetch_questions()
        if status != "success":
            return status, 0, 0, pd.DataFrame()

        # Process the questions
        results = []
        answers = []
        for i, q in enumerate(progress(questions, desc="🧠 Processing GAIA")):
            try:
                # GAIA-specific: task_id is required
                task_id = q.get("task_id", f"unknown_{i}")

                # Process the question
                json_response = agent.process_question(q["question"], task_id)

                # Parse and validate the answer
                try:
                    response_obj = json.loads(json_response)
                    final_answer = response_obj.get("final_answer", "")

                    # GAIA requirement: the answer must be a string
                    if not isinstance(final_answer, str):
                        final_answer = str(final_answer)
                except json.JSONDecodeError:
                    final_answer = json_response

                # Build the answer entry for the GAIA API
                answers.append({
                    "task_id": task_id,
                    "answer": final_answer[:500]  # GAIA limitation
                })

                # Record the result
                results.append({
                    "Task ID": task_id,
                    "Question": q["question"][:150] + "..." if len(q["question"]) > 150 else q["question"],
                    "Answer": final_answer[:200],
                    "Status": "Processed"
                })
            except Exception as e:
                logger.error("Task %s failed: %s", task_id, e)
                answers.append({
                    "task_id": task_id,
                    "answer": f"ERROR: {str(e)}"
                })
                results.append({
                    "Task ID": task_id,
                    "Question": "Error",
                    "Answer": f"ERROR: {str(e)}",
                    "Status": "Failed"
                })

        # Submit the answers
        submission_result, score = self._submit_answers(username, agent_code, answers)
        return submission_result, score, len(questions), pd.DataFrame(results)

    def _fetch_questions(self) -> Tuple[list, str]:
        """Fetches questions, handling GAIA API status codes."""
        for _ in range(API_RETRIES):
            try:
                response = self.session.get(
                    self.questions_url,
                    timeout=API_TIMEOUT
                )

                # Handle GAIA status codes
                if response.status_code == 200:
                    questions = response.json()
                    if not isinstance(questions, list):
                        return [], "Invalid response format: expected list"

                    # Enrich the data for multimodal tasks
                    for q in questions:
                        q.setdefault("task_id", f"id_{hash(q['question']) % 100000}")
                        if "image" in q:
                            q["question"] = f"[IMAGE] {q['question']}"
                    return questions, "success"

                elif response.status_code == 429:
                    logger.warning("Rate limited, retrying...")
                    time.sleep(5)
                    continue

                elif response.status_code == 404:
                    return [], "API endpoint not found"

                else:
                    return [], f"API error: HTTP {response.status_code}"

            except Exception as e:
                logger.error("Fetch error: %s", e)
                return [], f"Connection error: {str(e)}"

        return [], "API unavailable after retries"

    def _submit_answers(self, username: str, agent_code: str, answers: list) -> Tuple[str, int]:
        """Submits answers according to the GAIA API specification."""
        payload = {
            "username": username.strip(),
            "agent_code": agent_code.strip(),
            "answers": answers
        }

        for attempt in range(API_RETRIES):
            try:
                response = self.session.post(
                    self.submit_url,
                    json=payload,
                    timeout=API_TIMEOUT * 2
                )

                # Handle GAIA status codes
                if response.status_code == 200:
                    result = response.json()
                    score = result.get("score", 0)
                    return result.get("message", "Answers submitted"), score

                elif response.status_code == 400:
                    error = response.json().get("error", "Invalid request")
                    logger.error("Validation error: %s", error)
                    return f"Validation Error: {error}", 0

                elif response.status_code == 429:
                    logger.warning("Rate limited, retrying...")
                    time.sleep(10)
                    continue

                else:
                    return f"HTTP Error {response.status_code}", 0

            except Exception as e:
                logger.error("Submit error: %s", e)
                return f"Connection Error: {str(e)}", 0

        return "Submission failed after retries", 0
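The runner encodes assumptions about the scoring API's wire format; nothing beyond what the code itself reads and writes is confirmed by this diff, so the shapes below are a sketch inferred from those calls, with placeholder values.

# What _fetch_questions() expects back from GET {DEFAULT_API_URL}/questions:
questions_response = [
    {"task_id": "task_0001", "question": "How many ...?"},  # an "image" key is optional
]

# What _submit_answers() sends to POST {DEFAULT_API_URL}/submit:
submit_payload = {
    "username": "your-hf-username",                               # placeholder
    "agent_code": "https://huggingface.co/spaces/<user>/<space>", # placeholder
    "answers": [{"task_id": "task_0001", "answer": "42"}],
}

# Fields it reads from an HTTP 200 response (score defaults to 0 when absent):
submit_response = {"message": "Answers submitted", "score": 0}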
# === OPTIMIZED INTERFACE ===
def run_evaluation(username: str, agent_code: str, progress=gr.Progress()):
    progress(0, desc="⚡ Инициализация GAIA Mastermind...")
    try:
        agent = GAIAThoughtProcessor()
    except Exception as e:
        logger.exception("Agent initialization failed")
        return f"Agent Error: {str(e)}", 0, 0, pd.DataFrame()

    progress(0.1, desc="🌐 Подключение к GAIA API...")
    runner = GAIAEvaluationRunner()

    # tqdm-style wrapper for Gradio progress
    class ProgressWrapper:
        def __init__(self, total, progress):
            self.total = total
            self.progress = progress
            self.current = 0

        def update(self, n=1):
            self.current += n
            self.progress(self.current / self.total, desc=f"🧠 Обработка задач ({self.current}/{self.total})")

        def __iter__(self):
            return self

        def __next__(self):
            if self.current >= self.total:
                raise StopIteration
            return self.current

    return runner.run_evaluation(
        agent,
        username,
        agent_code,
        progress=ProgressWrapper
    )
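The handler's return signature has to line up, in order, with the four output components wired up in the UI below. A quick sketch of that contract, with placeholder values only:

import pandas as pd

# run_evaluation() returns (status_message, score, total_questions, results_df),
# consumed in order by [result_output, correct_output, total_output, results_table].
status_message = "Answers submitted"
score = 0
total_questions = 20
results_df = pd.DataFrame(
    [{"Task ID": "task_0001", "Question": "How many ...?", "Answer": "42", "Status": "Processed"}]
)
outputs = (status_message, score, total_questions, results_df)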
# === INTELLIGENT INTERFACE ===
with gr.Blocks(
    title="🧠 GAIA Mastermind",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {background: linear-gradient(135deg, #1a2a6c, #2c5364)}
    .dark {color: #f0f0f0}
    """
) as demo:
    gr.Markdown("""
    <div style="text-align:center; background: linear-gradient(135deg, #0f2027, #203a43);
                padding: 20px; border-radius: 15px; color: white; box-shadow: 0 10px 20px rgba(0,0,0,0.3);">
        <h1>🧠 GAIA Mastermind</h1>
        <h3>Многошаговое решение задач с Tree-of-Thought</h3>
        <p>Соответствует спецификации GAIA API v1.2</p>
    </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 🔐 Авторизация")
            username = gr.Textbox(
                label="HF Username",
                value="yoshizen",
                info="Ваше имя пользователя Hugging Face"
            )
            agent_code = gr.Textbox(
                label="Agent Code",
                value="https://huggingface.co/spaces/yoshizen/FinalTest",
                info="URL вашего агента"
            )
            run_btn = gr.Button("🚀 Запустить оценку", variant="primary", scale=1)

            gr.Markdown("### ⚙️ Статус системы")
            sys_info = gr.Textbox(label="Системная информация", interactive=False)

        with gr.Column(scale=2):
            gr.Markdown("### 📊 Результаты GAIA")
            with gr.Row():
                result_output = gr.Textbox(
                    label="Статус отправки",
                    interactive=False,
                    max_lines=3
                )
                correct_output = gr.Number(
                    label="✅ Правильные ответы",
                    interactive=False
                )
                total_output = gr.Number(
                    label="📚 Всего вопросов",
                    interactive=False
                )

            with gr.Row():
                results_table = gr.Dataframe(
                    label="🔍 Детализация ответов",
                    headers=["Task ID", "Question", "Answer", "Status"],
                    interactive=False,
                    wrap=True,
                    overflow_row_behaviour="paginate",
                    height=400,
                    column_widths=["15%", "35%", "40%", "10%"]
                )

    # System information
    def get_system_info():
        return (
            f"Device: {'GPU ✅' if torch.cuda.is_available() else 'CPU ⚠️'}, "
            f"Model: {MODEL_NAME}, "
            f"API: {DEFAULT_API_URL}"
        )

    demo.load(get_system_info, inputs=None, outputs=sys_info)

    run_btn.click(
        fn=run_evaluation,
        inputs=[username, agent_code],
        outputs=[result_output, correct_output, total_output, results_table],
        concurrency_limit=1,
        show_progress="minimal",
        api_name="run_evaluation"
    )

if __name__ == "__main__":
    demo.queue(
        max_size=5,
        api_open=False
    ).launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True,
        debug=False
    )
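Finally, a minimal sketch of driving the same pipeline headlessly, without the Gradio UI, for a local dry run. It assumes app.py is importable as app from the working directory, that downloading and 4-bit loading flan-t5-xxl is acceptable on the machine at hand, and that the username and agent_code values are placeholders to be replaced.

# headless_run.py - sketch only; assumes app.py is importable as `app`
from app import GAIAThoughtProcessor, GAIAEvaluationRunner

agent = GAIAThoughtProcessor()    # downloads/loads flan-t5-xxl; a GPU is assumed for 4-bit loading
runner = GAIAEvaluationRunner()

# The default progress=tqdm iterator is used here, matching the runner's signature.
message, score, total, table = runner.run_evaluation(
    agent,
    username="your-hf-username",                                # placeholder
    agent_code="https://huggingface.co/spaces/<user>/<space>",  # placeholder
)
print(message, f"{score}/{total}")
print(table.to_string(index=False))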