yoshizen committed on
Commit
fa2995b
·
verified ·
1 Parent(s): 0721db2

Delete agent.py

Files changed (1)
  1. agent.py +0 -52
agent.py DELETED
@@ -1,52 +0,0 @@
- import torch
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
- class GAIAExpertAgent:
-     def __init__(self, model_name: str = "google/flan-t5-large"):
-         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-         self.model = AutoModelForSeq2SeqLM.from_pretrained(
-             model_name,
-             device_map="auto",
-             torch_dtype=torch.float16 if "cuda" in self.device else torch.float32
-         ).eval()
-
-     def __call__(self, question: str, task_id: str = None) -> dict:
-         """Generate an answer with GAIA-specific optimizations."""
-         try:
-             # Special-case handlers for GAIA-style questions
-             if "reverse" in question.lower():
-                 return self._handle_reverse_text(question)
-             if "how many" in question.lower():
-                 return self._handle_numerical(question)
-
-             # Standard processing
-             inputs = self.tokenizer(
-                 f"GAIA Question: {question}\nAnswer concisely:",
-                 return_tensors="pt",
-                 max_length=512,
-                 truncation=True
-             ).to(self.device)
-
-             outputs = self.model.generate(
-                 **inputs,
-                 max_new_tokens=50,
-                 num_beams=3,
-                 early_stopping=True
-             )
-
-             answer = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
-             return {"final_answer": answer.strip()}
-
-         except Exception as e:
-             return {"final_answer": f"Error: {str(e)}"}
-
-     def _handle_reverse_text(self, text: str) -> dict:
-         """Handle reversed-text questions (GAIA-specific)."""
-         return {"final_answer": text[::-1][:100]}
-
-     def _handle_numerical(self, question: str) -> dict:
-         """Extract numbers from the question and sum them."""
-         import re
-         numbers = re.findall(r'\d+', question)
-         return {"final_answer": str(sum(map(int, numbers))) if numbers else "42"}