Trial and error.
app.py
CHANGED
@@ -18,11 +18,24 @@ class BasicAgent:
         print("BasicAgent initialized.")
 
         print("Loading Qwen2.5-7B-Instruct model...")
-        self.model_name = "Qwen/Qwen2.5-
-
-
-
-
+        self.model_name = "Qwen/Qwen2.5-1.5B-Instruct"
+
+        # Load model and tokenizer
+        try:
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+            self.model = AutoModelForCausalLM.from_pretrained(
+                self.model_name,
+                torch_dtype="auto",
+                device_map="auto"
+            )
+            print(f"Successfully loaded {self.model_name}")
+        except Exception as e:
+            print(f"Error loading model: {e}")
+            # Fallback to HuggingFace Inference API if local loading fails
+            print("Falling back to InferenceClient")
+            self.client = InferenceClient(model=self.model_name)
+            self.tokenizer = None
+            self.model = None
 
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
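Two notes on this hunk. The removed assignment is cut off in the capture (presumably `Qwen/Qwen2.5-7B-Instruct`, judging by the unchanged `print` on line 20, which now logs a stale name since the new code loads the 1.5B checkpoint), and the four removed lines under it did not survive extraction at all. The new code itself is a common pattern: try a local `transformers` load, and fall back to the hosted Inference API if anything goes wrong. A minimal standalone sketch of that pattern, assuming `transformers`, `accelerate` (needed for `device_map="auto"`), and `huggingface_hub` are installed; names outside the diff are illustrative:

```python
from huggingface_hub import InferenceClient
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"

try:
    # Local path: download weights and run generation in-process.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype="auto",  # take fp16/bf16 from the checkpoint config
        device_map="auto",   # spread layers across available devices
    )
    client = None
except Exception as e:
    # Remote path: defer generation to the hosted Inference API.
    print(f"Local load failed ({e}); falling back to InferenceClient")
    tokenizer = model = None
    client = InferenceClient(model=MODEL_NAME)
```

Leaving the unused handles as `None` lets the call site branch on `model is None` instead of tracking a separate flag, which is exactly how `__call__` decides between the two paths in the next hunk.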
@@ -54,9 +67,8 @@ class BasicAgent:
 
             answer = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
         else:
-            # Fallback to Inference API
-
-            answer = response.choices[0].message.content
+            # Fallback to Inference API
+            answer = self.client.chat(messages=messages)
 
         print(f"Agent generated response (first 50 chars): {answer[:50]}...")
         return answer
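One caveat on the new fallback line: in `huggingface_hub`, `InferenceClient` exposes chat-style generation as `chat_completion`, which returns an OpenAI-style object, the same shape the removed `response.choices[0].message.content` line was unpacking (the middle removed line was lost in extraction). Whether `self.client.chat(messages=messages)` is callable, and whether it yields the plain string that the later `answer[:50]` slice assumes, depends on the library version. A hedged sketch of the documented call:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="Qwen/Qwen2.5-1.5B-Instruct")
messages = [{"role": "user", "content": "In one word, what colour is the sky?"}]

# chat_completion mirrors the OpenAI response shape:
# the generated text lives at choices[0].message.content.
response = client.chat_completion(messages=messages, max_tokens=64)
answer = response.choices[0].message.content
print(answer)
```

If `answer` must be a string, the `.choices[0].message.content` unpacking is still needed; `chat_completion` does not return raw text.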
@@ -192,7 +204,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
 # --- Build Gradio Interface using Blocks ---
 with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner
+    gr.Markdown("# Basic Agent Evaluation Runner")
     gr.Markdown(
         """
         **Instructions:**
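The last hunk reads as a pure syntax repair: as rendered, the removed line is missing its closing `")`, which would make app.py fail to parse before the interface ever builds (though the capture may simply have truncated it). With the call closed, the header is ordinary Blocks usage; a runnable sketch with placeholder instruction text:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")  # closing ") restored here
    gr.Markdown(
        """
        **Instructions:**
        (instruction text as in app.py)
        """
    )

demo.launch()
```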