Trial and error.
app.py CHANGED
@@ -38,6 +38,8 @@ class BasicAgent:
 
         # Generate response
         if self.model and self.tokenizer:
+            print("Using local model for generation...")
+
             # Local model generation
             text = self.tokenizer.apply_chat_template(
                 messages,
@@ -60,6 +62,8 @@ class BasicAgent:
 
             answer = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
         else:
+            print("Using Inference API for generation...")
+
             # Fallback to Inference API
             response = self.client.chat(messages=messages)
             answer = response.content
@@ -198,7 +202,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
 # --- Build Gradio Interface using Blocks ---
 with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner #")
+    gr.Markdown("# Basic Agent Evaluation Runner #2")
     gr.Markdown(
         """
         **Instructions:**
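The first hunk adds a progress print at the top of the local-model branch, but only the start of that branch is visible in the diff. As a point of reference, below is a minimal sketch of the usual apply_chat_template → generate → batch_decode round trip with the transformers API; the model name, the max_new_tokens value, and the prompt-stripping step are illustrative assumptions, not taken from app.py.

# Minimal sketch of a local-generation branch like the one edited above.
# Model name and generation settings are assumptions for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # hypothetical model choice
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

messages = [{"role": "user", "content": "What is the capital of France?"}]

# Render the chat template to a prompt string, as in the hunk above
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer([text], return_tensors="pt")

# Generate, then drop the prompt tokens so only the answer is decoded
output_ids = model.generate(**inputs, max_new_tokens=256)
generated_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(answer)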
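The second hunk mirrors the first: a print marking entry into the fallback branch. That branch calls self.client.chat(messages=messages) and reads response.content, but the client's construction is not part of the diff. If self.client is a thin wrapper around huggingface_hub's InferenceClient (an assumption; the wrapper class and model name below are hypothetical), it could look like this:

# Hypothetical wrapper giving InferenceClient the .chat()/.content shape
# that the fallback branch above expects; not shown anywhere in the diff.
from dataclasses import dataclass
from huggingface_hub import InferenceClient

@dataclass
class ChatResponse:
    content: str

class ApiClient:
    def __init__(self, model: str = "Qwen/Qwen2.5-7B-Instruct"):  # model name is an assumption
        self._client = InferenceClient(model=model)

    def chat(self, messages: list) -> ChatResponse:
        # chat_completion returns an OpenAI-style object; unwrap the first choice
        result = self._client.chat_completion(messages=messages, max_tokens=512)
        return ChatResponse(content=result.choices[0].message.content)

With self.client = ApiClient(), the two fallback lines in the hunk work unchanged.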
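The last hunk only renames the page heading to "# Basic Agent Evaluation Runner #2". For context, here is a stripped-down sketch of the Blocks layout being edited; the instructions body, button, and launch() call are assumptions, since the diff cuts off right after the opening of the second Markdown block.

# Stripped-down sketch of the Gradio UI around the changed heading.
# Body text, button label, and launch() are illustrative assumptions.
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner #2")
    gr.Markdown(
        """
        **Instructions:**
        (placeholder: the real instructions text is not shown in the diff)
        """
    )
    run_button = gr.Button("Run Evaluation & Submit All Answers")  # hypothetical label

if __name__ == "__main__":
    demo.launch()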