Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -149,8 +149,34 @@ def safe_calculator(expression: str) -> str:
|
|
149 |
print(f"Error during calculation for '{expression}': {e}")
|
150 |
return f"Error calculating '{expression}': Invalid expression or calculation error ({e})."
|
151 |
|
152 |
-
# ---
|
153 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
154 |
# --- Agent Definition using HfAgent ---
|
155 |
class HfAgentWrapper:
|
156 |
def __init__(self):
|
|
|
149 |
print(f"Error during calculation for '{expression}': {e}")
|
150 |
return f"Error calculating '{expression}': Invalid expression or calculation error ({e})."
|
151 |
|
152 |
# --- Custom Agent to Force Correct Behavior ---
class MyCustomHfAgent(HfAgent):
    """Agent subclass that bypasses HfAgent's remote-generation path.

    Overrides ``generate_one`` so that text generation always runs through
    the locally supplied pipeline rather than attempting a (faulty) web
    request, regardless of what the base class believes about its LLM state.
    """

    def generate_one(self, prompt: str, stop: list):
        """Generate a single completion for *prompt* via the local pipeline.

        Args:
            prompt: The raw prompt text to feed the model.
            stop: Stop sequences that terminate generation.

        Returns:
            The processed model output string, or an error string if the
            forced local-pipeline execution raises.
        """
        print("--- INSIDE CUSTOM HfAgent's generate_one method ---")

        # Log what the agent believes its LLM state is; use getattr with a
        # default so a missing attribute yields the diagnostic marker string.
        pipeline_flag = getattr(self.llm, "is_hf_pipeline", "LLM has no is_hf_pipeline attr")
        print(f"--> self.llm.is_hf_pipeline is: {pipeline_flag}")

        # We know a local pipeline was provided, so take that code path
        # unconditionally rather than trusting the flag above.
        print("--> Forcing execution of the local pipeline path...")

        try:
            # Mirrors the 'if self.llm.is_hf_pipeline:' branch of the
            # original Agent.generate_one implementation.
            prepared = self.llm.processor.process_prompt(prompt, **self.tokenizer_kwargs)
            raw_outputs = self.llm.pipeline(prepared, stop_sequence=stop, **self.generate_kwargs)
            return self.llm.processor.process_outputs(raw_outputs, stop_sequence=stop)
        except Exception as e:
            # Surface the failure loudly, then degrade to an error string
            # instead of propagating the exception to the caller.
            print(f"--- ERROR during forced pipeline execution: {e} ---")
            traceback.print_exc()
            return f"Error during custom pipeline execution: {e}"
180 |
# --- Agent Definition using HfAgent ---
|
181 |
class HfAgentWrapper:
|
182 |
def __init__(self):
|