DragonProgrammer committed on
Commit
6e0603f
·
verified ·
1 Parent(s): d3b4a4f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -2
app.py CHANGED
@@ -149,8 +149,34 @@ def safe_calculator(expression: str) -> str:
149
  print(f"Error during calculation for '{expression}': {e}")
150
  return f"Error calculating '{expression}': Invalid expression or calculation error ({e})."
151
 
152
- # --- Basic Agent Definition ---
153
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  # --- Agent Definition using HfAgent ---
155
  class HfAgentWrapper:
156
  def __init__(self):
 
149
  print(f"Error during calculation for '{expression}': {e}")
150
  return f"Error calculating '{expression}': Invalid expression or calculation error ({e})."
151
 
152
# --- Custom Agent to Force Correct Behavior ---
class MyCustomHfAgent(HfAgent):
    """
    HfAgent subclass that overrides text generation to always run the local
    pipeline, instead of letting the base class attempt a faulty web request.
    """

    def generate_one(self, prompt: str, stop: list):
        """Generate a single completion for *prompt*, halting on *stop* sequences.

        The local-pipeline code path is executed unconditionally, regardless
        of what the wrapped LLM reports about itself. On failure, an error
        string (not an exception) is returned to the caller.
        """
        print("--- INSIDE CUSTOM HfAgent's generate_one method ---")

        # This is the crucial check. We're logging what the agent thinks its state is.
        # getattr with a default is equivalent to the hasattr ternary: it yields the
        # attribute when present, otherwise the diagnostic placeholder string.
        is_pipeline = getattr(self.llm, "is_hf_pipeline", "LLM has no is_hf_pipeline attr")
        print(f"--> self.llm.is_hf_pipeline is: {is_pipeline}")

        # Regardless of what the agent thinks, we KNOW we gave it a pipeline.
        # So, we will force it to execute the code path for local pipelines.
        print("--> Forcing execution of the local pipeline path...")

        try:
            # Mirrors the 'if self.llm.is_hf_pipeline:' branch of the original
            # Agent.generate_one method: preprocess the prompt, run the local
            # pipeline, then post-process its raw output.
            prepared_prompt = self.llm.processor.process_prompt(
                prompt, **self.tokenizer_kwargs
            )
            raw_outputs = self.llm.pipeline(
                prepared_prompt, stop_sequence=stop, **self.generate_kwargs
            )
            return self.llm.processor.process_outputs(raw_outputs, stop_sequence=stop)
        except Exception as e:
            print(f"--- ERROR during forced pipeline execution: {e} ---")
            traceback.print_exc()
            # If this fails, we return an error string.
            return f"Error during custom pipeline execution: {e}"
180
  # --- Agent Definition using HfAgent ---
181
  class HfAgentWrapper:
182
  def __init__(self):