Update app.py
app.py CHANGED

@@ -311,15 +311,23 @@ Answer should be a short string, number, or comma-separated list. Keep it brief.
         )
         self.agent.prompt_templates["system_prompt"] = self.agent.prompt_templates["system_prompt"] + system_prompt
 
+    def _build_safe_prompt(self, history: str, question: str, max_total_tokens=32768, reserve_for_output=2048):
+        max_input_tokens = max_total_tokens - reserve_for_output
+        full_prompt = f"{self.system_prompt}\n{history}\nQuestion: {question}"
+        tokenized = self.tokenizer(full_prompt, truncation=True, max_length=max_input_tokens, return_tensors="pt")
+        return self.tokenizer.decode(tokenized["input_ids"][0])
+
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
+        history = ""  # could be conversation history, if available
+        safe_prompt = self._build_safe_prompt(history, question)
+        answer = self.agent.run(safe_prompt)
         answer = self.agent.run(question)
         print(f"Agent returning answer: {answer}")
         return answer
 
 
 
-
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,