Update app.py
app.py CHANGED
@@ -34,6 +34,32 @@ def get_current_time_in_timezone(timezone: str) -> str:
         return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
+# Import tool from Hub
+image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
+@tool
+def generate_image(prompt: str, num_images: int = 1, size: str = "512x512") -> list:
+    """Generate image(s) from a text prompt via HF text-to-image.
+    Args:
+        prompt: description of the image
+        num_images: how many to produce
+        size: image resolution (e.g. "512x512")
+    """
+    return image_generation_tool(prompt=prompt, num_images=num_images, size=size)
+
+@tool
+def duckduckgo_search(query: str, max_results: int = 5) -> str:
+    """Search DuckDuckGo for a query and return the top results.
+    Args:
+        query: the search terms
+        max_results: how many results to fetch
+    """
+    searcher = DuckDuckGoSearchTool()
+    results = searcher(query, max_results=max_results)
+    return results
+
+
+
 final_answer = FinalAnswerTool()
 
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
@@ -47,15 +73,14 @@ custom_role_conversions=None,
 )
 
 
-
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
 
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[final_answer, duckduckgo_search, get_current_time_in_timezone, my_custom_tool, generate_image], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,