cs2010 committed
Commit 6645da1 · verified · 1 Parent(s): f9d5c41

Update app.py

Files changed (1):
  1. app.py +42 -2
app.py CHANGED
@@ -40,6 +40,46 @@ def get_random_joke() -> str:
     data = response.json()
     return f"{data.get('setup')} - {data.get('punchline')}"
 
+@tool
+def generate_flux_image(prompt: str, width: int = 1024, height: int = 1024, guidance_scale: float = 3.5, num_inference_steps: int = 28) -> str:
+    """Generates an image using FLUX.1 text-to-image model.
+
+    Args:
+        prompt: Text description of the image to generate
+        width: Width of the generated image (default: 1024)
+        height: Height of the generated image (default: 1024)
+        guidance_scale: How closely the image should follow the prompt (default: 3.5)
+        num_inference_steps: Number of denoising steps (default: 28)
+    """
+    try:
+        from gradio_client import Client
+        import tempfile
+        import os
+
+        # Create a client for the FLUX model
+        client = Client("black-forest-labs/FLUX.1-dev")
+
+        # Call the model to generate an image
+        result = client.predict(
+            prompt=prompt,
+            seed=0,
+            randomize_seed=True,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            api_name="/infer"
+        )
+
+        # The result is typically a path to an image
+        image_path = result
+
+        # You could return the path or handle the image as needed
+        return f"Image successfully generated based on prompt: '{prompt}'. Image path: {image_path}"
+
+    except Exception as e:
+        return f"Error generating image: {str(e)}"
+
 final_answer = FinalAnswerTool()
 
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
@@ -55,14 +95,14 @@ custom_role_conversions=None,
 
 
 # Import tool from Hub
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+#image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer,get_current_time_in_timezone,get_random_joke,image_generation_tool], ## add your tools here (don't remove final answer)
+    tools=[final_answer,get_current_time_in_timezone,get_random_joke,generate_flux_image], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
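
For reference, the new generate_flux_image tool is a thin wrapper around the public black-forest-labs/FLUX.1-dev Space. Below is a minimal standalone sketch of the same gradio_client call: the "/infer" endpoint name and keyword arguments mirror the diff above, while the exact shape of the returned value (a bare file path versus a tuple that also carries the seed) depends on the Space and is treated as an assumption here.

from gradio_client import Client

# Ask the same Space the tool uses; parameter names mirror the committed defaults.
client = Client("black-forest-labs/FLUX.1-dev")
result = client.predict(
    prompt="A watercolor lighthouse at dusk",
    seed=0,
    randomize_seed=True,
    width=768,
    height=768,
    guidance_scale=3.5,
    num_inference_steps=28,
    api_name="/infer",
)

# The committed tool treats `result` as an image path. If the Space instead
# returns a (path, seed)-style tuple, unpack it before using it as a path
# (an assumption worth checking against the Space's API page).
image_path = result[0] if isinstance(result, (tuple, list)) else result
print(image_path)

With generate_flux_image wired into the tools list in place of the Hub-loaded agents-course/text-to-image tool, a request such as agent.run("Generate an image of a lighthouse at dusk") should route image generation through this new tool.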