sumityadav329 committed on
Commit b253396 · verified · 1 Parent(s): 2f5cdae
Files changed (1)
  1. app.py +90 -25
app.py CHANGED
@@ -2,9 +2,87 @@ import os
 import gradio as gr
 from PIL import Image
 import io
-from utils import query_hf_api
+import requests
+from typing import Optional, Tuple
 
-def generate_image(prompt: str) -> Image.Image:
+def load_environment():
+    """
+    Attempt to load environment variables with error handling.
+
+    Returns:
+        Optional[str]: Hugging Face Token or None
+    """
+    try:
+        from dotenv import load_dotenv
+        load_dotenv()
+    except ImportError:
+        print("python-dotenv not installed. Ensure HF_TOKEN is set in environment.")
+
+    return os.getenv("HF_TOKEN")
+
+def query_hf_api(
+    prompt: str,
+    model_url: str = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
+    max_retries: int = 3
+) -> Optional[bytes]:
+    """
+    Query the Hugging Face Inference API with robust error handling and retry mechanism.
+
+    Args:
+        prompt (str): Text prompt for image generation
+        model_url (str): URL of the Hugging Face model
+        max_retries (int): Maximum number of retry attempts
+
+    Returns:
+        Optional[bytes]: Generated image bytes or None
+    """
+    # Validate inputs
+    if not prompt or not prompt.strip():
+        raise ValueError("Prompt cannot be empty")
+
+    # Load token
+    HF_TOKEN = load_environment()
+    if not HF_TOKEN:
+        raise ValueError("Hugging Face token not found. Set HF_TOKEN in .env or environment variables.")
+
+    # Prepare headers
+    headers = {
+        "Authorization": f"Bearer {HF_TOKEN}",
+        "Content-Type": "application/json"
+    }
+
+    # Payload with additional configuration
+    payload = {
+        "inputs": prompt,
+        "parameters": {
+            "negative_prompt": "low quality, bad anatomy, blurry",
+            "num_inference_steps": 50,
+        }
+    }
+
+    # Retry mechanism
+    for attempt in range(max_retries):
+        try:
+            response = requests.post(
+                model_url,
+                headers=headers,
+                json=payload,
+                timeout=120  # 2-minute timeout
+            )
+
+            response.raise_for_status()  # Raise exception for bad status codes
+
+            return response.content
+
+        except requests.exceptions.RequestException as e:
+            print(f"Request error (Attempt {attempt + 1}/{max_retries}): {e}")
+
+            if attempt == max_retries - 1:
+                raise RuntimeError(f"Failed to generate image after {max_retries} attempts: {e}")
+
+    raise RuntimeError("Unexpected error in image generation")
+
+def generate_image(prompt: str) -> Tuple[Optional[Image.Image], str]:
     """
     Generate an image from a text prompt.
 
@@ -12,20 +90,24 @@ def generate_image(prompt: str) -> Image.Image:
         prompt (str): Text description for image generation
 
     Returns:
-        Image.Image: Generated PIL Image
+        Tuple[Optional[Image.Image], str]: Generated PIL Image and status message
     """
     try:
+        # Validate prompt
+        if not prompt or not prompt.strip():
+            return None, "Error: Prompt cannot be empty"
+
         # Generate image bytes
         image_bytes = query_hf_api(prompt)
 
         # Convert to PIL Image
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
 
-        return image
+        return image, "Image generated successfully!"
 
     except Exception as e:
         print(f"Image generation error: {e}")
-        return None
+        return None, f"Error: {str(e)}"
 
 def create_gradio_interface():
     """
@@ -52,23 +134,6 @@ def create_gradio_interface():
            lines=3
        )
 
-        # Advanced Options
-        with gr.Accordion("Advanced Options", open=False):
-            steps_slider = gr.Slider(
-                minimum=10,
-                maximum=100,
-                value=50,
-                step=1,
-                label="Inference Steps"
-            )
-            guidance_slider = gr.Slider(
-                minimum=1,
-                maximum=20,
-                value=7.5,
-                step=0.5,
-                label="Guidance Scale"
-            )
-
        # Generate Button
        generate_button = gr.Button("✨ Generate Image", variant="primary")
 
@@ -80,14 +145,14 @@
            interactive=False
        )
 
-        # Error Handling Output
-        error_output = gr.Textbox(label="Status", visible=False)
+        # Status Output
+        status_output = gr.Textbox(label="Status")
 
        # Event Handlers
        generate_button.click(
            fn=generate_image,
            inputs=[text_input],
-            outputs=[output_image, error_output],
+            outputs=[output_image, status_output],
            api_name="generate"
        )
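
For quick manual verification of the `generate` endpoint wired up via api_name="generate", a minimal client-side sketch (not part of this commit; the server URL, prompt, and the shape of the returned values are assumptions) using gradio_client:

from gradio_client import Client

# Assumed address of the locally running app; replace with the Space URL when deployed.
client = Client("http://127.0.0.1:7860")

# The endpoint has two outputs (the gr.Image and the status gr.Textbox), so a
# (local image filepath, status string) pair is expected back.
image_path, status = client.predict(
    "a watercolor lighthouse at dawn",
    api_name="/generate",
)
print(status)      # e.g. "Image generated successfully!" or an error message
print(image_path)  # path to the downloaded generated image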