Zhofang committed
Commit af19c28 · verified · 1 Parent(s): c0308f7

Update app.py

Files changed (1)
  1. app.py +65 -178
app.py CHANGED
@@ -8,131 +8,77 @@ from deep_translator import GoogleTranslator

 # os.makedirs('assets', exist_ok=True)
 if not os.path.exists('icon.jpg'):
-    print("Downloading icon.jpg...")
-    try:
-        # Use a more robust way to download, requests is already imported
-        response = requests.get("https://i.pinimg.com/564x/64/49/88/644988c59447eb00286834c2e70fdd6b.jpg", stream=True)
-        response.raise_for_status() # Raise an exception for HTTP errors
-        with open('icon.jpg', 'wb') as f:
-            for chunk in response.iter_content(chunk_size=8192):
-                f.write(chunk)
-        print("Icon downloaded successfully.")
-    except requests.exceptions.RequestException as e:
-        print(f"Error downloading icon.jpg: {e}. Please ensure you have internet access or place icon.jpg manually.")
-        # As a fallback, you might want to skip using the icon or use a placeholder
-        # For now, the app will proceed and might show a broken image if icon.jpg is missing.
-
 API_URL_DEV = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
 API_URL = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
 timeout = 100

-def query(prompt, negative_prompt_text, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, huggingface_api_key_ui=None, use_dev=False):
     # Determine which API URL to use
     api_url = API_URL_DEV if use_dev else API_URL

-    # Determine the API Token to use
-    # Priority: 1. UI input, 2. Environment variable HF_READ_TOKEN
-    auth_token = None
-    if huggingface_api_key_ui and huggingface_api_key_ui.strip(): # Check if UI key is provided and not just whitespace
-        auth_token = huggingface_api_key_ui.strip()
-        print("Using API key provided in the UI.")
-    else:
-        auth_token = os.getenv("HF_READ_TOKEN")
-        if auth_token:
-            print("Using API key from HF_READ_TOKEN environment variable.")
-        else:
-            # If neither is available, raise an error.
-            raise gr.Error("Hugging Face API Key is required. Please provide it in the 'Hugging Face API Key' field or set the HF_READ_TOKEN environment variable.")

-    headers = {"Authorization": f"Bearer {auth_token}"}

-    if not prompt or not prompt.strip(): # Check if prompt is None, empty, or just whitespace
-        # Optionally, return a placeholder or a message instead of None
-        # For now, returning None as per original logic for empty prompt
-        gr.Warning("Prompt cannot be empty.")
-        return None, seed # Return seed as well to match output structure

     key = random.randint(0, 999)

-    # Translate prompt if it seems to be in Russian (simple check, can be improved)
-    # For simplicity, let's assume Russian if it contains Cyrillic characters
-    try:
-        # A more robust check might be needed, but this is a common heuristic
-        if any('\u0400' <= char <= '\u04FF' for char in prompt):
-            translated_prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
-            print(f'\033[1mGeneration {key} RU->EN translation:\033[0m {translated_prompt}')
-            prompt = translated_prompt
-        else:
-            print(f'\033[1mGeneration {key} using EN prompt (no translation needed).\033[0m')
-    except Exception as e:
-        print(f"Error during translation: {e}. Using original prompt.")
-        # Fallback to original prompt if translation fails

-    # Augment the prompt
-    augmented_prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
-    print(f'\033[1mGeneration {key} final prompt:\033[0m {augmented_prompt}')

     # If seed is -1, generate a random seed and use it
-    current_seed = seed
-    if current_seed == -1:
-        current_seed = random.randint(1, 1000000000)
-
-    # Note: The 'sampler' variable is passed to this function but not used in the payload.
-    # The custom API might handle sampler selection server-side or not support it.
-    # The 'is_negative' key in payload might be expecting the negative_prompt_text.
-    # Assuming the custom API expects negative prompt text under the 'is_negative' key.
-    # If it expects a boolean, this part needs adjustment.
     payload = {
-        "inputs": augmented_prompt,
-        "is_negative": negative_prompt_text, # This sends the text of negative_prompt
         "steps": steps,
         "cfg_scale": cfg_scale,
-        "seed": current_seed,
         "strength": strength
     }

-    print(f"Sending payload to {api_url}: {payload}") # For debugging
-
-    try:
-        response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
-        response.raise_for_status() # This will raise an HTTPError for bad responses (4xx or 5xx)
-    except requests.exceptions.Timeout:
-        raise gr.Error(f"Request timed out after {timeout} seconds. The model might be too busy or the server is slow.")
-    except requests.exceptions.HTTPError as e:
-        print(f"Error: Failed to get image. Response status: {e.response.status_code}")
-        print(f"Response content: {e.response.text}")
-        if e.response.status_code == 503:
-            raise gr.Error(f"{e.response.status_code}: Service Unavailable. The model might be loading or overloaded. Please try again later.")
-        elif e.response.status_code == 401:
-            raise gr.Error(f"{e.response.status_code}: Unauthorized. Please check your API Key.")
-        elif e.response.status_code == 400:
-            raise gr.Error(f"{e.response.status_code}: Bad Request. Please check your prompt and parameters. Details: {e.response.text[:200]}") # Show first 200 chars of error
-        else:
-            raise gr.Error(f"API Error: {e.response.status_code}. Details: {e.response.text[:200]}")
-    except requests.exceptions.RequestException as e: # Catch other network errors
-        raise gr.Error(f"A network error occurred: {e}")
-
-
     try:
         image_bytes = response.content
         image = Image.open(io.BytesIO(image_bytes))
-        print(f'\033[1mGeneration {key} completed!\033[0m ({augmented_prompt})')

         # Save the image to a file and return the file path and seed
-        os.makedirs("outputs", exist_ok=True) # Ensure output directory exists
-        output_path = f"./outputs/output_{key}_{current_seed}.png" # Include seed in filename for uniqueness
         image.save(output_path)

-        return output_path, current_seed
     except Exception as e:
-        print(f"Error processing image response: {e}")
-        print(f"Response content that caused error: {response.content[:500]}") # Log first 500 bytes
-        raise gr.Error(f"Failed to process the image from API. The API might have returned an unexpected response. Details: {str(e)}")
-

 css = """
 #app-container {
-    max-width: 700px; /* Slightly wider for better layout */
     margin-left: auto;
     margin-right: auto;
 }
@@ -140,110 +86,51 @@ css = """
     display: flex;
     align-items: center;
     justify-content: center;
-    margin-bottom: 10px; /* Add some space below title */
 }
 #title-icon {
-    width: 32px;
     height: auto;
-    margin-right: 10px;
 }
 #title-text {
-    font-size: 24px;
     font-weight: bold;
 }
-.gr-box { /* Ensure accordion and other boxes have some padding */
-    padding: 10px;
-}
 """

 with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
     gr.HTML("""
-    <div style="text-align: center; margin-bottom: 20px;">
        <div id="title-container">
-            <img id="title-icon" src="file/icon.jpg" alt="Icon"> <!-- Use file/ prefix for local files in Gradio -->
            <h1 id="title-text">FLUX Capacitor</h1>
        </div>
-        <p>Generate images using FLUX models. Provide your API key or ensure HF_READ_TOKEN is set.</p>
-    </div>
    """)

    with gr.Column(elem_id="app-container"):
        with gr.Row():
-            with gr.Column(scale=3): # Give more space to prompt
-                text_prompt = gr.Textbox(
-                    label="Prompt",
-                    placeholder="Enter your prompt here (Russian will be auto-translated)",
-                    lines=3, # Increased lines for prompt
-                    elem_id="prompt-text-input"
-                )
-                negative_prompt = gr.Textbox(
-                    label="Negative Prompt",
-                    placeholder="What should not be in the image",
-                    value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
-                    lines=2, # Increased lines for negative prompt
-                    elem_id="negative-prompt-text-input"
-                )
-            with gr.Column(scale=2): # Settings column
-                huggingface_api_key = gr.Textbox(
-                    label="Hugging Face API Key (optional)",
-                    placeholder="Uses HF_READ_TOKEN env var if empty",
-                    type="password",
-                    elem_id="api-key"
-                )
-                use_dev = gr.Checkbox(label="Use Dev API (FLUX.1-dev)", value=False, elem_id="use-dev-checkbox")
-
-
-        with gr.Accordion("Advanced Generation Settings", open=False):
-            with gr.Row():
-                steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
-                cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.5) # Allow 0.5 steps
-            with gr.Row():
-                # Sampler is not currently used in the payload. If your API supports it, add it to the payload.
-                sampler_method = gr.Radio(
-                    label="Sampling method (Note: Not sent to API)",
-                    value="DPM++ 2M Karras",
-                    choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
-                    # info="This setting is currently for UI only and not passed to the backend API."
-                )
-            strength = gr.Slider(label="Strength (e.g., for img2img)", value=0.7, minimum=0, maximum=1, step=0.01) # Finer steps
-            seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2147483647, step=1) # Max 32-bit signed int

        with gr.Row():
-            text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button", scale=1)
-
-        gr.Markdown("### Output")
        with gr.Row():
-            image_output = gr.Image(type="filepath", label="Generated Image", elem_id="gallery") # Use filepath for saved images
-            seed_output = gr.Textbox(label="Seed Used", interactive=False, elem_id="seed-output") # interactive=False as it's an output

-        # Ensure the order of inputs matches the function signature of query
-        text_button.click(
-            query,
-            inputs=[
-                text_prompt,
-                negative_prompt, # This is passed as `negative_prompt_text`
-                steps,
-                cfg,
-                sampler_method, # Passed as `sampler`
-                seed,
-                strength,
-                huggingface_api_key, # Passed as `huggingface_api_key_ui`
-                use_dev
-            ],
-            outputs=[image_output, seed_output]
-        )
-
-# To run this:
-# 1. Make sure 'gradio', 'requests', 'Pillow', 'deep_translator' are installed:
-#    pip install gradio requests Pillow deep_translator
-# 2. Optionally, set the HF_READ_TOKEN environment variable:
-#    export HF_READ_TOKEN="your_hf_api_token_here" (Linux/macOS)
-#    set HF_READ_TOKEN="your_hf_api_token_here" (Windows CMD)
-#    $env:HF_READ_TOKEN="your_hf_api_token_here" (Windows PowerShell)
-# 3. Run the script: python your_script_name.py
-
-if __name__ == "__main__":
-    # For local Gradio image serving, Gradio needs to know where the 'icon.jpg' is.
-    # If it's in the same directory, 'file/icon.jpg' should work.
-    # If you have an 'assets' folder, it would be 'file/assets/icon.jpg'.
-    app.launch(show_api=True, share=False)
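The removed version resolves the bearer token (UI field first, then the HF_READ_TOKEN environment variable) and posts a JSON payload with the keys "inputs", "is_negative", "steps", "cfg_scale", "seed" and "strength" to the proxy endpoint. Below is a minimal sketch of exercising that same endpoint outside Gradio, assuming the proxy accepts this payload and returns raw image bytes as app.py expects; the generate() helper and its defaults are illustrative and not part of the commit.

import io
import os

import requests
from PIL import Image

# Endpoint and payload keys are taken from app.py; everything else here is illustrative.
API_URL = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"

def generate(prompt, token=None, seed=42):
    # Same token fallback order as the removed query(): explicit argument, then env var.
    token = token or os.getenv("HF_READ_TOKEN")
    if not token:
        raise RuntimeError("Provide a token or set HF_READ_TOKEN.")
    payload = {
        "inputs": f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.",
        "is_negative": "blurry, deformed",  # the removed code passes negative-prompt text here
        "steps": 35,
        "cfg_scale": 7,
        "seed": seed,
        "strength": 0.7,
    }
    response = requests.post(API_URL, headers={"Authorization": f"Bearer {token}"}, json=payload, timeout=100)
    response.raise_for_status()  # assumes the proxy returns raw image bytes on success
    return Image.open(io.BytesIO(response.content))

if __name__ == "__main__":
    generate("a red bicycle in the rain").save("output_test.png")

The new version of app.py, as committed, follows.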
 
 # os.makedirs('assets', exist_ok=True)
 if not os.path.exists('icon.jpg'):
+    os.system("wget -O icon.jpg https://i.pinimg.com/564x/64/49/88/644988c59447eb00286834c2e70fdd6b.jpg")
 API_URL_DEV = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
 API_URL = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
 timeout = 100

+def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, huggingface_api_key=None, use_dev=False):
     # Determine which API URL to use
     api_url = API_URL_DEV if use_dev else API_URL

+    # Check if the request is an API call by checking for the presence of the huggingface_api_key
+    is_api_call = huggingface_api_key is not None

+    if is_api_call:
+        # Use the environment variable for the API key in GUI mode
+        API_TOKEN = os.getenv("HF_READ_TOKEN")
+        headers = {"Authorization": f"Bearer {API_TOKEN}"}
+    else:
+        # Validate the API key if it's an API call
+        if huggingface_api_key == "":
+            raise gr.Error("API key is required for API calls.")
+        headers = {"Authorization": f"Bearer {huggingface_api_key}"}

+    if prompt == "" or prompt is None:
+        return None

     key = random.randint(0, 999)

+    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
+    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

+    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

     # If seed is -1, generate a random seed and use it
+    if seed == -1:
+        seed = random.randint(1, 1000000000)
+
     payload = {
+        "inputs": prompt,
+        "is_negative": is_negative,
         "steps": steps,
         "cfg_scale": cfg_scale,
+        "seed": seed,
         "strength": strength
     }

+    response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
+    if response.status_code != 200:
+        print(f"Error: Failed to get image. Response status: {response.status_code}")
+        print(f"Response content: {response.text}")
+        if response.status_code == 503:
+            raise gr.Error(f"{response.status_code} : The model is being loaded")
+        raise gr.Error(f"{response.status_code}")
+
     try:
         image_bytes = response.content
         image = Image.open(io.BytesIO(image_bytes))
+        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')

         # Save the image to a file and return the file path and seed
+        output_path = f"./output_{key}.png"
         image.save(output_path)

+        return output_path, seed
     except Exception as e:
+        print(f"Error when trying to open the image: {e}")
+        return None, None

 css = """
 #app-container {
+    max-width: 600px;
     margin-left: auto;
     margin-right: auto;
 }
 
     display: flex;
     align-items: center;
     justify-content: center;
 }
 #title-icon {
+    width: 32px; /* Adjust the width of the icon as needed */
     height: auto;
+    margin-right: 10px; /* Space between icon and title */
 }
 #title-text {
+    font-size: 24px; /* Adjust font size as needed */
     font-weight: bold;
 }
 """

 with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
     gr.HTML("""
+    <center>
        <div id="title-container">
+            <img id="title-icon" src="icon.jpg" alt="Icon">
            <h1 id="title-text">FLUX Capacitor</h1>
        </div>
+    </center>
    """)

    with gr.Column(elem_id="app-container"):
        with gr.Row():
+            with gr.Column(elem_id="prompt-container"):
+                with gr.Row():
+                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
+                with gr.Row():
+                    with gr.Accordion("Advanced Settings", open=False):
+                        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
+                        steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
+                        cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
+                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
+                        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
+                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
+                        huggingface_api_key = gr.Textbox(label="Hugging Face API Key (required for API calls)", placeholder="Enter your Hugging Face API Key here", type="password", elem_id="api-key")
+                        use_dev = gr.Checkbox(label="Use Dev API", value=False, elem_id="use-dev-checkbox")

        with gr.Row():
+            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")

        with gr.Row():
+            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
+            seed_output = gr.Textbox(label="Seed Used", elem_id="seed-output")

+        # Adjust the click function to include the API key and use_dev as inputs
+        text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, huggingface_api_key, use_dev], outputs=[image_output, seed_output])
+
+app.launch(show_api=True, share=False)
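In the committed version, text_button.click() passes each component listed in inputs to query() positionally and spreads the returned (output_path, seed) tuple across image_output and seed_output. Below is a minimal, self-contained sketch of that Blocks wiring pattern, with a hypothetical echo() callback standing in for query(); it makes no API call and generates no image.

import gradio as gr

def echo(prompt, seed):
    # Return values fill the `outputs` list below in order, just like query() in app.py.
    return f"would generate: {prompt!r}", seed

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
    run = gr.Button("Run", variant="primary")
    text_out = gr.Textbox(label="Result")
    seed_out = gr.Textbox(label="Seed Used")
    run.click(echo, inputs=[prompt, seed], outputs=[text_out, seed_out])

if __name__ == "__main__":
    demo.launch()

The number and order of values returned by the callback must match the outputs list, which is why query() returns (output_path, seed) on success.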