Raxephion committed
Commit dae4606 · verified · 1 Parent(s): af49f2c

Update app.py

Files changed (1):
  1. app.py +149 -149
app.py CHANGED
@@ -1,12 +1,12 @@
  # -*- coding: utf-8 -*-
  """
- Hugging Face Spaces Script for Basic Stable Diffusion 1.5 Gradio App
- Adapted from user's local script and HF Spaces template.
- Supports Hub models and CPU/GPU selection based on available hardware.
  """
 
  import gradio as gr
- import numpy as np
  import random
  import torch
  from diffusers import StableDiffusionPipeline
@@ -45,6 +45,12 @@ DEFAULT_HUB_MODELS = [
  # Add other diffusers-compatible SD1.5 models here
  ]
 
  # --- Determine available devices and set up options ---
  # This logic is from the user's script and works well for Spaces
  AVAILABLE_DEVICES = ["CPU"]
@@ -92,6 +98,7 @@ INITIAL_MODEL_ID = DEFAULT_HUB_MODELS[0] if DEFAULT_HUB_MODELS else None
  if INITIAL_MODEL_ID:
      print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...")
      try:
          current_pipeline = StableDiffusionPipeline.from_pretrained(
              INITIAL_MODEL_ID,
              torch_dtype=initial_dtype_to_use,
@@ -146,131 +153,106 @@ def infer(
      progress=gr.Progress(track_tqdm=True), # Added progress argument from template
  ):
      """Generates an image using the selected model and parameters on the chosen device."""
-     global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP
-
-     # Check if initial load failed
-     if current_pipeline is None and model_identifier != INITIAL_MODEL_ID:
-         # Try loading the selected model if initial load failed or model is different
-         pass # Logic below handles loading
-
-     if not model_identifier or model_identifier == "No models found":
-         raise gr.Error(f"No model selected or available. Please select a model from the list.")
-     if not prompt:
-         raise gr.Error("Please enter a prompt.")
-
-     # Map selected device string to PyTorch device string
-     device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
-     # If GPU was selected but not available, raise an error specific to this condition
-     if selected_device_str == "GPU" and device_to_use == "cpu":
-         raise gr.Error("GPU selected but CUDA is not available to PyTorch on this Space. Please select CPU or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.")
-
-     # Determine dtype based on the actual device being used
-     dtype_to_use = torch.float32 # Default
-     if device_to_use == "cuda":
-         if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
-             dtype_to_use = torch.float16
-         else:
-             dtype_to_use = torch.float32
-     else:
-         dtype_to_use = torch.float32
-
-
-     print(f"Attempting generation on device: {device_to_use}, using dtype: {dtype_to_use}")
 
-     # 1. Load/Switch Model if necessary
-     # Check if the requested model identifier OR the requested device has changed
-     # Use string comparison for current_device_loaded as it's a torch.device object
      if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != device_to_use):
-         print(f"Loading model: {model_identifier} onto {device_to_use}...")
-         # Clear previous pipeline to potentially free memory *before* loading the new one
-         if current_pipeline is not None:
-             print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...")
-             # Move pipeline to CPU before deleting if it was on GPU, might help with freeing VRAM
-             if str(current_device_loaded) == "cuda":
                  try:
-                     current_pipeline.to("cpu")
-                     print("Moved previous pipeline to CPU.")
-                 except Exception as move_e:
-                     print(f"Warning: Failed to move previous pipeline to CPU: {move_e}")
-
-             del current_pipeline
-             current_pipeline = None # Set to None immediately
-             # Attempt to clear CUDA cache if using GPU (from the previous device)
-             if str(current_device_loaded) == "cuda":
-                 try:
-                     torch.cuda.empty_cache()
-                     print("Cleared CUDA cache.")
-                 except Exception as cache_e:
-                     print(f"Warning: Error clearing CUDA cache: {cache_e}")
-
-         # Ensure the device is actually available if not CPU (redundant with earlier check but safe)
-         if device_to_use == "cuda":
-             if not torch.cuda.is_available():
-                 raise gr.Error("CUDA selected but not available. Please select CPU.")
-
-         try:
-             # For Spaces, assume model_identifier is a Hub ID or a path *within the repo*
-             # from_pretrained can handle both.
-             print(f"Attempting to load model from: {model_identifier}")
-             pipeline = StableDiffusionPipeline.from_pretrained(
-                 model_identifier,
-                 torch_dtype=dtype_to_use,
-                 safety_checker=None, # <<< SAFETY CHECKER DISABLED <<<
-                 # Add `vae=AutoencoderKL.from_pretrained(...)` if needed for specific models
-             )
-
-             pipeline = pipeline.to(device_to_use) # Move to the selected device
-
-             current_pipeline = pipeline
-             current_model_id = model_identifier
-             current_device_loaded = torch.device(device_to_use)
-
-             # Basic check for SD1.x architecture (cross_attention_dim = 768)
-             unet_config = getattr(pipeline, 'unet', None)
-             if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
-                 cross_attn_dim = unet_config.config.cross_attention_dim
-                 if cross_attn_dim != 768:
-                     warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model "
-                                    f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
-                                    "Results may be unexpected or generation might fail.")
-                     print(warning_msg)
-                     gr.Warning(warning_msg)
-                 else:
-                     print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
-             else:
-                 print("Could not check UNet cross_attention_dim.")
-
-
-             print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {dtype_to_use}.")
 
-         except Exception as e:
-             # Reset global state on load failure
-             current_pipeline = None
-             current_model_id = None
-             current_device_loaded = None
-             print(f"Error loading model '{model_identifier}': {e}")
-             error_message_lower = str(e).lower()
-             # Provide more specific error messages based on common exceptions
-             if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower:
-                 raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check ID/path or internet connection. Error: {e}")
-             elif "checkpointsnotfounderror" in error_message_lower or "valueerror: could not find a valid model structure" in error_message_lower:
-                 raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it's a diffusers format ID/path. Error: {e}")
-             elif "out of memory" in error_message_lower:
-                 raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}")
-             elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower:
-                 raise gr.Error(f"CUDA/GPU Driver/Installation Error on Space: {e}. Check Space hardware or select CPU.")
-             elif "safetensors_rust.safetensorserror" in error_message_lower or "oserror: cannot load" in error_message_lower or "filenotfounderror" in error_message_lower:
-                 raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.")
-             elif "could not import" in error_message_lower or "module not found" in error_message_lower:
-                 raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.")
-             else:
-                 raise gr.Error(f"Failed to load model '{model_identifier}': {e}")
 
      # Check if pipeline is successfully loaded before proceeding
      if current_pipeline is None:
          raise gr.Error("Model failed to load. Cannot generate image.")
 
-
      # 2. Configure Scheduler
      selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
      if selected_scheduler_class is None:
@@ -322,26 +304,41 @@ def infer(
 
 
      # 4. Set Seed Generator
-     generator = None
      # The generator device needs to match the pipeline device
      generator_device = current_pipeline.device # Must match the pipeline device
 
      if randomize_seed: # Use the randomize_seed checkbox
-         seed = random.randint(0, MAX_SEED) # Re-randomize seed
          print(f"Randomizing seed to: {seed}")
      else:
          # Use provided seed if not randomizing (-1 will still use it)
          print(f"Using provided seed: {int(seed)}")
 
 
      try:
          # Explicitly move generator to the desired device
-         generator = torch.Generator(device=generator_device).manual_seed(int(seed))
-         print(f"Generator set with seed {int(seed)} on device: {generator_device}")
      except Exception as e:
-         print(f"Warning: Error setting seed generator on device {generator_device}: {e}. Generation might still proceed with a default generator (potentially on CPU).")
-         gr.Warning(f"Failed to set seed generator on device {generator_device}. Generation might use a random seed on a different device. Error: {e}")
          generator = None # Let pipeline handle random seed if generator creation fails or device mismatch
 
      # 5. Generate Image
      # Ensure required parameters are integers/floats
@@ -354,7 +351,7 @@ def infer(
      if width <= 0 or height <= 0:
          raise ValueError("Image width and height must be positive.")
 
-     print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={int(seed)}, Device={device_to_use}, Dtype={dtype_to_use}")
      start_time = time.time()
 
      try:
@@ -366,7 +363,7 @@ def infer(
              guidance_scale=guidance_scale_float,
              width=width,
              height=height,
-             generator=generator,
              # Pass progress object for tqdm tracking in Gradio
              callback_steps=max(1, num_inference_steps_int // 20), # Update progress bar periodically
              callback=lambda step, timestep, latents: progress((step / num_inference_steps_int, f"Step {step}/{num_inference_steps_int}")),
@@ -381,8 +378,11 @@ def infer(
          print(f"Generation finished in {end_time - start_time:.2f} seconds.")
          generated_image = output.images[0]
 
          # Return both the image and the seed (potentially randomized)
-         return generated_image, seed
 
      except gr.Error as e:
          # Re-raise Gradio errors directly
@@ -423,8 +423,16 @@ else:
      initial_model_choices = model_choices
      # Set a reasonable default if available
      initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID else "No models found"
-     model_dropdown_interactive = True if INITIAL_MODEL_ID else False
 
 
  scheduler_choices = list(SCHEDULER_MAP.keys())
 
@@ -449,8 +457,10 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from
      # Add a note about model loading time
      if INITIAL_MODEL_ID:
          gr.Markdown(f"*(Note: The initial model '{INITIAL_MODEL_ID}' is loading... First generation might take longer.)*")
      else:
-         gr.Markdown(f"*(Note: No initial model configured or loaded. Select a model from the dropdown to start.)*")
 
 
      with gr.Row():
@@ -487,8 +497,9 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from
          )
          # Combine seed input and randomize checkbox
          with gr.Row():
-             seed_input = gr.Number(label="Seed", value=0, precision=0, interactive=True) # Use 0 as default, interactive initially
-             randomize_seed_checkbox = gr.Checkbox(label="Randomize seed (-1 equivalent)", value=True)
 
 
          generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1) # Added emojis
@@ -529,25 +540,14 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from
      )
 
      # Add examples from template
-     # Ensure examples match the input types and order of the infer function
-     # Examples inputs: [prompt, neg_prompt, seed, randomize_seed, width, height, cfg_scale, steps]
-     # Note: Size and Scheduler are not easily handled in standard examples, they'll use defaults.
-     # Let's adjust the example inputs to match the infer function's first few parameters
-     example_prompts = [
-         ["Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", None, 0, True, "512x512", "Euler", 7.5, 30],
-         ["An astronaut riding a green horse", None, 0, True, "512x512", "Euler", 7.5, 30],
-         ["A delicious ceviche cheesecake slice", None, 0, True, "512x512", "Euler", 7.5, 30],
-     ]
-     # Update example inputs to match the infer function parameters
-     # [model_identifier, selected_device_str, prompt, negative_prompt, steps, cfg_scale, scheduler_name, size, seed, randomize_seed]
-     # Need to add dummy values for model, device, steps, cfg, scheduler, size for examples
-     # Let's simplify examples to just prompt/neg_prompt for typical template usage
      template_examples = [
          "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
          "An astronaut riding a green horse",
          "A delicious ceviche cheesecake slice",
      ]
-     # Map template examples to the input components: [prompt_input]
      gr.Examples(examples=template_examples, inputs=[prompt_input])
 
@@ -559,7 +559,7 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from
      1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space.
      2. Choose your processing device (GPU recommended if available).
      3. Enter your positive and optional negative prompts.
-     4. Adjust advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. Seed -1 or the "Randomize seed" checkbox will use a random seed.
      5. Click "Generate Image".
      The first generation with a new model/device might take some time to load.
      """ # Removed notes about local models and batch files
@@ -576,7 +576,7 @@ if __name__ == "__main__":
      print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}")
      print(f"Default device selected by app: {DEFAULT_DEVICE}")
      if current_pipeline:
-         print(f"Initial model '{current_model_id}' loaded successfully.")
      else:
          print("No initial model loaded. Check model list and network connectivity.")
 
app.py (updated file)

  # -*- coding: utf-8 -*-
  """
+ CipherCore SD1.5 Image Generator, FAST CPU INFERENCE
+ Raxephion @2025
+
  """
 
  import gradio as gr
+ import numpy as np # Needed for np.iinfo
  import random
  import torch
  from diffusers import StableDiffusionPipeline
 
  # Add other diffusers-compatible SD1.5 models here
  ]
 
+ # --- Constants for Gradio UI / Generation ---
+ MAX_SEED = np.iinfo(np.int32).max # Defines the maximum seed value
+ # MAX_IMAGE_SIZE = 1024 # From the template; not used outside the slider maximum
+ MAX_IMAGE_SIZE_SLIDER = 1024 # Renamed to avoid confusion; used only for the slider max
+
  # --- Determine available devices and set up options ---
  # This logic is from the user's script and works well for Spaces
  AVAILABLE_DEVICES = ["CPU"]
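
A quick illustration of what the new constant buys: seeds stay inside the signed 32-bit range that RNG seed fields commonly accept. A minimal sketch, assuming only the numpy import added above (the pick_seed helper is illustrative, not part of the commit):

    import numpy as np
    import random

    MAX_SEED = np.iinfo(np.int32).max  # 2**31 - 1, the largest signed 32-bit value

    def pick_seed(seed: int, randomize: bool) -> int:
        """Illustrative helper: resolve the seed to use from the UI inputs."""
        if randomize:
            return random.randint(0, MAX_SEED)  # draw a fresh random seed in range
        return int(seed)  # otherwise trust the value from the seed widget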
 
  if INITIAL_MODEL_ID:
      print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...")
      try:
+         # Load the pipeline onto the initial device and dtype
          current_pipeline = StableDiffusionPipeline.from_pretrained(
              INITIAL_MODEL_ID,
              torch_dtype=initial_dtype_to_use,
 
      progress=gr.Progress(track_tqdm=True), # Added progress argument from template
  ):
      """Generates an image using the selected model and parameters on the chosen device."""
+     global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP, MAX_SEED
 
+     # Check if initial load failed or model is different
      if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != device_to_use):
+         # This check runs before the main parameter parsing, so the device/dtype for loading
+         # have to be determined here first.
+         temp_device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
+         temp_dtype_to_use = torch.float32
+         if temp_device_to_use == "cuda":
+             if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
+                 temp_dtype_to_use = torch.float16
+             else:
+                 temp_dtype_to_use = torch.float32
+         else:
+             temp_dtype_to_use = torch.float32
+
+         # Now proceed with actual model loading based on the determined device/dtype
+         print(f"Loading model: {model_identifier} onto {temp_device_to_use} with dtype {temp_dtype_to_use}...")
+         # Clear previous pipeline to potentially free memory *before* loading the new one
+         if current_pipeline is not None:
+             print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...")
+             if str(current_device_loaded) == "cuda":
+                 try:
+                     current_pipeline.to("cpu")
+                     print("Moved previous pipeline to CPU.")
+                 except Exception as move_e:
+                     print(f"Warning: Failed to move previous pipeline to CPU: {move_e}")
+             del current_pipeline
+             current_pipeline = None # Set to None immediately
+             if str(current_device_loaded) == "cuda":
                  try:
+                     torch.cuda.empty_cache()
+                     print("Cleared CUDA cache.")
+                 except Exception as cache_e:
+                     print(f"Warning: Error clearing CUDA cache: {cache_e}")
+
+         # Ensure the device is actually available if not CPU (redundant with earlier check but safe)
+         if temp_device_to_use == "cuda":
+             if not torch.cuda.is_available():
+                 raise gr.Error("CUDA selected but not available to PyTorch on this Space. Please select CPU or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.")
+
+         try:
+             pipeline = StableDiffusionPipeline.from_pretrained(
+                 model_identifier,
+                 torch_dtype=temp_dtype_to_use, # Use the determined dtype for loading
+                 safety_checker=None,
+             )
+             pipeline = pipeline.to(temp_device_to_use) # Use the determined device
+
+             current_pipeline = pipeline
+             current_model_id = model_identifier
+             current_device_loaded = torch.device(temp_device_to_use) # Store the actual device object
+
+             # Basic check for SD1.x architecture
+             unet_config = getattr(pipeline, 'unet', None)
+             if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
+                 cross_attn_dim = unet_config.config.cross_attention_dim
+                 if cross_attn_dim != 768:
+                     warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model "
+                                    f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
+                                    "Results may be unexpected or generation might fail.")
+                     print(warning_msg)
+                     gr.Warning(warning_msg)
+                 else:
+                     print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
+             else:
+                 print("Could not check UNet cross_attention_dim.")
+
+             print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {temp_dtype_to_use}.")
+
+         except Exception as e:
+             current_pipeline = None
+             current_model_id = None
+             current_device_loaded = None
+             print(f"Error loading model '{model_identifier}': {e}")
+             error_message_lower = str(e).lower()
+             if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower:
+                 raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check ID/path or internet connection. Error: {e}")
+             elif "checkpointsnotfounderror" in error_message_lower or "valueerror: could not find a valid model structure" in error_message_lower:
+                 raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it's a diffusers format ID/path. Error: {e}")
+             elif "out of memory" in error_message_lower:
+                 raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}")
+             elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower:
+                 raise gr.Error(f"CUDA/GPU Driver/Installation Error on Space: {e}. Check Space hardware or select CPU.")
+             elif "safetensors_rust.safetensorserror" in error_message_lower or "oserror: cannot load" in error_message_lower or "filenotfounderror" in error_message_lower:
+                 raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.")
+             elif "could not import" in error_message_lower or "module not found" in error_message_lower:
+                 raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.")
+             else:
+                 raise gr.Error(f"Failed to load model '{model_identifier}': {e}")
 
+     # Re-determine device_to_use and dtype_to_use *after* ensuring the pipeline is loaded;
+     # they should match current_device_loaded and the pipeline's dtype
+     device_to_use = str(current_pipeline.device) if current_pipeline else ("cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu")
+     dtype_to_use = current_pipeline.dtype if current_pipeline else torch.float32 # Fallback if somehow pipeline is still None
 
      # Check if pipeline is successfully loaded before proceeding
      if current_pipeline is None:
          raise gr.Error("Model failed to load. Cannot generate image.")
 
      # 2. Configure Scheduler
      selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
      if selected_scheduler_class is None:
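
One observation on this hunk: the reload condition compares against device_to_use, which in this version appears to be assigned only a few lines further down, so on a warm pipeline the comparison could hit a NameError. A minimal sketch of computing the target device/dtype once, up front, under that assumption (resolve_device_and_dtype is an illustrative name, not in the commit):

    import torch

    def resolve_device_and_dtype(selected_device_str, available_devices):
        """Illustrative helper: map the UI device choice to a torch device string and dtype."""
        device = "cuda" if selected_device_str == "GPU" and "GPU" in available_devices else "cpu"
        dtype = torch.float32
        if device == "cuda" and torch.cuda.is_available():
            # fp16 is generally safe on compute capability 7.0+ (Volta and newer)
            if torch.cuda.get_device_capability(0)[0] >= 7:
                dtype = torch.float16
        return device, dtype

    # device_to_use, dtype_to_use = resolve_device_and_dtype(selected_device_str, AVAILABLE_DEVICES)
    # ...after which the "reload needed?" check can safely compare str(current_device_loaded) != device_to_use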
 
      # 4. Set Seed Generator
      # The generator device needs to match the pipeline device
      generator_device = current_pipeline.device # Must match the pipeline device
 
      if randomize_seed: # Use the randomize_seed checkbox
+         seed = random.randint(0, MAX_SEED) # Re-randomize seed using MAX_SEED
          print(f"Randomizing seed to: {seed}")
      else:
          # Use provided seed if not randomizing (-1 will still use it)
+         # Interpretation: if randomize_seed is True, override the seed input with a random value;
+         # if it is False, always use the value from the seed input as-is.
          print(f"Using provided seed: {int(seed)}")
 
      try:
+         # Ensure seed is an integer for the generator
+         seed_int = int(seed)
          # Explicitly move generator to the desired device
+         generator = torch.Generator(device=generator_device).manual_seed(seed_int)
+         print(f"Generator set with seed {seed_int} on device: {generator_device}")
      except Exception as e:
+         # Handle potential issues like non-integer seed input
+         print(f"Warning: Error setting seed generator with seed {seed} on device {generator_device}: {e}. Falling back to default generator (potentially on CPU) and using a potentially different seed.")
+         gr.Warning(f"Failed to set seed generator with seed {seed}. Using random seed. Error: {e}")
          generator = None # Let pipeline handle random seed if generator creation fails or device mismatch
+         # If generator creation fails, the pipeline falls back to its own RNG, so the seed actually
+         # used may differ from the one reported; we report the seed we attempted to use.
 
      # 5. Generate Image
      # Ensure required parameters are integers/floats
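
For reference, a minimal standalone sketch of the seeded-generator pattern used in this hunk (shapes and values are illustrative):

    import torch

    device = "cpu"   # or "cuda"; must match the device the pipeline lives on
    seed = 12345

    # A torch.Generator created on the pipeline's device gives reproducible latents for that device
    generator = torch.Generator(device=device).manual_seed(seed)
    noise = torch.randn((1, 4, 64, 64), generator=generator, device=device)  # same seed -> same noise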
 
      if width <= 0 or height <= 0:
          raise ValueError("Image width and height must be positive.")
 
+     print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={seed_int if generator else 'Random (Generator Failed)'}, Device={device_to_use}, Dtype={dtype_to_use}")
      start_time = time.time()
 
      try:
 
              guidance_scale=guidance_scale_float,
              width=width,
              height=height,
+             generator=generator, # Pass the generator (which might be None)
              # Pass progress object for tqdm tracking in Gradio
              callback_steps=max(1, num_inference_steps_int // 20), # Update progress bar periodically
              callback=lambda step, timestep, latents: progress((step / num_inference_steps_int, f"Step {step}/{num_inference_steps_int}")),
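
An aside on the progress wiring: newer diffusers releases deprecate the callback/callback_steps arguments in favour of callback_on_step_end, so if the Space pins a recent version this call may emit deprecation warnings. A hedged sketch of the newer form (make_step_callback is an illustrative helper, not part of the commit):

    def make_step_callback(progress, total_steps):
        """Illustrative factory: adapt a gr.Progress object to diffusers' callback_on_step_end API."""
        def on_step_end(pipe, step, timestep, callback_kwargs):
            progress((step / total_steps, f"Step {step}/{total_steps}"))
            return callback_kwargs  # diffusers expects the kwargs dict to be returned
        return on_step_end

    # usage sketch: current_pipeline(..., callback_on_step_end=make_step_callback(progress, num_inference_steps_int))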
 
          print(f"Generation finished in {end_time - start_time:.2f} seconds.")
          generated_image = output.images[0]
 
+         # Determine the seed to return: the one we attempted to use
+         actual_seed_used = seed_int if generator else -1 # Return -1 if the generator failed
+
          # Return both the image and the seed (potentially randomized)
+         return generated_image, actual_seed_used
 
      except gr.Error as e:
          # Re-raise Gradio errors directly
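
For context on the two return values: returning the image plus the seed that was actually used implies the click handler writes that seed back into the UI. A hedged sketch of such wiring (all component names besides generate_button, seed_input, randomize_seed_checkbox and prompt_input are assumptions; the real event binding is outside this diff):

    generate_button.click(
        fn=infer,
        inputs=[model_dropdown, device_radio, prompt_input, negative_prompt_input,
                steps_slider, cfg_slider, scheduler_dropdown, size_dropdown,
                seed_input, randomize_seed_checkbox],
        outputs=[output_image, seed_input],  # second return value updates the seed widget with the seed used
    )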
 
      initial_model_choices = model_choices
      # Set a reasonable default if available
      initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID else "No models found"
+     model_dropdown_interactive = True # Make it interactive if there's *any* model choice
 
+ # Ensure the initial default model is actually in the choices list if possible
+ if initial_default_model != "No models found" and initial_default_model not in initial_model_choices:
+     print(f"Warning: Initial default model '{initial_default_model}' is not in the model_choices list.")
+     if initial_model_choices and initial_model_choices[0] != "No models found":
+         initial_default_model = initial_model_choices[0]
+         print(f"Setting default model to first available choice: {initial_default_model}")
+     else:
+         initial_default_model = "No models found" # Fallback if no choices
 
  scheduler_choices = list(SCHEDULER_MAP.keys())
 
 
      # Add a note about model loading time
      if INITIAL_MODEL_ID:
          gr.Markdown(f"*(Note: The initial model '{INITIAL_MODEL_ID}' is loading... First generation might take longer.)*")
+     elif initial_default_model != "No models found":
+         gr.Markdown(f"*(Note: Loading model '{initial_default_model}' on first generation... This might take some time.)*")
      else:
+         gr.Markdown(f"*(Note: No models available. Add Hub IDs to DEFAULT_HUB_MODELS in the script.)*")
 
      with gr.Row():
 
          )
          # Combine seed input and randomize checkbox
          with gr.Row():
+             # Use MAX_SEED for slider max
+             seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, precision=0, interactive=True) # Use 0 as default, interactive initially
+             randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True) # Simplified label
 
          generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1) # Added emojis
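
One review note on the seed widget above: as far as I can tell, precision is an argument of gr.Number rather than gr.Slider, so depending on the Gradio version that kwarg may be ignored or rejected. A hedged sketch of the two usual alternatives:

    import gradio as gr
    import numpy as np

    MAX_SEED = np.iinfo(np.int32).max

    with gr.Blocks() as demo:
        # Option A: keep the slider and drop the precision kwarg (step=1 already forces integer values)
        seed_slider = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
        # Option B: use a Number box, where precision=0 is the documented way to force an integer
        seed_number = gr.Number(label="Seed", value=0, precision=0)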
 
      )
 
      # Add examples from template
+     # Ensure examples map to the correct input component indices
      template_examples = [
          "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
          "An astronaut riding a green horse",
          "A delicious ceviche cheesecake slice",
      ]
+     # Examples will only populate the first input they match.
+     # In this case, passing just a list of strings populates the first Textbox: prompt_input.
      gr.Examples(examples=template_examples, inputs=[prompt_input])
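
For completeness, a hedged sketch of how the examples would be shaped if they needed to fill more than one input (the negative_prompt_input name is an assumption; the commit only wires the prompt):

    import gradio as gr

    with gr.Blocks() as demo:
        prompt_input = gr.Textbox(label="Prompt")
        negative_prompt_input = gr.Textbox(label="Negative Prompt")  # assumed name, for illustration only

        # Each inner list supplies one value per component in `inputs`, in order
        gr.Examples(
            examples=[
                ["Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", "blurry, low quality"],
                ["An astronaut riding a green horse", ""],
            ],
            inputs=[prompt_input, negative_prompt_input],
        )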
 
      1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space.
      2. Choose your processing device (GPU recommended if available).
      3. Enter your positive and optional negative prompts.
+     4. Adjust advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. The "Randomize seed" checkbox will override the seed value in the input box.
      5. Click "Generate Image".
      The first generation with a new model/device might take some time to load.
      """ # Removed notes about local models and batch files
 
      print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}")
      print(f"Default device selected by app: {DEFAULT_DEVICE}")
      if current_pipeline:
+         print(f"Initial model '{current_model_id}' loaded successfully on {current_device_loaded}.")
      else:
          print("No initial model loaded. Check model list and network connectivity.")