Raxephion committed on
Commit
af49f2c
·
verified ·
1 Parent(s): 5d50e61

Update app.py

Files changed (1)
  1. app.py +550 -113
app.py CHANGED
@@ -1,62 +1,434 @@
 import gradio as gr
 import numpy as np
 import random
-
-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
 import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
 if torch.cuda.is_available():
-    torch_dtype = torch.float16
 else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-
-# @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
 ):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
 css = """
 #col-container {
     margin: 0 auto;
@@ -64,91 +436,156 @@ css = """
 }
 """
 
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
             )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
             )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=2,  # Replace with defaults that work for your model
-                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
     gr.on(
-        triggers=[run_button.click, prompt.submit],
         fn=infer,
         inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
         ],
-        outputs=[result, seed],
     )
 
 if __name__ == "__main__":
-    demo.launch()
+# -*- coding: utf-8 -*-
+"""
+Hugging Face Spaces Script for Basic Stable Diffusion 1.5 Gradio App
+Adapted from user's local script and HF Spaces template.
+Supports Hub models and CPU/GPU selection based on available hardware.
+"""
+
 import gradio as gr
 import numpy as np
 import random
 import torch
+from diffusers import StableDiffusionPipeline
+# Import commonly used schedulers
+from diffusers import DDPMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler
+import os    # Kept for potential checks, though local paths are less standard on Spaces
+# import spaces  # [uncomment to use ZeroGPU if needed; typically not for standard GPU usage]
+from PIL import Image
+import time  # Optional: for timing generation
+# huggingface_hub is used implicitly by from_pretrained
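+# A plausible minimal requirements.txt for these imports (an illustration, not a
+# file included in this diff):
+#   gradio
+#   torch
+#   diffusers
+#   transformers   # StableDiffusionPipeline needs it for the text encoder/tokenizer
+#   accelerate     # commonly recommended to speed up from_pretrained loading
+#   safetensors
+#   Pillow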
 
+# --- Configuration ---
+# MODELS_DIR is less relevant for Spaces unless specifically placing models in the repo;
+# on Spaces, models are primarily loaded via their Hugging Face Hub IDs.
+SUPPORTED_SD15_SIZES = ["512x512", "768x512", "512x768", "768x768", "1024x768", "768x1024", "1024x1024", "hire.fix"]
+MAX_SEED = np.iinfo(np.int32).max  # Upper bound for randomized seeds (used by the "Randomize seed" option below)
+
+# Mapping of friendly scheduler names to their diffusers classes
+SCHEDULER_MAP = {
+    "Euler": EulerDiscreteScheduler,
+    "DPM++ 2M": DPMSolverMultistepScheduler,
+    "DDPM": DDPMScheduler,
+    "LMS": LMSDiscreteScheduler,
+    # Add more as needed from diffusers.schedulers (make sure they are imported)
+}
+DEFAULT_SCHEDULER = "Euler"  # Default scheduler on startup
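+# The map can be extended with any other stock diffusers scheduler, for example
+# (illustrative, not part of this commit):
+#   from diffusers import HeunDiscreteScheduler
+#   SCHEDULER_MAP["Heun"] = HeunDiscreteScheduler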
+
+# List of popular Stable Diffusion 1.5 models on the Hugging Face Hub.
+# For Spaces, this is the primary source of models.
+DEFAULT_HUB_MODELS = [
+    "runwayml/stable-diffusion-v1-5",
+    "SG161222/Realistic_Vision_V6.0_B1_noVAE",  # Example popular 1.5 model
+    "nitrosocke/Ghibli-Diffusion",
+    "danyloylo/sd1.5-ghibli-style-05",
+    "Bilal326/SD_1.5_DragonWarriorV2",
+    # "CompVis/stable-diffusion-v1-4",  # Example SD 1.4 model (might behave slightly differently)
+    # Add other diffusers-compatible SD1.5 models here
+]
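+# Note on caching (general Hub behavior): from_pretrained stores downloads locally
+# (by default under ~/.cache/huggingface/hub), so switching back to a previously
+# used model does not re-download it.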
+
+# --- Determine available devices and set up options ---
+# This logic is from the user's script and works well for Spaces
+AVAILABLE_DEVICES = ["CPU"]
 if torch.cuda.is_available():
+    AVAILABLE_DEVICES.append("GPU")
+    print(f"CUDA available. Found {torch.cuda.device_count()} GPU(s).")
+    if torch.cuda.device_count() > 0:
+        print(f"Using GPU 0: {torch.cuda.get_device_name(0)}")
+else:
+    print("CUDA not available. Running on CPU.")
+
+# Default device preference: GPU if available, else CPU
+DEFAULT_DEVICE = "GPU" if "GPU" in AVAILABLE_DEVICES else "CPU"
+
+# Set the initial PyTorch device string based on detection
+initial_device_to_use = "cuda" if DEFAULT_DEVICE == "GPU" else "cpu"
+print(f"Initial pipeline will load on device: {initial_device_to_use}")
+
+# Determine the initial dtype.
+# Note: fp16 is generally faster and uses less VRAM on compatible GPUs.
+initial_dtype_to_use = torch.float32  # Default
+if initial_device_to_use == "cuda":
+    # Check if the GPU supports fp16 well (compute capability 7.0+; most modern GPUs do)
+    if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
+        initial_dtype_to_use = torch.float16
+        print("Detected GPU supports FP16, will attempt using torch.float16.")
+    else:
+        initial_dtype_to_use = torch.float32  # Fallback
+        print("Detected GPU might not fully support FP16 or capability check failed, using torch.float32.")
 else:
+    initial_dtype_to_use = torch.float32  # CPU requires float32
+
+print(f"Initial dtype: {initial_dtype_to_use}")
+
 
+# --- Global state for the loaded pipeline ---
+# We load the *initial* pipeline once on startup and keep it in memory.
+# Subsequent model changes reload the pipeline within the infer function.
+current_pipeline = None
+current_model_id = None       # Keeps track of the currently loaded model identifier
+current_device_loaded = None  # Keeps track of the device the pipeline is currently on
 
+# Initial model to load on startup
+INITIAL_MODEL_ID = DEFAULT_HUB_MODELS[0] if DEFAULT_HUB_MODELS else None
+if INITIAL_MODEL_ID:
+    print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...")
+    try:
+        current_pipeline = StableDiffusionPipeline.from_pretrained(
+            INITIAL_MODEL_ID,
+            torch_dtype=initial_dtype_to_use,
+            safety_checker=None,  # <<< SAFETY CHECKER DISABLED <<<
+        )
+        current_pipeline = current_pipeline.to(initial_device_to_use)
+        current_model_id = INITIAL_MODEL_ID
+        current_device_loaded = torch.device(initial_device_to_use)
+        print(f"Initial model loaded successfully on {current_device_loaded}.")
+
+        # Basic check for SD1.x architecture
+        unet_config = getattr(current_pipeline, 'unet', None)
+        if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
+            cross_attn_dim = unet_config.config.cross_attention_dim
+            if cross_attn_dim != 768:
+                warning_msg = (f"Warning: Loaded model '{INITIAL_MODEL_ID}' might not be a standard SD 1.x model "
+                               f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
+                               "Results may be unexpected.")
+                print(warning_msg)
+                # gr.Warning(warning_msg)  # Cannot raise a Gradio warning during startup load
+            else:
+                print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
+        else:
+            print("Could not check UNet cross_attention_dim for initial model.")
+
+    except Exception as e:
+        current_pipeline = None
+        current_model_id = None
+        current_device_loaded = None
+        print(f"Error loading initial model '{INITIAL_MODEL_ID}': {e}")
+        print("Application will start, but image generation may fail if the initial model cannot be loaded.")
+        # Cannot raise gr.Error here as Gradio is not fully initialized
+else:
+    print("\nNo default Hub models defined. Application will start without a loaded model.")
+    print("Please select a model from the dropdown to enable generation.")
 
 
+# --- Image Generation Function (adapted to the Hugging Face Spaces 'infer' signature) ---
+# @spaces.GPU  # [uncomment if using ZeroGPU; otherwise standard torch device handling applies]
 def infer(
+    model_identifier,      # From model_dropdown
+    selected_device_str,   # From device_dropdown
+    prompt,                # From prompt_input
+    negative_prompt,       # From negative_prompt_input
+    steps,                 # From steps_slider
+    cfg_scale,             # From cfg_slider
+    scheduler_name,        # From scheduler_dropdown
+    size,                  # From size_dropdown
+    seed,                  # From seed_input
+    randomize_seed,        # From randomize_seed_checkbox
+    progress=gr.Progress(track_tqdm=True),  # Progress argument from the template
 ):
+    """Generate an image using the selected model and parameters on the chosen device."""
+    global current_pipeline, current_model_id, current_device_loaded
 
+    # If the initial load failed, or a different model was selected, the
+    # load/switch logic below handles (re)loading the pipeline.
+
+    if not model_identifier or model_identifier == "No models found":
+        raise gr.Error("No model selected or available. Please select a model from the list.")
+    if not prompt:
+        raise gr.Error("Please enter a prompt.")
 
+    # Map the selected device string to a PyTorch device string
+    device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
+    # If GPU was selected but is not available, raise an error specific to this condition
+    if selected_device_str == "GPU" and device_to_use == "cpu":
+        raise gr.Error("GPU selected but CUDA is not available to PyTorch on this Space. Please select CPU, or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.")
 
+    # Determine dtype based on the actual device being used
+    dtype_to_use = torch.float32  # Default
+    if device_to_use == "cuda":
+        if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
+            dtype_to_use = torch.float16
+        else:
+            dtype_to_use = torch.float32
+    else:
+        dtype_to_use = torch.float32
+
+    print(f"Attempting generation on device: {device_to_use}, using dtype: {dtype_to_use}")
+
+    # 1. Load/Switch Model if necessary.
+    # Reload when the requested model identifier OR the requested device has changed.
+    # current_device_loaded is a torch.device object, so compare it as a string.
+    if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != device_to_use):
+        print(f"Loading model: {model_identifier} onto {device_to_use}...")
+        # Clear the previous pipeline to potentially free memory *before* loading the new one
+        if current_pipeline is not None:
+            print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...")
+            # Move the pipeline to CPU before deleting if it was on GPU; this might help free VRAM
+            if str(current_device_loaded) == "cuda":
+                try:
+                    current_pipeline.to("cpu")
+                    print("Moved previous pipeline to CPU.")
+                except Exception as move_e:
+                    print(f"Warning: Failed to move previous pipeline to CPU: {move_e}")
+
+            del current_pipeline
+            current_pipeline = None  # Set to None immediately
+            # Attempt to clear the CUDA cache if the previous pipeline was on GPU
+            if str(current_device_loaded) == "cuda":
+                try:
+                    torch.cuda.empty_cache()
+                    print("Cleared CUDA cache.")
+                except Exception as cache_e:
+                    print(f"Warning: Error clearing CUDA cache: {cache_e}")
+
+        # Ensure the device is actually available if not CPU (redundant with the earlier check, but safe)
+        if device_to_use == "cuda":
+            if not torch.cuda.is_available():
+                raise gr.Error("CUDA selected but not available. Please select CPU.")
+
+        try:
+            # For Spaces, assume model_identifier is a Hub ID or a path *within the repo*;
+            # from_pretrained can handle both.
+            print(f"Attempting to load model from: {model_identifier}")
+            pipeline = StableDiffusionPipeline.from_pretrained(
+                model_identifier,
+                torch_dtype=dtype_to_use,
+                safety_checker=None,  # <<< SAFETY CHECKER DISABLED <<<
+                # Add vae=AutoencoderKL.from_pretrained(...) if needed for specific models
+            )
+
+            pipeline = pipeline.to(device_to_use)  # Move to the selected device
+
+            current_pipeline = pipeline
+            current_model_id = model_identifier
+            current_device_loaded = torch.device(device_to_use)
+
+            # Basic check for SD1.x architecture (cross_attention_dim == 768)
+            unet_config = getattr(pipeline, 'unet', None)
+            if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
+                cross_attn_dim = unet_config.config.cross_attention_dim
+                if cross_attn_dim != 768:
+                    warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model "
+                                   f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
+                                   "Results may be unexpected or generation might fail.")
+                    print(warning_msg)
+                    gr.Warning(warning_msg)
+                else:
+                    print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
+            else:
+                print("Could not check UNet cross_attention_dim.")
+
+            print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {dtype_to_use}.")
+
+        except Exception as e:
+            # Reset global state on load failure
+            current_pipeline = None
+            current_model_id = None
+            current_device_loaded = None
+            print(f"Error loading model '{model_identifier}': {e}")
+            error_message_lower = str(e).lower()
+            # Provide more specific error messages based on common exceptions
+            if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower:
+                raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check the ID/path or internet connection. Error: {e}")
+            elif "checkpointsnotfounderror" in error_message_lower or "could not find a valid model structure" in error_message_lower:
+                raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it is a diffusers-format ID/path. Error: {e}")
+            elif "out of memory" in error_message_lower:
+                raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}")
+            elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower:
+                raise gr.Error(f"CUDA/GPU driver/installation error on Space: {e}. Check the Space hardware or select CPU.")
+            elif "safetensors" in error_message_lower or "cannot load" in error_message_lower or "filenotfound" in error_message_lower:
+                raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.")
+            elif "could not import" in error_message_lower or "module not found" in error_message_lower:
+                raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.")
+            else:
+                raise gr.Error(f"Failed to load model '{model_identifier}': {e}")
+
+    # Check that a pipeline is successfully loaded before proceeding
+    if current_pipeline is None:
+        raise gr.Error("Model failed to load. Cannot generate image.")
+
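+    # Illustrative aside (not part of this commit): on long-running Spaces, an explicit
+    # garbage-collection pass after the unload above can release VRAM sooner:
+    #   import gc
+    #   gc.collect()
+    #   torch.cuda.empty_cache()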
+    # 2. Configure Scheduler
+    selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
+    if selected_scheduler_class is None:
+        print(f"Warning: Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")
+        selected_scheduler_class = SCHEDULER_MAP[DEFAULT_SCHEDULER]
+        gr.Warning(f"Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")
+
+    # Recreate the scheduler from the pipeline's own config to ensure compatibility
+    try:
+        scheduler_config = current_pipeline.scheduler.config
+        current_pipeline.scheduler = selected_scheduler_class.from_config(scheduler_config)
+        print(f"Scheduler set to: {scheduler_name}")
+    except Exception as e:
+        print(f"Error setting scheduler '{scheduler_name}': {e}")
+        # Attempt to fall back to the default scheduler if setting fails
+        try:
+            print(f"Attempting to fall back to default scheduler: {DEFAULT_SCHEDULER}")
+            current_pipeline.scheduler = SCHEDULER_MAP[DEFAULT_SCHEDULER].from_config(current_pipeline.scheduler.config)
+            gr.Warning(f"Failed to set scheduler to '{scheduler_name}', fell back to {DEFAULT_SCHEDULER}. Error: {e}")
+        except Exception as fallback_e:
+            print(f"Fallback scheduler failed too: {fallback_e}")
+            raise gr.Error(f"Failed to configure scheduler '{scheduler_name}' and the fallback failed. Error: {e}")
 
+    # 3. Parse Image Size
+    width, height = 512, 512  # Default size
+    if size.lower() == "hire.fix":
+        width, height = 1024, 1024
+        print(f"Interpreting 'hire.fix' size as {width}x{height}")
+    else:
+        try:
+            w_str, h_str = size.split('x')
+            width = int(w_str)
+            height = int(h_str)
+        except ValueError:
+            raise gr.Error(f"Invalid size format: '{size}'. Use 'WidthxHeight' (e.g., 512x512) or 'hire.fix'.")
+        except Exception as e:
+            raise gr.Error(f"Error parsing size '{size}': {e}")
+
+    # Size multiple check (SD 1.5 works best with multiples of 64, or at minimum 8)
+    multiple_check = 64  # Use 64 as the standard check
+    if width % multiple_check != 0 or height % multiple_check != 0:
+        warning_msg_size = (f"Warning: Image size {width}x{height} is not a multiple of {multiple_check}. "
+                            f"Stable Diffusion 1.5 models are typically trained on sizes like 512x512. "
+                            "Using non-standard sizes may cause tiling, distortions, or other artifacts.")
+        print(warning_msg_size)
+        gr.Warning(warning_msg_size)
+    # Optional: round the size to the nearest multiple of 64? Not done here, to preserve user choice.
+
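+    # A minimal rounding sketch (illustrative only), should clamping to the nearest
+    # lower multiple of 64 ever be preferred over warning:
+    #   width = max(64, (width // 64) * 64)
+    #   height = max(64, (height // 64) * 64)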
324
+ # 4. Set Seed Generator
325
+ generator = None
326
+ # The generator device needs to match the pipeline device
327
+ generator_device = current_pipeline.device # Must match the pipeline device
328
+
329
+ if randomize_seed: # Use the randomize_seed checkbox
330
+ seed = random.randint(0, MAX_SEED) # Re-randomize seed
331
+ print(f"Randomizing seed to: {seed}")
332
+ else:
333
+ # Use provided seed if not randomizing (-1 will still use it)
334
+ print(f"Using provided seed: {int(seed)}")
335
+
336
+
337
+ try:
338
+ # Explicitly move generator to the desired device
339
+ generator = torch.Generator(device=generator_device).manual_seed(int(seed))
340
+ print(f"Generator set with seed {int(seed)} on device: {generator_device}")
341
+ except Exception as e:
342
+ print(f"Warning: Error setting seed generator on device {generator_device}: {e}. Generation might still proceed with a default generator (potentially on CPU).")
343
+ gr.Warning(f"Failed to set seed generator on device {generator_device}. Generation might use a random seed on a different device. Error: {e}")
344
+ generator = None # Let pipeline handle random seed if generator creation fails or device mismatch
345
+
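+    # Reproducibility note (general diffusers behavior): on the same hardware and
+    # dtype, the same model, scheduler, size, steps, CFG scale, and seed reproduce
+    # the same image, which is why the seed actually used is returned to the UI.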
+    # 5. Generate Image
+    # Ensure required parameters are integers/floats
+    num_inference_steps_int = int(steps)
+    guidance_scale_float = float(cfg_scale)
+
+    # Basic validation on parameters
+    if num_inference_steps_int <= 0 or guidance_scale_float <= 0:
+        raise gr.Error("Steps and CFG Scale must be positive values.")
+    if width <= 0 or height <= 0:
+        raise gr.Error("Image width and height must be positive.")
+
+    print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={int(seed)}, Device={device_to_use}, Dtype={dtype_to_use}")
+    start_time = time.time()
+
+    try:
+        # Report progress to Gradio periodically via the pipeline callback
+        output = current_pipeline(
+            prompt=prompt,
+            negative_prompt=negative_prompt if negative_prompt else None,
+            num_inference_steps=num_inference_steps_int,
+            guidance_scale=guidance_scale_float,
+            width=width,
+            height=height,
+            generator=generator,
+            callback_steps=max(1, num_inference_steps_int // 20),  # Update the progress bar periodically
+            callback=lambda step, timestep, latents: progress(step / num_inference_steps_int, desc=f"Step {step}/{num_inference_steps_int}"),
+            # A custom VAE, if a model needs one, is passed to from_pretrained above, not here.
+            # Memory savers are likewise configured on the pipeline object beforehand, e.g.
+            # current_pipeline.enable_attention_slicing()  (helps VRAM on smaller GPUs) or
+            # current_pipeline.enable_xformers_memory_efficient_attention()  (needs xformers installed and a compatible GPU).
+        )
+        end_time = time.time()
+        print(f"Generation finished in {end_time - start_time:.2f} seconds.")
+        generated_image = output.images[0]
+
+        # Return both the image and the seed (potentially randomized)
+        return generated_image, seed
+
+    except gr.Error as e:
+        # Re-raise Gradio errors directly
+        raise e
+    except ValueError as ve:
+        # Handle value errors raised inside the pipeline (e.g., invalid parameters)
+        print(f"Parameter Error: {ve}")
+        raise gr.Error(f"Invalid Parameter: {ve}")
+    except Exception as e:
+        # Catch any other unexpected errors during generation
+        print(f"An error occurred during image generation: {e}")
+        error_message_lower = str(e).lower()
+        if "size must be a multiple of" in error_message_lower or "invalid dimensions" in error_message_lower or "shape mismatch" in error_message_lower:
+            raise gr.Error(f"Image generation failed - invalid size '{width}x{height}' for this model: {e}. Try a multiple of 64 or 8.")
+        elif "out of memory" in error_message_lower or "cuda out of memory" in error_message_lower:
+            print("Hint: Try a smaller image size, fewer steps, or a model that uses less VRAM.")
+            raise gr.Error(f"Out of Memory (OOM) during generation. This Space might not have enough VRAM. Try a smaller size/fewer steps or select CPU (if available). Error: {e}")
+        elif "runtimeerror" in error_message_lower:
+            raise gr.Error(f"Runtime Error during generation: {e}. This could be a model/scheduler incompatibility or another issue.")
+        elif "device-side assert" in error_message_lower or "cuda error" in error_message_lower:
+            raise gr.Error(f"CUDA/GPU error during generation: {e}. Ensure the Space is configured with a GPU and a compatible PyTorch.")
+        elif "expected all tensors to be on the same device" in error_message_lower:
+            raise gr.Error(f"Device mismatch error during generation: {e}. This is an internal error; please report it.")
+        else:
+            raise gr.Error(f"Image generation failed: an unexpected error occurred. {e}")
+
+
+# --- Gradio Interface ---
+# On Spaces, the dropdown primarily lists Hub models
+model_choices = DEFAULT_HUB_MODELS
+if not model_choices:
+    initial_model_choices = ["No models found"]
+    initial_default_model = "No models found"
+    model_dropdown_interactive = False
+    print("\n!!! WARNING: No default Hub models listed in script. Model dropdown will be empty. !!!")
+else:
+    initial_model_choices = model_choices
+    # Set a reasonable default if available
+    initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID else "No models found"
+    model_dropdown_interactive = bool(INITIAL_MODEL_ID)
+
+scheduler_choices = list(SCHEDULER_MAP.keys())
+
+# Use the template's CSS
 css = """
 #col-container {
     margin: 0 auto;
 }
 """
 
+with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:  # Soft theme carried over from the user's script
+    gr.Markdown(
+        """
+        # CipherCore Stable Diffusion 1.5 Generator
+        Create images with Stable Diffusion 1.5 using models from the Hugging Face Hub.
+        Choose a model, set your prompts and parameters, and generate!
+        _Note: the 'hire.fix' size option currently generates at 1024x1024._
+        """  # References to local checkpoints and "coming soon" removed
+    )
 
+    # Note about model loading time
+    if INITIAL_MODEL_ID:
+        gr.Markdown(f"*(Note: The initial model '{INITIAL_MODEL_ID}' is loading... The first generation might take longer.)*")
+    else:
+        gr.Markdown("*(Note: No initial model configured or loaded. Select a model from the dropdown to start.)*")
 
+    with gr.Row():
+        with gr.Column(scale=2):  # Give more space to controls
+            model_dropdown = gr.Dropdown(
+                choices=initial_model_choices,
+                value=initial_default_model,
+                label="Select Model (Hugging Face Hub ID)",
+                interactive=model_dropdown_interactive,
             )
+            device_dropdown = gr.Dropdown(
+                choices=AVAILABLE_DEVICES,
+                value=DEFAULT_DEVICE,
+                label="Processing Device",
+                interactive=len(AVAILABLE_DEVICES) > 1,  # Only interactive if both CPU and GPU are options
+            )
+            prompt_input = gr.Textbox(label="Positive Prompt", placeholder="e.g., a majestic lion in a vibrant jungle, photorealistic", lines=3, autofocus=True)
+            negative_prompt_input = gr.Textbox(label="Negative Prompt (Optional)", placeholder="e.g., blurry, low quality, deformed, watermark", lines=2)
+
+            with gr.Accordion("Advanced Settings", open=False):  # Advanced settings start closed
+                with gr.Row():
+                    steps_slider = gr.Slider(minimum=5, maximum=150, value=30, label="Inference Steps", step=1)
+                    cfg_slider = gr.Slider(minimum=1.0, maximum=30.0, value=7.5, label="CFG Scale", step=0.1)
+                with gr.Row():
+                    scheduler_dropdown = gr.Dropdown(
+                        choices=scheduler_choices,
+                        value=DEFAULT_SCHEDULER,
+                        label="Scheduler"
+                    )
+                    size_dropdown = gr.Dropdown(
+                        choices=SUPPORTED_SD15_SIZES,
+                        value="512x512",  # SD1.5 default
+                        label="Image Size"
+                    )
+                # Seed input and randomize checkbox side by side
+                with gr.Row():
+                    seed_input = gr.Number(label="Seed", value=0, precision=0, interactive=True)
+                    randomize_seed_checkbox = gr.Checkbox(label="Randomize seed (-1 equivalent)", value=True)
 
+            generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1)
+
+        with gr.Column(scale=3):  # Give more space to the image
+            output_image = gr.Image(
+                label="Generated Image",
+                type="pil",
+                height=768,  # Slightly larger preview if possible
+                width=768,   # Match the height for a square preview
+                show_share_button=True,
+                show_download_button=True,
+                interactive=False  # The output image is not interactive
             )
+            # The template returned the seed; display the actual seed used
+            actual_seed_output = gr.Number(label="Actual Seed Used", precision=0, interactive=False)
 
+    # Wire the button click (and prompt submit) to the generation function via gr.on
     gr.on(
+        triggers=[generate_button.click, prompt_input.submit],  # Also trigger on prompt submit
         fn=infer,
         inputs=[
+            model_dropdown,
+            device_dropdown,
+            prompt_input,
+            negative_prompt_input,
+            steps_slider,
+            cfg_slider,
+            scheduler_dropdown,
+            size_dropdown,
+            seed_input,
+            randomize_seed_checkbox,  # Pass the checkbox value
         ],
+        outputs=[output_image, actual_seed_output],  # Return the image and the actual seed used
+        api_name="generate"  # Optional: for API access
     )
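+    # With api_name="generate" set, the endpoint can also be called remotely via
+    # gradio_client (illustrative sketch; "USER/SPACE" is a placeholder):
+    #   from gradio_client import Client
+    #   client = Client("USER/SPACE")
+    #   image_path, used_seed = client.predict(
+    #       "runwayml/stable-diffusion-v1-5", "CPU", "a lighthouse at dawn", "",
+    #       30, 7.5, "Euler", "512x512", 0, True, api_name="/generate")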
 
+    # Examples (from the template). Full per-example parameter lists would have to
+    # match the infer signature exactly, so the examples are kept to the prompt only;
+    # all other inputs fall back to the UI defaults.
+    template_examples = [
+        "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+        "An astronaut riding a green horse",
+        "A delicious ceviche cheesecake slice",
+    ]
+    gr.Examples(examples=template_examples, inputs=[prompt_input])
+
+    # Footer notes from the user's script, adapted for Spaces
+    gr.Markdown(
+        """
+        ---
+        **Usage Notes:**
+        1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space.
+        2. Choose your processing device (GPU recommended, if available).
+        3. Enter your positive and optional negative prompts.
+        4. Adjust the advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. Ticking "Randomize seed" uses a random seed, like seed -1 in other UIs.
+        5. Click "Generate Image".
+        The first generation with a new model/device might take some time while the model loads.
+        """  # Notes about local models and batch files removed
+    )
+
+
+# --- Launch the App ---
 if __name__ == "__main__":
+    print("\n--- Starting CipherCore Stable Diffusion 1.5 Generator (Hugging Face Spaces) ---")
+    cuda_status = "CUDA available." if torch.cuda.is_available() else "CUDA not available."
+    gpu_count_str = f"Found {torch.cuda.device_count()} GPU(s)." if torch.cuda.is_available() else ""
+
+    print(f"{cuda_status} {gpu_count_str}")
+    print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}")
+    print(f"Default device selected by the app: {DEFAULT_DEVICE}")
+    if current_pipeline:
+        print(f"Initial model '{current_model_id}' loaded successfully.")
+    else:
+        print("No initial model loaded. Check the model list and network connectivity.")
+
+    print("Launching Gradio interface...")
+    # On Spaces, demo.launch() is called without parameters; Spaces supplies
+    # server_name, server_port, share, etc. For local testing you can instead pass
+    # e.g. demo.launch(show_error=True, inbrowser=True).
+    demo.launch()  # Standard launch for Hugging Face Spaces
+
+    print("Gradio interface closing.")