# -*- coding: utf-8 -*-
"""
Hugging Face Spaces script for a basic Stable Diffusion 1.5 Gradio app.
Adapted from the author's local script and the HF Spaces template.
Supports Hub models and CPU/GPU selection based on the available hardware.
Includes an Attention Slicing optimization toggle.
Uses the corrected progress callback format.
"""

import gradio as gr
import numpy as np
import random
import torch
from diffusers import StableDiffusionPipeline
# Import commonly used schedulers
from diffusers import DDPMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler
import os # Keep os for potential checks, though local paths are less standard on Spaces
# import spaces #[uncomment to use ZeroGPU if needed, typically not for standard GPU usage]
from PIL import Image
import time # Optional: for timing generation
# huggingface_hub is implicitly used by from_pretrained
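
# The imports above imply roughly these pip dependencies; a minimal requirements.txt sketch
# for this Space (package list assumed from the imports, pin versions as needed):
#   gradio
#   torch
#   diffusers
#   transformers   # pulled in by StableDiffusionPipeline for the CLIP text encoder/tokenizer
#   accelerate     # commonly added on Spaces for faster / lower-memory model loading
#   safetensors
#   numpy
#   Pillow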

# --- Configuration ---
# MODELS_DIR is less relevant for Spaces unless specifically placing models in repo
# For Spaces, models are primarily loaded via their Hugging Face Hub IDs
SUPPORTED_SD15_SIZES = ["512x512", "768x512", "512x768", "768x768", "1024x768", "768x1024", "1024x1024", "hires.fix"]

# Mapping of friendly scheduler names to their diffusers classes
SCHEDULER_MAP = {
    "Euler": EulerDiscreteScheduler,
    "DPM++ 2M": DPMSolverMultistepScheduler,
    "DDPM": DDPMScheduler,
    "LMS": LMSDiscreteScheduler,
    # Add more as needed from diffusers.schedulers (make sure they are imported)
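    # Illustrative sketch (not enabled): to expose another sampler such as Euler Ancestral,
    # import EulerAncestralDiscreteScheduler from diffusers above and register it here:
    # "Euler a": EulerAncestralDiscreteScheduler,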
}
DEFAULT_SCHEDULER = "Euler" # Default scheduler on startup

# List of popular Stable Diffusion 1.5 models on the Hugging Face Hub
# For Spaces, this is the primary source of models.
DEFAULT_HUB_MODELS = [
    "Raxephion/Typhoon-SD1.5-V1",
    "Yntec/RevAnimatedV2Rebirth",
    "stablediffusionapi/realcartoon-anime-v11"
    # "CompVis/stable-diffusion-v1-4", # Example SD 1.4 model (might behave slightly differently)
    # Add other diffusers-compatible SD1.5 models here
]

# --- Constants for Gradio UI / Generation ---
MAX_SEED = np.iinfo(np.int32).max # Defines the maximum seed value
# MAX_IMAGE_SIZE_SLIDER = 1024 # Used only for slider max, kept for consistency if needed elsewhere

# --- Determine available devices and set up options ---
# This logic is from the user's script and works well for Spaces
AVAILABLE_DEVICES = ["CPU"]
if torch.cuda.is_available():
    AVAILABLE_DEVICES.append("GPU")
    print(f"CUDA available. Found {torch.cuda.device_count()} GPU(s).")
    if torch.cuda.device_count() > 0:
        print(f"Using GPU 0: {torch.cuda.get_device_name(0)}")
else:
    print("CUDA not available. Running on CPU.")

# Default device preference: GPU if available, else CPU
DEFAULT_DEVICE = "GPU" if "GPU" in AVAILABLE_DEVICES else "CPU"

# Set initial PyTorch device string based on detection
initial_device_to_use = "cuda" if DEFAULT_DEVICE == "GPU" else "cpu"
print(f"Initial pipeline will load on device: {initial_device_to_use}")

# Determine initial dtype
# Note: fp16 is generally faster and uses less VRAM on compatible GPUs
initial_dtype_to_use = torch.float32 # Default
if initial_device_to_use == "cuda":
    # Check if the GPU supports fp16 (most modern ones do)
    if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7: # Check compute capability (7.0+ for good fp16)
         initial_dtype_to_use = torch.float16
         print("Detected GPU supports FP16, will attempt using torch.float16.")
    else:
         initial_dtype_to_use = torch.float32 # Fallback
         print("Detected GPU might not fully support FP16 or capability check failed, using torch.float32.")
else:
    initial_dtype_to_use = torch.float32 # CPU requires float32

print(f"Initial dtype: {initial_dtype_to_use}")


# --- Global state for the loaded pipeline ---
# We'll load the *initial* pipeline once on startup and keep it in memory.
# Subsequent model changes will reload the pipeline within the infer function.
current_pipeline = None
current_model_id = None # Keep track of the currently loaded model identifier
current_device_loaded = None # Keep track of the device the pipeline is currently on

# Initial model to load on startup
INITIAL_MODEL_ID = DEFAULT_HUB_MODELS[0] if DEFAULT_HUB_MODELS else None
if INITIAL_MODEL_ID:
    print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...")
    try:
        # Load the pipeline onto the initial device and dtype
        pipeline = StableDiffusionPipeline.from_pretrained(
            INITIAL_MODEL_ID,
            torch_dtype=initial_dtype_to_use,
            safety_checker=None, # <<< SAFETY CHECKER DISABLED <<<
        )

        # --- Apply Optimizations during initial load ---
        # Apply attention slicing by default if GPU is available for memory efficiency on Spaces
        if initial_device_to_use == "cuda":
            try:
                pipeline.enable_attention_slicing()
                print("Attention Slicing enabled during initial load.")
            except Exception as e:
                print(f"Warning: Failed to enable Attention Slicing during initial load: {e}")
                # Don't raise Gradio error here, just print warning


        pipeline = pipeline.to(initial_device_to_use) # Move to the initial device

        current_pipeline = pipeline
        current_model_id = INITIAL_MODEL_ID
        current_device_loaded = torch.device(initial_device_to_use)
        print(f"Initial model loaded successfully on {current_device_loaded}.")

         # Basic check for SD1.x architecture
        unet_config = getattr(current_pipeline, 'unet', None)
        if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
             cross_attn_dim = unet_config.config.cross_attention_dim
             if cross_attn_dim != 768:
                 warning_msg = (f"Warning: Loaded model '{INITIAL_MODEL_ID}' might not be a standard SD 1.x model "
                                f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
                                "Results may be unexpected.")
                 print(warning_msg)
                 # gr.Warning(warning_msg) # Cannot raise Gradio error/warning during startup load
             else:
                 print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
        else:
             print("Could not check UNet cross_attention_dim for initial model.")


    except Exception as e:
        current_pipeline = None
        current_model_id = None
        current_device_loaded = None
        print(f"Error loading initial model '{INITIAL_MODEL_ID}': {e}")
        print("Application will start, but image generation may fail if the initial model cannot be loaded.")
        # Cannot raise gr.Error here as Gradio not fully initialized
else:
     print("\nNo default Hub models defined. Application will start without a loaded model.")
     print("Please select a model from the dropdown to enable generation.")


# --- Image Generation Function (Adapted for Hugging Face Spaces 'infer' signature) ---
# @spaces.GPU #[uncomment if using ZeroGPU, otherwise standard torch device handles it]
def infer(
    model_identifier,         # From model_dropdown
    selected_device_str,      # From device_dropdown
    prompt,                   # From prompt_input
    negative_prompt,          # From negative_prompt_input
    steps,                    # From steps_slider
    cfg_scale,                # From cfg_slider
    scheduler_name,           # From scheduler_dropdown
    size,                     # From size_dropdown
    seed,                     # From seed_input (now a Slider)
    randomize_seed,           # From randomize_seed_checkbox
    enable_attention_slicing, # New input for the optimization toggle
    progress=gr.Progress(),   # Corrected Progress initialization (removed track_tqdm=True)
):
    """Generates an image using the selected model and parameters on the chosen device."""
    global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP, MAX_SEED

    # Determine the requested device and dtype up front, before the rest of the parameter parsing,
    # so the pipeline can be loaded or switched onto the right device below.
    temp_device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
    temp_dtype_to_use = torch.float32
    if temp_device_to_use == "cuda":
        if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
             temp_dtype_to_use = torch.float16
        else:
             temp_dtype_to_use = torch.float32
    else:
         temp_dtype_to_use = torch.float32

    # 1. Load/Switch Model if necessary
    # Check if the requested model identifier OR the requested device has changed
    if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != temp_device_to_use):

         print(f"Loading model: {model_identifier} onto {temp_device_to_use} with dtype {temp_dtype_to_use}...")
         # Clear previous pipeline to potentially free memory *before* loading the new one
         if current_pipeline is not None:
              print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...")
              if str(current_device_loaded) == "cuda":
                   try:
                       current_pipeline.to("cpu")
                       print("Moved previous pipeline to CPU.")
                   except Exception as move_e:
                       print(f"Warning: Failed to move previous pipeline to CPU: {move_e}")
              del current_pipeline
              current_pipeline = None # Set to None immediately
              # Attempt to clear CUDA cache if using GPU (from the previous device)
              if str(current_device_loaded) == "cuda":
                  try:
                      torch.cuda.empty_cache()
                      print("Cleared CUDA cache.")
                  except Exception as cache_e:
                      print(f"Warning: Error clearing CUDA cache: {cache_e}")

         # Ensure the device is actually available if not CPU (redundant with earlier check but safe)
         if temp_device_to_use == "cuda":
              if not torch.cuda.is_available():
                   raise gr.Error("GPU selected but CUDA is not available to PyTorch on this Space. Please select CPU or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.")

         try:
             pipeline = StableDiffusionPipeline.from_pretrained(
                 model_identifier,
                 torch_dtype=temp_dtype_to_use, # Use the determined dtype for loading
                 safety_checker=None, # DISABLED
             )

             # Apply optimizations based on UI input during load
             if enable_attention_slicing and temp_device_to_use == "cuda": # Only apply on GPU
                 try:
                     pipeline.enable_attention_slicing()
                     print("Attention Slicing enabled during model load.")
                 except Exception as e:
                     print(f"Warning: Failed to enable Attention Slicing: {e}")
                     gr.Warning(f"Failed to enable Attention Slicing. Error: {e}")
             else:
                 try:
                     pipeline.disable_attention_slicing() # Ensure it's off if toggle is off or on CPU
                     # print("Attention Slicing disabled during model load.") # Avoid noise
                 except Exception as e:
                      # May fail if it wasn't enabled, ignore
                      pass


             pipeline = pipeline.to(temp_device_to_use) # Move to the determined device

             current_pipeline = pipeline
             current_model_id = model_identifier
             current_device_loaded = torch.device(temp_device_to_use) # Store the actual device object

             # Basic check for SD1.x architecture
             unet_config = getattr(pipeline, 'unet', None)
             if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
                  cross_attn_dim = unet_config.config.cross_attention_dim
                  if cross_attn_dim != 768:
                      warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model "
                                     f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
                                     "Results may be unexpected or generation might fail.")
                      print(warning_msg)
                      gr.Warning(warning_msg)
                  else:
                      print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
             else:
                  print("Could not check UNet cross_attention_dim.")

             print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {temp_dtype_to_use}.")

         except Exception as e:
             current_pipeline = None
             current_model_id = None
             current_device_loaded = None
             print(f"Error loading model '{model_identifier}': {e}")
             error_message_lower = str(e).lower()
             if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower:
                  raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check ID/path or internet connection. Error: {e}")
             elif "checkpointsnotfounderror" in error_message_lower or "valueerror: could not find a valid model structure" in error_message_lower:
                  raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it's a diffusers format ID/path. Error: {e}")
             elif "out of memory" in error_message_lower:
                  raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}")
             elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower:
                  raise gr.Error(f"CUDA/GPU Driver/Installation Error on Space: {e}. Check Space hardware or select CPU.")
             elif "safetensors_rust.safetensorserror" in error_message_lower or "oserror: cannot load" in error_message_lower or "filenotfounderror" in error_message_lower:
                  raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.")
             elif "could not import" in error_message_lower or "module not found" in error_message_lower:
                  raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.")
             else:
                 raise gr.Error(f"Failed to load model '{model_identifier}': {e}")

    # Re-determine device_to_use and dtype_to_use *after* ensuring pipeline is loaded
    # They should match current_device_loaded and the pipeline's dtype
    # This is crucial because current_pipeline.device and dtype are the definitive source
    # after a potentially successful load or switch.
    # Use device.type ("cuda"/"cpu") rather than str(device), which may be "cuda:0" and break string comparisons below
    device_to_use = current_pipeline.device.type if current_pipeline else ("cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu")
    dtype_to_use = current_pipeline.dtype if current_pipeline else torch.float32 # Fallback if somehow pipeline is still None


    # Check if pipeline is successfully loaded before proceeding with generation
    if current_pipeline is None:
         raise gr.Error("Model failed to load during setup or switching. Cannot generate image.")


    # --- Apply Optimizations *before* generation if model was already loaded ---
    # If the model didn't need reloading, we need to apply/remove slicing here
    # Check the pipeline's actual device
    if current_pipeline.device.type == "cuda": # Only attempt on GPU (device.type avoids the "cuda:0" vs "cuda" mismatch)
        if enable_attention_slicing:
             try:
                 current_pipeline.enable_attention_slicing()
                 # print("Attention Slicing enabled for generation.") # Too verbose
             except Exception as e:
                  print(f"Warning: Failed to enable Attention Slicing before generation: {e}")
                  gr.Warning(f"Failed to enable Attention Slicing. Error: {e}")
        else:
             try:
                 current_pipeline.disable_attention_slicing()
                 # print("Attention Slicing disabled for generation.") # Too verbose
             except Exception as e:
                  # May fail if it wasn't enabled, ignore
                  pass
    else: # Ensure slicing is off on CPU
         try:
              current_pipeline.disable_attention_slicing()
         except Exception as e:
              pass # Ignore


    # 2. Configure Scheduler
    selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
    if selected_scheduler_class is None:
         print(f"Warning: Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")
         selected_scheduler_class = SCHEDULER_MAP[DEFAULT_SCHEDULER]
         gr.Warning(f"Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")

    # Recreate scheduler from config to ensure compatibility with the loaded pipeline
    try:
        scheduler_config = current_pipeline.scheduler.config
        current_pipeline.scheduler = selected_scheduler_class.from_config(scheduler_config)
        print(f"Scheduler set to: {scheduler_name}")
    except Exception as e:
        print(f"Error setting scheduler '{scheduler_name}': {e}")
        # Attempt to fallback to a default if setting fails
        try:
             print(f"Attempting to fallback to default scheduler: {DEFAULT_SCHEDULER}")
             # Re-read the scheduler config here in case reading it above was what failed
             current_pipeline.scheduler = SCHEDULER_MAP[DEFAULT_SCHEDULER].from_config(current_pipeline.scheduler.config)
             gr.Warning(f"Failed to set scheduler to '{scheduler_name}', fell back to {DEFAULT_SCHEDULER}. Error: {e}")
        except Exception as fallback_e:
             print(f"Fallback scheduler failed too: {fallback_e}")
             raise gr.Error(f"Failed to configure scheduler '{scheduler_name}' and fallback failed. Error: {e}")


    # 3. Parse Image Size
    width, height = 512, 512 # Default size
    if size.lower() == "hires.fix":
        width, height = 1024, 1024
        print(f"Interpreting 'hires.fix' size as {width}x{height}")
    else:
        try:
            w_str, h_str = size.split('x')
            width = int(w_str)
            height = int(h_str)
        except ValueError:
            raise gr.Error(f"Invalid size format: '{size}'. Use 'WidthxHeight' (e.g., 512x512) or 'hire.fix'.")
        except Exception as e:
             raise gr.Error(f"Error parsing size '{size}': {e}")

    # Size multiple check (SD 1.5 works best with multiples of 64 or 8)
    multiple_check = 64 # Use 64 as a standard check
    if width % multiple_check != 0 or height % multiple_check != 0:
         warning_msg_size = (f"Warning: Image size {width}x{height} is not a multiple of {multiple_check}. "
                             f"Stable Diffusion 1.5 models are typically trained on sizes like 512x512. "
                             "Using non-standard sizes may cause tiling, distortions, or other artifacts.")
         print(warning_msg_size)
         gr.Warning(warning_msg_size)
         # Optional: Round size to nearest multiple of 64? Not implemented here to preserve user choice.
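         # A minimal sketch of that rounding, were it enabled (kept commented out to preserve user choice):
         # width = max(multiple_check, round(width / multiple_check) * multiple_check)
         # height = max(multiple_check, round(height / multiple_check) * multiple_check)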


    # 4. Set Seed Generator
    # The generator device needs to match the pipeline device
    generator_device = current_pipeline.device # Must match the pipeline device

    seed_int = 0 # Default if issue occurs
    if randomize_seed: # Use the randomize_seed checkbox
        seed_int = random.randint(0, MAX_SEED) # Re-randomize seed using MAX_SEED
        print(f"Randomizing seed to: {seed_int}")
    else:
        # Use provided seed from the slider input
        try:
             seed_int = int(seed)
             print(f"Using provided seed: {seed_int}")
        except ValueError:
             print(f"Warning: Invalid seed input '{seed}'. Using random seed instead.")
             gr.Warning(f"Invalid seed input '{seed}'. Using random seed instead.")
             seed_int = random.randint(0, MAX_SEED) # Fallback to random if input is not int
             # No need to set randomize_seed = True here, just use the new random seed_int


    generator = None # Initialize generator as None
    try:
        # Explicitly move generator to the desired device
        generator = torch.Generator(device=generator_device).manual_seed(seed_int)
        print(f"Generator set with seed {seed_int} on device: {generator_device}")
    except Exception as e:
         print(f"Warning: Error setting seed generator with seed {seed_int} on device {generator_device}: {e}. Falling back to default generator (potentially on CPU) or system random.")
         gr.Warning(f"Failed to set seed generator with seed {seed_int}. Using system random seed. Error: {e}")
         generator = None # Let pipeline handle random seed if generator creation fails or device mismatch
         # If generator creation failed, the pipeline falls back to a system-dependent random seed,
         # so the seed we tried to use did not actually drive generation; in that case the function
         # reports -1 as the "Actual Seed Used" (see the return value below).

    # 5. Generate Image
    # Ensure required parameters are integers/floats
    num_inference_steps_int = int(steps)
    guidance_scale_float = float(cfg_scale)

    # Basic validation on parameters
    if num_inference_steps_int <= 0 or guidance_scale_float <= 0:
         raise ValueError("Steps and CFG Scale must be positive values.")
    if width <= 0 or height <= 0:
         raise ValueError("Image width and height must be positive.")

    print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={seed_int if generator else 'System Random'}, Device={device_to_use}, Dtype={dtype_to_use}, Slicing Enabled={enable_attention_slicing and device_to_use == 'cuda'}")
    start_time = time.time()

    try:
        # Use the progress parameter from the template
        output = current_pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt else None,
            num_inference_steps=num_inference_steps_int,
            guidance_scale=guidance_scale_float,
            width=width,
            height=height,
            generator=generator, # Pass the generator (which might be None)
            # Pass progress object for tqdm tracking in Gradio
            callback_steps=max(1, num_inference_steps_int // 20), # Update progress bar periodically
            # --- CORRECTED CALLBACK FORMAT ---
            callback=lambda step, timestep, latents: progress((step, num_inference_steps_int), desc=f"Step {step}/{num_inference_steps_int}"),
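            # Note: depending on the installed diffusers version, `callback`/`callback_steps` may be
            # deprecated in favor of `callback_on_step_end`; the older signature is used here to match
            # the pipeline API this script was written against.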

            # Note: a custom VAE is not an argument to this call; it would be swapped in at load time
            # (e.g. StableDiffusionPipeline.from_pretrained(..., vae=...)). Likewise, xformers
            # memory-efficient attention is enabled on the pipeline object beforehand via
            # pipeline.enable_xformers_memory_efficient_attention() (needs xformers installed and a
            # compatible GPU), not passed here.
        )
        end_time = time.time()
        print(f"Generation finished in {end_time - start_time:.2f} seconds.")
        generated_image = output.images[0]

        # Determine the seed to return: the one we attempted to use, or -1 if generator creation failed
        actual_seed_used = seed_int if generator else -1

        # Return both the image and the seed (potentially randomized)
        return generated_image, actual_seed_used

    except gr.Error as e:
         # Re-raise Gradio errors directly
         raise e
    except ValueError as ve:
         # Handle specific value errors like invalid parameters
         print(f"Parameter Error: {ve}")
         # This is the line that was re-raising the error, it remains here.
         raise gr.Error(f"Invalid Parameter: {ve}")
    except Exception as e:
        # Catch any other unexpected errors during generation
        print(f"An error occurred during image generation: {e}")
        error_message_lower = str(e).lower()
        if "size must be a multiple of" in error_message_lower or "invalid dimensions" in error_message_lower or "shape mismatch" in error_message_lower:
             raise gr.Error(f"Image generation failed - Invalid size '{width}x{height}' for model: {e}. Try a multiple of 64 or 8.")
        elif "out of memory" in error_message_lower or "cuda out of memory" in error_message_lower:
             print("Hint: Try smaller image size, fewer steps, or a model that uses less VRAM.")
             raise gr.Error(f"Out of Memory (OOM) during generation. This Space might not have enough VRAM. Try smaller size/steps or select CPU (if available). Error: {e}")
        elif "runtimeerror" in error_message_lower:
             raise gr.Error(f"Runtime Error during generation: {e}. This could be a model/scheduler incompatibility or other issue.")
        elif "device-side assert" in error_message_lower or "cuda error" in error_message_lower:
             raise gr.Error(f"CUDA/GPU Error during generation: {e}. Ensure the Space is configured with a GPU and compatible PyTorch.")
        elif "expected all tensors to be on the same device" in error_message_lower:
             raise gr.Error(f"Device mismatch error during generation: {e}. This is an internal error, please report it.")
        else:
             # Generic catch-all for unknown errors
             raise gr.Error(f"Image generation failed: An unexpected error occurred. {e}")


# --- Gradio Interface ---
# For Spaces, we primarily list Hub models in the dropdown
model_choices = DEFAULT_HUB_MODELS
if not model_choices:
    initial_model_choices = ["No models found"]
    initial_default_model = "No models found"
    model_dropdown_interactive = False
    print(f"\n!!! WARNING: No default Hub models listed in script. Model dropdown will be empty. !!!")
else:
    initial_model_choices = model_choices
    # Set a reasonable default if available
    initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID and INITIAL_MODEL_ID in initial_model_choices else (initial_model_choices[0] if initial_model_choices and initial_model_choices[0] != "No models found" else "No models found")

    model_dropdown_interactive = True if initial_default_model != "No models found" else False # Make it interactive if there's *any* model choice

scheduler_choices = list(SCHEDULER_MAP.keys())

# Use the template's CSS
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from user's script
    gr.Markdown(
        """
        # CipherCore Stable Diffusion 1.5 Gradio WebUI
        Create images with Stable Diffusion 1.5 using models from Hugging Face Hub.
        Choose a model, set your prompts and parameters, and generate!
        _Note: 'hires.fix' size option currently generates at 1024x1024._
        """ # Removed reference to local checkpoints and "coming soon"
    )

    # Add a note about model loading time
    if initial_default_model != "No models found":
         gr.Markdown(f"*(Note: Model '{initial_default_model}' will load on first generation or model switch... This might take some time.)*")
    else:
         gr.Markdown(f"*(Note: No models available. Add Hub IDs to DEFAULT_HUB_MODELS in the script.)*")


    with gr.Row():
        with gr.Column(scale=2): # Give more space to controls
            model_dropdown = gr.Dropdown(
                choices=initial_model_choices,
                value=initial_default_model,
                label="Select Model (Hugging Face Hub ID)", # Updated label
                interactive=model_dropdown_interactive,
            )
            device_dropdown = gr.Dropdown(
                choices=AVAILABLE_DEVICES,
                value=DEFAULT_DEVICE,
                label="Processing Device",
                interactive=len(AVAILABLE_DEVICES) > 1, # Only make interactive if both CPU and GPU are options
            )
            prompt_input = gr.Textbox(label="Positive Prompt", placeholder="e.g., a majestic lion in a vibrant jungle, photorealistic", lines=3, autofocus=True) # Autofocus on prompt
            negative_prompt_input = gr.Textbox(label="Negative Prompt (Optional)", placeholder="e.g., blurry, low quality, deformed, watermark", lines=2)

            with gr.Accordion("Advanced Settings", open=False): # Keep advanced settings initially closed
                with gr.Row():
                    steps_slider = gr.Slider(minimum=5, maximum=150, value=30, label="Inference Steps", step=1)
                    cfg_slider = gr.Slider(minimum=1.0, maximum=30.0, value=7.5, label="CFG Scale", step=0.1)
                with gr.Row():
                     scheduler_dropdown = gr.Dropdown(
                        choices=scheduler_choices,
                        value=DEFAULT_SCHEDULER,
                        label="Scheduler"
                    )
                     size_dropdown = gr.Dropdown(
                        choices=SUPPORTED_SD15_SIZES,
                        value="512x512", # SD1.5 default
                        label="Image Size"
                    )
                # Combine seed input and randomize checkbox
                with gr.Row():
                     # Removed precision=0 from Slider - FIX FOR TYPEERROR
                     seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, interactive=True) # Use 0 as default, interactive initially
                     randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True) # Simplified label

                # --- Memory Optimization Toggle ---
                with gr.Row():
                     # Default to enabled if GPU is available, otherwise off
                     default_slicing = True if "GPU" in AVAILABLE_DEVICES else False
                     enable_attention_slicing_checkbox = gr.Checkbox(
                         label="Enable Attention Slicing (Memory Optimization - GPU only)",
                         value=default_slicing,
                         interactive="GPU" in AVAILABLE_DEVICES # Only interactive if GPU is an option
                     )
                     gr.Markdown("*(Helps reduce VRAM usage, may slightly affect speed/quality)*")


            generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1) # Added emojis

        with gr.Column(scale=3): # Give more space to image
            output_image = gr.Image(
                label="Generated Image",
                type="pil",
                height=768, # Slightly larger preview if possible
                width=768, # Match height for square
                show_share_button=True,
                show_download_button=True,
                interactive=False # Output image is not interactive
            )
            # The template returned the seed, let's add a display for the actual seed used
            actual_seed_output = gr.Number(label="Actual Seed Used", precision=0, interactive=False)


    # Link button click to generation function
    # Use gr.on as in the template
    gr.on(
        triggers=[generate_button.click, prompt_input.submit], # Also trigger on prompt submit
        fn=infer,
        inputs=[
            model_dropdown,
            device_dropdown,
            prompt_input,
            negative_prompt_input,
            steps_slider,
            cfg_slider,
            scheduler_dropdown,
            size_dropdown,
            seed_input,
            randomize_seed_checkbox,
            enable_attention_slicing_checkbox, # Pass the new checkbox value
        ],
        outputs=[output_image, actual_seed_output], # Return image and the actual seed used
        api_name="generate" # Optional: For API access
    )

    # Add examples from template
    # Ensure examples map to the correct input component indices
    template_examples = [
        "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
        "An astronaut riding a green horse",
        "A delicious ceviche cheesecake slice",
    ]
    # With a flat list of strings and a single component in `inputs`, each example
    # populates just that component (here, prompt_input).
    gr.Examples(examples=template_examples, inputs=[prompt_input])
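    # If examples should also pre-fill other inputs, pass a list of lists aligned with `inputs`
    # (illustrative sketch with assumed values, not enabled here):
    # gr.Examples(
    #     examples=[["A delicious ceviche cheesecake slice", "blurry, low quality, watermark"]],
    #     inputs=[prompt_input, negative_prompt_input],
    # )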


    # Add some notes/footer from user's script, adapted for Spaces
    gr.Markdown(
        """
        ---
        **Usage Notes:**
        1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space.
        2. Choose your processing device (GPU recommended if available).
        3. Enter your positive and optional negative prompts.
        4. Adjust advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. The "Randomize seed" checkbox will override the seed value in the input box.
        5. Click "Generate Image".
        The first generation after selecting a new model or device may take a while, since the model has to be downloaded and loaded first.
        """ # Removed notes about local models and batch files
    )


# --- Launch the App ---
if __name__ == "__main__":
    print("\n--- Starting CipherCore Stable Diffusion 1.5 Generator (Hugging Face Spaces) ---")
    cuda_status = "CUDA available" if torch.cuda.is_available() else "CUDA not available"
    gpu_count_str = f"Found {torch.cuda.device_count()} GPU(s)." if torch.cuda.is_available() else ""

    print(f"{cuda_status} {gpu_count_str}")
    print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}")
    print(f"Default device selected by app: {DEFAULT_DEVICE}")
    if current_pipeline:
         print(f"Initial model '{current_model_id}' loaded successfully on {current_device_loaded}.")
    else:
         print("No initial model loaded or initial load failed. Check model list and network connectivity.")


    print("Launching Gradio interface...")
    # For Spaces, usually launched directly without launch() parameters in app.py
    # Spaces handles the server_name, server_port, share, etc.
    # If running locally for testing, uncomment demo.launch()
    # demo.launch(show_error=True, inbrowser=True) # Uncomment for local testing
    demo.launch() # Standard launch for Hugging Face Spaces

    print("Gradio interface closing.")