# -*- coding: utf-8 -*-
"""
Hugging Face Spaces script for a basic Stable Diffusion 1.5 Gradio app.
Adapted from the author's local script and the HF Spaces template.
Supports Hugging Face Hub models and CPU/GPU selection based on the available hardware.
"""
import gradio as gr
import numpy as np
import random
import torch
from diffusers import StableDiffusionPipeline
# Import commonly used schedulers
from diffusers import DDPMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler
import os # Keep os for potential checks, though local paths are less standard on Spaces
# import spaces #[uncomment to use ZeroGPU if needed, typically not for standard GPU usage]
from PIL import Image
import time # Optional: for timing generation
# huggingface_hub is implicitly used by from_pretrained
# --- Configuration ---
# MODELS_DIR is less relevant for Spaces unless specifically placing models in repo
# For Spaces, models are primarily loaded via their Hugging Face Hub IDs
SUPPORTED_SD15_SIZES = ["512x512", "768x512", "512x768", "768x768", "1024x768", "768x1024", "1024x1024", "hire.fix"]
# Upper bound for the "Randomize seed" option (used by random.randint in infer)
MAX_SEED = np.iinfo(np.int32).max
# Mapping of friendly scheduler names to their diffusers classes
SCHEDULER_MAP = {
"Euler": EulerDiscreteScheduler,
"DPM++ 2M": DPMSolverMultistepScheduler,
"DDPM": DDPMScheduler,
"LMS": LMSDiscreteScheduler,
# Add more as needed from diffusers.schedulers (make sure they are imported)
}
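# Example (assumption, not wired in by default): to also expose Euler Ancestral, import
# EulerAncestralDiscreteScheduler from diffusers above and add an entry here, e.g.
#   "Euler a": EulerAncestralDiscreteScheduler,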
DEFAULT_SCHEDULER = "Euler" # Default scheduler on startup
# List of popular Stable Diffusion 1.5 models on the Hugging Face Hub
# For Spaces, this is the primary source of models.
DEFAULT_HUB_MODELS = [
"runwayml/stable-diffusion-v1-5",
"SG161222/Realistic_Vision_V6.0_B1_noVAE", # Example popular 1.5 model
"nitrosocke/Ghibli-Diffusion",
"danyloylo/sd1.5-ghibli-style-05",
"Bilal326/SD_1.5_DragonWarriorV2"
# "CompVis/stable-diffusion-v1-4", # Example SD 1.4 model (might behave slightly differently)
# Add other diffusers-compatible SD1.5 models here
]
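# Diffusers-format model folders committed to the Space repo can also be listed here;
# from_pretrained accepts a relative path (e.g. a hypothetical "./models/my-finetune")
# just like a Hub ID, as noted in infer() below.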
# --- Determine available devices and set up options ---
# This logic is from the user's script and works well for Spaces
AVAILABLE_DEVICES = ["CPU"]
if torch.cuda.is_available():
AVAILABLE_DEVICES.append("GPU")
print(f"CUDA available. Found {torch.cuda.device_count()} GPU(s).")
if torch.cuda.device_count() > 0:
print(f"Using GPU 0: {torch.cuda.get_device_name(0)}")
else:
print("CUDA not available. Running on CPU.")
# Default device preference: GPU if available, else CPU
DEFAULT_DEVICE = "GPU" if "GPU" in AVAILABLE_DEVICES else "CPU"
# Set initial PyTorch device string based on detection
initial_device_to_use = "cuda" if DEFAULT_DEVICE == "GPU" else "cpu"
print(f"Initial pipeline will load on device: {initial_device_to_use}")
# Determine initial dtype
# Note: fp16 is generally faster and uses less VRAM on compatible GPUs
initial_dtype_to_use = torch.float32 # Default
if initial_device_to_use == "cuda":
# Check if the GPU supports fp16 (most modern ones do)
if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7: # Check compute capability (7.0+ for good fp16)
initial_dtype_to_use = torch.float16
print("Detected GPU supports FP16, will attempt using torch.float16.")
else:
initial_dtype_to_use = torch.float32 # Fallback
print("Detected GPU might not fully support FP16 or capability check failed, using torch.float32.")
else:
initial_dtype_to_use = torch.float32 # CPU requires float32
print(f"Initial dtype: {initial_dtype_to_use}")
# --- Global state for the loaded pipeline ---
# We'll load the *initial* pipeline once on startup and keep it in memory.
# Subsequent model changes will reload the pipeline within the infer function.
current_pipeline = None
current_model_id = None # Keep track of the currently loaded model identifier
current_device_loaded = None # Keep track of the device the pipeline is currently on
# Initial model to load on startup
INITIAL_MODEL_ID = DEFAULT_HUB_MODELS[0] if DEFAULT_HUB_MODELS else None
if INITIAL_MODEL_ID:
print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...")
try:
current_pipeline = StableDiffusionPipeline.from_pretrained(
INITIAL_MODEL_ID,
torch_dtype=initial_dtype_to_use,
safety_checker=None, # <<< SAFETY CHECKER DISABLED <<<
)
current_pipeline = current_pipeline.to(initial_device_to_use)
current_model_id = INITIAL_MODEL_ID
current_device_loaded = torch.device(initial_device_to_use)
print(f"Initial model loaded successfully on {current_device_loaded}.")
# Basic check for SD1.x architecture
unet_config = getattr(current_pipeline, 'unet', None)
if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
cross_attn_dim = unet_config.config.cross_attention_dim
if cross_attn_dim != 768:
warning_msg = (f"Warning: Loaded model '{INITIAL_MODEL_ID}' might not be a standard SD 1.x model "
f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
"Results may be unexpected.")
print(warning_msg)
# gr.Warning(warning_msg) # Cannot raise Gradio error/warning during startup load
else:
print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
else:
print("Could not check UNet cross_attention_dim for initial model.")
except Exception as e:
current_pipeline = None
current_model_id = None
current_device_loaded = None
print(f"Error loading initial model '{INITIAL_MODEL_ID}': {e}")
print("Application will start, but image generation may fail if the initial model cannot be loaded.")
# Cannot raise gr.Error here as Gradio not fully initialized
else:
print("\nNo default Hub models defined. Application will start without a loaded model.")
print("Please select a model from the dropdown to enable generation.")
# --- Image Generation Function (Adapted for Hugging Face Spaces 'infer' signature) ---
# @spaces.GPU #[uncomment if using ZeroGPU, otherwise standard torch device handles it]
def infer(
model_identifier, # From model_dropdown
selected_device_str, # From device_dropdown
prompt, # From prompt_input
negative_prompt, # From negative_prompt_input
steps, # From steps_slider
cfg_scale, # From cfg_slider
scheduler_name, # From scheduler_dropdown
size, # From size_dropdown
seed, # From seed_input
randomize_seed, # From randomize_seed_checkbox
progress=gr.Progress(track_tqdm=True), # Added progress argument from template
):
"""Generates an image using the selected model and parameters on the chosen device."""
global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP
    # If the startup load failed or a different model/device is selected,
    # the load/switch logic in step 1 below handles (re)loading the pipeline.
if not model_identifier or model_identifier == "No models found":
raise gr.Error(f"No model selected or available. Please select a model from the list.")
if not prompt:
raise gr.Error("Please enter a prompt.")
# Map selected device string to PyTorch device string
device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
# If GPU was selected but not available, raise an error specific to this condition
if selected_device_str == "GPU" and device_to_use == "cpu":
raise gr.Error("GPU selected but CUDA is not available to PyTorch on this Space. Please select CPU or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.")
# Determine dtype based on the actual device being used
dtype_to_use = torch.float32 # Default
if device_to_use == "cuda":
if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
dtype_to_use = torch.float16
else:
dtype_to_use = torch.float32
else:
dtype_to_use = torch.float32
print(f"Attempting generation on device: {device_to_use}, using dtype: {dtype_to_use}")
# 1. Load/Switch Model if necessary
# Check if the requested model identifier OR the requested device has changed
# Use string comparison for current_device_loaded as it's a torch.device object
if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != device_to_use):
print(f"Loading model: {model_identifier} onto {device_to_use}...")
# Clear previous pipeline to potentially free memory *before* loading the new one
if current_pipeline is not None:
print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...")
# Move pipeline to CPU before deleting if it was on GPU, might help with freeing VRAM
if str(current_device_loaded) == "cuda":
try:
current_pipeline.to("cpu")
print("Moved previous pipeline to CPU.")
except Exception as move_e:
print(f"Warning: Failed to move previous pipeline to CPU: {move_e}")
del current_pipeline
current_pipeline = None # Set to None immediately
# Attempt to clear CUDA cache if using GPU (from the previous device)
if str(current_device_loaded) == "cuda":
try:
torch.cuda.empty_cache()
print("Cleared CUDA cache.")
except Exception as cache_e:
print(f"Warning: Error clearing CUDA cache: {cache_e}")
# Ensure the device is actually available if not CPU (redundant with earlier check but safe)
if device_to_use == "cuda":
if not torch.cuda.is_available():
raise gr.Error("CUDA selected but not available. Please select CPU.")
try:
# For Spaces, assume model_identifier is a Hub ID or a path *within the repo*
# from_pretrained can handle both.
print(f"Attempting to load model from: {model_identifier}")
pipeline = StableDiffusionPipeline.from_pretrained(
model_identifier,
torch_dtype=dtype_to_use,
safety_checker=None, # <<< SAFETY CHECKER DISABLED <<<
# Add `vae=AutoencoderKL.from_pretrained(...)` if needed for specific models
)
pipeline = pipeline.to(device_to_use) # Move to the selected device
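            # Optional memory saving (assumption, not enabled by default): attention slicing
            # trades a little speed for lower peak VRAM on smaller GPUs. Uncomment to try it:
            # pipeline.enable_attention_slicing()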
current_pipeline = pipeline
current_model_id = model_identifier
current_device_loaded = torch.device(device_to_use)
# Basic check for SD1.x architecture (cross_attention_dim = 768)
unet_config = getattr(pipeline, 'unet', None)
if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
cross_attn_dim = unet_config.config.cross_attention_dim
if cross_attn_dim != 768:
warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model "
f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
"Results may be unexpected or generation might fail.")
print(warning_msg)
gr.Warning(warning_msg)
else:
print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
else:
print("Could not check UNet cross_attention_dim.")
print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {dtype_to_use}.")
except Exception as e:
# Reset global state on load failure
current_pipeline = None
current_model_id = None
current_device_loaded = None
print(f"Error loading model '{model_identifier}': {e}")
error_message_lower = str(e).lower()
# Provide more specific error messages based on common exceptions
if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower:
raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check ID/path or internet connection. Error: {e}")
elif "checkpointsnotfounderror" in error_message_lower or "valueerror: could not find a valid model structure" in error_message_lower:
raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it's a diffusers format ID/path. Error: {e}")
elif "out of memory" in error_message_lower:
raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}")
elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower:
raise gr.Error(f"CUDA/GPU Driver/Installation Error on Space: {e}. Check Space hardware or select CPU.")
elif "safetensors_rust.safetensorserror" in error_message_lower or "oserror: cannot load" in error_message_lower or "filenotfounderror" in error_message_lower:
raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.")
elif "could not import" in error_message_lower or "module not found" in error_message_lower:
raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.")
else:
raise gr.Error(f"Failed to load model '{model_identifier}': {e}")
# Check if pipeline is successfully loaded before proceeding
if current_pipeline is None:
raise gr.Error("Model failed to load. Cannot generate image.")
# 2. Configure Scheduler
selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
if selected_scheduler_class is None:
print(f"Warning: Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")
selected_scheduler_class = SCHEDULER_MAP[DEFAULT_SCHEDULER]
gr.Warning(f"Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")
# Recreate scheduler from config to ensure compatibility with the loaded pipeline
try:
scheduler_config = current_pipeline.scheduler.config
current_pipeline.scheduler = selected_scheduler_class.from_config(scheduler_config)
print(f"Scheduler set to: {scheduler_name}")
except Exception as e:
print(f"Error setting scheduler '{scheduler_name}': {e}")
# Attempt to fallback to a default if setting fails
try:
print(f"Attempting to fallback to default scheduler: {DEFAULT_SCHEDULER}")
            # Re-read the config here in case the failure above happened before scheduler_config was assigned
            current_pipeline.scheduler = SCHEDULER_MAP[DEFAULT_SCHEDULER].from_config(current_pipeline.scheduler.config)
gr.Warning(f"Failed to set scheduler to '{scheduler_name}', fell back to {DEFAULT_SCHEDULER}. Error: {e}")
except Exception as fallback_e:
print(f"Fallback scheduler failed too: {fallback_e}")
raise gr.Error(f"Failed to configure scheduler '{scheduler_name}' and fallback failed. Error: {e}")
# 3. Parse Image Size
width, height = 512, 512 # Default size
if size.lower() == "hire.fix":
width, height = 1024, 1024
print(f"Interpreting 'hire.fix' size as {width}x{height}")
else:
try:
w_str, h_str = size.split('x')
width = int(w_str)
height = int(h_str)
except ValueError:
raise gr.Error(f"Invalid size format: '{size}'. Use 'WidthxHeight' (e.g., 512x512) or 'hire.fix'.")
except Exception as e:
raise gr.Error(f"Error parsing size '{size}': {e}")
# Size multiple check (SD 1.5 works best with multiples of 64 or 8)
multiple_check = 64 # Use 64 as a standard check
if width % multiple_check != 0 or height % multiple_check != 0:
warning_msg_size = (f"Warning: Image size {width}x{height} is not a multiple of {multiple_check}. "
f"Stable Diffusion 1.5 models are typically trained on sizes like 512x512. "
"Using non-standard sizes may cause tiling, distortions, or other artifacts.")
print(warning_msg_size)
gr.Warning(warning_msg_size)
# Optional: Round size to nearest multiple of 64? Not implemented here to preserve user choice.
# 4. Set Seed Generator
generator = None
# The generator device needs to match the pipeline device
generator_device = current_pipeline.device # Must match the pipeline device
if randomize_seed: # Use the randomize_seed checkbox
seed = random.randint(0, MAX_SEED) # Re-randomize seed
print(f"Randomizing seed to: {seed}")
else:
# Use provided seed if not randomizing (-1 will still use it)
print(f"Using provided seed: {int(seed)}")
try:
# Explicitly move generator to the desired device
generator = torch.Generator(device=generator_device).manual_seed(int(seed))
print(f"Generator set with seed {int(seed)} on device: {generator_device}")
except Exception as e:
print(f"Warning: Error setting seed generator on device {generator_device}: {e}. Generation might still proceed with a default generator (potentially on CPU).")
gr.Warning(f"Failed to set seed generator on device {generator_device}. Generation might use a random seed on a different device. Error: {e}")
generator = None # Let pipeline handle random seed if generator creation fails or device mismatch
# 5. Generate Image
# Ensure required parameters are integers/floats
num_inference_steps_int = int(steps)
guidance_scale_float = float(cfg_scale)
    # Basic validation on parameters (raise gr.Error so the message surfaces in the UI)
    if num_inference_steps_int <= 0 or guidance_scale_float <= 0:
        raise gr.Error("Steps and CFG Scale must be positive values.")
    if width <= 0 or height <= 0:
        raise gr.Error("Image width and height must be positive.")
print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={int(seed)}, Device={device_to_use}, Dtype={dtype_to_use}")
start_time = time.time()
try:
# Use the progress parameter from the template
output = current_pipeline(
prompt=prompt,
negative_prompt=negative_prompt if negative_prompt else None,
num_inference_steps=num_inference_steps_int,
guidance_scale=guidance_scale_float,
width=width,
height=height,
generator=generator,
            # Report progress to Gradio periodically. Note: callback/callback_steps are deprecated
            # in newer diffusers releases in favour of callback_on_step_end.
            callback_steps=max(1, num_inference_steps_int // 20),
            callback=lambda step, timestep, latents: progress(step / num_inference_steps_int, desc=f"Step {step}/{num_inference_steps_int}"),
# Add VAE usage here if needed for specific models that require it
# vae=...
# Potentially add attention slicing/xformers/etc. for memory efficiency
# enable_attention_slicing="auto", # Can help with VRAM on smaller GPUs
# enable_xformers_memory_efficient_attention() # Needs xformers installed & compatible GPU
)
end_time = time.time()
print(f"Generation finished in {end_time - start_time:.2f} seconds.")
generated_image = output.images[0]
# Return both the image and the seed (potentially randomized)
return generated_image, seed
except gr.Error as e:
# Re-raise Gradio errors directly
raise e
except ValueError as ve:
# Handle specific value errors like invalid parameters
print(f"Parameter Error: {ve}")
raise gr.Error(f"Invalid Parameter: {ve}")
except Exception as e:
# Catch any other unexpected errors during generation
print(f"An error occurred during image generation: {e}")
error_message_lower = str(e).lower()
if "size must be a multiple of" in error_message_lower or "invalid dimensions" in error_message_lower or "shape mismatch" in error_message_lower:
raise gr.Error(f"Image generation failed - Invalid size '{width}x{height}' for model: {e}. Try a multiple of 64 or 8.")
elif "out of memory" in error_message_lower or "cuda out of memory" in error_message_lower:
print("Hint: Try smaller image size, fewer steps, or a model that uses less VRAM.")
raise gr.Error(f"Out of Memory (OOM) during generation. This Space might not have enough VRAM. Try smaller size/steps or select CPU (if available). Error: {e}")
elif "runtimeerror" in error_message_lower:
raise gr.Error(f"Runtime Error during generation: {e}. This could be a model/scheduler incompatibility or other issue.")
elif "device-side assert" in error_message_lower or "cuda error" in error_message_lower:
raise gr.Error(f"CUDA/GPU Error during generation: {e}. Ensure the Space is configured with a GPU and compatible PyTorch.")
elif "expected all tensors to be on the same device" in error_message_lower:
raise gr.Error(f"Device mismatch error during generation: {e}. This is an internal error, please report it.")
else:
# Generic catch-all for unknown errors
raise gr.Error(f"Image generation failed: An unexpected error occurred. {e}")
# --- Gradio Interface ---
# For Spaces, we primarily list Hub models in the dropdown
model_choices = DEFAULT_HUB_MODELS
if not model_choices:
initial_model_choices = ["No models found"]
initial_default_model = "No models found"
model_dropdown_interactive = False
print(f"\n!!! WARNING: No default Hub models listed in script. Model dropdown will be empty. !!!")
else:
initial_model_choices = model_choices
# Set a reasonable default if available
initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID else "No models found"
model_dropdown_interactive = True if INITIAL_MODEL_ID else False
scheduler_choices = list(SCHEDULER_MAP.keys())
# Use the template's CSS
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from user's script
gr.Markdown(
"""
# CipherCore Stable Diffusion 1.5 Generator
Create images with Stable Diffusion 1.5 using models from the Hugging Face Hub.
Choose a model, set your prompts and parameters, and generate!
_Note: the 'hire.fix' size option currently generates at 1024x1024._
""" # Removed reference to local checkpoints and "coming soon"
)
# Add a note about model loading time
if INITIAL_MODEL_ID:
gr.Markdown(f"*(Note: The initial model '{INITIAL_MODEL_ID}' is loading... First generation might take longer.)*")
else:
gr.Markdown(f"*(Note: No initial model configured or loaded. Select a model from the dropdown to start.)*")
with gr.Row():
with gr.Column(scale=2): # Give more space to controls
model_dropdown = gr.Dropdown(
choices=initial_model_choices,
value=initial_default_model,
label="Select Model (Hugging Face Hub ID)", # Updated label
interactive=model_dropdown_interactive,
)
device_dropdown = gr.Dropdown(
choices=AVAILABLE_DEVICES,
value=DEFAULT_DEVICE,
label="Processing Device",
interactive=len(AVAILABLE_DEVICES) > 1, # Only make interactive if both CPU and GPU are options
)
prompt_input = gr.Textbox(label="Positive Prompt", placeholder="e.g., a majestic lion in a vibrant jungle, photorealistic", lines=3, autofocus=True) # Autofocus on prompt
negative_prompt_input = gr.Textbox(label="Negative Prompt (Optional)", placeholder="e.g., blurry, low quality, deformed, watermark", lines=2)
with gr.Accordion("Advanced Settings", open=False): # Keep advanced settings initially closed
with gr.Row():
steps_slider = gr.Slider(minimum=5, maximum=150, value=30, label="Inference Steps", step=1)
cfg_slider = gr.Slider(minimum=1.0, maximum=30.0, value=7.5, label="CFG Scale", step=0.1)
with gr.Row():
scheduler_dropdown = gr.Dropdown(
choices=scheduler_choices,
value=DEFAULT_SCHEDULER,
label="Scheduler"
)
size_dropdown = gr.Dropdown(
choices=SUPPORTED_SD15_SIZES,
value="512x512", # SD1.5 default
label="Image Size"
)
# Combine seed input and randomize checkbox
with gr.Row():
seed_input = gr.Number(label="Seed", value=0, precision=0, interactive=True) # Use 0 as default, interactive initially
randomize_seed_checkbox = gr.Checkbox(label="Randomize seed (-1 equivalent)", value=True)
generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1) # Added emojis
with gr.Column(scale=3): # Give more space to image
output_image = gr.Image(
label="Generated Image",
type="pil",
height=768, # Slightly larger preview if possible
width=768, # Match height for square
show_share_button=True,
show_download_button=True,
interactive=False # Output image is not interactive
)
# The template returned the seed, let's add a display for the actual seed used
actual_seed_output = gr.Number(label="Actual Seed Used", precision=0, interactive=False)
# Link button click to generation function
# Use gr.on as in the template
gr.on(
triggers=[generate_button.click, prompt_input.submit], # Also trigger on prompt submit
fn=infer,
inputs=[
model_dropdown,
device_dropdown,
prompt_input,
negative_prompt_input,
steps_slider,
cfg_slider,
scheduler_dropdown,
size_dropdown,
seed_input,
randomize_seed_checkbox, # Pass the checkbox value
],
outputs=[output_image, actual_seed_output], # Return image and the actual seed used
api_name="generate" # Optional: For API access
)
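    # The api_name above exposes this endpoint via the Gradio API. A minimal client sketch
    # (assumption -- run from a separate script, with the real Space id filled in):
    #   from gradio_client import Client
    #   Client("<user>/<space>").predict(..., api_name="/generate")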
    # Example prompts from the template. Only the prompt is wired to gr.Examples;
    # all other inputs (model, device, steps, size, etc.) keep their current values.
    template_examples = [
        "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
        "An astronaut riding a green horse",
        "A delicious ceviche cheesecake slice",
    ]
    gr.Examples(examples=template_examples, inputs=[prompt_input])
# Add some notes/footer from user's script, adapted for Spaces
gr.Markdown(
"""
---
**Usage Notes:**
1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space.
2. Choose your processing device (GPU recommended if available).
3. Enter your positive and optional negative prompts.
4. Adjust advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. Seed -1 or the "Randomize seed" checkbox will use a random seed.
5. Click "Generate Image".
The first generation with a new model or device may take longer while the model downloads and loads.
""" # Removed notes about local models and batch files
)
# --- Launch the App ---
if __name__ == "__main__":
print("\n--- Starting CipherCore Stable Diffusion 1.5 Generator (Hugging Face Spaces) ---")
cuda_status = "CUDA available" if torch.cuda.is_available() else "CUDA not available"
gpu_count_str = f"Found {torch.cuda.device_count()} GPU(s)." if torch.cuda.is_available() else ""
print(f"{cuda_status} {gpu_count_str}")
print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}")
print(f"Default device selected by app: {DEFAULT_DEVICE}")
if current_pipeline:
print(f"Initial model '{current_model_id}' loaded successfully.")
else:
print("No initial model loaded. Check model list and network connectivity.")
print("Launching Gradio interface...")
# For Spaces, usually launched directly without launch() parameters in app.py
# Spaces handles the server_name, server_port, share, etc.
# If running locally for testing, uncomment demo.launch()
# demo.launch(show_error=True, inbrowser=True) # Uncomment for local testing
demo.launch() # Standard launch for Hugging Face Spaces
print("Gradio interface closing.")