# -*- coding: utf-8 -*- """ CipherCore SD1.5 Image Generator, FAST CPU INFERENCE Raxephion @2025 """ import gradio as gr import numpy as np # <-- Needed for np.iinfo import random import torch from diffusers import StableDiffusionPipeline # Import commonly used schedulers from diffusers import DDPMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler import os # Keep os for potential checks, though local paths are less standard on Spaces # import spaces #[uncomment to use ZeroGPU if needed, typically not for standard GPU usage] from PIL import Image import time # Optional: for timing generation # huggingface_hub is implicitly used by from_pretrained # --- Configuration --- # MODELS_DIR is less relevant for Spaces unless specifically placing models in repo # For Spaces, models are primarily loaded via their Hugging Face Hub IDs SUPPORTED_SD15_SIZES = ["512x512", "768x512", "512x768", "768x768", "1024x768", "768x1024", "1024x1024", "hire.fix"] # Mapping of friendly scheduler names to their diffusers classes SCHEDULER_MAP = { "Euler": EulerDiscreteScheduler, "DPM++ 2M": DPMSolverMultistepScheduler, "DDPM": DDPMScheduler, "LMS": LMSDiscreteScheduler, # Add more as needed from diffusers.schedulers (make sure they are imported) } DEFAULT_SCHEDULER = "Euler" # Default scheduler on startup # List of popular Stable Diffusion 1.5 models on the Hugging Face Hub # For Spaces, this is the primary source of models. DEFAULT_HUB_MODELS = [ "runwayml/stable-diffusion-v1-5", "SG161222/Realistic_Vision_V6.0_B1_noVAE", # Example popular 1.5 model "nitrosocke/Ghibli-Diffusion", "danyloylo/sd1.5-ghibli-style-05", "Bilal326/SD_1.5_DragonWarriorV2" # "CompVis/stable-diffusion-v1-4", # Example SD 1.4 model (might behave slightly differently) # Add other diffusers-compatible SD1.5 models here ] # --- Constants for Gradio UI / Generation --- MAX_SEED = np.iinfo(np.int32).max # <-- Added this line back! Defines the maximum seed value # MAX_IMAGE_SIZE = 1024 # This was in the template but not strictly used in the logic outside the slider max, keeping it defined is harmless but maybe not needed # However, let's add MAX_IMAGE_SIZE as it was related to the sliders in the template MAX_IMAGE_SIZE_SLIDER = 1024 # Renamed to avoid confusion, used only for slider max # --- Determine available devices and set up options --- # This logic is from the user's script and works well for Spaces AVAILABLE_DEVICES = ["CPU"] if torch.cuda.is_available(): AVAILABLE_DEVICES.append("GPU") print(f"CUDA available. Found {torch.cuda.device_count()} GPU(s).") if torch.cuda.device_count() > 0: print(f"Using GPU 0: {torch.cuda.get_device_name(0)}") else: print("CUDA not available. Running on CPU.") # Default device preference: GPU if available, else CPU DEFAULT_DEVICE = "GPU" if "GPU" in AVAILABLE_DEVICES else "CPU" # Set initial PyTorch device string based on detection initial_device_to_use = "cuda" if DEFAULT_DEVICE == "GPU" else "cpu" print(f"Initial pipeline will load on device: {initial_device_to_use}") # Determine initial dtype # Note: fp16 is generally faster and uses less VRAM on compatible GPUs initial_dtype_to_use = torch.float32 # Default if initial_device_to_use == "cuda": # Check if the GPU supports fp16 (most modern ones do) if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7: # Check compute capability (7.0+ for good fp16) initial_dtype_to_use = torch.float16 print("Detected GPU supports FP16, will attempt using torch.float16.") else: initial_dtype_to_use = torch.float32 # Fallback print("Detected GPU might not fully support FP16 or capability check failed, using torch.float32.") else: initial_dtype_to_use = torch.float32 # CPU requires float32 print(f"Initial dtype: {initial_dtype_to_use}") # --- Global state for the loaded pipeline --- # We'll load the *initial* pipeline once on startup and keep it in memory. # Subsequent model changes will reload the pipeline within the infer function. current_pipeline = None current_model_id = None # Keep track of the currently loaded model identifier current_device_loaded = None # Keep track of the device the pipeline is currently on # Initial model to load on startup INITIAL_MODEL_ID = DEFAULT_HUB_MODELS[0] if DEFAULT_HUB_MODELS else None if INITIAL_MODEL_ID: print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...") try: # Load the pipeline onto the initial device and dtype current_pipeline = StableDiffusionPipeline.from_pretrained( INITIAL_MODEL_ID, torch_dtype=initial_dtype_to_use, safety_checker=None, # <<< SAFETY CHECKER DISABLED <<< ) current_pipeline = current_pipeline.to(initial_device_to_use) current_model_id = INITIAL_MODEL_ID current_device_loaded = torch.device(initial_device_to_use) print(f"Initial model loaded successfully on {current_device_loaded}.") # Basic check for SD1.x architecture unet_config = getattr(current_pipeline, 'unet', None) if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'): cross_attn_dim = unet_config.config.cross_attention_dim if cross_attn_dim != 768: warning_msg = (f"Warning: Loaded model '{INITIAL_MODEL_ID}' might not be a standard SD 1.x model " f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). " "Results may be unexpected.") print(warning_msg) # gr.Warning(warning_msg) # Cannot raise Gradio error/warning during startup load else: print("UNet cross_attention_dim is 768, consistent with SD 1.x.") else: print("Could not check UNet cross_attention_dim for initial model.") except Exception as e: current_pipeline = None current_model_id = None current_device_loaded = None print(f"Error loading initial model '{INITIAL_MODEL_ID}': {e}") print("Application will start, but image generation may fail if the initial model cannot be loaded.") # Cannot raise gr.Error here as Gradio not fully initialized else: print("\nNo default Hub models defined. Application will start without a loaded model.") print("Please select a model from the dropdown to enable generation.") # --- Image Generation Function (Adapted for Hugging Face Spaces 'infer' signature) --- # @spaces.GPU #[uncomment if using ZeroGPU, otherwise standard torch device handles it] def infer( model_identifier, # From model_dropdown selected_device_str, # From device_dropdown prompt, # From prompt_input negative_prompt, # From negative_prompt_input steps, # From steps_slider cfg_scale, # From cfg_slider scheduler_name, # From scheduler_dropdown size, # From size_dropdown seed, # From seed_input randomize_seed, # From randomize_seed_checkbox progress=gr.Progress(track_tqdm=True), # Added progress argument from template ): """Generates an image using the selected model and parameters on the chosen device.""" global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP, MAX_SEED # <-- Make MAX_SEED global # Check if initial load failed or model is different if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != device_to_use): # This check is done before parameter parsing so we can determine device/dtype for loading # Need to redo some parameter parsing here to get device_to_use early temp_device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu" temp_dtype_to_use = torch.float32 if temp_device_to_use == "cuda": if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7: temp_dtype_to_use = torch.float16 else: temp_dtype_to_use = torch.float32 else: temp_dtype_to_use = torch.float32 # Now proceed with actual model loading based on parsed device/dtype print(f"Loading model: {model_identifier} onto {temp_device_to_use} with dtype {temp_dtype_to_use}...") # Clear previous pipeline to potentially free memory *before* loading the new one if current_pipeline is not None: print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...") if str(current_device_loaded) == "cuda": try: current_pipeline.to("cpu") print("Moved previous pipeline to CPU.") except Exception as move_e: print(f"Warning: Failed to move previous pipeline to CPU: {move_e}") del current_pipeline current_pipeline = None # Set to None immediately if str(current_device_loaded) == "cuda": try: torch.cuda.empty_cache() print("Cleared CUDA cache.") except Exception as cache_e: print(f"Warning: Error clearing CUDA cache: {cache_e}") # Ensure the device is actually available if not CPU (redundant with earlier check but safe) if temp_device_to_use == "cuda": if not torch.cuda.is_available(): raise gr.Error("CUDA selected but not available to PyTorch on this Space. Please select CPU or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.") try: pipeline = StableDiffusionPipeline.from_pretrained( model_identifier, torch_dtype=temp_dtype_to_use, # Use the determined dtype for loading safety_checker=None, ) pipeline = pipeline.to(temp_device_to_use) # Use the determined device current_pipeline = pipeline current_model_id = model_identifier current_device_loaded = torch.device(temp_device_to_use) # Store the actual device object # Basic check for SD1.x architecture unet_config = getattr(pipeline, 'unet', None) if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'): cross_attn_dim = unet_config.config.cross_attention_dim if cross_attn_dim != 768: warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model " f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). " "Results may be unexpected or generation might fail.") print(warning_msg) gr.Warning(warning_msg) else: print("UNet cross_attention_dim is 768, consistent with SD 1.x.") else: print("Could not check UNet cross_attention_dim.") print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {temp_dtype_to_use}.") except Exception as e: current_pipeline = None current_model_id = None current_device_loaded = None print(f"Error loading model '{model_identifier}': {e}") error_message_lower = str(e).lower() if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower: raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check ID/path or internet connection. Error: {e}") elif "checkpointsnotfounderror" in error_message_lower or "valueerror: could not find a valid model structure" in error_message_lower: raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it's a diffusers format ID/path. Error: {e}") elif "out of memory" in error_message_lower: raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}") elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower: raise gr.Error(f"CUDA/GPU Driver/Installation Error on Space: {e}. Check Space hardware or select CPU.") elif "safetensors_rust.safetensorserror" in error_message_lower or "oserror: cannot load" in error_message_lower or "filenotfounderror" in error_message_lower: raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.") elif "could not import" in error_message_lower or "module not found" in error_message_lower: raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.") else: raise gr.Error(f"Failed to load model '{model_identifier}': {e}") # Re-determine device_to_use and dtype_to_use *after* ensuring pipeline is loaded # They should match current_device_loaded and the pipeline's dtype device_to_use = str(current_pipeline.device) if current_pipeline else ("cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu") dtype_to_use = current_pipeline.dtype if current_pipeline else torch.float32 # Fallback if somehow pipeline is still None # Check if pipeline is successfully loaded before proceeding if current_pipeline is None: raise gr.Error("Model failed to load. Cannot generate image.") # 2. Configure Scheduler selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name) if selected_scheduler_class is None: print(f"Warning: Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.") selected_scheduler_class = SCHEDULER_MAP[DEFAULT_SCHEDULER] gr.Warning(f"Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.") # Recreate scheduler from config to ensure compatibility with the loaded pipeline try: scheduler_config = current_pipeline.scheduler.config current_pipeline.scheduler = selected_scheduler_class.from_config(scheduler_config) print(f"Scheduler set to: {scheduler_name}") except Exception as e: print(f"Error setting scheduler '{scheduler_name}': {e}") # Attempt to fallback to a default if setting fails try: print(f"Attempting to fallback to default scheduler: {DEFAULT_SCHEDULER}") current_pipeline.scheduler = SCHEDULER_MAP[DEFAULT_SCHEDULER].from_config(scheduler_config) gr.Warning(f"Failed to set scheduler to '{scheduler_name}', fell back to {DEFAULT_SCHEDULER}. Error: {e}") except Exception as fallback_e: print(f"Fallback scheduler failed too: {fallback_e}") raise gr.Error(f"Failed to configure scheduler '{scheduler_name}' and fallback failed. Error: {e}") # 3. Parse Image Size width, height = 512, 512 # Default size if size.lower() == "hire.fix": width, height = 1024, 1024 print(f"Interpreting 'hire.fix' size as {width}x{height}") else: try: w_str, h_str = size.split('x') width = int(w_str) height = int(h_str) except ValueError: raise gr.Error(f"Invalid size format: '{size}'. Use 'WidthxHeight' (e.g., 512x512) or 'hire.fix'.") except Exception as e: raise gr.Error(f"Error parsing size '{size}': {e}") # Size multiple check (SD 1.5 works best with multiples of 64 or 8) multiple_check = 64 # Use 64 as a standard check if width % multiple_check != 0 or height % multiple_check != 0: warning_msg_size = (f"Warning: Image size {width}x{height} is not a multiple of {multiple_check}. " f"Stable Diffusion 1.5 models are typically trained on sizes like 512x512. " "Using non-standard sizes may cause tiling, distortions, or other artifacts.") print(warning_msg_size) gr.Warning(warning_msg_size) # Optional: Round size to nearest multiple of 64? Not implemented here to preserve user choice. # 4. Set Seed Generator # The generator device needs to match the pipeline device generator_device = current_pipeline.device # Must match the pipeline device if randomize_seed: # Use the randomize_seed checkbox seed = random.randint(0, MAX_SEED) # Re-randomize seed using MAX_SEED print(f"Randomizing seed to: {seed}") else: # Use provided seed if not randomizing (-1 will still use it) # If seed is -1 but randomize_seed is False, we should still use a random seed, # but make it deterministic based on system time or similar if possible, or just MAX_SEED based random. # Given the UI has a 'randomize_seed' checkbox, let's treat seed=-1 as a specific "use system random" signal IF randomize_seed is also False. # Or, more simply, if randomize_seed is False, always use the value in the seed input. # Let's stick to the simpler interpretation: if randomize_seed is True, override seed input. If False, use seed input value. print(f"Using provided seed: {int(seed)}") try: # Ensure seed is an integer for the generator seed_int = int(seed) # Explicitly move generator to the desired device generator = torch.Generator(device=generator_device).manual_seed(seed_int) print(f"Generator set with seed {seed_int} on device: {generator_device}") except Exception as e: # Handle potential issues like non-integer seed input print(f"Warning: Error setting seed generator with seed {seed} on device {generator_device}: {e}. Falling back to default generator (potentially on CPU) and using a potentially different seed.") gr.Warning(f"Failed to set seed generator with seed {seed}. Using random seed. Error: {e}") generator = None # Let pipeline handle random seed if generator creation fails or device mismatch # If generator creation failed, the actual seed used might be different. How to report it? # The simplest is to let the pipeline run and report the seed *we attempted to use*. # Or, if we know pipeline will use a random seed on failure, report that. # For simplicity, let's report the seed_int we derived UNLESS randomize_seed was True, then report the generated random seed. # No, the most accurate is to let the pipeline generate, and some diffusers versions return the generator state/seed. # However, the standard pipeline output doesn't return the seed. # Let's just report the seed we *tried* to use. If randomize_seed was True, it's the random one. If False, it's the user input one. pass # Keep the last calculated seed_int or seed value # 5. Generate Image # Ensure required parameters are integers/floats num_inference_steps_int = int(steps) guidance_scale_float = float(cfg_scale) # Basic validation on parameters if num_inference_steps_int <= 0 or guidance_scale_float <= 0: raise ValueError("Steps and CFG Scale must be positive values.") if width <= 0 or height <= 0: raise ValueError("Image width and height must be positive.") print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={seed_int if generator else 'Random (Generator Failed)'}, Device={device_to_use}, Dtype={dtype_to_use}") start_time = time.time() try: # Use the progress parameter from the template output = current_pipeline( prompt=prompt, negative_prompt=negative_prompt if negative_prompt else None, num_inference_steps=num_inference_steps_int, guidance_scale=guidance_scale_float, width=width, height=height, generator=generator, # Pass the generator (which might be None) # Pass progress object for tqdm tracking in Gradio callback_steps=max(1, num_inference_steps_int // 20), # Update progress bar periodically callback=lambda step, timestep, latents: progress((step / num_inference_steps_int, f"Step {step}/{num_inference_steps_int}")), # Add VAE usage here if needed for specific models that require it # vae=... # Potentially add attention slicing/xformers/etc. for memory efficiency # enable_attention_slicing="auto", # Can help with VRAM on smaller GPUs # enable_xformers_memory_efficient_attention() # Needs xformers installed & compatible GPU ) end_time = time.time() print(f"Generation finished in {end_time - start_time:.2f} seconds.") generated_image = output.images[0] # Determine the seed to return: the one we attempted to use actual_seed_used = seed_int if generator else -1 # Return -1 or the input seed if generator failed # Return both the image and the seed (potentially randomized) return generated_image, actual_seed_used except gr.Error as e: # Re-raise Gradio errors directly raise e except ValueError as ve: # Handle specific value errors like invalid parameters print(f"Parameter Error: {ve}") raise gr.Error(f"Invalid Parameter: {ve}") except Exception as e: # Catch any other unexpected errors during generation print(f"An error occurred during image generation: {e}") error_message_lower = str(e).lower() if "size must be a multiple of" in error_message_lower or "invalid dimensions" in error_message_lower or "shape mismatch" in error_message_lower: raise gr.Error(f"Image generation failed - Invalid size '{width}x{height}' for model: {e}. Try a multiple of 64 or 8.") elif "out of memory" in error_message_lower or "cuda out of memory" in error_message_lower: print("Hint: Try smaller image size, fewer steps, or a model that uses less VRAM.") raise gr.Error(f"Out of Memory (OOM) during generation. This Space might not have enough VRAM. Try smaller size/steps or select CPU (if available). Error: {e}") elif "runtimeerror" in error_message_lower: raise gr.Error(f"Runtime Error during generation: {e}. This could be a model/scheduler incompatibility or other issue.") elif "device-side assert" in error_message_lower or "cuda error" in error_message_lower: raise gr.Error(f"CUDA/GPU Error during generation: {e}. Ensure the Space is configured with a GPU and compatible PyTorch.") elif "expected all tensors to be on the same device" in error_message_lower: raise gr.Error(f"Device mismatch error during generation: {e}. This is an internal error, please report it.") else: # Generic catch-all for unknown errors raise gr.Error(f"Image generation failed: An unexpected error occurred. {e}") # --- Gradio Interface --- # For Spaces, we primarily list Hub models in the dropdown model_choices = DEFAULT_HUB_MODELS if not model_choices: initial_model_choices = ["No models found"] initial_default_model = "No models found" model_dropdown_interactive = False print(f"\n!!! WARNING: No default Hub models listed in script. Model dropdown will be empty. !!!") else: initial_model_choices = model_choices # Set a reasonable default if available initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID else "No models found" model_dropdown_interactive = True # Make it interactive if there's *any* model choice # Ensure the initial default model is actually in the choices list if possible if initial_default_model != "No models found" and initial_default_model not in initial_model_choices: print(f"Warning: Initial default model '{initial_default_model}' is not in the model_choices list.") if initial_model_choices and initial_model_choices[0] != "No models found": initial_default_model = initial_model_choices[0] print(f"Setting default model to first available choice: {initial_default_model}") else: initial_default_model = "No models found" # Fallback if no choices scheduler_choices = list(SCHEDULER_MAP.keys()) # Use the template's CSS css = """ #col-container { margin: 0 auto; max-width: 640px; } """ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from user's script gr.Markdown( """ # CipherCore Stable Diffusion 1.5 Generator Create images with Stable Diffusion 1.5 using models from Hugging Face Hub. Choose a model, set your prompts and parameters, and generate! _Note: 'hire.fix' size option currently generates at 1024x1024._ """ # Removed reference to local checkpoints and "coming soon" ) # Add a note about model loading time if INITIAL_MODEL_ID: gr.Markdown(f"*(Note: The initial model '{INITIAL_MODEL_ID}' is loading... First generation might take longer.)*") elif initial_default_model != "No models found": gr.Markdown(f"*(Note: Loading model '{initial_default_model}' on first generation... This might take some time.)*") else: gr.Markdown(f"*(Note: No models available. Add Hub IDs to DEFAULT_HUB_MODELS in the script.)*") with gr.Row(): with gr.Column(scale=2): # Give more space to controls model_dropdown = gr.Dropdown( choices=initial_model_choices, value=initial_default_model, label="Select Model (Hugging Face Hub ID)", # Updated label interactive=model_dropdown_interactive, ) device_dropdown = gr.Dropdown( choices=AVAILABLE_DEVICES, value=DEFAULT_DEVICE, label="Processing Device", interactive=len(AVAILABLE_DEVICES) > 1, # Only make interactive if both CPU and GPU are options ) prompt_input = gr.Textbox(label="Positive Prompt", placeholder="e.g., a majestic lion in a vibrant jungle, photorealistic", lines=3, autofocus=True) # Autofocus on prompt negative_prompt_input = gr.Textbox(label="Negative Prompt (Optional)", placeholder="e.g., blurry, low quality, deformed, watermark", lines=2) with gr.Accordion("Advanced Settings", open=False): # Keep advanced settings initially closed with gr.Row(): steps_slider = gr.Slider(minimum=5, maximum=150, value=30, label="Inference Steps", step=1) cfg_slider = gr.Slider(minimum=1.0, maximum=30.0, value=7.5, label="CFG Scale", step=0.1) with gr.Row(): scheduler_dropdown = gr.Dropdown( choices=scheduler_choices, value=DEFAULT_SCHEDULER, label="Scheduler" ) size_dropdown = gr.Dropdown( choices=SUPPORTED_SD15_SIZES, value="512x512", # SD1.5 default label="Image Size" ) # Combine seed input and randomize checkbox with gr.Row(): # Use MAX_SEED for slider max seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, precision=0, interactive=True) # Use 0 as default, interactive initially randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True) # Simplified label generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1) # Added emojis with gr.Column(scale=3): # Give more space to image output_image = gr.Image( label="Generated Image", type="pil", height=768, # Slightly larger preview if possible width=768, # Match height for square show_share_button=True, show_download_button=True, interactive=False # Output image is not interactive ) # The template returned the seed, let's add a display for the actual seed used actual_seed_output = gr.Number(label="Actual Seed Used", precision=0, interactive=False) # Link button click to generation function # Use gr.on as in the template gr.on( triggers=[generate_button.click, prompt_input.submit], # Also trigger on prompt submit fn=infer, inputs=[ model_dropdown, device_dropdown, prompt_input, negative_prompt_input, steps_slider, cfg_slider, scheduler_dropdown, size_dropdown, seed_input, randomize_seed_checkbox, # Pass the checkbox value ], outputs=[output_image, actual_seed_output], # Return image and the actual seed used api_name="generate" # Optional: For API access ) # Add examples from template # Ensure examples map to the correct input component indices template_examples = [ "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", "An astronaut riding a green horse", "A delicious ceviche cheesecake slice", ] # Examples will only populate the first input they match. # In this case, passing just a list of strings populates the first Textbox: prompt_input. gr.Examples(examples=template_examples, inputs=[prompt_input]) # Add some notes/footer from user's script, adapted for Spaces gr.Markdown( """ --- **Usage Notes:** 1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space. 2. Choose your processing device (GPU recommended if available). 3. Enter your positive and optional negative prompts. 4. Adjust advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. The "Randomize seed" checkbox will override the seed value in the input box. 5. Click "Generate Image". The first generation with a new model/device might take some time to load. """ # Removed notes about local models and batch files ) # --- Launch the App --- if __name__ == "__main__": print("\n--- Starting CipherCore Stable Diffusion 1.5 Generator (Hugging Face Spaces) ---") cuda_status = "CUDA available" if torch.cuda.is_available() else "CUDA not available" gpu_count_str = f"Found {torch.cuda.device_count()} GPU(s)." if torch.cuda.is_available() else "" print(f"{cuda_status} {gpu_count_str}") print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}") print(f"Default device selected by app: {DEFAULT_DEVICE}") if current_pipeline: print(f"Initial model '{current_model_id}' loaded successfully on {current_device_loaded}.") else: print("No initial model loaded. Check model list and network connectivity.") print("Launching Gradio interface...") # For Spaces, usually launched directly without launch() parameters in app.py # Spaces handles the server_name, server_port, share, etc. # If running locally for testing, uncomment demo.launch() # demo.launch(show_error=True, inbrowser=True) # Uncomment for local testing demo.launch() # Standard launch for Hugging Face Spaces print("Gradio interface closing.")