|
|
|
""" |
|
Hugging Face Spaces Script for Basic Stable Diffusion 1.5 Gradio App |
|
Adapted from user's local script and HF Spaces template. |
|
Supports Hub models and CPU/GPU selection based on available hardware. |
|
Includes Attention Slicing optimization toggle. |
|
Corrected progress callback format. |
|
""" |
|
|
|
import os
import random
import time

import gradio as gr
import numpy as np
import torch
from diffusers import (
    DDPMScheduler,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    StableDiffusionPipeline,
)
from PIL import Image
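# Dependency note: this Space assumes the usual SD 1.5 stack is declared in the
# Space's requirements.txt. A minimal sketch (package names only; exact pins are
# an assumption and should match what the Space actually uses):
#
#   gradio
#   torch
#   diffusers
#   transformers
#   accelerate
#   safetensors
#   Pillow
#   numpy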
|
|
|
|
|
|
|
|
|
|
|
# Size choices offered in the UI; all but "hire.fix" are parsed as "WidthxHeight".
SUPPORTED_SD15_SIZES = ["512x512", "768x512", "512x768", "768x768", "1024x768", "768x1024", "1024x1024", "hire.fix"]
|
|
|
|
|
SCHEDULER_MAP = {
    "Euler": EulerDiscreteScheduler,
    "DPM++ 2M": DPMSolverMultistepScheduler,
    "DDPM": DDPMScheduler,
    "LMS": LMSDiscreteScheduler,
}
DEFAULT_SCHEDULER = "Euler"
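# The selected scheduler class is applied at generation time in infer() via
# SchedulerClass.from_config(pipeline.scheduler.config), so switching samplers
# does not require reloading the model.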
|
|
|
|
|
|
|
DEFAULT_HUB_MODELS = [
    "Raxephion/Typhoon-SD1.5-V1",
    "Yntec/RevAnimatedV2Rebirth",
    "stablediffusionapi/realcartoon-anime-v11",
]

MAX_SEED = np.iinfo(np.int32).max
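# MAX_SEED (2**31 - 1) bounds both the seed slider in the UI and the random
# seeds drawn when "Randomize seed" is checked.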
|
|
|
|
|
|
|
|
|
AVAILABLE_DEVICES = ["CPU"]
if torch.cuda.is_available():
    AVAILABLE_DEVICES.append("GPU")
    print(f"CUDA available. Found {torch.cuda.device_count()} GPU(s).")
    if torch.cuda.device_count() > 0:
        print(f"Using GPU 0: {torch.cuda.get_device_name(0)}")
else:
    print("CUDA not available. Running on CPU.")

DEFAULT_DEVICE = "GPU" if "GPU" in AVAILABLE_DEVICES else "CPU"

initial_device_to_use = "cuda" if DEFAULT_DEVICE == "GPU" else "cpu"
print(f"Initial pipeline will load on device: {initial_device_to_use}")
|
|
|
|
|
|
|
initial_dtype_to_use = torch.float32
if initial_device_to_use == "cuda":
    if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
        initial_dtype_to_use = torch.float16
        print("Detected GPU supports FP16, will attempt using torch.float16.")
    else:
        initial_dtype_to_use = torch.float32
        print("Detected GPU might not fully support FP16 or capability check failed, using torch.float32.")
else:
    initial_dtype_to_use = torch.float32

print(f"Initial dtype: {initial_dtype_to_use}")
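# The FP16 path is only taken for GPUs with CUDA compute capability >= 7.0
# (Volta and newer); older GPUs and the CPU path stay on float32.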
|
|
|
|
|
|
|
|
|
|
|
# Global state for the currently loaded pipeline; infer() reloads it when the
# model or device selection changes.
current_pipeline = None
current_model_id = None
current_device_loaded = None

INITIAL_MODEL_ID = DEFAULT_HUB_MODELS[0] if DEFAULT_HUB_MODELS else None
|
if INITIAL_MODEL_ID:
    print(f"\nLoading initial model '{INITIAL_MODEL_ID}' on startup...")
    try:
        pipeline = StableDiffusionPipeline.from_pretrained(
            INITIAL_MODEL_ID,
            torch_dtype=initial_dtype_to_use,
            safety_checker=None,
        )

        if initial_device_to_use == "cuda":
            try:
                pipeline.enable_attention_slicing()
                print("Attention Slicing enabled during initial load.")
            except Exception as e:
                print(f"Warning: Failed to enable Attention Slicing during initial load: {e}")

        pipeline = pipeline.to(initial_device_to_use)

        current_pipeline = pipeline
        current_model_id = INITIAL_MODEL_ID
        current_device_loaded = torch.device(initial_device_to_use)
        print(f"Initial model loaded successfully on {current_device_loaded}.")

        unet_config = getattr(current_pipeline, 'unet', None)
        if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
            cross_attn_dim = unet_config.config.cross_attention_dim
            if cross_attn_dim != 768:
                warning_msg = (f"Warning: Loaded model '{INITIAL_MODEL_ID}' might not be a standard SD 1.x model "
                               f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
                               "Results may be unexpected.")
                print(warning_msg)
            else:
                print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
        else:
            print("Could not check UNet cross_attention_dim for initial model.")

    except Exception as e:
        current_pipeline = None
        current_model_id = None
        current_device_loaded = None
        print(f"Error loading initial model '{INITIAL_MODEL_ID}': {e}")
        print("Application will start, but image generation may fail if the initial model cannot be loaded.")
else:
    print("\nNo default Hub models defined. Application will start without a loaded model.")
    print("Please select a model from the dropdown to enable generation.")
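# Downloads from the Hub are cached (by default under ~/.cache/huggingface on
# the Space, or wherever HF_HOME points), so reloading the same model later is
# much faster than the first download.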
|
|
|
|
|
|
|
|
|
def infer(
    model_identifier,
    selected_device_str,
    prompt,
    negative_prompt,
    steps,
    cfg_scale,
    scheduler_name,
    size,
    seed,
    randomize_seed,
    enable_attention_slicing,
    progress=gr.Progress(),
):
    """Generates an image using the selected model and parameters on the chosen device."""
    global current_pipeline, current_model_id, current_device_loaded
|
|
|
|
|
|
|
    temp_device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
    temp_dtype_to_use = torch.float32
    if temp_device_to_use == "cuda":
        if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
            temp_dtype_to_use = torch.float16
        else:
            temp_dtype_to_use = torch.float32
    else:
        temp_dtype_to_use = torch.float32
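    # Device and dtype are re-derived on every request so that switching the
    # device dropdown between CPU and GPU picks float32 on CPU and float16 on
    # FP16-capable GPUs, mirroring the startup logic above.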
|
|
|
|
|
|
|
    if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != temp_device_to_use):

        print(f"Loading model: {model_identifier} onto {temp_device_to_use} with dtype {temp_dtype_to_use}...")

        if current_pipeline is not None:
            print(f"Unloading previous model '{current_model_id}' from {current_device_loaded}...")
            if str(current_device_loaded) == "cuda":
                try:
                    current_pipeline.to("cpu")
                    print("Moved previous pipeline to CPU.")
                except Exception as move_e:
                    print(f"Warning: Failed to move previous pipeline to CPU: {move_e}")
            del current_pipeline
            current_pipeline = None

            if str(current_device_loaded) == "cuda":
                try:
                    torch.cuda.empty_cache()
                    print("Cleared CUDA cache.")
                except Exception as cache_e:
                    print(f"Warning: Error clearing CUDA cache: {cache_e}")

        if temp_device_to_use == "cuda":
            if not torch.cuda.is_available():
                raise gr.Error("GPU selected but CUDA is not available to PyTorch on this Space. Please select CPU or ensure the Space is configured with a GPU and the CUDA version of PyTorch is installed.")
|
|
|
        try:
            pipeline = StableDiffusionPipeline.from_pretrained(
                model_identifier,
                torch_dtype=temp_dtype_to_use,
                safety_checker=None,
            )

            if enable_attention_slicing and temp_device_to_use == "cuda":
                try:
                    pipeline.enable_attention_slicing()
                    print("Attention Slicing enabled during model load.")
                except Exception as e:
                    print(f"Warning: Failed to enable Attention Slicing: {e}")
                    gr.Warning(f"Failed to enable Attention Slicing. Error: {e}")
            else:
                try:
                    pipeline.disable_attention_slicing()
                except Exception:
                    pass

            pipeline = pipeline.to(temp_device_to_use)

            current_pipeline = pipeline
            current_model_id = model_identifier
            current_device_loaded = torch.device(temp_device_to_use)

            unet_config = getattr(pipeline, 'unet', None)
            if unet_config and hasattr(unet_config, 'config') and hasattr(unet_config.config, 'cross_attention_dim'):
                cross_attn_dim = unet_config.config.cross_attention_dim
                if cross_attn_dim != 768:
                    warning_msg = (f"Warning: Loaded model '{model_identifier}' might not be a standard SD 1.x model "
                                   f"(expected UNet cross_attention_dim 768, found {cross_attn_dim}). "
                                   "Results may be unexpected or generation might fail.")
                    print(warning_msg)
                    gr.Warning(warning_msg)
                else:
                    print("UNet cross_attention_dim is 768, consistent with SD 1.x.")
            else:
                print("Could not check UNet cross_attention_dim.")

            print(f"Model '{model_identifier}' loaded successfully on {current_device_loaded} with dtype {temp_dtype_to_use}.")

        except Exception as e:
            current_pipeline = None
            current_model_id = None
            current_device_loaded = None
            print(f"Error loading model '{model_identifier}': {e}")
            error_message_lower = str(e).lower()
            if "cannot find requested files" in error_message_lower or "404 client error" in error_message_lower or "no such file or directory" in error_message_lower:
                raise gr.Error(f"Model '{model_identifier}' not found on Hugging Face Hub or in repo files. Check ID/path or internet connection. Error: {e}")
            elif "checkpointsnotfounderror" in error_message_lower or "valueerror: could not find a valid model structure" in error_message_lower:
                raise gr.Error(f"No valid diffusers model at '{model_identifier}'. Ensure it's a diffusers format ID/path. Error: {e}")
            elif "out of memory" in error_message_lower:
                raise gr.Error(f"Out of Memory (OOM) loading model. This Space might not have enough RAM/VRAM for this model. Try a lighter model or select CPU (if available). Error: {e}")
            elif "cusolver64" in error_message_lower or "cuda driver version" in error_message_lower or "cuda error" in error_message_lower:
                raise gr.Error(f"CUDA/GPU Driver/Installation Error on Space: {e}. Check Space hardware or select CPU.")
            elif "safetensors_rust.safetensorserror" in error_message_lower or "oserror: cannot load" in error_message_lower or "filenotfounderror" in error_message_lower:
                raise gr.Error(f"Model file error for '{model_identifier}': {e}. Files might be corrupt or incomplete on the Hub/in repo.")
            elif "could not import" in error_message_lower or "module not found" in error_message_lower:
                raise gr.Error(f"Dependency error: {e}. Ensure required libraries are in requirements.txt.")
            else:
                raise gr.Error(f"Failed to load model '{model_identifier}': {e}")
|
|
|
|
|
|
|
|
|
|
|
    # Use .device.type ("cuda"/"cpu") rather than str(device), which can be "cuda:0"
    # and would break the string comparisons below.
    device_to_use = current_pipeline.device.type if current_pipeline else ("cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu")
    dtype_to_use = current_pipeline.dtype if current_pipeline else torch.float32

    if current_pipeline is None:
        raise gr.Error("Model failed to load during setup or switching. Cannot generate image.")
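    # Apply the attention-slicing preference to the already-loaded pipeline as
    # well, so toggling the checkbox takes effect without a model reload.
    # Slicing computes attention in smaller chunks, lowering peak VRAM at a
    # small speed cost.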
|
|
|
|
|
|
|
|
|
|
|
    if current_pipeline.device.type == "cuda":
        if enable_attention_slicing:
            try:
                current_pipeline.enable_attention_slicing()
            except Exception as e:
                print(f"Warning: Failed to enable Attention Slicing before generation: {e}")
                gr.Warning(f"Failed to enable Attention Slicing. Error: {e}")
        else:
            try:
                current_pipeline.disable_attention_slicing()
            except Exception:
                pass
    else:
        try:
            current_pipeline.disable_attention_slicing()
        except Exception:
            pass
|
|
|
|
|
|
|
    selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
    if selected_scheduler_class is None:
        print(f"Warning: Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")
        selected_scheduler_class = SCHEDULER_MAP[DEFAULT_SCHEDULER]
        gr.Warning(f"Unknown scheduler '{scheduler_name}'. Using default: {DEFAULT_SCHEDULER}.")

    try:
        scheduler_config = current_pipeline.scheduler.config
        current_pipeline.scheduler = selected_scheduler_class.from_config(scheduler_config)
        print(f"Scheduler set to: {scheduler_name}")
    except Exception as e:
        print(f"Error setting scheduler '{scheduler_name}': {e}")
        try:
            print(f"Attempting to fall back to default scheduler: {DEFAULT_SCHEDULER}")
            # Re-read the config here so the fallback does not depend on
            # scheduler_config having been assigned before the failure above.
            current_pipeline.scheduler = SCHEDULER_MAP[DEFAULT_SCHEDULER].from_config(current_pipeline.scheduler.config)
            gr.Warning(f"Failed to set scheduler to '{scheduler_name}', fell back to {DEFAULT_SCHEDULER}. Error: {e}")
        except Exception as fallback_e:
            print(f"Fallback scheduler failed too: {fallback_e}")
            raise gr.Error(f"Failed to configure scheduler '{scheduler_name}' and fallback failed. Error: {e}")
|
|
|
|
|
|
|
    width, height = 512, 512
    if size.lower() == "hire.fix":
        width, height = 1024, 1024
        print(f"Interpreting 'hire.fix' size as {width}x{height}")
    else:
        try:
            w_str, h_str = size.split('x')
            width = int(w_str)
            height = int(h_str)
        except ValueError:
            raise gr.Error(f"Invalid size format: '{size}'. Use 'WidthxHeight' (e.g., 512x512) or 'hire.fix'.")
        except Exception as e:
            raise gr.Error(f"Error parsing size '{size}': {e}")

    multiple_check = 64
    if width % multiple_check != 0 or height % multiple_check != 0:
        warning_msg_size = (f"Warning: Image size {width}x{height} is not a multiple of {multiple_check}. "
                            f"Stable Diffusion 1.5 models are typically trained on sizes like 512x512. "
                            "Using non-standard sizes may cause tiling, distortions, or other artifacts.")
        print(warning_msg_size)
        gr.Warning(warning_msg_size)
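    # Hard requirement: the SD 1.5 VAE downsamples by a factor of 8, so width
    # and height must at least be divisible by 8; multiples of 64 are the safer
    # choice and are what the size presets above use.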
|
|
|
|
|
|
|
|
|
|
|
    generator_device = current_pipeline.device

    seed_int = 0
    if randomize_seed:
        seed_int = random.randint(0, MAX_SEED)
        print(f"Randomizing seed to: {seed_int}")
    else:
        try:
            seed_int = int(seed)
            print(f"Using provided seed: {seed_int}")
        except ValueError:
            print(f"Warning: Invalid seed input '{seed}'. Using random seed instead.")
            gr.Warning(f"Invalid seed input '{seed}'. Using random seed instead.")
            seed_int = random.randint(0, MAX_SEED)

    generator = None
    try:
        generator = torch.Generator(device=generator_device).manual_seed(seed_int)
        print(f"Generator set with seed {seed_int} on device: {generator_device}")
    except Exception as e:
        print(f"Warning: Error setting seed generator with seed {seed_int} on device {generator_device}: {e}. Falling back to the pipeline's default (unseeded) generator.")
        gr.Warning(f"Failed to set seed generator with seed {seed_int}. Using system random seed. Error: {e}")
        generator = None
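    # Seeding a torch.Generator on the same device as the pipeline makes runs
    # reproducible for a given seed, model, scheduler, size, and device.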
|
|
|
|
|
|
|
    num_inference_steps_int = int(steps)
    guidance_scale_float = float(cfg_scale)

    if num_inference_steps_int <= 0 or guidance_scale_float <= 0:
        raise ValueError("Steps and CFG Scale must be positive values.")
    if width <= 0 or height <= 0:
        raise ValueError("Image width and height must be positive.")

    print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={seed_int if generator else 'System Random'}, Device={device_to_use}, Dtype={dtype_to_use}, Slicing Enabled={enable_attention_slicing and device_to_use == 'cuda'}")
    start_time = time.time()
|
|
|
    try:
        output = current_pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt else None,
            num_inference_steps=num_inference_steps_int,
            guidance_scale=guidance_scale_float,
            width=width,
            height=height,
            generator=generator,
            # Report progress to the Gradio bar roughly every 5% of steps.
            # Note: callback/callback_steps are deprecated in newer diffusers
            # releases in favor of callback_on_step_end; they still work here.
            callback_steps=max(1, num_inference_steps_int // 20),
            callback=lambda step, timestep, latents: progress((step, num_inference_steps_int), desc=f"Step {step}/{num_inference_steps_int}"),
        )
        end_time = time.time()
        print(f"Generation finished in {end_time - start_time:.2f} seconds.")
        generated_image = output.images[0]

        # Report -1 when no seeded generator was used (the result is then not reproducible).
        actual_seed_used = seed_int if generator else -1

        return generated_image, actual_seed_used
|
|
|
    except gr.Error as e:
        raise e
    except ValueError as ve:
        print(f"Parameter Error: {ve}")
        raise gr.Error(f"Invalid Parameter: {ve}")
    except Exception as e:
        print(f"An error occurred during image generation: {e}")
        error_message_lower = str(e).lower()
        if "size must be a multiple of" in error_message_lower or "invalid dimensions" in error_message_lower or "shape mismatch" in error_message_lower:
            raise gr.Error(f"Image generation failed - Invalid size '{width}x{height}' for model: {e}. Try a multiple of 64 or 8.")
        elif "out of memory" in error_message_lower or "cuda out of memory" in error_message_lower:
            print("Hint: Try smaller image size, fewer steps, or a model that uses less VRAM.")
            raise gr.Error(f"Out of Memory (OOM) during generation. This Space might not have enough VRAM. Try smaller size/steps or select CPU (if available). Error: {e}")
        elif "runtimeerror" in error_message_lower:
            raise gr.Error(f"Runtime Error during generation: {e}. This could be a model/scheduler incompatibility or other issue.")
        elif "device-side assert" in error_message_lower or "cuda error" in error_message_lower:
            raise gr.Error(f"CUDA/GPU Error during generation: {e}. Ensure the Space is configured with a GPU and compatible PyTorch.")
        elif "expected all tensors to be on the same device" in error_message_lower:
            raise gr.Error(f"Device mismatch error during generation: {e}. This is an internal error, please report it.")
        else:
            raise gr.Error(f"Image generation failed: An unexpected error occurred. {e}")
|
|
|
|
|
|
|
|
|
model_choices = DEFAULT_HUB_MODELS
if not model_choices:
    initial_model_choices = ["No models found"]
    initial_default_model = "No models found"
    model_dropdown_interactive = False
    print("\n!!! WARNING: No default Hub models listed in script. Model dropdown will be empty. !!!")
else:
    initial_model_choices = model_choices
    initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID and INITIAL_MODEL_ID in initial_model_choices else initial_model_choices[0]
    model_dropdown_interactive = initial_default_model != "No models found"

scheduler_choices = list(SCHEDULER_MAP.keys())
|
|
|
|
|
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""
|
|
|
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # CipherCore Stable Diffusion 1.5 Gradio WebUI
        Create images with Stable Diffusion 1.5 using models from the Hugging Face Hub.
        Choose a model, set your prompts and parameters, and generate!

        _Note: the 'hire.fix' size option currently generates at 1024x1024._
        """
    )

    if initial_default_model != "No models found":
        gr.Markdown(f"*(Note: Model '{initial_default_model}' is loaded at startup when possible; otherwise it loads on the first generation. Switching models or devices reloads the pipeline and may take some time.)*")
    else:
        gr.Markdown("*(Note: No models available. Add Hub IDs to DEFAULT_HUB_MODELS in the script.)*")
|
|
|
|
|
    with gr.Row():
        with gr.Column(scale=2):
            model_dropdown = gr.Dropdown(
                choices=initial_model_choices,
                value=initial_default_model,
                label="Select Model (Hugging Face Hub ID)",
                interactive=model_dropdown_interactive,
            )
            device_dropdown = gr.Dropdown(
                choices=AVAILABLE_DEVICES,
                value=DEFAULT_DEVICE,
                label="Processing Device",
                interactive=len(AVAILABLE_DEVICES) > 1,
            )
            prompt_input = gr.Textbox(label="Positive Prompt", placeholder="e.g., a majestic lion in a vibrant jungle, photorealistic", lines=3, autofocus=True)
            negative_prompt_input = gr.Textbox(label="Negative Prompt (Optional)", placeholder="e.g., blurry, low quality, deformed, watermark", lines=2)
|
|
|
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    steps_slider = gr.Slider(minimum=5, maximum=150, value=30, label="Inference Steps", step=1)
                    cfg_slider = gr.Slider(minimum=1.0, maximum=30.0, value=7.5, label="CFG Scale", step=0.1)
                with gr.Row():
                    scheduler_dropdown = gr.Dropdown(
                        choices=scheduler_choices,
                        value=DEFAULT_SCHEDULER,
                        label="Scheduler",
                    )
                    size_dropdown = gr.Dropdown(
                        choices=SUPPORTED_SD15_SIZES,
                        value="512x512",
                        label="Image Size",
                    )

                with gr.Row():
                    seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, interactive=True)
                    randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)

                with gr.Row():
                    default_slicing = "GPU" in AVAILABLE_DEVICES
                    enable_attention_slicing_checkbox = gr.Checkbox(
                        label="Enable Attention Slicing (Memory Optimization - GPU only)",
                        value=default_slicing,
                        interactive="GPU" in AVAILABLE_DEVICES,
                    )
                    gr.Markdown("*(Helps reduce VRAM usage, may slightly affect speed/quality)*")
|
|
|
|
|
            generate_button = gr.Button("✨ Generate Image ✨", variant="primary", scale=1)
|
|
|
        with gr.Column(scale=3):
            output_image = gr.Image(
                label="Generated Image",
                type="pil",
                height=768,
                width=768,
                show_share_button=True,
                show_download_button=True,
                interactive=False,
            )
            actual_seed_output = gr.Number(label="Actual Seed Used", precision=0, interactive=False)
|
|
|
|
|
|
|
|
|
    gr.on(
        triggers=[generate_button.click, prompt_input.submit],
        fn=infer,
        inputs=[
            model_dropdown,
            device_dropdown,
            prompt_input,
            negative_prompt_input,
            steps_slider,
            cfg_slider,
            scheduler_dropdown,
            size_dropdown,
            seed_input,
            randomize_seed_checkbox,
            enable_attention_slicing_checkbox,
        ],
        outputs=[output_image, actual_seed_output],
        api_name="generate",
    )
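    # gr.on wires both the Generate button and pressing Enter in the prompt box
    # to infer(); api_name="generate" also exposes the same function through the
    # Gradio client API (callable as "/generate").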
|
|
|
|
|
|
|
    template_examples = [
        "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
        "An astronaut riding a green horse",
        "A delicious ceviche cheesecake slice",
    ]

    gr.Examples(examples=template_examples, inputs=[prompt_input])
|
|
|
|
|
|
|
    gr.Markdown(
        """
        ---
        **Usage Notes:**
        1. Select a model from the dropdown (Hugging Face Hub ID). Models are downloaded and cached on the Space.
        2. Choose your processing device (GPU recommended if available).
        3. Enter your positive and optional negative prompts.
        4. Adjust advanced settings (Steps, CFG Scale, Scheduler, Size, Seed) if needed. The "Randomize seed" checkbox overrides the seed value in the input box.
        5. Click "Generate Image".

        The first generation with a new model or device may take longer while the model loads.
        """
    )
|
|
|
|
|
|
|
if __name__ == "__main__":
    print("\n--- Starting CipherCore Stable Diffusion 1.5 Generator (Hugging Face Spaces) ---")
    cuda_status = "CUDA available" if torch.cuda.is_available() else "CUDA not available"
    gpu_count_str = f"Found {torch.cuda.device_count()} GPU(s)." if torch.cuda.is_available() else ""

    print(f"{cuda_status} {gpu_count_str}")
    print(f"Available devices detected by PyTorch: {', '.join(AVAILABLE_DEVICES)}")
    print(f"Default device selected by app: {DEFAULT_DEVICE}")
    if current_pipeline:
        print(f"Initial model '{current_model_id}' loaded successfully on {current_device_loaded}.")
    else:
        print("No initial model loaded or initial load failed. Check model list and network connectivity.")
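    # On Spaces the platform handles hosting, so demo.launch() with default
    # arguments is enough; share=True is not needed here.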
|
|
|
|
|
    print("Launching Gradio interface...")

    demo.launch()

    print("Gradio interface closing.")