Update app.py
app.py
CHANGED
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-
-Raxephion @2025
-
+Author: @Raxephion 2025
 """
 
 import gradio as gr
@@ -46,10 +44,8 @@ DEFAULT_HUB_MODELS = [
 ]
 
 # --- Constants for Gradio UI / Generation ---
-MAX_SEED = np.iinfo(np.int32).max #
-#
-# However, let's add MAX_IMAGE_SIZE as it was related to the sliders in the template
-MAX_IMAGE_SIZE_SLIDER = 1024 # Renamed to avoid confusion, used only for slider max
+MAX_SEED = np.iinfo(np.int32).max # Defines the maximum seed value
+# MAX_IMAGE_SIZE_SLIDER = 1024 # Used only for slider max, kept for consistency if needed elsewhere
 
 # --- Determine available devices and set up options ---
 # This logic is from the user's script and works well for Spaces
@@ -148,28 +144,30 @@ def infer(
     cfg_scale, # From cfg_slider
     scheduler_name, # From scheduler_dropdown
     size, # From size_dropdown
-    seed, # From seed_input
+    seed, # From seed_input (now a Slider)
     randomize_seed, # From randomize_seed_checkbox
     progress=gr.Progress(track_tqdm=True), # Added progress argument from template
 ):
     """Generates an image using the selected model and parameters on the chosen device."""
-    global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP, MAX_SEED #
-
-    #
-
-
-
-
+    global current_pipeline, current_model_id, current_device_loaded, SCHEDULER_MAP, MAX_SEED # MAX_SEED is global
+
+    # This check is done before parameter parsing so we can determine device/dtype for loading
+    # Need to redo some parameter parsing here to get device_to_use early
+    temp_device_to_use = "cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu"
+    temp_dtype_to_use = torch.float32
+    if temp_device_to_use == "cuda":
+        if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
+            temp_dtype_to_use = torch.float16
+        else:
+            temp_dtype_to_use = torch.float32
+    else:
         temp_dtype_to_use = torch.float32
-    if temp_device_to_use == "cuda":
-        if torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 7:
-            temp_dtype_to_use = torch.float16
-        else:
-            temp_dtype_to_use = torch.float32
-    else:
-        temp_dtype_to_use = torch.float32
 
-
+    # 1. Load/Switch Model if necessary
+    # Check if the requested model identifier OR the requested device has changed
+    # Use string comparison for current_device_loaded as it's a torch.device object
+    if current_pipeline is None or current_model_id != model_identifier or (current_device_loaded is not None and str(current_device_loaded) != temp_device_to_use):
+
         print(f"Loading model: {model_identifier} onto {temp_device_to_use} with dtype {temp_dtype_to_use}...")
         # Clear previous pipeline to potentially free memory *before* loading the new one
         if current_pipeline is not None:
@@ -198,7 +196,7 @@ def infer(
             pipeline = StableDiffusionPipeline.from_pretrained(
                 model_identifier,
                 torch_dtype=temp_dtype_to_use, # Use the determined dtype for loading
-                safety_checker=None,
+                safety_checker=None, # DISABLED
             )
             pipeline = pipeline.to(temp_device_to_use) # Use the determined device
 
@@ -249,9 +247,11 @@ def infer(
     device_to_use = str(current_pipeline.device) if current_pipeline else ("cuda" if selected_device_str == "GPU" and "GPU" in AVAILABLE_DEVICES else "cpu")
     dtype_to_use = current_pipeline.dtype if current_pipeline else torch.float32 # Fallback if somehow pipeline is still None
 
-
+
+    # Check if pipeline is successfully loaded before proceeding with generation
     if current_pipeline is None:
-        raise gr.Error("Model failed to load. Cannot generate image.")
+        raise gr.Error("Model failed to load during setup or switching. Cannot generate image.")
+
 
     # 2. Configure Scheduler
     selected_scheduler_class = SCHEDULER_MAP.get(scheduler_name)
@@ -307,38 +307,35 @@ def infer(
     # The generator device needs to match the pipeline device
     generator_device = current_pipeline.device # Must match the pipeline device
 
+    seed_int = 0 # Default if issue occurs
     if randomize_seed: # Use the randomize_seed checkbox
-
-        print(f"Randomizing seed to: {
+        seed_int = random.randint(0, MAX_SEED) # Re-randomize seed using MAX_SEED
+        print(f"Randomizing seed to: {seed_int}")
     else:
-        # Use provided seed
-
-
-
-
-
-
+        # Use provided seed from the slider input
+        try:
+            seed_int = int(seed)
+            print(f"Using provided seed: {seed_int}")
+        except ValueError:
+            print(f"Warning: Invalid seed input '{seed}'. Using random seed instead.")
+            gr.Warning(f"Invalid seed input '{seed}'. Using random seed instead.")
+            seed_int = random.randint(0, MAX_SEED) # Fallback to random if input is not int
+            randomize_seed = True # Mark as randomized for reporting
 
 
+    generator = None # Initialize generator as None
     try:
-        # Ensure seed is an integer for the generator
-        seed_int = int(seed)
         # Explicitly move generator to the desired device
         generator = torch.Generator(device=generator_device).manual_seed(seed_int)
         print(f"Generator set with seed {seed_int} on device: {generator_device}")
     except Exception as e:
-
-
-        gr.Warning(f"Failed to set seed generator with seed {seed}. Using random seed. Error: {e}")
+        print(f"Warning: Error setting seed generator with seed {seed_int} on device {generator_device}: {e}. Falling back to default generator (potentially on CPU) or system random.")
+        gr.Warning(f"Failed to set seed generator with seed {seed_int}. Using system random seed. Error: {e}")
         generator = None # Let pipeline handle random seed if generator creation fails or device mismatch
-        # If generator creation failed, the actual seed used
-        #
-        #
-        #
-        # No, the most accurate is to let the pipeline generate, and some diffusers versions return the generator state/seed.
-        # However, the standard pipeline output doesn't return the seed.
-        # Let's just report the seed we *tried* to use. If randomize_seed was True, it's the random one. If False, it's the user input one.
-        pass # Keep the last calculated seed_int or seed value
+        # If generator creation failed, the actual seed used by the pipeline will be different and system-dependent random.
+        # We should probably report -1 in this case, or just report the seed we tried to use.
+        # Reporting the seed we *tried* to use is simpler and often sufficient.
+        pass # Keep the last calculated seed_int
 
     # 5. Generate Image
     # Ensure required parameters are integers/floats
@@ -351,7 +348,7 @@ def infer(
     if width <= 0 or height <= 0:
         raise ValueError("Image width and height must be positive.")
 
-    print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={seed_int if generator else 'Random
+    print(f"Generating: Prompt='{prompt[:80]}{'...' if len(prompt) > 80 else ''}', NegPrompt='{negative_prompt[:80]}{'...' if len(negative_prompt) > 80 else ''}', Steps={num_inference_steps_int}, CFG={guidance_scale_float}, Size={width}x{height}, Scheduler={scheduler_name}, Seed={seed_int if generator else 'System Random'}, Device={device_to_use}, Dtype={dtype_to_use}")
     start_time = time.time()
 
     try:
@@ -378,8 +375,8 @@ def infer(
     print(f"Generation finished in {end_time - start_time:.2f} seconds.")
     generated_image = output.images[0]
 
-    # Determine the seed to return: the one we attempted to use
-    actual_seed_used = seed_int if generator else -1
+    # Determine the seed to return: the one we attempted to use, or -1 if generator creation failed
+    actual_seed_used = seed_int if generator else -1
 
     # Return both the image and the seed (potentially randomized)
     return generated_image, actual_seed_used
@@ -422,17 +419,9 @@ if not model_choices:
 else:
     initial_model_choices = model_choices
    # Set a reasonable default if available
-    initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID else "No models found"
-
-
-    # Ensure the initial default model is actually in the choices list if possible
-    if initial_default_model != "No models found" and initial_default_model not in initial_model_choices:
-        print(f"Warning: Initial default model '{initial_default_model}' is not in the model_choices list.")
-        if initial_model_choices and initial_model_choices[0] != "No models found":
-            initial_default_model = initial_model_choices[0]
-            print(f"Setting default model to first available choice: {initial_default_model}")
-        else:
-            initial_default_model = "No models found" # Fallback if no choices
+    initial_default_model = INITIAL_MODEL_ID if INITIAL_MODEL_ID and INITIAL_MODEL_ID in initial_model_choices else (initial_model_choices[0] if initial_model_choices and initial_model_choices[0] != "No models found" else "No models found")
+
+    model_dropdown_interactive = True if initial_default_model != "No models found" else False # Make it interactive if there's *any* model choice
 
 scheduler_choices = list(SCHEDULER_MAP.keys())
 
@@ -455,10 +444,8 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from
     )
 
     # Add a note about model loading time
-    if
-        gr.Markdown(f"*(Note:
-    elif initial_default_model != "No models found":
-        gr.Markdown(f"*(Note: Loading model '{initial_default_model}' on first generation... This might take some time.)*")
+    if initial_default_model != "No models found":
+        gr.Markdown(f"*(Note: Model '{initial_default_model}' will load on first generation or model switch... This might take some time.)*")
     else:
         gr.Markdown(f"*(Note: No models available. Add Hub IDs to DEFAULT_HUB_MODELS in the script.)*")
 
@@ -497,8 +484,8 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo: # Added Soft theme from
             )
             # Combine seed input and randomize checkbox
             with gr.Row():
-                #
-                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0,
+                # Removed precision=0 from Slider
+                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, interactive=True) # Use 0 as default, interactive initially
                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True) # Simplified label
 
 
@@ -578,7 +565,7 @@ if __name__ == "__main__":
     if current_pipeline:
         print(f"Initial model '{current_model_id}' loaded successfully on {current_device_loaded}.")
     else:
-        print("No initial model loaded. Check model list and network connectivity.")
+        print("No initial model loaded or initial load failed. Check model list and network connectivity.")
 
 
     print("Launching Gradio interface...")
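For reference, the model load/switch check in the diff above is essentially a small pipeline cache. A minimal standalone sketch of that pattern, assuming torch and diffusers are installed (get_pipeline is an illustrative name, not a function in app.py):

    import torch
    from diffusers import StableDiffusionPipeline

    current_pipeline = None
    current_model_id = None
    current_device_loaded = None

    def get_pipeline(model_id, device, dtype):
        # Reload only when the requested model or target device changes; compare
        # the device as a string because current_device_loaded is a torch.device.
        global current_pipeline, current_model_id, current_device_loaded
        if (current_pipeline is None
                or current_model_id != model_id
                or str(current_device_loaded) != device):
            current_pipeline = StableDiffusionPipeline.from_pretrained(
                model_id, torch_dtype=dtype, safety_checker=None
            ).to(device)
            current_model_id = model_id
            current_device_loaded = torch.device(device)
        return current_pipeline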
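The dtype and seed handling follows a common pattern: float16 only on CUDA devices with compute capability 7 or newer, and a seeded torch.Generator that falls back to a random seed (reported as -1) when setup fails. A rough sketch under those assumptions; the helper names are illustrative:

    import random
    import numpy as np
    import torch

    MAX_SEED = np.iinfo(np.int32).max

    def pick_device_and_dtype(want_gpu):
        # Mirror the commit: fp16 only on CUDA with compute capability >= 7
        if want_gpu and torch.cuda.is_available():
            major, _minor = torch.cuda.get_device_capability(0)
            return "cuda", torch.float16 if major >= 7 else torch.float32
        return "cpu", torch.float32

    def make_generator(device, seed, randomize):
        # Same fallback chain as the commit: randomize, else validate the input,
        # else fall back to a fresh random seed; report -1 if the generator fails.
        try:
            seed_int = random.randint(0, MAX_SEED) if randomize else int(seed)
        except (TypeError, ValueError):
            seed_int = random.randint(0, MAX_SEED)
        try:
            return torch.Generator(device=device).manual_seed(seed_int), seed_int
        except Exception:
            return None, -1

    device, dtype = pick_device_and_dtype(want_gpu=True)
    generator, seed_used = make_generator(device, seed=0, randomize=True)
    print(device, dtype, seed_used)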
print("Launching Gradio interface...")
|
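Because infer() now returns the seed it actually used, the click handler can write that value back into the seed Slider. A sketch of that wiring, assuming gradio is available; fake_generate is a hypothetical stand-in for infer():

    import random
    import numpy as np
    import gradio as gr

    MAX_SEED = np.iinfo(np.int32).max

    def fake_generate(seed, randomize_seed):
        # Hypothetical stand-in for infer(): returns the output plus the seed
        # actually used, so the UI can display it.
        seed_int = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
        return f"generated with seed {seed_int}", seed_int

    with gr.Blocks() as demo:
        with gr.Row():
            seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
        result_box = gr.Textbox(label="Result")
        generate_btn = gr.Button("Generate")
        # Routing the returned seed back into the Slider mirrors the commit's
        # return of (image, actual_seed_used) from infer().
        generate_btn.click(
            fake_generate,
            inputs=[seed_input, randomize_seed_checkbox],
            outputs=[result_box, seed_input],
        )

    if __name__ == "__main__":
        demo.launch()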