Spaces:
Sleeping
Sleeping
Commit
·
d7db9b7
1
Parent(s):
d2729b9
[Update]: Improved Stable Diffusion initialization and Gradio configuration 🚀
Browse files
- Added: Enhanced logging for Stable Diffusion model loading, including GPU and CPU mode messages.
- Updated: Refined memory optimizations to clarify GPU-specific settings.
- Improved: Gradio interface comments for better understanding of configuration options.
- Pro Tip of the Commit: Clear messages are like breadcrumbs leading the way through the code forest! 🍞🌲
Aye, Aye! 🚢
app.py
CHANGED
|
@@ -59,27 +59,37 @@ try:
|
|
| 59 |
|
| 60 |
# Initialize Stable Diffusion pipeline with Zero GPU optimizations
|
| 61 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
pipe = DiffusionPipeline.from_pretrained(
|
| 63 |
-
|
| 64 |
-
torch_dtype=
|
| 65 |
use_safetensors=True,
|
| 66 |
variant="fp16" if device == "cuda" else None
|
| 67 |
)
|
| 68 |
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
| 69 |
pipe.to(device)
|
| 70 |
|
| 71 |
-
# Enable memory optimizations
|
| 72 |
if device == "cuda":
|
| 73 |
pipe.enable_model_cpu_offload()
|
| 74 |
pipe.enable_vae_slicing()
|
| 75 |
pipe.enable_vae_tiling()
|
| 76 |
-
# Enable attention slicing for lower memory usage
|
| 77 |
pipe.enable_attention_slicing(slice_size="max")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
except Exception as e:
|
| 79 |
-
print(f"Warning: Failed to initialize Stable Diffusion: {e}")
|
|
|
|
| 80 |
pipe = None
|
| 81 |
except ImportError:
|
| 82 |
-
print("Warning: diffusers package not available. Artistic visualization will be disabled.")
|
| 83 |
|
| 84 |
# Create a directory for memory snapshots if it doesn't exist
|
| 85 |
MEMORY_DIR = "memory_snapshots"
|
|
@@ -961,12 +971,16 @@ def create_interface():
|
|
| 961 |
return demo
|
| 962 |
|
| 963 |
if __name__ == "__main__":
|
| 964 |
-
#
|
| 965 |
demo = create_interface()
|
| 966 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 967 |
demo.launch(
|
| 968 |
-
share=False, # Don't create a public link
|
| 969 |
server_name="0.0.0.0", # Listen on all interfaces
|
| 970 |
-
server_port=7860,
|
| 971 |
-
|
|
|
|
| 972 |
)
|
|
|
|
| 59 |
|
| 60 |
# Initialize Stable Diffusion pipeline with Zero GPU optimizations
|
| 61 |
try:
|
| 62 |
+
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
|
| 63 |
+
print(f"Loading Stable Diffusion model: {model_id}")
|
| 64 |
+
|
| 65 |
+
# Always use float32 for CPU, only use float16 for GPU
|
| 66 |
+
model_dtype = torch.float16 if device == "cuda" else torch.float32
|
| 67 |
+
|
| 68 |
pipe = DiffusionPipeline.from_pretrained(
|
| 69 |
+
model_id,
|
| 70 |
+
torch_dtype=model_dtype,
|
| 71 |
use_safetensors=True,
|
| 72 |
variant="fp16" if device == "cuda" else None
|
| 73 |
)
|
| 74 |
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
| 75 |
pipe.to(device)
|
| 76 |
|
| 77 |
+
# Enable memory optimizations only on GPU
|
| 78 |
if device == "cuda":
|
| 79 |
pipe.enable_model_cpu_offload()
|
| 80 |
pipe.enable_vae_slicing()
|
| 81 |
pipe.enable_vae_tiling()
|
|
|
|
| 82 |
pipe.enable_attention_slicing(slice_size="max")
|
| 83 |
+
print("✨ Stable Diffusion loaded with GPU optimizations")
|
| 84 |
+
else:
|
| 85 |
+
print("✨ Stable Diffusion loaded in CPU mode")
|
| 86 |
+
|
| 87 |
except Exception as e:
|
| 88 |
+
print(f"⚠️ Warning: Failed to initialize Stable Diffusion: {e}")
|
| 89 |
+
STABLE_DIFFUSION_AVAILABLE = False
|
| 90 |
pipe = None
|
| 91 |
except ImportError:
|
| 92 |
+
print("⚠️ Warning: diffusers package not available. Artistic visualization will be disabled.")
|
| 93 |
|
| 94 |
# Create a directory for memory snapshots if it doesn't exist
|
| 95 |
MEMORY_DIR = "memory_snapshots"
|
|
|
|
| 971 |
return demo
|
| 972 |
|
| 973 |
if __name__ == "__main__":
|
| 974 |
+
# Configure Gradio for Hugging Face Spaces
|
| 975 |
demo = create_interface()
|
| 976 |
+
|
| 977 |
+
# Enable queuing for better resource management
|
| 978 |
+
demo.queue(max_size=10)
|
| 979 |
+
|
| 980 |
+
# Launch with Spaces-compatible settings
|
| 981 |
demo.launch(
|
|
|
|
| 982 |
server_name="0.0.0.0", # Listen on all interfaces
|
| 983 |
+
server_port=7860, # Default Spaces port
|
| 984 |
+
share=False, # Don't create public link
|
| 985 |
+
show_api=False # Hide API docs
|
| 986 |
)
|