Aatricks committed on
Commit 8efa789 · verified · 1 parent: 8bbedc0

changed space gpu runtime timeout
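
The only functional change in this commit is on the @spaces.GPU decorator: generate_images now requests a 120-second ZeroGPU slot instead of the default one. A minimal sketch of the pattern, assuming the spaces package available inside a Hugging Face Space with ZeroGPU hardware (the function name heavy_task is a placeholder, and the 60-second figure is ZeroGPU's documented default at the time of writing):

import spaces

@spaces.GPU(duration=120)  # allow up to 120 s of GPU time per call (default is 60 s)
def heavy_task(prompt: str) -> str:
    # Placeholder body; the real app runs the LightDiffusion pipeline here.
    return prompt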

Files changed (1):
app.py +372 -372
app.py CHANGED
@@ -1,372 +1,372 @@
 import glob
 import gradio as gr
 import sys
 import os
 from PIL import Image
 import numpy as np
 import spaces
 
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
 
 from modules.user.pipeline import pipeline
 import torch
 
 
 def load_generated_images():
     """Load generated images with given prefix from disk"""
     image_files = glob.glob("./_internal/output/**/*.png")
 
     # If there are no image files, return
     if not image_files:
         return []
 
     # Sort files by modification time in descending order
     image_files.sort(key=os.path.getmtime, reverse=True)
 
     # Get most recent timestamp
     latest_time = os.path.getmtime(image_files[0])
 
     # Get all images from same batch (within 1 second of most recent)
     batch_images = []
     for file in image_files:
         if abs(os.path.getmtime(file) - latest_time) < 1.0:
             try:
                 img = Image.open(file)
                 batch_images.append(img)
             except:
                 continue
 
     if not batch_images:
         return []
     return batch_images
 
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def generate_images(
     prompt: str,
     width: int = 512,
     height: int = 512,
     num_images: int = 1,
     batch_size: int = 1,
     hires_fix: bool = False,
     adetailer: bool = False,
     enhance_prompt: bool = False,
     img2img_enabled: bool = False,
     img2img_image: str = None,
     stable_fast: bool = False,
     reuse_seed: bool = False,
     flux_enabled: bool = False,
     prio_speed: bool = False,
     realistic_model: bool = False,
     multiscale_enabled: bool = True,
     multiscale_intermittent: bool = False,
     multiscale_factor: float = 0.5,
     multiscale_fullres_start: int = 3,
     multiscale_fullres_end: int = 8,
     keep_models_loaded: bool = True,
     progress=gr.Progress(),
 ):
     """Generate images using the LightDiffusion pipeline"""
     try:
         # Set model persistence preference
         from modules.Device.ModelCache import set_keep_models_loaded
 
         set_keep_models_loaded(keep_models_loaded)
 
         if img2img_enabled and img2img_image is not None:
             # Convert numpy array to PIL Image
             if isinstance(img2img_image, np.ndarray):
                 img_pil = Image.fromarray(img2img_image)
                 img_pil.save("temp_img2img.png")
                 prompt = "temp_img2img.png"
 
         # Run pipeline and capture saved images
         with torch.inference_mode():
             pipeline(
                 prompt=prompt,
                 w=width,
                 h=height,
                 number=num_images,
                 batch=batch_size,
                 hires_fix=hires_fix,
                 adetailer=adetailer,
                 enhance_prompt=enhance_prompt,
                 img2img=img2img_enabled,
                 stable_fast=stable_fast,
                 reuse_seed=reuse_seed,
                 flux_enabled=flux_enabled,
                 prio_speed=prio_speed,
                 autohdr=True,
                 realistic_model=realistic_model,
                 enable_multiscale=multiscale_enabled,
                 multiscale_intermittent_fullres=multiscale_intermittent,
                 multiscale_factor=multiscale_factor,
                 multiscale_fullres_start=multiscale_fullres_start,
                 multiscale_fullres_end=multiscale_fullres_end,
             )
 
         # Clean up temporary file if it exists
         if os.path.exists("temp_img2img.png"):
             os.remove("temp_img2img.png")
 
         return load_generated_images()
 
     except Exception:
         import traceback
 
         print(traceback.format_exc())
         # Clean up temporary file if it exists
         if os.path.exists("temp_img2img.png"):
             os.remove("temp_img2img.png")
         return [Image.new("RGB", (512, 512), color="black")]
 
 
 def get_vram_info():
     """Get VRAM usage information"""
     try:
         from modules.Device.ModelCache import get_memory_info
 
         info = get_memory_info()
         return f"""
 **VRAM Usage:**
 - Total: {info["total_vram"]:.1f} GB
 - Used: {info["used_vram"]:.1f} GB
 - Free: {info["free_vram"]:.1f} GB
 - Keep Models Loaded: {info["keep_loaded"]}
 - Has Cached Checkpoint: {info["has_cached_checkpoint"]}
 """
     except Exception as e:
         return f"Error getting VRAM info: {e}"
 
 
 def clear_model_cache_ui():
     """Clear model cache from UI"""
     try:
         from modules.Device.ModelCache import clear_model_cache
 
         clear_model_cache()
         return "✅ Model cache cleared successfully!"
     except Exception as e:
         return f"❌ Error clearing cache: {e}"
 
 
 def apply_multiscale_preset(preset_name):
     """Apply multiscale preset values to the UI components"""
     if preset_name == "None":
         return gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
 
     try:
         from modules.sample.multiscale_presets import get_preset_parameters
 
         params = get_preset_parameters(preset_name)
 
         return (
             gr.update(value=params["enable_multiscale"]),
             gr.update(value=params["multiscale_factor"]),
             gr.update(value=params["multiscale_fullres_start"]),
             gr.update(value=params["multiscale_fullres_end"]),
             gr.update(value=params["multiscale_intermittent_fullres"]),
         )
     except Exception as e:
         print(f"Error applying preset {preset_name}: {e}")
         return gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
 
 
 # Create Gradio interface
 with gr.Blocks(title="LightDiffusion Web UI") as demo:
     gr.Markdown("# LightDiffusion Web UI")
     gr.Markdown("Generate AI images using LightDiffusion")
     gr.Markdown(
         "This is the demo for LightDiffusion, the fastest diffusion backend for generating images. https://github.com/LightDiffusion/LightDiffusion-Next"
     )
 
     with gr.Row():
         with gr.Column():
             # Input components
             prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
 
             with gr.Row():
                 width = gr.Slider(
                     minimum=64, maximum=2048, value=512, step=64, label="Width"
                 )
                 height = gr.Slider(
                     minimum=64, maximum=2048, value=512, step=64, label="Height"
                 )
 
             with gr.Row():
                 num_images = gr.Slider(
                     minimum=1, maximum=10, value=1, step=1, label="Number of Images"
                 )
                 batch_size = gr.Slider(
                     minimum=1, maximum=4, value=1, step=1, label="Batch Size"
                 )
 
             with gr.Row():
                 hires_fix = gr.Checkbox(label="HiRes Fix")
                 adetailer = gr.Checkbox(label="Auto Face/Body Enhancement")
                 enhance_prompt = gr.Checkbox(label="Enhance Prompt")
                 stable_fast = gr.Checkbox(label="Stable Fast Mode")
 
             with gr.Row():
                 reuse_seed = gr.Checkbox(label="Reuse Seed")
                 flux_enabled = gr.Checkbox(label="Flux Mode")
                 prio_speed = gr.Checkbox(label="Prioritize Speed")
                 realistic_model = gr.Checkbox(label="Realistic Model")
 
             with gr.Row():
                 multiscale_enabled = gr.Checkbox(
                     label="Multi-Scale Diffusion", value=True
                 )
                 img2img_enabled = gr.Checkbox(label="Image to Image Mode")
                 keep_models_loaded = gr.Checkbox(
                     label="Keep Models in VRAM",
                     value=True,
                     info="Keep models loaded for instant reuse (faster but uses more VRAM)",
                 )
 
             img2img_image = gr.Image(label="Input Image for img2img", visible=False)
 
             # Multi-scale preset selection
             with gr.Row():
                 multiscale_preset = gr.Dropdown(
                     label="Multi-Scale Preset",
                     choices=["None", "quality", "performance", "balanced", "disabled"],
                     value="None",
                     info="Select a preset to automatically configure multi-scale settings",
                 )
                 multiscale_intermittent = gr.Checkbox(
                     label="Intermittent Full-Res",
                     value=False,
                     info="Enable intermittent full-resolution rendering in low-res region",
                 )
 
             with gr.Row():
                 multiscale_factor = gr.Slider(
                     minimum=0.1,
                     maximum=1.0,
                     value=0.5,
                     step=0.1,
                     label="Multi-Scale Factor",
                 )
                 multiscale_fullres_start = gr.Slider(
                     minimum=0, maximum=10, value=3, step=1, label="Full-Res Start Steps"
                 )
                 multiscale_fullres_end = gr.Slider(
                     minimum=0, maximum=20, value=8, step=1, label="Full-Res End Steps"
                 )
 
             # Make input image visible only when img2img is enabled
             img2img_enabled.change(
                 fn=lambda x: gr.update(visible=x),
                 inputs=[img2img_enabled],
                 outputs=[img2img_image],
             )
 
             # Handle preset changes
             multiscale_preset.change(
                 fn=apply_multiscale_preset,
                 inputs=[multiscale_preset],
                 outputs=[
                     multiscale_enabled,
                     multiscale_factor,
                     multiscale_fullres_start,
                     multiscale_fullres_end,
                     multiscale_intermittent,
                 ],
             )
 
             generate_btn = gr.Button("Generate")
 
             # Model Cache Management
             with gr.Accordion("Model Cache Management", open=False):
                 with gr.Row():
                     vram_info_btn = gr.Button("🔍 Check VRAM Usage")
                     clear_cache_btn = gr.Button("🗑️ Clear Model Cache")
                 vram_info_display = gr.Markdown("")
                 cache_status_display = gr.Markdown("")
 
         # Output gallery
         gallery = gr.Gallery(
             label="Generated Images",
             show_label=True,
             elem_id="gallery",
             columns=[2],
             rows=[2],
             object_fit="contain",
             height="auto",
         )
 
     # Connect generate button to pipeline
     generate_btn.click(
         fn=generate_images,
         inputs=[
             prompt,
             width,
             height,
             num_images,
             batch_size,
             hires_fix,
             adetailer,
             enhance_prompt,
             img2img_enabled,
             img2img_image,
             stable_fast,
             reuse_seed,
             flux_enabled,
             prio_speed,
             realistic_model,
             multiscale_enabled,
             multiscale_intermittent,
             multiscale_factor,
             multiscale_fullres_start,
             multiscale_fullres_end,
             keep_models_loaded,
         ],
         outputs=gallery,
     )
 
     # Connect VRAM info and cache management buttons
     vram_info_btn.click(
         fn=get_vram_info,
         outputs=vram_info_display,
     )
 
     clear_cache_btn.click(
         fn=clear_model_cache_ui,
         outputs=cache_status_display,
     )
 
 
 def is_huggingface_space():
     return "SPACE_ID" in os.environ
 
 
 def is_docker_environment():
     return "GRADIO_SERVER_PORT" in os.environ and "GRADIO_SERVER_NAME" in os.environ
 
 
 # For local testing
 if __name__ == "__main__":
     if is_huggingface_space():
         demo.launch(
             debug=False,
             server_name="0.0.0.0",
             server_port=7860,  # Standard HF Spaces port
         )
     elif is_docker_environment():
         # Docker environment - use environment variables
         server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0")
         server_port = int(os.environ.get("GRADIO_SERVER_PORT", 7860))
         demo.launch(
             debug=False,
             server_name=server_name,
             server_port=server_port,
         )
     else:
         demo.launch(
             server_name="0.0.0.0",
             server_port=8000,
             auth=None,
             share=True,  # Only enable sharing locally
             debug=True,
         )