Fabrice-TIERCELIN committed
Commit f60c4e9 · verified · 1 parent: 7ec4d6d

Upload 3 files

Files changed (3)
  1. README.md +18 -10
  2. app.py +864 -623
  3. requirements.txt +48 -15
README.md CHANGED
@@ -1,13 +1,21 @@
  ---
- title: LTX Video Fast
- emoji: 🎥
- colorFrom: yellow
- colorTo: pink
  sdk: gradio
- sdk_version: 5.29.1
  app_file: app.py
- pinned: false
- short_description: ultra-fast video model, LTX 0.9.7 13B distilled
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ title: SUPIR Image Upscaler
  sdk: gradio
+ emoji: 📷
+ sdk_version: 4.38.1
  app_file: app.py
+ license: mit
+ colorFrom: blue
+ colorTo: pink
+ tags:
+ - Upscaling
+ - Restoring
+ - Image-to-Image
+ - Image-2-Image
+ - Img-to-Img
+ - Img-2-Img
+ - language models
+ - LLMs
+ short_description: Restore blurred or small images with prompt
+ suggested_hardware: zero-a10g
+ ---
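
The block above is the complete Spaces frontmatter for the SUPIR demo. As a rough sanity check (illustration only, not part of the commit), the header can be parsed locally; this sketch assumes PyYAML is installed and the README.md shown above is in the working directory:

```python
# Sanity check for the Spaces frontmatter above (illustration only, not part
# of the commit). Assumes PyYAML is installed and README.md is in the CWD.
import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The frontmatter is the YAML block between the first two '---' markers.
_, frontmatter, _ = text.split("---", 2)
config = yaml.safe_load(frontmatter)

assert config["sdk"] == "gradio"
assert config["sdk_version"] == "4.38.1"
assert config["app_file"] == "app.py"
print(config["title"], "->", config["short_description"])
```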
app.py CHANGED
@@ -1,623 +1,864 @@
1
- import gradio as gr
2
- import torch
3
- import spaces
4
- import numpy as np
5
- import random
6
- import os
7
- import yaml
8
- from pathlib import Path
9
- import imageio
10
- import tempfile
11
- from PIL import Image
12
- from huggingface_hub import hf_hub_download
13
- import shutil
14
-
15
- from inference import (
16
- create_ltx_video_pipeline,
17
- create_latent_upsampler,
18
- load_image_to_tensor_with_resize_and_crop,
19
- seed_everething,
20
- get_device,
21
- calculate_padding,
22
- load_media_file
23
- )
24
- from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline, LTXVideoPipeline
25
- from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
26
-
27
- image_i2v_debug_value = None
28
- i2v_prompt_debug_value = None
29
- height_input_debug_value = None
30
- width_input_debug_value = None
31
- duration_input_debug_value = None
32
- config_file_path = "configs/ltxv-13b-0.9.7-distilled.yaml"
33
- with open(config_file_path, "r") as file:
34
- PIPELINE_CONFIG_YAML = yaml.safe_load(file)
35
-
36
- LTX_REPO = "Lightricks/LTX-Video"
37
- MAX_IMAGE_SIZE = PIPELINE_CONFIG_YAML.get("max_resolution", 1280)
38
- MAX_NUM_FRAMES = 257
39
-
40
- FPS = 30.0
41
-
42
- # --- Global variables for loaded models ---
43
- pipeline_instance = None
44
- latent_upsampler_instance = None
45
- models_dir = "downloaded_models_gradio_cpu_init"
46
- Path(models_dir).mkdir(parents=True, exist_ok=True)
47
-
48
- print("Downloading models (if not present)...")
49
- distilled_model_actual_path = hf_hub_download(
50
- repo_id=LTX_REPO,
51
- filename=PIPELINE_CONFIG_YAML["checkpoint_path"],
52
- local_dir=models_dir,
53
- local_dir_use_symlinks=False
54
- )
55
- PIPELINE_CONFIG_YAML["checkpoint_path"] = distilled_model_actual_path
56
- print(f"Distilled model path: {distilled_model_actual_path}")
57
-
58
- SPATIAL_UPSCALER_FILENAME = PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"]
59
- spatial_upscaler_actual_path = hf_hub_download(
60
- repo_id=LTX_REPO,
61
- filename=SPATIAL_UPSCALER_FILENAME,
62
- local_dir=models_dir,
63
- local_dir_use_symlinks=False
64
- )
65
- PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"] = spatial_upscaler_actual_path
66
- print(f"Spatial upscaler model path: {spatial_upscaler_actual_path}")
67
-
68
- print("Creating LTX Video pipeline on CPU...")
69
- pipeline_instance = create_ltx_video_pipeline(
70
- ckpt_path=PIPELINE_CONFIG_YAML["checkpoint_path"],
71
- precision=PIPELINE_CONFIG_YAML["precision"],
72
- text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"],
73
- sampler=PIPELINE_CONFIG_YAML["sampler"],
74
- device="cpu",
75
- enhance_prompt=False,
76
- prompt_enhancer_image_caption_model_name_or_path=PIPELINE_CONFIG_YAML["prompt_enhancer_image_caption_model_name_or_path"],
77
- prompt_enhancer_llm_model_name_or_path=PIPELINE_CONFIG_YAML["prompt_enhancer_llm_model_name_or_path"],
78
- )
79
- print("LTX Video pipeline created on CPU.")
80
-
81
- if PIPELINE_CONFIG_YAML.get("spatial_upscaler_model_path"):
82
- print("Creating latent upsampler on CPU...")
83
- latent_upsampler_instance = create_latent_upsampler(
84
- PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"],
85
- device="cpu"
86
- )
87
- print("Latent upsampler created on CPU.")
88
-
89
- target_inference_device = "cuda"
90
- print(f"Target inference device: {target_inference_device}")
91
- pipeline_instance.to(target_inference_device)
92
- if latent_upsampler_instance:
93
- latent_upsampler_instance.to(target_inference_device)
94
-
95
-
96
- # --- Helper function for dimension calculation ---
97
- MIN_DIM_SLIDER = 256 # As defined in the sliders minimum attribute
98
- TARGET_FIXED_SIDE = 768 # Desired fixed side length as per requirement
99
-
100
- def calculate_new_dimensions(orig_w, orig_h):
101
- """
102
- Calculates new dimensions for height and width sliders based on original media dimensions.
103
- Ensures one side is TARGET_FIXED_SIDE, the other is scaled proportionally,
104
- both are multiples of 32, and within [MIN_DIM_SLIDER, MAX_IMAGE_SIZE].
105
- """
106
- if orig_w == 0 or orig_h == 0:
107
- # Default to TARGET_FIXED_SIDE square if original dimensions are invalid
108
- return int(TARGET_FIXED_SIDE), int(TARGET_FIXED_SIDE)
109
-
110
- if orig_w >= orig_h: # Landscape or square
111
- new_h = TARGET_FIXED_SIDE
112
- aspect_ratio = orig_w / orig_h
113
- new_w_ideal = new_h * aspect_ratio
114
-
115
- # Round to nearest multiple of 32
116
- new_w = round(new_w_ideal / 32) * 32
117
-
118
- # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
119
- new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
120
- # Ensure new_h is also clamped (TARGET_FIXED_SIDE should be within these bounds if configured correctly)
121
- new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
122
- else: # Portrait
123
- new_w = TARGET_FIXED_SIDE
124
- aspect_ratio = orig_h / orig_w # Use H/W ratio for portrait scaling
125
- new_h_ideal = new_w * aspect_ratio
126
-
127
- # Round to nearest multiple of 32
128
- new_h = round(new_h_ideal / 32) * 32
129
-
130
- # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
131
- new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
132
- # Ensure new_w is also clamped
133
- new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
134
-
135
- return int(new_h), int(new_w)
136
-
137
- def get_duration(prompt, negative_prompt, input_image_filepath, input_video_filepath,
138
- height_ui, width_ui, mode,
139
- duration_ui, # Removed ui_steps
140
- ui_frames_to_use,
141
- seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
142
- progress):
143
- if duration_ui > 7:
144
- return 120
145
- else:
146
- return 120
147
-
148
- @spaces.GPU(duration=get_duration)
149
- def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath,
150
- height_ui, width_ui, mode,
151
- duration_ui,
152
- ui_frames_to_use,
153
- seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
154
- progress=gr.Progress(track_tqdm=True)):
155
- global i2v_prompt_debug_value
156
- global image_i2v_debug_value
157
- global height_input_debug_value
158
- global width_input_debug_value
159
- global duration_input_debug_value
160
-
161
- if i2v_prompt_debug_value is not None:
162
- prompt = i2v_prompt_debug_value
163
- i2v_prompt_debug_value = None
164
-
165
- if image_i2v_debug_value is not None:
166
- input_image_filepath = image_i2v_debug_value
167
- image_i2v_debug_value = None
168
-
169
- if height_input_debug_value is not None:
170
- height_ui = height_input_debug_value
171
- height_input_debug_value = None
172
-
173
- if width_input_debug_value is not None:
174
- width_ui = width_input_debug_value
175
- width_input_debug_value = None
176
-
177
- if duration_input_debug_value is not None:
178
- duration_ui = duration_input_debug_value
179
- duration_input_debug_value = None
180
-
181
- if randomize_seed:
182
- seed_ui = random.randint(0, 2**32 - 1)
183
- seed_everething(int(seed_ui))
184
-
185
- target_frames_ideal = duration_ui * FPS
186
- target_frames_rounded = round(target_frames_ideal)
187
- if target_frames_rounded < 1:
188
- target_frames_rounded = 1
189
-
190
- n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
191
- actual_num_frames = int(n_val * 8 + 1)
192
-
193
- actual_num_frames = max(9, actual_num_frames)
194
- actual_num_frames = min(MAX_NUM_FRAMES, actual_num_frames)
195
-
196
- actual_height = int(height_ui)
197
- actual_width = int(width_ui)
198
-
199
- height_padded = ((actual_height - 1) // 32 + 1) * 32
200
- width_padded = ((actual_width - 1) // 32 + 1) * 32
201
- num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1
202
- if num_frames_padded != actual_num_frames:
203
- print(f"Warning: actual_num_frames ({actual_num_frames}) and num_frames_padded ({num_frames_padded}) differ. Using num_frames_padded for pipeline.")
204
-
205
- padding_values = calculate_padding(actual_height, actual_width, height_padded, width_padded)
206
-
207
- call_kwargs = {
208
- "prompt": prompt,
209
- "negative_prompt": negative_prompt,
210
- "height": height_padded,
211
- "width": width_padded,
212
- "num_frames": num_frames_padded,
213
- "frame_rate": int(FPS),
214
- "generator": torch.Generator(device=target_inference_device).manual_seed(int(seed_ui)),
215
- "output_type": "pt",
216
- "conditioning_items": None,
217
- "media_items": None,
218
- "decode_timestep": PIPELINE_CONFIG_YAML["decode_timestep"],
219
- "decode_noise_scale": PIPELINE_CONFIG_YAML["decode_noise_scale"],
220
- "stochastic_sampling": PIPELINE_CONFIG_YAML["stochastic_sampling"],
221
- "image_cond_noise_scale": 0.15,
222
- "is_video": True,
223
- "vae_per_channel_normalize": True,
224
- "mixed_precision": (PIPELINE_CONFIG_YAML["precision"] == "mixed_precision"),
225
- "offload_to_cpu": False,
226
- "enhance_prompt": False,
227
- }
228
-
229
- stg_mode_str = PIPELINE_CONFIG_YAML.get("stg_mode", "attention_values")
230
- if stg_mode_str.lower() in ["stg_av", "attention_values"]:
231
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionValues
232
- elif stg_mode_str.lower() in ["stg_as", "attention_skip"]:
233
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionSkip
234
- elif stg_mode_str.lower() in ["stg_r", "residual"]:
235
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.Residual
236
- elif stg_mode_str.lower() in ["stg_t", "transformer_block"]:
237
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.TransformerBlock
238
- else:
239
- raise ValueError(f"Invalid stg_mode: {stg_mode_str}")
240
-
241
- if mode == "image-to-video" and input_image_filepath:
242
- try:
243
- media_tensor = load_image_to_tensor_with_resize_and_crop(
244
- input_image_filepath, actual_height, actual_width
245
- )
246
- media_tensor = torch.nn.functional.pad(media_tensor, padding_values)
247
- call_kwargs["conditioning_items"] = [ConditioningItem(media_tensor.to(target_inference_device), 0, 1.0)]
248
- except Exception as e:
249
- print(f"Error loading image {input_image_filepath}: {e}")
250
- raise gr.Error(f"Could not load image: {e}")
251
- elif mode == "video-to-video" and input_video_filepath:
252
- try:
253
- call_kwargs["media_items"] = load_media_file(
254
- media_path=input_video_filepath,
255
- height=actual_height,
256
- width=actual_width,
257
- max_frames=int(ui_frames_to_use),
258
- padding=padding_values
259
- ).to(target_inference_device)
260
- except Exception as e:
261
- print(f"Error loading video {input_video_filepath}: {e}")
262
- raise gr.Error(f"Could not load video: {e}")
263
-
264
- print(f"Moving models to {target_inference_device} for inference (if not already there)...")
265
-
266
- active_latent_upsampler = None
267
- if improve_texture_flag and latent_upsampler_instance:
268
- active_latent_upsampler = latent_upsampler_instance
269
-
270
- result_images_tensor = None
271
- if improve_texture_flag:
272
- if not active_latent_upsampler:
273
- raise gr.Error("Spatial upscaler model not loaded or improve_texture not selected, cannot use multi-scale.")
274
-
275
- multi_scale_pipeline_obj = LTXMultiScalePipeline(pipeline_instance, active_latent_upsampler)
276
-
277
- first_pass_args = PIPELINE_CONFIG_YAML.get("first_pass", {}).copy()
278
- first_pass_args["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
279
- # num_inference_steps will be derived from len(timesteps) in the pipeline
280
- first_pass_args.pop("num_inference_steps", None)
281
-
282
-
283
- second_pass_args = PIPELINE_CONFIG_YAML.get("second_pass", {}).copy()
284
- second_pass_args["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
285
- # num_inference_steps will be derived from len(timesteps) in the pipeline
286
- second_pass_args.pop("num_inference_steps", None)
287
-
288
- multi_scale_call_kwargs = call_kwargs.copy()
289
- multi_scale_call_kwargs.update({
290
- "downscale_factor": PIPELINE_CONFIG_YAML["downscale_factor"],
291
- "first_pass": first_pass_args,
292
- "second_pass": second_pass_args,
293
- })
294
-
295
- print(f"Calling multi-scale pipeline (eff. HxW: {actual_height}x{actual_width}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
296
- result_images_tensor = multi_scale_pipeline_obj(**multi_scale_call_kwargs).images
297
- else:
298
- single_pass_call_kwargs = call_kwargs.copy()
299
- first_pass_config_from_yaml = PIPELINE_CONFIG_YAML.get("first_pass", {})
300
-
301
- single_pass_call_kwargs["timesteps"] = first_pass_config_from_yaml.get("timesteps")
302
- single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
303
- single_pass_call_kwargs["stg_scale"] = first_pass_config_from_yaml.get("stg_scale")
304
- single_pass_call_kwargs["rescaling_scale"] = first_pass_config_from_yaml.get("rescaling_scale")
305
- single_pass_call_kwargs["skip_block_list"] = first_pass_config_from_yaml.get("skip_block_list")
306
-
307
- # Remove keys that might conflict or are not used in single pass / handled by above
308
- single_pass_call_kwargs.pop("num_inference_steps", None)
309
- single_pass_call_kwargs.pop("first_pass", None)
310
- single_pass_call_kwargs.pop("second_pass", None)
311
- single_pass_call_kwargs.pop("downscale_factor", None)
312
-
313
- print(f"Calling base pipeline (padded HxW: {height_padded}x{width_padded}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
314
- result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images
315
-
316
- if result_images_tensor is None:
317
- raise gr.Error("Generation failed.")
318
-
319
- pad_left, pad_right, pad_top, pad_bottom = padding_values
320
- slice_h_end = -pad_bottom if pad_bottom > 0 else None
321
- slice_w_end = -pad_right if pad_right > 0 else None
322
-
323
- result_images_tensor = result_images_tensor[
324
- :, :, :actual_num_frames, pad_top:slice_h_end, pad_left:slice_w_end
325
- ]
326
-
327
- video_np = result_images_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy()
328
-
329
- video_np = np.clip(video_np, 0, 1)
330
- video_np = (video_np * 255).astype(np.uint8)
331
-
332
- temp_dir = tempfile.mkdtemp()
333
- timestamp = random.randint(10000,99999)
334
- output_video_path = os.path.join(temp_dir, f"output_{timestamp}.mp4")
335
-
336
- try:
337
- with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], macro_block_size=1) as video_writer:
338
- for frame_idx in range(video_np.shape[0]):
339
- progress(frame_idx / video_np.shape[0], desc="Saving video")
340
- video_writer.append_data(video_np[frame_idx])
341
- except Exception as e:
342
- print(f"Error saving video with macro_block_size=1: {e}")
343
- try:
344
- with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], format='FFMPEG', codec='libx264', quality=8) as video_writer:
345
- for frame_idx in range(video_np.shape[0]):
346
- progress(frame_idx / video_np.shape[0], desc="Saving video (fallback ffmpeg)")
347
- video_writer.append_data(video_np[frame_idx])
348
- except Exception as e2:
349
- print(f"Fallback video saving error: {e2}")
350
- raise gr.Error(f"Failed to save video: {e2}")
351
-
352
- return output_video_path, seed_ui
353
-
354
- def update_task_image():
355
- return "image-to-video"
356
-
357
- def update_task_text():
358
- return "text-to-video"
359
-
360
- def update_task_video():
361
- return "video-to-video"
362
-
363
- # --- Gradio UI Definition ---
364
- css="""
365
- #col-container {
366
- margin: 0 auto;
367
- max-width: 900px;
368
- }
369
- """
370
-
371
- with gr.Blocks(css=css) as demo:
372
- gr.Markdown("# LTX Video 0.9.7 Distilled")
373
- gr.Markdown("Fast high quality video generation. [Model](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.7-distilled.safetensors) [GitHub](https://github.com/Lightricks/LTX-Video) [Diffusers](#)")
374
-
375
- with gr.Row():
376
- with gr.Column():
377
- with gr.Tab("image-to-video") as image_tab:
378
- video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
379
- image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam", "clipboard"])
380
- i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
381
- i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
382
- with gr.Tab("text-to-video") as text_tab:
383
- image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
384
- video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
385
- t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
386
- t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
387
- with gr.Tab("video-to-video", visible=False) as video_tab:
388
- image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
389
- video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"]) # type defaults to filepath
390
- frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
391
- v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
392
- v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
393
-
394
- duration_input = gr.Slider(
395
- label="Video Duration (seconds)",
396
- minimum=0.3,
397
- maximum=8.5,
398
- value=2,
399
- step=0.1,
400
- info=f"Target video duration (0.3s to 8.5s)"
401
- )
402
- improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
403
-
404
- with gr.Column():
405
- output_video = gr.Video(label="Generated Video", interactive=False)
406
- # gr.DeepLinkButton()
407
-
408
- with gr.Accordion("Advanced settings", open=False):
409
- mode = gr.Dropdown(["text-to-video", "image-to-video", "video-to-video"], label="task", value="image-to-video", visible=False)
410
- negative_prompt_input = gr.Textbox(label="Negative Prompt", value="worst quality, inconsistent motion, blurry, jittery, distorted", lines=2)
411
- with gr.Row():
412
- seed_input = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=2**32-1)
413
- randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
414
- with gr.Row():
415
- guidance_scale_input = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, value=PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0), step=0.1, info="Controls how much the prompt influences the output. Higher values = stronger influence.")
416
- with gr.Row():
417
- height_input = gr.Slider(label="Height", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
418
- width_input = gr.Slider(label="Width", value=704, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
419
-
420
- with gr.Accordion("Debug", open=False):
421
- image_i2v_debug = gr.Image(label="Input Image Debug", type="filepath", sources=["upload", "webcam", "clipboard"])
422
- i2v_prompt_debug = gr.Textbox(label="Prompt Debug", value="", lines=3)
423
- height_input_debug = gr.Slider(label="Height Debug", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
424
- width_input_debug = gr.Slider(label="Width Debug", value=704, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
425
- duration_input_debug = gr.Slider(
426
- label="Video Duration Debug (seconds)",
427
- minimum=0.3,
428
- maximum=8.5,
429
- value=6,
430
- step=0.1,
431
- info=f"Target video duration (0.3s to 8.5s)"
432
- )
433
-
434
- with gr.Row(visible=False):
435
- gr.Examples(
436
- examples = [
437
- [
438
- "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
439
- "",
440
- "./Example_LTX/Example1.png",
441
- None,
442
- 512,
443
- 800,
444
- "image-to-video",
445
- 6,
446
- 9,
447
- 42,
448
- True,
449
- 1,
450
- True
451
- ],
452
- ],
453
- run_on_click = True,
454
- fn = generate,
455
- inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
456
- height_input, width_input, mode,
457
- duration_input, frames_to_use,
458
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture],
459
- outputs = [output_video, seed_input],
460
- cache_examples = True,
461
- )
462
-
463
- def height_input_debug_change(value):
464
- global height_input_debug_value
465
- height_input_debug_value = value
466
- return []
467
-
468
- def width_input_debug_change(value):
469
- global width_input_debug_value
470
- width_input_debug_value = value
471
- return []
472
-
473
- def duration_input_debug_change(value):
474
- global duration_input_debug_value
475
- duration_input_debug_value = value
476
- return []
477
-
478
- def i2v_prompt_debug_change(prompt):
479
- global i2v_prompt_debug_value
480
- i2v_prompt_debug_value = prompt
481
- return []
482
-
483
- # --- Event handlers for updating dimensions on upload ---
484
- def handle_image_upload_for_dims(image_filepath, current_h, current_w):
485
- if not image_filepath: # Image cleared or no image initially
486
- # Keep current slider values if image is cleared or no input
487
- return gr.update(value=current_h), gr.update(value=current_w)
488
- try:
489
- img = Image.open(image_filepath)
490
- orig_w, orig_h = img.size
491
- new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
492
- return gr.update(value=new_h), gr.update(value=new_w)
493
- except Exception as e:
494
- print(f"Error processing image for dimension update: {e}")
495
- # Keep current slider values on error
496
- return gr.update(value=current_h), gr.update(value=current_w)
497
-
498
- def handle_image_debug_upload_for_dims(image_filepath, current_h, current_w):
499
- global image_i2v_debug_value
500
- image_i2v_debug_value = image_filepath
501
- if not image_filepath: # Image cleared or no image initially
502
- # Keep current slider values if image is cleared or no input
503
- return gr.update(value=current_h), gr.update(value=current_w)
504
- try:
505
- img = Image.open(image_filepath)
506
- orig_w, orig_h = img.size
507
- new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
508
- global height_input_debug_value
509
- height_input_debug_value = new_h
510
- global width_input_debug_value
511
- width_input_debug_value = new_w
512
- return gr.update(value=new_h), gr.update(value=new_w)
513
- except Exception as e:
514
- # Keep current slider values on error
515
- return gr.update(value=current_h), gr.update(value=current_w)
516
-
517
- def handle_video_upload_for_dims(video_filepath, current_h, current_w):
518
- if not video_filepath: # Video cleared or no video initially
519
- return gr.update(value=current_h), gr.update(value=current_w)
520
- try:
521
- # Ensure video_filepath is a string for os.path.exists and imageio
522
- video_filepath_str = str(video_filepath)
523
- if not os.path.exists(video_filepath_str):
524
- print(f"Video file path does not exist for dimension update: {video_filepath_str}")
525
- return gr.update(value=current_h), gr.update(value=current_w)
526
-
527
- orig_w, orig_h = -1, -1
528
- with imageio.get_reader(video_filepath_str) as reader:
529
- meta = reader.get_meta_data()
530
- if 'size' in meta:
531
- orig_w, orig_h = meta['size']
532
- else:
533
- # Fallback: read first frame if 'size' not in metadata
534
- try:
535
- first_frame = reader.get_data(0)
536
- # Shape is (h, w, c) for frames
537
- orig_h, orig_w = first_frame.shape[0], first_frame.shape[1]
538
- except Exception as e_frame:
539
- print(f"Could not get video size from metadata or first frame: {e_frame}")
540
- return gr.update(value=current_h), gr.update(value=current_w)
541
-
542
- if orig_w == -1 or orig_h == -1: # If dimensions couldn't be determined
543
- print(f"Could not determine dimensions for video: {video_filepath_str}")
544
- return gr.update(value=current_h), gr.update(value=current_w)
545
-
546
- new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
547
- return gr.update(value=new_h), gr.update(value=new_w)
548
- except Exception as e:
549
- # Log type of video_filepath for debugging if it's not a path-like string
550
- print(f"Error processing video for dimension update: {e} (Path: {video_filepath}, Type: {type(video_filepath)})")
551
- return gr.update(value=current_h), gr.update(value=current_w)
552
-
553
-
554
- image_i2v_debug.upload(
555
- fn=handle_image_debug_upload_for_dims,
556
- inputs=[image_i2v_debug, height_input_debug, width_input_debug],
557
- outputs=[height_input_debug, width_input_debug]
558
- )
559
-
560
- image_i2v.upload(
561
- fn=handle_image_upload_for_dims,
562
- inputs=[image_i2v, height_input, width_input],
563
- outputs=[height_input, width_input]
564
- )
565
- video_v2v.upload(
566
- fn=handle_video_upload_for_dims,
567
- inputs=[video_v2v, height_input, width_input],
568
- outputs=[height_input, width_input]
569
- )
570
- i2v_prompt_debug.change(
571
- fn=i2v_prompt_debug_change,
572
- inputs=[image_i2v_debug],
573
- outputs=[]
574
- )
575
- height_input_debug.change(
576
- fn=height_input_debug_change,
577
- inputs=[height_input_debug],
578
- outputs=[]
579
- )
580
- width_input_debug.change(
581
- fn=width_input_debug_change,
582
- inputs=[width_input_debug],
583
- outputs=[]
584
- )
585
- duration_input_debug.change(
586
- fn=duration_input_debug_change,
587
- inputs=[duration_input_debug],
588
- outputs=[]
589
- )
590
-
591
- image_tab.select(
592
- fn=update_task_image,
593
- outputs=[mode]
594
- )
595
- text_tab.select(
596
- fn=update_task_text,
597
- outputs=[mode]
598
- )
599
-
600
- t2v_inputs = [t2v_prompt, negative_prompt_input, image_n_hidden, video_n_hidden,
601
- height_input, width_input, mode,
602
- duration_input, frames_to_use,
603
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
604
-
605
- i2v_inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
606
- height_input, width_input, mode,
607
- duration_input, frames_to_use,
608
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
609
-
610
- v2v_inputs = [v2v_prompt, negative_prompt_input, image_v_hidden, video_v2v,
611
- height_input, width_input, mode,
612
- duration_input, frames_to_use,
613
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
614
-
615
- t2v_button.click(fn=generate, inputs=t2v_inputs, outputs=[output_video, seed_input], api_name="text_to_video")
616
- i2v_button.click(fn=generate, inputs=i2v_inputs, outputs=[output_video, seed_input], api_name="image_to_video")
617
- v2v_button.click(fn=generate, inputs=v2v_inputs, outputs=[output_video, seed_input], api_name="video_to_video")
618
-
619
- if __name__ == "__main__":
620
- if os.path.exists(models_dir) and os.path.isdir(models_dir):
621
- print(f"Model directory: {Path(models_dir).resolve()}")
622
-
623
- demo.queue().launch(debug=True, share=False, mcp_server=True)
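
The removed `generate` function above snaps the requested duration to a frame count of the form N*8+1 at 30 fps, clamped to [9, 257]. A minimal sketch of that rounding, with illustrative names only (not part of the commit):

```python
# Frame-count rounding as in the removed LTX app: duration (s) -> N*8+1 frames
# at 30 fps, clamped to [9, 257]. Illustration only, not part of the commit.
FPS = 30.0
MAX_NUM_FRAMES = 257

def frames_for_duration(duration_s: float) -> int:
    target = max(1, round(duration_s * FPS))   # ideal frame count
    n = round((target - 1) / 8.0)              # snap to the N*8+1 grid
    return max(9, min(MAX_NUM_FRAMES, int(n * 8 + 1)))

assert frames_for_duration(2.0) == 57    # 60 frames rounds to 7*8+1
assert frames_for_duration(8.5) == 257   # capped at MAX_NUM_FRAMES
```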
1
+ import os
2
+ import gradio as gr
3
+ import argparse
4
+ import numpy as np
5
+ import torch
6
+ import einops
7
+ import copy
8
+ import math
9
+ import time
10
+ import random
11
+ import spaces
12
+ import re
13
+ import uuid
14
+
15
+ from gradio_imageslider import ImageSlider
16
+ from PIL import Image
17
+ from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
18
+ from huggingface_hub import hf_hub_download
19
+ from pillow_heif import register_heif_opener
20
+
21
+ register_heif_opener()
22
+
23
+ max_64_bit_int = np.iinfo(np.int32).max
24
+
25
+ hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
26
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="sd_xl_base_1.0_0.9vae.safetensors", local_dir="yushan777_SUPIR")
27
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0F.ckpt", local_dir="yushan777_SUPIR")
28
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0Q.ckpt", local_dir="yushan777_SUPIR")
29
+ hf_hub_download(repo_id="RunDiffusion/Juggernaut-XL-Lightning", filename="Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors", local_dir="RunDiffusion_Juggernaut-XL-Lightning")
30
+
31
+ parser = argparse.ArgumentParser()
32
+ parser.add_argument("--opt", type=str, default='options/SUPIR_v0.yaml')
33
+ parser.add_argument("--ip", type=str, default='127.0.0.1')
34
+ parser.add_argument("--port", type=int, default='6688')
35
+ parser.add_argument("--no_llava", action='store_true', default=True)#False
36
+ parser.add_argument("--use_image_slider", action='store_true', default=False)#False
37
+ parser.add_argument("--log_history", action='store_true', default=False)
38
+ parser.add_argument("--loading_half_params", action='store_true', default=False)#False
39
+ parser.add_argument("--use_tile_vae", action='store_true', default=True)#False
40
+ parser.add_argument("--encoder_tile_size", type=int, default=512)
41
+ parser.add_argument("--decoder_tile_size", type=int, default=64)
42
+ parser.add_argument("--load_8bit_llava", action='store_true', default=False)
43
+ args = parser.parse_args()
44
+
45
+ if torch.cuda.device_count() > 0:
46
+ SUPIR_device = 'cuda:0'
47
+
48
+ # Load SUPIR
49
+ model, default_setting = create_SUPIR_model(args.opt, SUPIR_sign='Q', load_default_setting=True)
50
+ if args.loading_half_params:
51
+ model = model.half()
52
+ if args.use_tile_vae:
53
+ model.init_tile_vae(encoder_tile_size=args.encoder_tile_size, decoder_tile_size=args.decoder_tile_size)
54
+ model = model.to(SUPIR_device)
55
+ model.first_stage_model.denoise_encoder_s1 = copy.deepcopy(model.first_stage_model.denoise_encoder)
56
+ model.current_model = 'v0-Q'
57
+ ckpt_Q, ckpt_F = load_QF_ckpt(args.opt)
58
+
59
+ def check_upload(input_image):
60
+ if input_image is None:
61
+ raise gr.Error("Please provide an image to restore.")
62
+ return gr.update(visible = True)
63
+
64
+ def update_seed(is_randomize_seed, seed):
65
+ if is_randomize_seed:
66
+ return random.randint(0, max_64_bit_int)
67
+ return seed
68
+
69
+ def reset():
70
+ return [
71
+ None,
72
+ 0,
73
+ None,
74
+ None,
75
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
76
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
77
+ 1,
78
+ 1024,
79
+ 1,
80
+ 2,
81
+ 50,
82
+ -1.0,
83
+ 1.,
84
+ default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0,
85
+ True,
86
+ random.randint(0, max_64_bit_int),
87
+ 5,
88
+ 1.003,
89
+ "Wavelet",
90
+ "fp32",
91
+ "fp32",
92
+ 1.0,
93
+ True,
94
+ False,
95
+ default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
96
+ 0.,
97
+ "v0-Q",
98
+ "input",
99
+ 179
100
+ ]
101
+
102
+ def check_and_update(input_image):
103
+ if input_image is None:
104
+ raise gr.Error("Please provide an image to restore.")
105
+ return gr.update(visible = True)
106
+
107
+ @spaces.GPU(duration=420)
108
+ def stage1_process(
109
+ input_image,
110
+ gamma_correction,
111
+ diff_dtype,
112
+ ae_dtype
113
+ ):
114
+ print('stage1_process ==>>')
115
+ if torch.cuda.device_count() == 0:
116
+ gr.Warning('Set this space to GPU config to make it work.')
117
+ return None, None
118
+ torch.cuda.set_device(SUPIR_device)
119
+ LQ = HWC3(np.array(Image.open(input_image)))
120
+ LQ = fix_resize(LQ, 512)
121
+ # stage1
122
+ LQ = np.array(LQ) / 255 * 2 - 1
123
+ LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
124
+
125
+ model.ae_dtype = convert_dtype(ae_dtype)
126
+ model.model.dtype = convert_dtype(diff_dtype)
127
+
128
+ LQ = model.batchify_denoise(LQ, is_stage1=True)
129
+ LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
130
+ # gamma correction
131
+ LQ = LQ / 255.0
132
+ LQ = np.power(LQ, gamma_correction)
133
+ LQ *= 255.0
134
+ LQ = LQ.round().clip(0, 255).astype(np.uint8)
135
+ print('<<== stage1_process')
136
+ return LQ, gr.update(visible = True)
137
+
138
+ def stage2_process(*args, **kwargs):
139
+ try:
140
+ return restore_in_Xmin(*args, **kwargs)
141
+ except Exception as e:
142
+ # NO_GPU_MESSAGE_INQUEUE
143
+ print("gradio.exceptions.Error 'No GPU is currently available for you after 60s'")
144
+ print('str(type(e)): ' + str(type(e))) # <class 'gradio.exceptions.Error'>
145
+ print('str(e): ' + str(e)) # You have exceeded your GPU quota...
146
+ try:
147
+ print('e.message: ' + e.message) # No GPU is currently available for you after 60s
148
+ except Exception as e2:
149
+ print('Failure')
150
+ if str(e).startswith("No GPU is currently available for you after 60s"):
151
+ print('Exception identified!!!')
152
+ #if str(type(e)) == "<class 'gradio.exceptions.Error'>":
153
+ #print('Exception of name ' + type(e).__name__)
154
+ raise e
155
+
156
+ def restore_in_Xmin(
157
+ noisy_image,
158
+ rotation,
159
+ denoise_image,
160
+ prompt,
161
+ a_prompt,
162
+ n_prompt,
163
+ num_samples,
164
+ min_size,
165
+ downscale,
166
+ upscale,
167
+ edm_steps,
168
+ s_stage1,
169
+ s_stage2,
170
+ s_cfg,
171
+ randomize_seed,
172
+ seed,
173
+ s_churn,
174
+ s_noise,
175
+ color_fix_type,
176
+ diff_dtype,
177
+ ae_dtype,
178
+ gamma_correction,
179
+ linear_CFG,
180
+ linear_s_stage2,
181
+ spt_linear_CFG,
182
+ spt_linear_s_stage2,
183
+ model_select,
184
+ output_format,
185
+ allocation
186
+ ):
187
+ print("noisy_image:\n" + str(noisy_image))
188
+ print("denoise_image:\n" + str(denoise_image))
189
+ print("rotation: " + str(rotation))
190
+ print("prompt: " + str(prompt))
191
+ print("a_prompt: " + str(a_prompt))
192
+ print("n_prompt: " + str(n_prompt))
193
+ print("num_samples: " + str(num_samples))
194
+ print("min_size: " + str(min_size))
195
+ print("downscale: " + str(downscale))
196
+ print("upscale: " + str(upscale))
197
+ print("edm_steps: " + str(edm_steps))
198
+ print("s_stage1: " + str(s_stage1))
199
+ print("s_stage2: " + str(s_stage2))
200
+ print("s_cfg: " + str(s_cfg))
201
+ print("randomize_seed: " + str(randomize_seed))
202
+ print("seed: " + str(seed))
203
+ print("s_churn: " + str(s_churn))
204
+ print("s_noise: " + str(s_noise))
205
+ print("color_fix_type: " + str(color_fix_type))
206
+ print("diff_dtype: " + str(diff_dtype))
207
+ print("ae_dtype: " + str(ae_dtype))
208
+ print("gamma_correction: " + str(gamma_correction))
209
+ print("linear_CFG: " + str(linear_CFG))
210
+ print("linear_s_stage2: " + str(linear_s_stage2))
211
+ print("spt_linear_CFG: " + str(spt_linear_CFG))
212
+ print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
213
+ print("model_select: " + str(model_select))
214
+ print("GPU time allocation: " + str(allocation) + " min")
215
+ print("output_format: " + str(output_format))
216
+
217
+ input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
218
+
219
+ if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
220
+ gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp or *.heic.')
221
+ return None, None, None, None
222
+
223
+ if output_format == "input":
224
+ if noisy_image is None:
225
+ output_format = "png"
226
+ else:
227
+ output_format = input_format
228
+ print("final output_format: " + str(output_format))
229
+
230
+ if prompt is None:
231
+ prompt = ""
232
+
233
+ if a_prompt is None:
234
+ a_prompt = ""
235
+
236
+ if n_prompt is None:
237
+ n_prompt = ""
238
+
239
+ if prompt != "" and a_prompt != "":
240
+ a_prompt = prompt + ", " + a_prompt
241
+ else:
242
+ a_prompt = prompt + a_prompt
243
+ print("Final prompt: " + str(a_prompt))
244
+
245
+ denoise_image = np.array(Image.open(noisy_image if denoise_image is None else denoise_image))
246
+
247
+ if rotation == 90:
248
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
249
+ elif rotation == 180:
250
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
251
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
252
+ elif rotation == -90:
253
+ denoise_image = np.array(list(zip(*denoise_image))[::-1])
254
+
255
+ if 1 < downscale:
256
+ input_height, input_width, input_channel = denoise_image.shape
257
+ denoise_image = np.array(Image.fromarray(denoise_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
258
+
259
+ denoise_image = HWC3(denoise_image)
260
+
261
+ if torch.cuda.device_count() == 0:
262
+ gr.Warning('Set this space to GPU config to make it work.')
263
+ return [noisy_image, denoise_image], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = [denoise_image]), None, gr.update(visible=True)
264
+
265
+ if model_select != model.current_model:
266
+ print('load ' + model_select)
267
+ if model_select == 'v0-Q':
268
+ model.load_state_dict(ckpt_Q, strict=False)
269
+ elif model_select == 'v0-F':
270
+ model.load_state_dict(ckpt_F, strict=False)
271
+ model.current_model = model_select
272
+
273
+ model.ae_dtype = convert_dtype(ae_dtype)
274
+ model.model.dtype = convert_dtype(diff_dtype)
275
+
276
+ return restore_on_gpu(
277
+ noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
278
+ )
279
+
280
+ def get_duration(
281
+ noisy_image,
282
+ input_image,
283
+ prompt,
284
+ a_prompt,
285
+ n_prompt,
286
+ num_samples,
287
+ min_size,
288
+ downscale,
289
+ upscale,
290
+ edm_steps,
291
+ s_stage1,
292
+ s_stage2,
293
+ s_cfg,
294
+ randomize_seed,
295
+ seed,
296
+ s_churn,
297
+ s_noise,
298
+ color_fix_type,
299
+ diff_dtype,
300
+ ae_dtype,
301
+ gamma_correction,
302
+ linear_CFG,
303
+ linear_s_stage2,
304
+ spt_linear_CFG,
305
+ spt_linear_s_stage2,
306
+ model_select,
307
+ output_format,
308
+ allocation
309
+ ):
310
+ return allocation
311
+
312
+ @spaces.GPU(duration=get_duration)
313
+ def restore_on_gpu(
314
+ noisy_image,
315
+ input_image,
316
+ prompt,
317
+ a_prompt,
318
+ n_prompt,
319
+ num_samples,
320
+ min_size,
321
+ downscale,
322
+ upscale,
323
+ edm_steps,
324
+ s_stage1,
325
+ s_stage2,
326
+ s_cfg,
327
+ randomize_seed,
328
+ seed,
329
+ s_churn,
330
+ s_noise,
331
+ color_fix_type,
332
+ diff_dtype,
333
+ ae_dtype,
334
+ gamma_correction,
335
+ linear_CFG,
336
+ linear_s_stage2,
337
+ spt_linear_CFG,
338
+ spt_linear_s_stage2,
339
+ model_select,
340
+ output_format,
341
+ allocation
342
+ ):
343
+ start = time.time()
344
+ print('restore ==>>')
345
+
346
+ torch.cuda.set_device(SUPIR_device)
347
+
348
+ with torch.no_grad():
349
+ input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
350
+ LQ = np.array(input_image) / 255.0
351
+ LQ = np.power(LQ, gamma_correction)
352
+ LQ *= 255.0
353
+ LQ = LQ.round().clip(0, 255).astype(np.uint8)
354
+ LQ = LQ / 255 * 2 - 1
355
+ LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
356
+ captions = ['']
357
+
358
+ samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
359
+ s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
360
+ num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
361
+ use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
362
+ cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
363
+
364
+ x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
365
+ 0, 255).astype(np.uint8)
366
+ results = [x_samples[i] for i in range(num_samples)]
367
+ torch.cuda.empty_cache()
368
+
369
+ # All the results have the same size
370
+ input_height, input_width, input_channel = np.array(input_image).shape
371
+ result_height, result_width, result_channel = np.array(results[0]).shape
372
+
373
+ print('<<== restore')
374
+ end = time.time()
375
+ secondes = int(end - start)
376
+ minutes = math.floor(secondes / 60)
377
+ secondes = secondes - (minutes * 60)
378
+ hours = math.floor(minutes / 60)
379
+ minutes = minutes - (hours * 60)
380
+ information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
381
+ "If you don't get the image you wanted, add more details in the « Image description ». " + \
382
+ "Wait " + str(allocation) + " min before a new run to avoid quota penalty or use another computer. " + \
383
+ "The image" + (" has" if len(results) == 1 else "s have") + " been generated in " + \
384
+ ((str(hours) + " h, ") if hours != 0 else "") + \
385
+ ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
386
+ str(secondes) + " sec. " + \
387
+ "The new image resolution is " + str(result_width) + \
388
+ " pixels large and " + str(result_height) + \
389
+ " pixels high, so a resolution of " + f'{result_width * result_height:,}' + " pixels."
390
+ print(information)
391
+ try:
392
+ print("Initial resolution: " + f'{input_width * input_height:,}')
393
+ print("Final resolution: " + f'{result_width * result_height:,}')
394
+ print("edm_steps: " + str(edm_steps))
395
+ print("num_samples: " + str(num_samples))
396
+ print("downscale: " + str(downscale))
397
+ print("Estimated minutes: " + f'{(((result_width * result_height**(1/1.75)) * input_width * input_height * (edm_steps**(1/2)) * (num_samples**(1/2.5)))**(1/2.5)) / 25000:,}')
398
+ except Exception as e:
399
+ print('Exception of Estimation')
400
+
401
+ # Only one image can be shown in the slider
402
+ return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True), gr.update(visible=True)
403
+
404
+ def load_and_reset(param_setting):
405
+ print('load_and_reset ==>>')
406
+ if torch.cuda.device_count() == 0:
407
+ gr.Warning('Set this space to GPU config to make it work.')
408
+ return None, None, None, None, None, None, None, None, None, None, None, None, None, None
409
+ edm_steps = default_setting.edm_steps
410
+ s_stage2 = 1.0
411
+ s_stage1 = -1.0
412
+ s_churn = 5
413
+ s_noise = 1.003
414
+ a_prompt = 'Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - ' \
415
+ 'realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore ' \
416
+ 'detailing, hyper sharpness, perfect without deformations.'
417
+ n_prompt = 'painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, ' \
418
+ '3D render, unreal engine, blurring, dirty, messy, worst quality, low quality, frames, watermark, ' \
419
+ 'signature, jpeg artifacts, deformed, lowres, over-smooth'
420
+ color_fix_type = 'Wavelet'
421
+ spt_linear_s_stage2 = 0.0
422
+ linear_s_stage2 = False
423
+ linear_CFG = True
424
+ if param_setting == "Quality":
425
+ s_cfg = default_setting.s_cfg_Quality
426
+ spt_linear_CFG = default_setting.spt_linear_CFG_Quality
427
+ model_select = "v0-Q"
428
+ elif param_setting == "Fidelity":
429
+ s_cfg = default_setting.s_cfg_Fidelity
430
+ spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
431
+ model_select = "v0-F"
432
+ else:
433
+ raise NotImplementedError
434
+ gr.Info('The parameters are reset.')
435
+ print('<<== load_and_reset')
436
+ return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
437
+ linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select
438
+
439
+ def log_information(result_gallery):
440
+ print('log_information')
441
+ if result_gallery is not None:
442
+ for i, result in enumerate(result_gallery):
443
+ print(result[0])
444
+
445
+ def on_select_result(result_slider, result_gallery, evt: gr.SelectData):
446
+ print('on_select_result')
447
+ if result_gallery is not None:
448
+ for i, result in enumerate(result_gallery):
449
+ print(result[0])
450
+ return [result_slider[0], result_gallery[evt.index][0]]
451
+
452
+ title_html = """
453
+ <h1><center>SUPIR</center></h1>
454
+ <big><center>Upscale your images up to x10 for free, without an account or watermark, and download the result</center></big>
455
+ <center><big><big>🤸<big><big><big><big><big><big>🤸</big></big></big></big></big></big></big></big></center>
456
+
457
+ <p>This is an online demo of SUPIR, a model-scaling approach to photo-realistic image restoration.
458
+ The content added by SUPIR is <b><u>imagination, not real-world information</u></b>.
459
+ SUPIR is for beauty and illustration only.
460
+ Most processes take a few minutes.
461
+ If you want to upscale AI-generated images, note that the <i>PixArt Sigma</i> space can directly generate 5984x5984 images.
462
+ Due to Gradio issues, the generated image is slightly less saturated than the original.
463
+ Please leave a <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">message in discussion</a> if you encounter issues.
464
+ You can also use <a href="https://huggingface.co/spaces/gokaygokay/AuraSR">AuraSR</a> to upscale x4.
465
+
466
+ <p><center><a href="https://arxiv.org/abs/2401.13627">Paper</a> &emsp; <a href="http://supir.xpixel.group/">Project Page</a> &emsp; <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a></center></p>
467
+ <p><center><a style="display:inline-block" href='https://github.com/Fanghua-Yu/SUPIR'><img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/Fanghua-Yu/SUPIR?style=social"></a></center></p>
468
+ """
469
+
470
+
471
+ claim_md = """
472
+ ## **Piracy**
473
+ The images are not stored, but the logs are kept for one month.
474
+ ## **How to get SUPIR**
475
+ You can get SUPIR on HuggingFace by [duplicating this space](https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true) and setting a GPU.
476
+ You can also install SUPIR on your computer following [this tutorial](https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai).
477
+ You can install _Pinokio_ on your computer and then install _SUPIR_ into it. It should be quite easy if you have an Nvidia GPU.
478
+ ## **Terms of use**
479
+ By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please submit a feedback to us if you get any inappropriate answer! We will collect those to keep improving our models. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
480
+ ## **License**
481
+ The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/Fanghua-Yu/SUPIR) of SUPIR.
482
+ """
483
+
484
+ # Gradio interface
485
+ with gr.Blocks() as interface:
486
+ if torch.cuda.device_count() == 0:
487
+ with gr.Row():
488
+ gr.HTML("""
489
+ <p style="background-color: red;"><big><big><big><b>⚠️To use SUPIR, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
490
+
491
+ You can't use SUPIR directly here because this space runs on a CPU, which is not enough for SUPIR. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">feedback</a> if you have issues.
492
+ </big></big></big></p>
493
+ """)
494
+ gr.HTML(title_html)
495
+
496
+ input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.jpg, *.gif, *.bmp, *.heic)", show_label=True, type="filepath", height=600, elem_id="image-input")
497
+ rotation = gr.Radio([["No rotation", 0], ["⤵ Rotate +90°", 90], ["↩ Return 180°", 180], ["⤴ Rotate -90°", -90]], label="Orientation correction", info="Will apply the following rotation before restoring the image; the AI needs a good orientation to understand the content", value=0, interactive=True, visible=False)
498
+ with gr.Group():
499
+ prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
500
+ prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/badayvedat/LLaVA'"'>LlaVa space</a> to auto-generate the description of your image.")
501
+ upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
502
+ output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extension", value="input", interactive=True)
503
+ allocation = gr.Slider(label="GPU allocation time (in seconds)", info="lower=May abort run, higher=Quota penalty for next runs", value=179, minimum=59, maximum=320, step=1)
504
+
505
+ with gr.Accordion("Pre-denoising (optional)", open=False):
506
+ gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
507
+ denoise_button = gr.Button(value="Pre-denoise")
508
+ denoise_image = gr.Image(label="Denoised image", show_label=True, type="filepath", sources=[], interactive = False, height=600, elem_id="image-s1")
509
+ denoise_information = gr.HTML(value="If present, the denoised image will be used for the restoration instead of the input image.", visible=False)
510
+
511
+ with gr.Accordion("Advanced options", open=False):
512
+ a_prompt = gr.Textbox(label="Additional image description",
513
+ info="Completes the main image description",
514
+ value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
515
+ 'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
516
+ 'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, clothing fabric detailing, '
517
+ 'hyper sharpness, perfect without deformations.',
518
+ lines=3)
519
+ n_prompt = gr.Textbox(label="Negative image description",
520
+ info="Disambiguate by listing what the image does NOT represent",
521
+ value='painting, oil painting, illustration, drawing, art, sketch, anime, '
522
+ 'cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, '
523
+ 'worst quality, low quality, frames, watermark, signature, jpeg artifacts, '
524
+ 'deformed, lowres, over-smooth',
525
+ lines=3)
526
+ edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details; too many steps create a checker effect", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
527
+ num_samples = gr.Slider(label="Num Samples", info="Number of generated results", minimum=1, maximum=4 if not args.use_image_slider else 1
528
+ , value=1, step=1)
529
+ min_size = gr.Slider(label="Minimum size", info="Minimum height, minimum width of the result", minimum=32, maximum=4096, value=1024, step=32)
530
+ downscale = gr.Radio([["/1", 1], ["/2", 2], ["/3", 3], ["/4", 4], ["/5", 5], ["/6", 6], ["/7", 7], ["/8", 8], ["/9", 9], ["/10", 10]], label="Pre-downscale factor", info="Reducing a blurred image reduces the process time", value=1, interactive=True)
531
+ with gr.Row():
532
+ with gr.Column():
533
+ model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
534
+ interactive=True)
535
+ with gr.Column():
536
+ color_fix_type = gr.Radio([["None", "None"], ["AdaIn (improve as a photo)", "AdaIn"], ["Wavelet (for JPEG artifacts)", "Wavelet"]], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="AdaIn",
537
+ interactive=True)
538
+ s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
539
+ value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
540
+ s_stage2 = gr.Slider(label="Restoring Guidance Strength", minimum=0., maximum=1., value=1., step=0.05)
541
+ s_stage1 = gr.Slider(label="Pre-denoising Guidance Strength", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0)
542
+ s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1)
543
+ s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001)
544
+ with gr.Row():
545
+ with gr.Column():
546
+ linear_CFG = gr.Checkbox(label="Linear CFG", value=True)
547
+ spt_linear_CFG = gr.Slider(label="CFG Start", minimum=1.0,
548
+ maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5)
549
+ with gr.Column():
550
+ linear_s_stage2 = gr.Checkbox(label="Linear Restoring Guidance", value=False)
551
+ spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0.,
552
+ maximum=1., value=0., step=0.05)
553
+ with gr.Column():
554
+ diff_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["fp16 (medium)", "fp16"], ["bf16 (speed)", "bf16"]], label="Diffusion Data Type", value="fp32",
555
+ interactive=True)
556
+ with gr.Column():
557
+ ae_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["bf16 (speed)", "bf16"]], label="Auto-Encoder Data Type", value="fp32",
558
+ interactive=True)
559
+ randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, the result is always different")
+ seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
+ with gr.Group():
+ param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Preset", value = "Quality")
+ restart_button = gr.Button(value="Apply preset")
+
+ with gr.Column():
+ diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id = "process_button")
+ reset_btn = gr.Button(value="🧹 Reset page", variant="stop", elem_id="reset_button", visible = False)
+
+ warning = gr.HTML(value = "<center><big>Your computer must <u>not</u> enter standby mode.</big><br/>On Chrome, you can force a tab to stay awake at <code>chrome://discards/</code></center>", visible = False)
+ restore_information = gr.HTML(value = "Restart the process to get another result.", visible = False)
+ result_slider = ImageSlider(label = 'Comparator', show_label = False, interactive = False, elem_id = "slider1", show_download_button = False)
+ result_gallery = gr.Gallery(label = 'Downloadable results', show_label = True, interactive = False, elem_id = "gallery1")
+
574
+ gr.Examples(
+ examples = [
+ [
+ "./Examples/Example1.png",
+ 0,
+ None,
+ "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
+ 2,
+ 1024,
+ 1,
+ 8,
+ 100,
+ -1,
+ 1,
+ 7.5,
+ False,
+ 42,
+ 5,
+ 1.003,
+ "AdaIn",
+ "fp16",
+ "bf16",
+ 1.0,
+ True,
+ 4,
+ False,
+ 0.,
+ "v0-Q",
+ "input",
+ 179
+ ],
607
+ [
+ "./Examples/Example2.jpeg",
+ 0,
+ None,
+ "La cabeza de un gato atigrado, en una casa, fotorrealista, 8k, extremadamente detallada",
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
+ 1,
+ 1024,
+ 1,
+ 1,
+ 200,
+ -1,
+ 1,
+ 7.5,
+ False,
+ 42,
+ 5,
+ 1.003,
+ "Wavelet",
+ "fp16",
+ "bf16",
+ 1.0,
+ True,
+ 4,
+ False,
+ 0.,
+ "v0-Q",
+ "input",
+ 179
+ ],
638
+ [
+ "./Examples/Example3.webp",
+ 0,
+ None,
+ "A red apple",
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
+ 1,
+ 1024,
+ 1,
+ 1,
+ 200,
+ -1,
+ 1,
+ 7.5,
+ False,
+ 42,
+ 5,
+ 1.003,
+ "Wavelet",
+ "fp16",
+ "bf16",
+ 1.0,
+ True,
+ 4,
+ False,
+ 0.,
+ "v0-Q",
+ "input",
+ 179
+ ],
669
+ [
+ "./Examples/Example3.webp",
+ 0,
+ None,
+ "A red marble",
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
+ 1,
+ 1024,
+ 1,
+ 1,
+ 200,
+ -1,
+ 1,
+ 7.5,
+ False,
+ 42,
+ 5,
+ 1.003,
+ "Wavelet",
+ "fp16",
+ "bf16",
+ 1.0,
+ True,
+ 4,
+ False,
+ 0.,
+ "v0-Q",
+ "input",
+ 179
+ ],
+ ],
701
+ run_on_click = True,
+ fn = stage2_process,
+ inputs = [
+ input_image,
+ rotation,
+ denoise_image,
+ prompt,
+ a_prompt,
+ n_prompt,
+ num_samples,
+ min_size,
+ downscale,
+ upscale,
+ edm_steps,
+ s_stage1,
+ s_stage2,
+ s_cfg,
+ randomize_seed,
+ seed,
+ s_churn,
+ s_noise,
+ color_fix_type,
+ diff_dtype,
+ ae_dtype,
+ gamma_correction,
+ linear_CFG,
+ linear_s_stage2,
+ spt_linear_CFG,
+ spt_linear_s_stage2,
+ model_select,
+ output_format,
+ allocation
+ ],
+ outputs = [
+ result_slider,
+ result_gallery,
+ restore_information,
+ reset_btn
+ ],
+ cache_examples = False,
+ )
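Each example row above is positional and must stay aligned, element for element, with the 29-component `inputs` list of the same `gr.Examples` call. A minimal sketch of a helper that could build such rows by name (hypothetical, not part of the committed app.py; the key list simply mirrors the order of `inputs`):

```python
# Hypothetical helper, not in the commit: keeps example rows aligned with the
# positional `inputs` list that gr.Examples and stage2_process expect.
INPUT_ORDER = [
    "input_image", "rotation", "denoise_image", "prompt", "a_prompt", "n_prompt",
    "num_samples", "min_size", "downscale", "upscale", "edm_steps", "s_stage1",
    "s_stage2", "s_cfg", "randomize_seed", "seed", "s_churn", "s_noise",
    "color_fix_type", "diff_dtype", "ae_dtype", "gamma_correction", "linear_CFG",
    "linear_s_stage2", "spt_linear_CFG", "spt_linear_s_stage2", "model_select",
    "output_format", "allocation",
]

def make_example_row(**values):
    # Fail fast on a missing or misspelled field instead of silently shifting the row.
    missing = [name for name in INPUT_ORDER if name not in values]
    unknown = [name for name in values if name not in INPUT_ORDER]
    if missing or unknown:
        raise ValueError(f"missing={missing} unknown={unknown}")
    return [values[name] for name in INPUT_ORDER]
```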
742
+
+ with gr.Row():
+ gr.Markdown(claim_md)
+
+ input_image.upload(fn = check_upload, inputs = [
+ input_image
+ ], outputs = [
+ rotation
+ ], queue = False, show_progress = False)
+
+ denoise_button.click(fn = check_and_update, inputs = [
+ input_image
+ ], outputs = [warning], queue = False, show_progress = False).success(fn = stage1_process, inputs = [
+ input_image,
+ gamma_correction,
+ diff_dtype,
+ ae_dtype
+ ], outputs=[
+ denoise_image,
+ denoise_information
+ ])
+
764
+ diffusion_button.click(fn = update_seed, inputs = [
+ randomize_seed,
+ seed
+ ], outputs = [
+ seed
+ ], queue = False, show_progress = False).then(fn = check_and_update, inputs = [
+ input_image
+ ], outputs = [warning], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
+ input_image,
+ rotation,
+ denoise_image,
+ prompt,
+ a_prompt,
+ n_prompt,
+ num_samples,
+ min_size,
+ downscale,
+ upscale,
+ edm_steps,
+ s_stage1,
+ s_stage2,
+ s_cfg,
+ randomize_seed,
+ seed,
+ s_churn,
+ s_noise,
+ color_fix_type,
+ diff_dtype,
+ ae_dtype,
+ gamma_correction,
+ linear_CFG,
+ linear_s_stage2,
+ spt_linear_CFG,
+ spt_linear_s_stage2,
+ model_select,
+ output_format,
+ allocation
+ ], outputs = [
+ result_slider,
+ result_gallery,
+ restore_information,
+ reset_btn
+ ]).success(fn = log_information, inputs = [
+ result_gallery
+ ], outputs = [], queue = False, show_progress = False)
809
+
+ result_gallery.change(on_select_result, [result_slider, result_gallery], result_slider)
+ result_gallery.select(on_select_result, [result_slider, result_gallery], result_slider)
+
+ restart_button.click(fn = load_and_reset, inputs = [
+ param_setting
+ ], outputs = [
+ edm_steps,
+ s_cfg,
+ s_stage2,
+ s_stage1,
+ s_churn,
+ s_noise,
+ a_prompt,
+ n_prompt,
+ color_fix_type,
+ linear_CFG,
+ linear_s_stage2,
+ spt_linear_CFG,
+ spt_linear_s_stage2,
+ model_select
+ ])
+
832
+ reset_btn.click(fn = reset, inputs = [], outputs = [
+ input_image,
+ rotation,
+ denoise_image,
+ prompt,
+ a_prompt,
+ n_prompt,
+ num_samples,
+ min_size,
+ downscale,
+ upscale,
+ edm_steps,
+ s_stage1,
+ s_stage2,
+ s_cfg,
+ randomize_seed,
+ seed,
+ s_churn,
+ s_noise,
+ color_fix_type,
+ diff_dtype,
+ ae_dtype,
+ gamma_correction,
+ linear_CFG,
+ linear_s_stage2,
+ spt_linear_CFG,
+ spt_linear_s_stage2,
+ model_select,
+ output_format,
+ allocation
+ ], queue = False, show_progress = False)
+
+ interface.queue(10).launch()
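The wiring above chains Gradio listeners so that `update_seed` runs first, `check_and_update` then shows the standby warning, and `stage2_process` only starts if that check succeeds. A minimal, self-contained sketch of the same `.click().then().success()` pattern, with illustrative component and handler names rather than the app's:

```python
# Minimal sketch of the chained-event pattern used above (hypothetical handlers).
import random
import gradio as gr

def update_seed(randomize, seed):
    # Re-roll the seed only when the checkbox is ticked.
    return random.randint(0, 2**32 - 1) if randomize else int(seed)

def check_input(text):
    # Raising gr.Error aborts the chain, so the heavy step never runs.
    if not text:
        raise gr.Error("Please provide an input first.")
    return gr.update(visible=True)

def heavy_process(text, seed):
    return f"processed {text!r} with seed {seed}"

with gr.Blocks() as demo:
    text = gr.Textbox(label="Input")
    randomize = gr.Checkbox(label="Randomize seed", value=True)
    seed = gr.Slider(0, 2**32 - 1, step=1, label="Seed")
    warning = gr.HTML("<p>Keep this tab open.</p>", visible=False)
    result = gr.Textbox(label="Result")
    run = gr.Button("Run")

    # .then() always queues the next step; .success() only fires if the
    # previous handler returned without raising.
    (run.click(update_seed, [randomize, seed], [seed], queue=False)
        .then(check_input, [text], [warning], queue=False)
        .success(heavy_process, [text, seed], [result]))

demo.queue(max_size=10).launch()
```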
requirements.txt CHANGED
@@ -1,15 +1,48 @@
- accelerate
- transformers
- sentencepiece
- pillow
- numpy
- torchvision
- huggingface_hub
- spaces
- opencv-python
- imageio
- imageio-ffmpeg
- einops
- timm
- av
- git+https://github.com/huggingface/diffusers.git@main
+ pydantic==2.10.6
+ fastapi==0.115.8
+ gradio_imageslider==0.0.20
+ gradio_client==1.7.0
+ numpy==1.26.4
+ requests==2.32.3
+ sentencepiece==0.2.0
+ tokenizers==0.19.1
+ torchvision==0.18.1
+ uvicorn==0.30.1
+ wandb==0.17.4
+ httpx==0.27.0
+ transformers==4.42.4
+ accelerate==0.32.1
+ scikit-learn==1.5.1
+ einops==0.8.0
+ einops-exts==0.0.4
+ timm==1.0.7
+ openai-clip==1.0.1
+ fsspec==2024.6.1
+ kornia==0.7.3
+ matplotlib==3.9.1
+ ninja==1.11.1.1
+ omegaconf==2.3.0
+ opencv-python==4.10.0.84
+ pandas==2.2.2
+ pillow==10.4.0
+ pytorch-lightning==2.3.3
+ PyYAML==6.0.1
+ scipy==1.14.0
+ tqdm==4.66.4
+ triton==2.3.1
+ urllib3==2.2.2
+ webdataset==0.2.86
+ xformers==0.0.27
+ facexlib==0.3.0
+ k-diffusion==0.1.1.post1
+ diffusers==0.30.0
+ pillow-heif==0.18.0
+
+ open-clip-torch==2.24.0
+
+ torchaudio
+ easydict==1.13
+ fairscale==0.4.13
+ torchsde==0.2.6
+ huggingface_hub==0.23.3
+ gradio
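The pinned list above replaces the previously unpinned dependencies, so it can be worth confirming after installation that the resolver kept the pins. A small sketch (the package names below are just a sample of the pinned set):

```python
# Sanity-check a few of the pinned packages after `pip install -r requirements.txt`.
from importlib.metadata import PackageNotFoundError, version

for name in ("gradio", "diffusers", "transformers", "xformers", "torchvision"):
    try:
        print(f"{name}=={version(name)}")
    except PackageNotFoundError:
        print(f"{name} is not installed")
```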