Fabrice-TIERCELIN committed on
Commit 297aed0 · verified · 1 Parent(s): 8b0de19

Upload 3 files

Files changed (3)
  1. README.md +21 -14
  2. app.py +864 -488
  3. requirements.txt +46 -21
README.md CHANGED
@@ -1,14 +1,21 @@
- ---
- title: FramePack F1 + V2V + EF
- emoji: 👽
- colorFrom: pink
- colorTo: gray
- sdk: gradio
- sdk_version: 5.29.0
- app_file: app_v2v.py
- pinned: true
- license: apache-2.0
- short_description: fast video generation from images & text
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: SUPIR Image Upscaler
+ sdk: gradio
+ emoji: 📷
+ sdk_version: 4.38.1
+ app_file: app.py
+ license: mit
+ colorFrom: blue
+ colorTo: pink
+ tags:
+ - Upscaling
+ - Restoring
+ - Image-to-Image
+ - Image-2-Image
+ - Img-to-Img
+ - Img-2-Img
+ - language models
+ - LLMs
+ short_description: Restore blurred or small images with prompt
+ suggested_hardware: zero-a10g
+ ---
app.py CHANGED
@@ -1,488 +1,864 @@
1
- from diffusers_helper.hf_login import login
2
-
3
- import os
4
-
5
- os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
6
-
7
- import gradio as gr
8
- import torch
9
- import traceback
10
- import einops
11
- import safetensors.torch as sf
12
- import numpy as np
13
- import math
14
- import spaces
15
-
16
- from PIL import Image
17
- from diffusers import AutoencoderKLHunyuanVideo
18
- from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
19
- from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
20
- from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
21
- from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
22
- from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
23
- from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
24
- from diffusers_helper.thread_utils import AsyncStream, async_run
25
- from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
26
- from transformers import SiglipImageProcessor, SiglipVisionModel
27
- from diffusers_helper.clip_vision import hf_clip_vision_encode
28
- from diffusers_helper.bucket_tools import find_nearest_bucket
29
-
30
-
31
- free_mem_gb = get_cuda_free_memory_gb(gpu)
32
- high_vram = free_mem_gb > 80
33
-
34
- print(f'Free VRAM {free_mem_gb} GB')
35
- print(f'High-VRAM Mode: {high_vram}')
36
-
37
- text_encoder = LlamaModel.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
38
- text_encoder_2 = CLIPTextModel.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
39
- tokenizer = LlamaTokenizerFast.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='tokenizer')
40
- tokenizer_2 = CLIPTokenizer.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='tokenizer_2')
41
- vae = AutoencoderKLHunyuanVideo.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
42
-
43
- feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
44
- image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
45
-
46
- # quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
47
- # transformer = HunyuanVideoTransformer3DModelPacked.from_single_file("https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", torch_dtype=torch.bfloat16)
48
- # transformer = HunyuanVideoTransformer3DModelPacked.from_single_file('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
49
- transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
50
-
51
- vae.eval()
52
- text_encoder.eval()
53
- text_encoder_2.eval()
54
- image_encoder.eval()
55
- transformer.eval()
56
-
57
- if not high_vram:
58
- vae.enable_slicing()
59
- vae.enable_tiling()
60
-
61
- transformer.high_quality_fp32_output_for_inference = True
62
- print('transformer.high_quality_fp32_output_for_inference = True')
63
-
64
- transformer.to(dtype=torch.bfloat16)
65
- vae.to(dtype=torch.float16)
66
- image_encoder.to(dtype=torch.float16)
67
- text_encoder.to(dtype=torch.float16)
68
- text_encoder_2.to(dtype=torch.float16)
69
-
70
- vae.requires_grad_(False)
71
- text_encoder.requires_grad_(False)
72
- text_encoder_2.requires_grad_(False)
73
- image_encoder.requires_grad_(False)
74
- transformer.requires_grad_(False)
75
-
76
- if not high_vram:
77
- # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
78
- DynamicSwapInstaller.install_model(transformer, device=gpu)
79
- DynamicSwapInstaller.install_model(text_encoder, device=gpu)
80
- else:
81
- text_encoder.to(gpu)
82
- text_encoder_2.to(gpu)
83
- image_encoder.to(gpu)
84
- vae.to(gpu)
85
- transformer.to(gpu)
86
-
87
- stream = AsyncStream()
88
-
89
- outputs_folder = './outputs/'
90
- os.makedirs(outputs_folder, exist_ok=True)
91
-
92
- examples = [
93
- ["img_examples/1.png", "The girl dances gracefully, with clear movements, full of charm.",],
94
- ["img_examples/2.jpg", "The man dances flamboyantly, swinging his hips and striking bold poses with dramatic flair."],
95
- ["img_examples/3.png", "The woman dances elegantly among the blossoms, spinning slowly with flowing sleeves and graceful hand movements."],
96
- ]
97
-
98
- def generate_examples(input_image, prompt):
99
-
100
- t2v=False
101
- n_prompt=""
102
- seed=31337
103
- total_second_length=5
104
- latent_window_size=9
105
- steps=25
106
- cfg=1.0
107
- gs=10.0
108
- rs=0.0
109
- gpu_memory_preservation=6
110
- use_teacache=True
111
- mp4_crf=16
112
-
113
- global stream
114
-
115
- # assert input_image is not None, 'No input image!'
116
- if t2v:
117
- default_height, default_width = 640, 640
118
- input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
119
- print("No input image provided. Using a blank white image.")
120
-
121
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
122
-
123
- stream = AsyncStream()
124
-
125
- async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
126
-
127
- output_filename = None
128
-
129
- while True:
130
- flag, data = stream.output_queue.next()
131
-
132
- if flag == 'file':
133
- output_filename = data
134
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
135
-
136
- if flag == 'progress':
137
- preview, desc, html = data
138
- yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
139
-
140
- if flag == 'end':
141
- yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
142
- break
143
-
144
-
145
-
146
- @torch.no_grad()
147
- def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
148
- total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
149
- total_latent_sections = int(max(round(total_latent_sections), 1))
150
-
151
- job_id = generate_timestamp()
152
-
153
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
154
-
155
- try:
156
- # Clean GPU
157
- if not high_vram:
158
- unload_complete_models(
159
- text_encoder, text_encoder_2, image_encoder, vae, transformer
160
- )
161
-
162
- # Text encoding
163
-
164
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
165
-
166
- if not high_vram:
167
- fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
168
- load_model_as_complete(text_encoder_2, target_device=gpu)
169
-
170
- llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
171
-
172
- if cfg == 1:
173
- llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
174
- else:
175
- llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
176
-
177
- llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
178
- llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
179
-
180
- # Processing input image
181
-
182
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
183
-
184
- H, W, C = input_image.shape
185
- height, width = find_nearest_bucket(H, W, resolution=640)
186
- input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
187
-
188
- Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
189
-
190
- input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
191
- input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
192
-
193
- # VAE encoding
194
-
195
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
196
-
197
- if not high_vram:
198
- load_model_as_complete(vae, target_device=gpu)
199
-
200
- start_latent = vae_encode(input_image_pt, vae)
201
-
202
- # CLIP Vision
203
-
204
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
205
-
206
- if not high_vram:
207
- load_model_as_complete(image_encoder, target_device=gpu)
208
-
209
- image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
210
- image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
211
-
212
- # Dtype
213
-
214
- llama_vec = llama_vec.to(transformer.dtype)
215
- llama_vec_n = llama_vec_n.to(transformer.dtype)
216
- clip_l_pooler = clip_l_pooler.to(transformer.dtype)
217
- clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
218
- image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
219
-
220
- # Sampling
221
-
222
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
223
-
224
- rnd = torch.Generator("cpu").manual_seed(seed)
225
-
226
- history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
227
- history_pixels = None
228
-
229
- history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
230
- total_generated_latent_frames = 1
231
-
232
- for section_index in range(total_latent_sections):
233
- if stream.input_queue.top() == 'end':
234
- stream.output_queue.push(('end', None))
235
- return
236
-
237
- print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
238
-
239
- if not high_vram:
240
- unload_complete_models()
241
- move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
242
-
243
- if use_teacache:
244
- transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
245
- else:
246
- transformer.initialize_teacache(enable_teacache=False)
247
-
248
- def callback(d):
249
- preview = d['denoised']
250
- preview = vae_decode_fake(preview)
251
-
252
- preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
253
- preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
254
-
255
- if stream.input_queue.top() == 'end':
256
- stream.output_queue.push(('end', None))
257
- raise KeyboardInterrupt('User ends the task.')
258
-
259
- current_step = d['i'] + 1
260
- percentage = int(100.0 * current_step / steps)
261
- hint = f'Sampling {current_step}/{steps}'
262
- desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
263
- stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
264
- return
265
-
266
- indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
267
- clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
268
- clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
269
-
270
- clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
271
- clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
272
-
273
- generated_latents = sample_hunyuan(
274
- transformer=transformer,
275
- sampler='unipc',
276
- width=width,
277
- height=height,
278
- frames=latent_window_size * 4 - 3,
279
- real_guidance_scale=cfg,
280
- distilled_guidance_scale=gs,
281
- guidance_rescale=rs,
282
- # shift=3.0,
283
- num_inference_steps=steps,
284
- generator=rnd,
285
- prompt_embeds=llama_vec,
286
- prompt_embeds_mask=llama_attention_mask,
287
- prompt_poolers=clip_l_pooler,
288
- negative_prompt_embeds=llama_vec_n,
289
- negative_prompt_embeds_mask=llama_attention_mask_n,
290
- negative_prompt_poolers=clip_l_pooler_n,
291
- device=gpu,
292
- dtype=torch.bfloat16,
293
- image_embeddings=image_encoder_last_hidden_state,
294
- latent_indices=latent_indices,
295
- clean_latents=clean_latents,
296
- clean_latent_indices=clean_latent_indices,
297
- clean_latents_2x=clean_latents_2x,
298
- clean_latent_2x_indices=clean_latent_2x_indices,
299
- clean_latents_4x=clean_latents_4x,
300
- clean_latent_4x_indices=clean_latent_4x_indices,
301
- callback=callback,
302
- )
303
-
304
- total_generated_latent_frames += int(generated_latents.shape[2])
305
- history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
306
-
307
- if not high_vram:
308
- offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
309
- load_model_as_complete(vae, target_device=gpu)
310
-
311
- real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
312
-
313
- if history_pixels is None:
314
- history_pixels = vae_decode(real_history_latents, vae).cpu()
315
- else:
316
- section_latent_frames = latent_window_size * 2
317
- overlapped_frames = latent_window_size * 4 - 3
318
-
319
- current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
320
- history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
321
-
322
- if not high_vram:
323
- unload_complete_models()
324
-
325
- output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
326
-
327
- save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
328
-
329
- print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
330
-
331
- stream.output_queue.push(('file', output_filename))
332
- except:
333
- traceback.print_exc()
334
-
335
- if not high_vram:
336
- unload_complete_models(
337
- text_encoder, text_encoder_2, image_encoder, vae, transformer
338
- )
339
-
340
- stream.output_queue.push(('end', None))
341
- return
342
-
343
- def get_duration(input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
344
- return total_second_length * 60
345
-
346
- @spaces.GPU(duration=get_duration)
347
- def process(input_image, prompt,
348
- t2v=False,
349
- n_prompt="",
350
- seed=31337,
351
- total_second_length=5,
352
- latent_window_size=9,
353
- steps=25,
354
- cfg=1.0,
355
- gs=10.0,
356
- rs=0.0,
357
- gpu_memory_preservation=6,
358
- use_teacache=True,
359
- mp4_crf=16
360
- ):
361
- global stream
362
-
363
- # assert input_image is not None, 'No input image!'
364
- if t2v:
365
- default_height, default_width = 640, 640
366
- input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
367
- print("No input image provided. Using a blank white image.")
368
- else:
369
- composite_rgba_uint8 = input_image["composite"]
370
-
371
- # rgb_uint8 will be (H, W, 3), dtype uint8
372
- rgb_uint8 = composite_rgba_uint8[:, :, :3]
373
- # mask_uint8 will be (H, W), dtype uint8
374
- mask_uint8 = composite_rgba_uint8[:, :, 3]
375
-
376
- # Create background
377
- h, w = rgb_uint8.shape[:2]
378
- # White background, (H, W, 3), dtype uint8
379
- background_uint8 = np.full((h, w, 3), 255, dtype=np.uint8)
380
-
381
- # Normalize mask to range [0.0, 1.0].
382
- alpha_normalized_float32 = mask_uint8.astype(np.float32) / 255.0
383
-
384
- # Expand alpha to 3 channels to match RGB images for broadcasting.
385
- # alpha_mask_float32 will have shape (H, W, 3)
386
- alpha_mask_float32 = np.stack([alpha_normalized_float32] * 3, axis=2)
387
-
388
- # alpha blending
389
- blended_image_float32 = rgb_uint8.astype(np.float32) * alpha_mask_float32 + \
390
- background_uint8.astype(np.float32) * (1.0 - alpha_mask_float32)
391
-
392
- input_image = np.clip(blended_image_float32, 0, 255).astype(np.uint8)
393
-
394
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
395
-
396
- stream = AsyncStream()
397
-
398
- async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
399
-
400
- output_filename = None
401
-
402
- while True:
403
- flag, data = stream.output_queue.next()
404
-
405
- if flag == 'file':
406
- output_filename = data
407
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
408
-
409
- if flag == 'progress':
410
- preview, desc, html = data
411
- yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
412
-
413
- if flag == 'end':
414
- yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
415
- break
416
-
417
-
418
- def end_process():
419
- stream.input_queue.push('end')
420
-
421
-
422
- quick_prompts = [
423
- 'The girl dances gracefully, with clear movements, full of charm.',
424
- 'A character doing some simple body movements.',
425
- ]
426
- quick_prompts = [[x] for x in quick_prompts]
427
-
428
-
429
- css = make_progress_bar_css()
430
- block = gr.Blocks(css=css).queue()
431
- with block:
432
- gr.Markdown('# FramePack Essentials | Experimentation in Progress')
433
- gr.Markdown(f"""### Space is constantly being tinkered with, expect downtime and errors.
434
- """)
435
- with gr.Row():
436
- with gr.Column():
437
- input_image = gr.ImageEditor(type="numpy", label="Image", height=320, brush=gr.Brush(colors=["#ffffff"]))
438
- prompt = gr.Textbox(label="Prompt", value='')
439
- t2v = gr.Checkbox(label="do text-to-video", value=False)
440
- example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
441
- example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
442
-
443
- with gr.Row():
444
- start_button = gr.Button(value="Start Generation")
445
- end_button = gr.Button(value="End Generation", interactive=False)
446
-
447
- total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=5, value=2, step=0.1)
448
- with gr.Group():
449
- with gr.Accordion("Advanced settings", open=False):
450
- use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
451
-
452
- n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False) # Not used
453
- seed = gr.Number(label="Seed", value=31337, precision=0)
454
-
455
-
456
- latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False) # Should not change
457
- steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')
458
-
459
- cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False) # Should not change
460
- gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended.')
461
- rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
462
-
463
- gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
464
-
465
- mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
466
-
467
- with gr.Column():
468
- preview_image = gr.Image(label="Next Latents", height=200, visible=False)
469
- result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
470
- progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
471
- progress_bar = gr.HTML('', elem_classes='no-generating-animation')
472
-
473
- gr.HTML('<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>')
474
-
475
- ips = [input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
476
- start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
477
- end_button.click(fn=end_process)
478
-
479
- # gr.Examples(
480
- # examples,
481
- # inputs=[input_image, prompt],
482
- # outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
483
- # fn=generate_examples,
484
- # cache_examples=True
485
- # )
486
-
487
-
488
- block.launch(ssr_mode=False)
1
+ import os
2
+ import gradio as gr
3
+ import argparse
4
+ import numpy as np
5
+ import torch
6
+ import einops
7
+ import copy
8
+ import math
9
+ import time
10
+ import random
11
+ import spaces
12
+ import re
13
+ import uuid
14
+
15
+ from gradio_imageslider import ImageSlider
16
+ from PIL import Image
17
+ from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
18
+ from huggingface_hub import hf_hub_download
19
+ from pillow_heif import register_heif_opener
20
+
21
+ register_heif_opener()
22
+
23
+ max_64_bit_int = np.iinfo(np.int32).max
24
+
25
+ hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
26
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="sd_xl_base_1.0_0.9vae.safetensors", local_dir="yushan777_SUPIR")
27
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0F.ckpt", local_dir="yushan777_SUPIR")
28
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0Q.ckpt", local_dir="yushan777_SUPIR")
29
+ hf_hub_download(repo_id="RunDiffusion/Juggernaut-XL-Lightning", filename="Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors", local_dir="RunDiffusion_Juggernaut-XL-Lightning")
30
+
31
+ parser = argparse.ArgumentParser()
32
+ parser.add_argument("--opt", type=str, default='options/SUPIR_v0.yaml')
33
+ parser.add_argument("--ip", type=str, default='127.0.0.1')
34
+ parser.add_argument("--port", type=int, default='6688')
35
+ parser.add_argument("--no_llava", action='store_true', default=True)#False
36
+ parser.add_argument("--use_image_slider", action='store_true', default=False)#False
37
+ parser.add_argument("--log_history", action='store_true', default=False)
38
+ parser.add_argument("--loading_half_params", action='store_true', default=False)#False
39
+ parser.add_argument("--use_tile_vae", action='store_true', default=True)#False
40
+ parser.add_argument("--encoder_tile_size", type=int, default=512)
41
+ parser.add_argument("--decoder_tile_size", type=int, default=64)
42
+ parser.add_argument("--load_8bit_llava", action='store_true', default=False)
43
+ args = parser.parse_args()
44
+
45
+ if torch.cuda.device_count() > 0:
46
+ SUPIR_device = 'cuda:0'
47
+
48
+ # Load SUPIR
49
+ model, default_setting = create_SUPIR_model(args.opt, SUPIR_sign='Q', load_default_setting=True)
50
+ if args.loading_half_params:
51
+ model = model.half()
52
+ if args.use_tile_vae:
53
+ model.init_tile_vae(encoder_tile_size=args.encoder_tile_size, decoder_tile_size=args.decoder_tile_size)
54
+ model = model.to(SUPIR_device)
55
+ model.first_stage_model.denoise_encoder_s1 = copy.deepcopy(model.first_stage_model.denoise_encoder)
56
+ model.current_model = 'v0-Q'
57
+ ckpt_Q, ckpt_F = load_QF_ckpt(args.opt)
58
+
59
+ def check_upload(input_image):
60
+ if input_image is None:
61
+ raise gr.Error("Please provide an image to restore.")
62
+ return gr.update(visible = True)
63
+
64
+ def update_seed(is_randomize_seed, seed):
65
+ if is_randomize_seed:
66
+ return random.randint(0, max_64_bit_int)
67
+ return seed
68
+
69
+ def reset():
70
+ return [
71
+ None,
72
+ 0,
73
+ None,
74
+ None,
75
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
76
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
77
+ 1,
78
+ 1024,
79
+ 1,
80
+ 2,
81
+ 50,
82
+ -1.0,
83
+ 1.,
84
+ default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0,
85
+ True,
86
+ random.randint(0, max_64_bit_int),
87
+ 5,
88
+ 1.003,
89
+ "Wavelet",
90
+ "fp32",
91
+ "fp32",
92
+ 1.0,
93
+ True,
94
+ False,
95
+ default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
96
+ 0.,
97
+ "v0-Q",
98
+ "input",
99
+ 179
100
+ ]
101
+
102
+ def check_and_update(input_image):
103
+ if input_image is None:
104
+ raise gr.Error("Please provide an image to restore.")
105
+ return gr.update(visible = True)
106
+
107
+ @spaces.GPU(duration=420)
108
+ def stage1_process(
109
+ input_image,
110
+ gamma_correction,
111
+ diff_dtype,
112
+ ae_dtype
113
+ ):
114
+ print('stage1_process ==>>')
115
+ if torch.cuda.device_count() == 0:
116
+ gr.Warning('Set this space to GPU config to make it work.')
117
+ return None, None
118
+ torch.cuda.set_device(SUPIR_device)
119
+ LQ = HWC3(np.array(Image.open(input_image)))
120
+ LQ = fix_resize(LQ, 512)
121
+ # stage1
122
+ LQ = np.array(LQ) / 255 * 2 - 1
123
+ LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
124
+
125
+ model.ae_dtype = convert_dtype(ae_dtype)
126
+ model.model.dtype = convert_dtype(diff_dtype)
127
+
128
+ LQ = model.batchify_denoise(LQ, is_stage1=True)
129
+ LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
130
+ # gamma correction
131
+ LQ = LQ / 255.0
132
+ LQ = np.power(LQ, gamma_correction)
133
+ LQ *= 255.0
134
+ LQ = LQ.round().clip(0, 255).astype(np.uint8)
135
+ print('<<== stage1_process')
136
+ return LQ, gr.update(visible = True)
137
+
138
+ def stage2_process(*args, **kwargs):
139
+ try:
140
+ return restore_in_Xmin(*args, **kwargs)
141
+ except Exception as e:
142
+ # NO_GPU_MESSAGE_INQUEUE
143
+ print("gradio.exceptions.Error 'No GPU is currently available for you after 60s'")
144
+ print('str(type(e)): ' + str(type(e))) # <class 'gradio.exceptions.Error'>
145
+ print('str(e): ' + str(e)) # You have exceeded your GPU quota...
146
+ try:
147
+ print('e.message: ' + e.message) # No GPU is currently available for you after 60s
148
+ except Exception as e2:
149
+ print('Failure')
150
+ if str(e).startswith("No GPU is currently available for you after 60s"):
151
+ print('Exception identified!!!')
152
+ #if str(type(e)) == "<class 'gradio.exceptions.Error'>":
153
+ #print('Exception of name ' + type(e).__name__)
154
+ raise e
155
+
156
+ def restore_in_Xmin(
157
+ noisy_image,
158
+ rotation,
159
+ denoise_image,
160
+ prompt,
161
+ a_prompt,
162
+ n_prompt,
163
+ num_samples,
164
+ min_size,
165
+ downscale,
166
+ upscale,
167
+ edm_steps,
168
+ s_stage1,
169
+ s_stage2,
170
+ s_cfg,
171
+ randomize_seed,
172
+ seed,
173
+ s_churn,
174
+ s_noise,
175
+ color_fix_type,
176
+ diff_dtype,
177
+ ae_dtype,
178
+ gamma_correction,
179
+ linear_CFG,
180
+ linear_s_stage2,
181
+ spt_linear_CFG,
182
+ spt_linear_s_stage2,
183
+ model_select,
184
+ output_format,
185
+ allocation
186
+ ):
187
+ print("noisy_image:\n" + str(noisy_image))
188
+ print("denoise_image:\n" + str(denoise_image))
189
+ print("rotation: " + str(rotation))
190
+ print("prompt: " + str(prompt))
191
+ print("a_prompt: " + str(a_prompt))
192
+ print("n_prompt: " + str(n_prompt))
193
+ print("num_samples: " + str(num_samples))
194
+ print("min_size: " + str(min_size))
195
+ print("downscale: " + str(downscale))
196
+ print("upscale: " + str(upscale))
197
+ print("edm_steps: " + str(edm_steps))
198
+ print("s_stage1: " + str(s_stage1))
199
+ print("s_stage2: " + str(s_stage2))
200
+ print("s_cfg: " + str(s_cfg))
201
+ print("randomize_seed: " + str(randomize_seed))
202
+ print("seed: " + str(seed))
203
+ print("s_churn: " + str(s_churn))
204
+ print("s_noise: " + str(s_noise))
205
+ print("color_fix_type: " + str(color_fix_type))
206
+ print("diff_dtype: " + str(diff_dtype))
207
+ print("ae_dtype: " + str(ae_dtype))
208
+ print("gamma_correction: " + str(gamma_correction))
209
+ print("linear_CFG: " + str(linear_CFG))
210
+ print("linear_s_stage2: " + str(linear_s_stage2))
211
+ print("spt_linear_CFG: " + str(spt_linear_CFG))
212
+ print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
213
+ print("model_select: " + str(model_select))
214
+ print("GPU time allocation: " + str(allocation) + " min")
215
+ print("output_format: " + str(output_format))
216
+
217
+ input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
218
+
219
+ if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
220
+ gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp or *.heic.')
221
+ return None, None, None, None
222
+
223
+ if output_format == "input":
224
+ if noisy_image is None:
225
+ output_format = "png"
226
+ else:
227
+ output_format = input_format
228
+ print("final output_format: " + str(output_format))
229
+
230
+ if prompt is None:
231
+ prompt = ""
232
+
233
+ if a_prompt is None:
234
+ a_prompt = ""
235
+
236
+ if n_prompt is None:
237
+ n_prompt = ""
238
+
239
+ if prompt != "" and a_prompt != "":
240
+ a_prompt = prompt + ", " + a_prompt
241
+ else:
242
+ a_prompt = prompt + a_prompt
243
+ print("Final prompt: " + str(a_prompt))
244
+
245
+ denoise_image = np.array(Image.open(noisy_image if denoise_image is None else denoise_image))
246
+
247
+ if rotation == 90:
248
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
249
+ elif rotation == 180:
250
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
251
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
252
+ elif rotation == -90:
253
+ denoise_image = np.array(list(zip(*denoise_image))[::-1])
254
+
255
+ if 1 < downscale:
256
+ input_height, input_width, input_channel = denoise_image.shape
257
+ denoise_image = np.array(Image.fromarray(denoise_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
258
+
259
+ denoise_image = HWC3(denoise_image)
260
+
261
+ if torch.cuda.device_count() == 0:
262
+ gr.Warning('Set this space to GPU config to make it work.')
263
+ return [noisy_image, denoise_image], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = [denoise_image]), None, gr.update(visible=True)
264
+
265
+ if model_select != model.current_model:
266
+ print('load ' + model_select)
267
+ if model_select == 'v0-Q':
268
+ model.load_state_dict(ckpt_Q, strict=False)
269
+ elif model_select == 'v0-F':
270
+ model.load_state_dict(ckpt_F, strict=False)
271
+ model.current_model = model_select
272
+
273
+ model.ae_dtype = convert_dtype(ae_dtype)
274
+ model.model.dtype = convert_dtype(diff_dtype)
275
+
276
+ return restore_on_gpu(
277
+ noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
278
+ )
279
+
280
+ def get_duration(
281
+ noisy_image,
282
+ input_image,
283
+ prompt,
284
+ a_prompt,
285
+ n_prompt,
286
+ num_samples,
287
+ min_size,
288
+ downscale,
289
+ upscale,
290
+ edm_steps,
291
+ s_stage1,
292
+ s_stage2,
293
+ s_cfg,
294
+ randomize_seed,
295
+ seed,
296
+ s_churn,
297
+ s_noise,
298
+ color_fix_type,
299
+ diff_dtype,
300
+ ae_dtype,
301
+ gamma_correction,
302
+ linear_CFG,
303
+ linear_s_stage2,
304
+ spt_linear_CFG,
305
+ spt_linear_s_stage2,
306
+ model_select,
307
+ output_format,
308
+ allocation
309
+ ):
310
+ return allocation
311
+
312
+ @spaces.GPU(duration=get_duration)
313
+ def restore_on_gpu(
314
+ noisy_image,
315
+ input_image,
316
+ prompt,
317
+ a_prompt,
318
+ n_prompt,
319
+ num_samples,
320
+ min_size,
321
+ downscale,
322
+ upscale,
323
+ edm_steps,
324
+ s_stage1,
325
+ s_stage2,
326
+ s_cfg,
327
+ randomize_seed,
328
+ seed,
329
+ s_churn,
330
+ s_noise,
331
+ color_fix_type,
332
+ diff_dtype,
333
+ ae_dtype,
334
+ gamma_correction,
335
+ linear_CFG,
336
+ linear_s_stage2,
337
+ spt_linear_CFG,
338
+ spt_linear_s_stage2,
339
+ model_select,
340
+ output_format,
341
+ allocation
342
+ ):
343
+ start = time.time()
344
+ print('restore ==>>')
345
+
346
+ torch.cuda.set_device(SUPIR_device)
347
+
348
+ with torch.no_grad():
349
+ input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
350
+ LQ = np.array(input_image) / 255.0
351
+ LQ = np.power(LQ, gamma_correction)
352
+ LQ *= 255.0
353
+ LQ = LQ.round().clip(0, 255).astype(np.uint8)
354
+ LQ = LQ / 255 * 2 - 1
355
+ LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
356
+ captions = ['']
357
+
358
+ samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
359
+ s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
360
+ num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
361
+ use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
362
+ cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
363
+
364
+ x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
365
+ 0, 255).astype(np.uint8)
366
+ results = [x_samples[i] for i in range(num_samples)]
367
+ torch.cuda.empty_cache()
368
+
369
+ # All the results have the same size
370
+ input_height, input_width, input_channel = np.array(input_image).shape
371
+ result_height, result_width, result_channel = np.array(results[0]).shape
372
+
373
+ print('<<== restore')
374
+ end = time.time()
375
+ secondes = int(end - start)
376
+ minutes = math.floor(secondes / 60)
377
+ secondes = secondes - (minutes * 60)
378
+ hours = math.floor(minutes / 60)
379
+ minutes = minutes - (hours * 60)
380
+ information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
381
+ "If you don't get the image you wanted, add more details in the « Image description ». " + \
382
+ "Wait " + str(allocation) + " min before a new run to avoid quota penalty or use another computer. " + \
383
+ "The image" + (" has" if len(results) == 1 else "s have") + " been generated in " + \
384
+ ((str(hours) + " h, ") if hours != 0 else "") + \
385
+ ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
386
+ str(secondes) + " sec. " + \
387
+ "The new image resolution is " + str(result_width) + \
388
+ " pixels large and " + str(result_height) + \
389
+ " pixels high, so a resolution of " + f'{result_width * result_height:,}' + " pixels."
390
+ print(information)
391
+ try:
392
+ print("Initial resolution: " + f'{input_width * input_height:,}')
393
+ print("Final resolution: " + f'{result_width * result_height:,}')
394
+ print("edm_steps: " + str(edm_steps))
395
+ print("num_samples: " + str(num_samples))
396
+ print("downscale: " + str(downscale))
397
+ print("Estimated minutes: " + f'{(((result_width * result_height**(1/1.75)) * input_width * input_height * (edm_steps**(1/2)) * (num_samples**(1/2.5)))**(1/2.5)) / 25000:,}')
398
+ except Exception as e:
399
+ print('Exception of Estimation')
400
+
401
+ # Only one image can be shown in the slider
402
+ return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True), gr.update(visible=True)
403
+
404
+ def load_and_reset(param_setting):
405
+ print('load_and_reset ==>>')
406
+ if torch.cuda.device_count() == 0:
407
+ gr.Warning('Set this space to GPU config to make it work.')
408
+ return None, None, None, None, None, None, None, None, None, None, None, None, None, None
409
+ edm_steps = default_setting.edm_steps
410
+ s_stage2 = 1.0
411
+ s_stage1 = -1.0
412
+ s_churn = 5
413
+ s_noise = 1.003
414
+ a_prompt = 'Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - ' \
415
+ 'realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore ' \
416
+ 'detailing, hyper sharpness, perfect without deformations.'
417
+ n_prompt = 'painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, ' \
418
+ '3D render, unreal engine, blurring, dirty, messy, worst quality, low quality, frames, watermark, ' \
419
+ 'signature, jpeg artifacts, deformed, lowres, over-smooth'
420
+ color_fix_type = 'Wavelet'
421
+ spt_linear_s_stage2 = 0.0
422
+ linear_s_stage2 = False
423
+ linear_CFG = True
424
+ if param_setting == "Quality":
425
+ s_cfg = default_setting.s_cfg_Quality
426
+ spt_linear_CFG = default_setting.spt_linear_CFG_Quality
427
+ model_select = "v0-Q"
428
+ elif param_setting == "Fidelity":
429
+ s_cfg = default_setting.s_cfg_Fidelity
430
+ spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
431
+ model_select = "v0-F"
432
+ else:
433
+ raise NotImplementedError
434
+ gr.Info('The parameters are reset.')
435
+ print('<<== load_and_reset')
436
+ return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
437
+ linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select
438
+
439
+ def log_information(result_gallery):
440
+ print('log_information')
441
+ if result_gallery is not None:
442
+ for i, result in enumerate(result_gallery):
443
+ print(result[0])
444
+
445
+ def on_select_result(result_slider, result_gallery, evt: gr.SelectData):
446
+ print('on_select_result')
447
+ if result_gallery is not None:
448
+ for i, result in enumerate(result_gallery):
449
+ print(result[0])
450
+ return [result_slider[0], result_gallery[evt.index][0]]
451
+
452
+ title_html = """
453
+ <h1><center>SUPIR</center></h1>
454
+ <big><center>Upscale your images up to x10 for free, without an account or watermark, and download the result</center></big>
455
+ <center><big><big>🤸<big><big><big><big><big><big>🤸</big></big></big></big></big></big></big></big></center>
456
+
457
+ <p>This is an online demo of SUPIR, a model-scaling approach to photo-realistic image restoration.
458
+ The content added by SUPIR is <b><u>imagination, not real-world information</u></b>.
459
+ SUPIR is for beauty and illustration only.
460
+ Most processes take a few minutes.
461
+ If you want to upscale AI-generated images, note that the <i>PixArt Sigma</i> space can directly generate 5984x5984 images.
462
+ Due to Gradio issues, the generated image is slightly less saturated than the original.
463
+ Please leave a <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">message in discussion</a> if you encounter issues.
464
+ You can also use <a href="https://huggingface.co/spaces/gokaygokay/AuraSR">AuraSR</a> to upscale x4.
465
+
466
+ <p><center><a href="https://arxiv.org/abs/2401.13627">Paper</a> &emsp; <a href="http://supir.xpixel.group/">Project Page</a> &emsp; <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a></center></p>
467
+ <p><center><a style="display:inline-block" href='https://github.com/Fanghua-Yu/SUPIR'><img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/Fanghua-Yu/SUPIR?style=social"></a></center></p>
468
+ """
469
+
470
+
471
+ claim_md = """
472
+ ## **Privacy**
473
+ The images are not stored, but the logs are kept for one month.
474
+ ## **How to get SUPIR**
475
+ You can get SUPIR on HuggingFace by [duplicating this space](https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true) and setting a GPU.
476
+ You can also install SUPIR on your computer following [this tutorial](https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai).
477
+ You can install _Pinokio_ on your computer and then install _SUPIR_ into it. It should be quite easy if you have an Nvidia GPU.
478
+ ## **Terms of use**
479
+ By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please submit feedback to us if you get any inappropriate result! We will collect it to keep improving our models. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
480
+ ## **License**
481
+ The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/Fanghua-Yu/SUPIR) of SUPIR.
482
+ """
483
+
484
+ # Gradio interface
485
+ with gr.Blocks() as interface:
486
+ if torch.cuda.device_count() == 0:
487
+ with gr.Row():
488
+ gr.HTML("""
489
+ <p style="background-color: red;"><big><big><big><b>⚠️To use SUPIR, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
490
+
491
+ You can't use SUPIR directly here because this space runs on a CPU, which is not enough for SUPIR. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">feedback</a> if you have issues.
492
+ </big></big></big></p>
493
+ """)
494
+ gr.HTML(title_html)
495
+
496
+ input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.jpg, *.gif, *.bmp, *.heic)", show_label=True, type="filepath", height=600, elem_id="image-input")
497
+ rotation = gr.Radio([["No rotation", 0], ["⤵ Rotate +90°", 90], ["↩ Return 180°", 180], ["⤴ Rotate -90°", -90]], label="Orientation correction", info="Will apply the following rotation before restoring the image; the AI needs a good orientation to understand the content", value=0, interactive=True, visible=False)
498
+ with gr.Group():
499
+ prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
500
+ prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/badayvedat/LLaVA'"'>LlaVa space</a> to auto-generate the description of your image.")
501
+ upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
502
+ output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extension", value="input", interactive=True)
503
+ allocation = gr.Slider(label="GPU allocation time (in seconds)", info="lower=May abort run, higher=Quota penalty for next runs", value=179, minimum=59, maximum=320, step=1)
504
+
505
+ with gr.Accordion("Pre-denoising (optional)", open=False):
506
+ gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
507
+ denoise_button = gr.Button(value="Pre-denoise")
508
+ denoise_image = gr.Image(label="Denoised image", show_label=True, type="filepath", sources=[], interactive = False, height=600, elem_id="image-s1")
509
+ denoise_information = gr.HTML(value="If present, the denoised image will be used for the restoration instead of the input image.", visible=False)
510
+
511
+ with gr.Accordion("Advanced options", open=False):
512
+ a_prompt = gr.Textbox(label="Additional image description",
513
+ info="Completes the main image description",
514
+ value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
515
+ 'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
516
+ 'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, clothing fabric detailing, '
517
+ 'hyper sharpness, perfect without deformations.',
518
+ lines=3)
519
+ n_prompt = gr.Textbox(label="Negative image description",
520
+ info="Disambiguate by listing what the image does NOT represent",
521
+ value='painting, oil painting, illustration, drawing, art, sketch, anime, '
522
+ 'cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, '
523
+ 'worst quality, low quality, frames, watermark, signature, jpeg artifacts, '
524
+ 'deformed, lowres, over-smooth',
525
+ lines=3)
526
+ edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details; too many steps create a checker effect", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
527
+ num_samples = gr.Slider(label="Num Samples", info="Number of generated results", minimum=1, maximum=4 if not args.use_image_slider else 1
528
+ , value=1, step=1)
529
+ min_size = gr.Slider(label="Minimum size", info="Minimum height, minimum width of the result", minimum=32, maximum=4096, value=1024, step=32)
530
+ downscale = gr.Radio([["/1", 1], ["/2", 2], ["/3", 3], ["/4", 4], ["/5", 5], ["/6", 6], ["/7", 7], ["/8", 8], ["/9", 9], ["/10", 10]], label="Pre-downscale factor", info="Downscaling the blurred image first shortens the processing time", value=1, interactive=True)
531
+ with gr.Row():
532
+ with gr.Column():
533
+ model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
534
+ interactive=True)
535
+ with gr.Column():
536
+ color_fix_type = gr.Radio([["None", "None"], ["AdaIn (improve as a photo)", "AdaIn"], ["Wavelet (for JPEG artifacts)", "Wavelet"]], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="AdaIn",
537
+ interactive=True)
538
+ s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
539
+ value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
540
+ s_stage2 = gr.Slider(label="Restoring Guidance Strength", minimum=0., maximum=1., value=1., step=0.05)
541
+ s_stage1 = gr.Slider(label="Pre-denoising Guidance Strength", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0)
542
+ s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1)
543
+ s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001)
544
+ with gr.Row():
545
+ with gr.Column():
546
+ linear_CFG = gr.Checkbox(label="Linear CFG", value=True)
547
+ spt_linear_CFG = gr.Slider(label="CFG Start", minimum=1.0,
548
+ maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5)
549
+ with gr.Column():
550
+ linear_s_stage2 = gr.Checkbox(label="Linear Restoring Guidance", value=False)
551
+ spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0.,
552
+ maximum=1., value=0., step=0.05)
553
+ with gr.Column():
554
+ diff_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["fp16 (medium)", "fp16"], ["bf16 (speed)", "bf16"]], label="Diffusion Data Type", value="fp32",
555
+ interactive=True)
556
+ with gr.Column():
557
+ ae_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["bf16 (speed)", "bf16"]], label="Auto-Encoder Data Type", value="fp32",
558
+ interactive=True)
559
+ randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
560
+ seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
561
+ with gr.Group():
562
+ param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value = "Quality")
563
+ restart_button = gr.Button(value="Apply presetting")
564
+
565
+ with gr.Column():
566
+ diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id = "process_button")
567
+ reset_btn = gr.Button(value="🧹 Reinit page", variant="stop", elem_id="reset_button", visible = False)
568
+
569
+ warning = gr.HTML(value = "<center><big>Your computer must <u>not</u> enter into standby mode.</big><br/>On Chrome, you can force to keep a tab alive in <code>chrome://discards/</code></center>", visible = False)
570
+ restore_information = gr.HTML(value = "Restart the process to get another result.", visible = False)
571
+ result_slider = ImageSlider(label = 'Comparator', show_label = False, interactive = False, elem_id = "slider1", show_download_button = False)
572
+ result_gallery = gr.Gallery(label = 'Downloadable results', show_label = True, interactive = False, elem_id = "gallery1")
573
+
574
+ gr.Examples(
575
+ examples = [
576
+ [
577
+ "./Examples/Example1.png",
578
+ 0,
579
+ None,
580
+ "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
581
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
582
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
583
+ 2,
584
+ 1024,
585
+ 1,
586
+ 8,
587
+ 100,
588
+ -1,
589
+ 1,
590
+ 7.5,
591
+ False,
592
+ 42,
593
+ 5,
594
+ 1.003,
595
+ "AdaIn",
596
+ "fp16",
597
+ "bf16",
598
+ 1.0,
599
+ True,
600
+ 4,
601
+ False,
602
+ 0.,
603
+ "v0-Q",
604
+ "input",
605
+ 179
606
+ ],
607
+ [
608
+ "./Examples/Example2.jpeg",
609
+ 0,
610
+ None,
611
+ "La cabeza de un gato atigrado, en una casa, fotorrealista, 8k, extremadamente detallada",
612
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
613
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
614
+ 1,
615
+ 1024,
616
+ 1,
617
+ 1,
618
+ 200,
619
+ -1,
620
+ 1,
621
+ 7.5,
622
+ False,
623
+ 42,
624
+ 5,
625
+ 1.003,
626
+ "Wavelet",
627
+ "fp16",
628
+ "bf16",
629
+ 1.0,
630
+ True,
631
+ 4,
632
+ False,
633
+ 0.,
634
+ "v0-Q",
635
+ "input",
636
+ 179
637
+ ],
638
+ [
639
+ "./Examples/Example3.webp",
640
+ 0,
641
+ None,
642
+ "A red apple",
643
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
644
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
645
+ 1,
646
+ 1024,
647
+ 1,
648
+ 1,
649
+ 200,
650
+ -1,
651
+ 1,
652
+ 7.5,
653
+ False,
654
+ 42,
655
+ 5,
656
+ 1.003,
657
+ "Wavelet",
658
+ "fp16",
659
+ "bf16",
660
+ 1.0,
661
+ True,
662
+ 4,
663
+ False,
664
+ 0.,
665
+ "v0-Q",
666
+ "input",
667
+ 179
668
+ ],
669
+ [
670
+ "./Examples/Example3.webp",
671
+ 0,
672
+ None,
673
+ "A red marble",
674
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
675
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
676
+ 1,
677
+ 1024,
678
+ 1,
679
+ 1,
680
+ 200,
681
+ -1,
682
+ 1,
683
+ 7.5,
684
+ False,
685
+ 42,
686
+ 5,
687
+ 1.003,
688
+ "Wavelet",
689
+ "fp16",
690
+ "bf16",
691
+ 1.0,
692
+ True,
693
+ 4,
694
+ False,
695
+ 0.,
696
+ "v0-Q",
697
+ "input",
698
+ 179
699
+ ],
700
+ ],
701
+ run_on_click = True,
702
+ fn = stage2_process,
703
+ inputs = [
704
+ input_image,
705
+ rotation,
706
+ denoise_image,
707
+ prompt,
708
+ a_prompt,
709
+ n_prompt,
710
+ num_samples,
711
+ min_size,
712
+ downscale,
713
+ upscale,
714
+ edm_steps,
715
+ s_stage1,
716
+ s_stage2,
717
+ s_cfg,
718
+ randomize_seed,
719
+ seed,
720
+ s_churn,
721
+ s_noise,
722
+ color_fix_type,
723
+ diff_dtype,
724
+ ae_dtype,
725
+ gamma_correction,
726
+ linear_CFG,
727
+ linear_s_stage2,
728
+ spt_linear_CFG,
729
+ spt_linear_s_stage2,
730
+ model_select,
731
+ output_format,
732
+ allocation
733
+ ],
734
+ outputs = [
735
+ result_slider,
736
+ result_gallery,
737
+ restore_information,
738
+ reset_btn
739
+ ],
740
+ cache_examples = False,
741
+ )
742
+
743
+ with gr.Row():
744
+ gr.Markdown(claim_md)
745
+
746
+ input_image.upload(fn = check_upload, inputs = [
747
+ input_image
748
+ ], outputs = [
749
+ rotation
750
+ ], queue = False, show_progress = False)
751
+
752
+ denoise_button.click(fn = check_and_update, inputs = [
753
+ input_image
754
+ ], outputs = [warning], queue = False, show_progress = False).success(fn = stage1_process, inputs = [
755
+ input_image,
756
+ gamma_correction,
757
+ diff_dtype,
758
+ ae_dtype
759
+ ], outputs=[
760
+ denoise_image,
761
+ denoise_information
762
+ ])
763
+
764
+ diffusion_button.click(fn = update_seed, inputs = [
765
+ randomize_seed,
766
+ seed
767
+ ], outputs = [
768
+ seed
769
+ ], queue = False, show_progress = False).then(fn = check_and_update, inputs = [
770
+ input_image
771
+ ], outputs = [warning], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
772
+ input_image,
773
+ rotation,
774
+ denoise_image,
775
+ prompt,
776
+ a_prompt,
777
+ n_prompt,
778
+ num_samples,
779
+ min_size,
780
+ downscale,
781
+ upscale,
782
+ edm_steps,
783
+ s_stage1,
784
+ s_stage2,
785
+ s_cfg,
786
+ randomize_seed,
787
+ seed,
788
+ s_churn,
789
+ s_noise,
790
+ color_fix_type,
791
+ diff_dtype,
792
+ ae_dtype,
793
+ gamma_correction,
794
+ linear_CFG,
795
+ linear_s_stage2,
796
+ spt_linear_CFG,
797
+ spt_linear_s_stage2,
798
+ model_select,
799
+ output_format,
800
+ allocation
801
+ ], outputs = [
802
+ result_slider,
803
+ result_gallery,
804
+ restore_information,
805
+ reset_btn
806
+ ]).success(fn = log_information, inputs = [
807
+ result_gallery
808
+ ], outputs = [], queue = False, show_progress = False)
809
+
810
+ result_gallery.change(on_select_result, [result_slider, result_gallery], result_slider)
811
+ result_gallery.select(on_select_result, [result_slider, result_gallery], result_slider)
812
+
813
+ restart_button.click(fn = load_and_reset, inputs = [
814
+ param_setting
815
+ ], outputs = [
816
+ edm_steps,
817
+ s_cfg,
818
+ s_stage2,
819
+ s_stage1,
820
+ s_churn,
821
+ s_noise,
822
+ a_prompt,
823
+ n_prompt,
824
+ color_fix_type,
825
+ linear_CFG,
826
+ linear_s_stage2,
827
+ spt_linear_CFG,
828
+ spt_linear_s_stage2,
829
+ model_select
830
+ ])
831
+
832
+ reset_btn.click(fn = reset, inputs = [], outputs = [
833
+ input_image,
834
+ rotation,
835
+ denoise_image,
836
+ prompt,
837
+ a_prompt,
838
+ n_prompt,
839
+ num_samples,
840
+ min_size,
841
+ downscale,
842
+ upscale,
843
+ edm_steps,
844
+ s_stage1,
845
+ s_stage2,
846
+ s_cfg,
847
+ randomize_seed,
848
+ seed,
849
+ s_churn,
850
+ s_noise,
851
+ color_fix_type,
852
+ diff_dtype,
853
+ ae_dtype,
854
+ gamma_correction,
855
+ linear_CFG,
856
+ linear_s_stage2,
857
+ spt_linear_CFG,
858
+ spt_linear_s_stage2,
859
+ model_select,
860
+ output_format,
861
+ allocation
862
+ ], queue = False, show_progress = False)
863
+
864
+ interface.queue(10).launch()
requirements.txt CHANGED
@@ -1,23 +1,48 @@
- accelerate==1.6.0
- diffusers==0.33.1
- transformers==4.46.2
  sentencepiece==0.2.0
- pillow==11.1.0
- av==12.1.0
- numpy==1.26.2
- scipy==1.12.0
- requests==2.31.0
- torchsde==0.2.6
- torch>=2.0.0
- torchvision
  torchaudio
- einops
- opencv-contrib-python
- safetensors
- huggingface_hub
- spaces
- decord
- imageio_ffmpeg
- sageattention
- xformers
- bitsandbytes
+ pydantic==2.10.6
+ fastapi==0.115.8
+ gradio_imageslider==0.0.20
+ gradio_client==1.7.0
+ numpy==1.26.4
+ requests==2.32.3
  sentencepiece==0.2.0
+ tokenizers==0.19.1
+ torchvision==0.18.1
+ uvicorn==0.30.1
+ wandb==0.17.4
+ httpx==0.27.0
+ transformers==4.42.4
+ accelerate==0.32.1
+ scikit-learn==1.5.1
+ einops==0.8.0
+ einops-exts==0.0.4
+ timm==1.0.7
+ openai-clip==1.0.1
+ fsspec==2024.6.1
+ kornia==0.7.3
+ matplotlib==3.9.1
+ ninja==1.11.1.1
+ omegaconf==2.3.0
+ opencv-python==4.10.0.84
+ pandas==2.2.2
+ pillow==10.4.0
+ pytorch-lightning==2.3.3
+ PyYAML==6.0.1
+ scipy==1.14.0
+ tqdm==4.66.4
+ triton==2.3.1
+ urllib3==2.2.2
+ webdataset==0.2.86
+ xformers==0.0.27
+ facexlib==0.3.0
+ k-diffusion==0.1.1.post1
+ diffusers==0.30.0
+ pillow-heif==0.18.0
+
+ open-clip-torch==2.24.0
+
  torchaudio
+ easydict==1.13
+ fairscale==0.4.13
+ torchsde==0.2.6
+ huggingface_hub==0.23.3
+ gradio