openfree commited on
Commit
497cc50
·
verified ·
1 Parent(s): 30c6c6a

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile (1) +42 -0
  2. demo_gradio.py +427 -0
  3. setup.sh +7 -0
Dockerfile (1) ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04

# Non-interactive installs; pin a timezone so tzdata does not prompt.
ENV DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai

# Base tooling, Python, and the native libraries video/imaging code
# needs at runtime (ffmpeg, GL and glib shared objects).
RUN apt-get update && apt-get install -y \
    git \
    python3 \
    python3-pip \
    ffmpeg \
    libgl1-mesa-glx \
    libglib2.0-0 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Working directory for the app.
WORKDIR /app

# Application files.
# NOTE(review): this commit uploads demo_gradio.py, but the image copies
# and runs app.py -- confirm app.py exists in the build context.
COPY requirements.txt ./
COPY app.py ./
COPY setup.sh ./
COPY README.md ./
COPY diffusers_helper ./diffusers_helper

# Python dependencies (no pip cache to keep the image small).
RUN pip3 install --no-cache-dir -r requirements.txt

# Output and model-cache directories (one layer instead of two).
RUN mkdir -p /app/outputs /app/hf_download

# Make the setup script executable.
RUN chmod +x setup.sh

# Cache Hugging Face downloads inside the image.
ENV HF_HOME=/app/hf_download

# Run the application.
CMD ["python3", "app.py"]
demo_gradio.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1

import os
import argparse


# NOTE(review): several names imported below (numpy, einops, traceback,
# PIL.Image, login, sample_hunyuan, most diffusers_helper symbols) are not
# used in this visible chunk; presumably the elided worker() section uses
# them -- do not remove without checking the full file.
import torch
import gradio as gr

import numpy as np
import einops
import traceback

from PIL import Image
from diffusers import AutoencoderKLHunyuanVideo
from transformers import (
    LlamaModel, CLIPTextModel,
    LlamaTokenizerFast, CLIPTokenizer,
    SiglipImageProcessor, SiglipVisionModel
)

from diffusers_helper.hf_login import login
from diffusers_helper.hunyuan import (
    encode_prompt_conds, vae_decode, vae_encode,
    vae_decode_fake
)
from diffusers_helper.utils import (
    save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw,
    resize_and_center_crop, generate_timestamp
)
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
from diffusers_helper.memory import (
    gpu, get_cuda_free_memory_gb, unload_complete_models, load_model_as_complete,
    DynamicSwapInstaller, move_model_to_device_with_memory_preservation,
    offload_model_from_device_for_memory_preservation, fake_diffusers_current_device
)
from diffusers_helper.clip_vision import hf_clip_vision_encode
from diffusers_helper.thread_utils import AsyncStream, async_run


# --- Args and config ---
parser = argparse.ArgumentParser()
parser.add_argument('--share', action='store_true')
parser.add_argument('--server', type=str, default='0.0.0.0')
parser.add_argument('--port', type=int, required=False)
parser.add_argument('--inbrowser', action='store_true')
args = parser.parse_args()

# Route Hugging Face downloads into ./hf_download next to this script.
# NOTE(review): this is assigned *after* the transformers/diffusers imports
# above; if those libraries read HF_HOME at import time the earlier value
# wins -- consider setting it before the imports.
os.environ['HF_HOME'] = os.path.abspath(
    os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download'))
)

print(args)

# Pick an offloading strategy from free VRAM: above 60 GB everything stays
# resident on the GPU; otherwise models are swapped in on demand.
free_mem_gb = get_cuda_free_memory_gb(gpu)
high_vram = free_mem_gb > 60

print(f'Free VRAM {free_mem_gb} GB')
print(f'High-VRAM Mode: {high_vram}')

# --- Load models ---
# Everything is materialized on CPU first; GPU placement is decided below
# based on the high_vram flag.
text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()

feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()

transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePackI2V_HY', torch_dtype=torch.bfloat16).cpu()

# Inference only: switch every model to eval mode.
vae.eval(), text_encoder.eval(), text_encoder_2.eval(), image_encoder.eval(), transformer.eval()


# On low-VRAM machines, decode the VAE in slices/tiles to bound peak memory.
if not high_vram:
    vae.enable_slicing()
    vae.enable_tiling()

transformer.high_quality_fp32_output_for_inference = True


# Final dtype placement: transformer in bf16, everything else in fp16.
transformer.to(dtype=torch.bfloat16)
vae.to(dtype=torch.float16)
image_encoder.to(dtype=torch.float16)
text_encoder.to(dtype=torch.float16)
text_encoder_2.to(dtype=torch.float16)

# No training anywhere; freeze all parameters.
for model in [vae, text_encoder, text_encoder_2, image_encoder, transformer]:
    model.requires_grad_(False)


# GPU placement: in low-VRAM mode install dynamic layer swapping for the two
# big models; otherwise keep the transformer resident on the GPU.
if not high_vram:

    DynamicSwapInstaller.install_model(transformer, device=gpu)
    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
else:
    transformer.to(gpu)

# Cross-thread queue pair used by the UI and the background worker.
stream = AsyncStream()

outputs_folder = './outputs/'
os.makedirs(outputs_folder, exist_ok=True)
111
+
112
+ # --- UI + CSS ---
113
def make_progress_bar_css():
    """Return the dark-theme CSS injected into the Gradio Blocks app."""
    dark_theme = """
body, .gradio-container {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
.gr-button, .gr-input, .gr-textbox, .gr-slider, .gr-checkbox {
    background-color: #1a1a1a !important;
    color: #ffffff !important;
    border-color: #444 !important;
}
.gr-button:hover {
    background-color: #333 !important;
}
.gr-markdown {
    color: #ddd !important;
}
.gr-image-preview, .gr-video {
    background-color: #111 !important;
}
"""
    return dark_theme
134
+
135
def end_process():
    """Ask the background worker to stop by queueing an 'end' sentinel."""
    stream.input_queue.push('end')
137
+
138
+
139
+
140
+
141
+
142
+
143
+
144
+
145
+
146
+
147
+
148
+
149
+
150
+
151
+
152
+
153
+
154
+
155
+
156
+
157
+
158
+
159
+
160
+
161
+
162
+
163
+
164
+
165
+
166
+
167
+
168
+
169
+
170
+
171
+
172
+
173
+
174
+
175
+
176
+
177
+
178
+
179
+
180
+
181
+
182
+
183
+
184
+
185
+
186
+
187
+
188
+
189
+
190
+
191
+
192
+
193
+
194
+
195
+
196
+
197
+
198
+
199
+
200
+
201
+
202
+
203
+
204
+
205
+
206
+
207
+
208
+
209
+
210
+
211
+
212
+
213
+
214
+
215
+
216
+
217
+
218
+
219
+
220
+
221
+
222
+
223
+
224
+
225
+
226
+
227
+
228
+
229
+
230
+
231
+
232
+
233
+
234
+
235
+
236
+
237
+
238
+
239
+
240
+
241
+
242
+
243
+
244
+
245
+
246
+
247
+
248
+
249
+
250
+
251
+
252
+
253
+
254
+
255
+
256
+
257
+
258
+
259
+
260
+
261
+
262
+
263
+
264
+
265
+
266
+
267
+
268
+
269
+
270
+
271
+
272
+
273
+
274
+
275
+
276
+
277
+
278
+
279
+
280
+
281
+
282
+
283
+
284
+
285
+
286
+
287
+
288
+
289
+
290
+
291
+
292
+
293
+
294
+
295
+
296
+
297
+
298
+
299
+
300
+
301
+
302
+
303
+
304
+
305
+
306
+
307
+
308
+
309
+
310
+
311
+
312
+
313
+
314
+
315
+
316
+
317
+
318
+
319
+
320
+
321
+
322
+
323
+
324
+
325
+
326
+
327
+
328
+
329
+
330
+
331
+
332
+
333
+
334
+
335
+
336
+
337
+
338
+
339
+
340
+
341
+
342
+
343
+
344
+
345
+
346
+
347
+
348
+
349
+
350
+
351
def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache):
    """Gradio generator: launch the background worker and relay its events.

    Yields 6-tuples of (video path, preview image update, description text,
    progress HTML, start-button update, end-button update) until the worker
    pushes an 'end' event.
    """
    global stream
    assert input_image is not None, 'No input image!'

    # Immediately flip the buttons: Start disabled, End enabled.
    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    # Fresh queue pair for this run, then hand off to the worker thread.
    stream = AsyncStream()
    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache)

    output_filename = None
    while True:
        flag, payload = stream.output_queue.next()

        if flag == 'file':
            # A (partial) video finished encoding; show it while keeping
            # generation controls in the "running" state.
            output_filename = payload
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
        elif flag == 'progress':
            preview, desc, html = payload
            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
        elif flag == 'end':
            # Restore the idle state: hide preview, re-enable Start.
            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
            break
377
+
378
+
379
+
380
+
381
+
382
+
383
# Example prompts; wrapped one-per-row as a gr.Dataset-style sample list.
# NOTE(review): quick_prompts is not referenced anywhere in this visible
# chunk -- presumably wired up in the elided portion of the file.
quick_prompts = [
    'The girl dances gracefully, with clear movements, full of charm.',
    'A character doing some simple body movements.',
]
quick_prompts = [[x] for x in quick_prompts]


css = make_progress_bar_css()

# --- Build the Gradio UI ---
block = gr.Blocks(css=css).queue()
with block:
    gr.Markdown('# FramePack')
    end_button = gr.Button(value="End Generation", interactive=False)

    with gr.Group():
        use_teacache = gr.Checkbox(label='Use TeaCache', value=True)
        # Hidden negative prompt (kept in the signature of process()).
        n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False)

        seed = gr.Number(label="Seed", value=31337, precision=0)

        total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
        latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False)
        steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1)
        cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False)
        gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01)
        rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)
        gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB)", minimum=6, maximum=128, value=6, step=0.1)



    with gr.Column():
        preview_image = gr.Image(label="Next Latents", height=200, visible=False)
        result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
        gr.Markdown('Note: The ending actions are generated before the start. Wait for full video.')
        progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
        progress_bar = gr.HTML('', elem_classes='no-generating-animation')

    # NOTE(review): input_image, prompt and start_button are referenced here
    # but never created in this visible chunk; unless they are defined in the
    # elided portion of the file, this raises NameError at startup -- verify.
    ips = [input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache]
    start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
    end_button.click(fn=end_process)
423
+
424
+
425
+ block.launch(
426
+ server_name=args.server,
427
+ server_port=args.port,
setup.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Create the directories the app expects (single mkdir call).
mkdir -p hf_download outputs

# Models are downloaded automatically on first run if not already present.
echo "环境准备完毕,运行 python app.py 启动应用"