rahul7star commited on
Commit
47783c0
·
verified ·
1 Parent(s): e35f16e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -55
app.py CHANGED
@@ -1,63 +1,101 @@
1
  import os
2
- from huggingface_hub import hf_hub_download
3
-
4
- REPO_ID = "tencent/HunyuanVideo-Avatar"
5
- BASE_PATH = "ckpts"
6
- LOCAL_BASE = os.path.join(os.getcwd(), "weights", "ckpts")
7
-
8
- # List of essential files/folders to download (you can expand this if needed)
9
- ESSENTIAL_PATHS = [
10
- # Transformers checkpoints
11
-
12
- "hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt",
13
-
14
-
15
- # VAE
16
- "hunyuan-video-t2v-720p/vae/config.json",
17
- "hunyuan-video-t2v-720p/vae/pytorch_model.pt",
18
-
19
- # llava_llama_image shard files (adjust count if needed)
20
- "llava_llama_image/model-00001-of-00004.safetensors",
21
- "llava_llama_image/model-00002-of-00004.safetensors",
22
- "llava_llama_image/model-00003-of-00004.safetensors",
23
- "llava_llama_image/model-00004-of-00004.safetensors",
24
- "llava_llama_image/config.json",
25
-
26
- # text_encoder_2
27
- "text_encoder_2/config.json",
28
- "text_encoder_2/pytorch_model.bin",
29
-
30
- # whisper-tiny
31
- "whisper-tiny/config.json",
32
- "whisper-tiny/pytorch_model.bin",
33
- "whisper-tiny/tokenizer.json",
34
- "whisper-tiny/tokenizer_config.json",
35
- "whisper-tiny/vocab.json",
36
-
37
- # det_align
38
- "det_align/config.json",
39
- "det_align/pytorch_model.bin",
40
- ]
41
-
42
- def download_files():
43
- for relative_path in ESSENTIAL_PATHS:
44
- source_path = f"{BASE_PATH}/{relative_path}"
45
- local_dir = os.path.join(LOCAL_BASE, os.path.dirname(relative_path))
46
- os.makedirs(local_dir, exist_ok=True)
47
-
48
- print(f"⬇️ Downloading {source_path} ...")
49
  try:
50
  hf_hub_download(
51
- repo_id=REPO_ID,
52
- filename=source_path,
53
- repo_type="model",
54
- local_dir=local_dir,
55
  local_dir_use_symlinks=False
56
  )
57
  except Exception as e:
58
- print(f"❌ Failed to download {source_path}: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
  if __name__ == "__main__":
61
- download_files()
62
- print("\nβœ… All selected model weights downloaded to:")
63
- print(f"{os.path.abspath(LOCAL_BASE)}")
 
1
  import os
2
+ import sys
3
+ import subprocess
4
+ import gradio as gr
5
+ from huggingface_hub import list_repo_files, hf_hub_download
6
+
7
+ MODEL_REPO = "tencent/HunyuanVideo-Avatar"
8
+ BASE_DIR = os.getcwd()
9
+ WEIGHTS_DIR = os.path.join(BASE_DIR, "weights")
10
+ OUTPUT_BASEPATH = os.path.join(BASE_DIR, "results-poor")
11
+
12
+ # Specific file to include from transformers/
13
+ ALLOWED_TRANSFORMER_FILE = "ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states_fp8.pt"
14
+
15
def list_ckpt_files():
    """Return the repo files worth downloading.

    Keeps every file in the model repo except the contents of the large
    transformers/ directory, from which only the single fp8 checkpoint
    (ALLOWED_TRANSFORMER_FILE) is retained.
    """
    repo_files = list_repo_files(repo_id=MODEL_REPO, repo_type="model", revision="main")
    transformers_prefix = "ckpts/hunyuan-video-t2v-720p/transformers/"
    # Outside the transformers/ tree everything passes; inside it only the
    # one allowed checkpoint does.
    return [
        path
        for path in repo_files
        if not path.startswith(transformers_prefix) or path == ALLOWED_TRANSFORMER_FILE
    ]
27
+
28
def download_ckpts():
    """Download each required checkpoint file into WEIGHTS_DIR/ckpts/...

    Files already present on disk are skipped. Returns a newline-joined
    log describing what was skipped, downloaded, or failed.
    """
    os.makedirs(WEIGHTS_DIR, exist_ok=True)
    logs = []

    for filepath in list_ckpt_files():
        # Repo paths look like "ckpts/<subdir>/<file>"; mirror that layout
        # under WEIGHTS_DIR so the expected location is deterministic.
        relative_path = os.path.relpath(filepath, "ckpts")
        target_path = os.path.join(WEIGHTS_DIR, "ckpts", relative_path)

        if os.path.exists(target_path):
            logs.append(f"βœ… Exists: {target_path}")
            continue

        os.makedirs(os.path.dirname(target_path), exist_ok=True)
        logs.append(f"⬇️ Downloading: {filepath}")
        try:
            # BUG FIX: hf_hub_download reproduces the *full* repo path of
            # `filename` under `local_dir`. The previous call passed the
            # target's parent directory, so files landed at
            # <parent>/ckpts/.../<file> — never equal to target_path, which
            # defeated the existence check above (re-download on every run)
            # and left run_sample_gpu_poor unable to find the checkpoint.
            # Passing WEIGHTS_DIR makes the file land exactly at target_path.
            hf_hub_download(
                repo_id=MODEL_REPO,
                filename=filepath,
                local_dir=WEIGHTS_DIR,
                local_dir_use_symlinks=False
            )
        except Exception as e:
            # Best-effort: log the failure and keep downloading the rest.
            logs.append(f"❌ Failed to download {filepath}: {e}")
    return "\n".join(logs)
52
+
53
def run_sample_gpu_poor():
    """Run hymm_sp/sample_gpu_poor.py against the downloaded fp8 checkpoint.

    Returns a log string: the subprocess stdout on success, its stderr on a
    non-zero exit, or an error message when the checkpoint is missing.
    """
    checkpoint_fp8 = os.path.join(
        WEIGHTS_DIR,
        "ckpts",
        "hunyuan-video-t2v-720p",
        "transformers",
        "mp_rank_00_model_states_fp8.pt",
    )
    # Bail out early rather than letting the subprocess fail obscurely.
    if not os.path.isfile(checkpoint_fp8):
        return f"❌ Missing checkpoint: {checkpoint_fp8}"

    # Assemble the sampling command: flag/value pairs first, then switches.
    cmd = ["python3", "hymm_sp/sample_gpu_poor.py"]
    cmd += ["--input", "assets/test.csv"]
    cmd += ["--ckpt", checkpoint_fp8]
    cmd += ["--sample-n-frames", "129"]
    cmd += ["--seed", "128"]
    cmd += ["--image-size", "704"]
    cmd += ["--cfg-scale", "7.5"]
    cmd += ["--infer-steps", "50"]
    cmd += ["--use-deepcache", "1"]
    cmd += ["--flow-shift-eval-video", "5.0"]
    cmd += ["--save-path", OUTPUT_BASEPATH]
    cmd += ["--use-fp8", "--cpu-offload", "--infer-min"]

    # Inherit the current environment, overriding what the script needs.
    env = dict(
        os.environ,
        PYTHONPATH="./",
        MODEL_BASE=WEIGHTS_DIR,
        CPU_OFFLOAD="1",
        CUDA_VISIBLE_DEVICES="0",
    )

    result = subprocess.run(cmd, env=env, capture_output=True, text=True)
    if result.returncode != 0:
        return f"❌ sample_gpu_poor.py failed:\n{result.stderr}"
    return f"βœ… sample_gpu_poor.py finished:\n{result.stdout}"
87
+
88
def download_and_run():
    """Download the model weights, then run inference; return combined logs."""
    sections = (download_ckpts(), run_sample_gpu_poor())
    # Separate the two log sections with a horizontal rule.
    return "\n\n---\n\n".join(sections)
92
+
93
# ---- Gradio front-end ----
with gr.Blocks() as demo:
    gr.Markdown("## πŸ“¦ Download All Checkpoints (Except Only One File from Transformers)")
    log_box = gr.Textbox(lines=30, label="Logs")
    run_button = gr.Button("πŸš€ Download + Run")
    # A single click downloads the weights and then kicks off inference.
    run_button.click(fn=download_and_run, outputs=log_box)

if __name__ == "__main__":
    demo.launch(share=False)