Update app.py
app.py CHANGED
@@ -2,63 +2,57 @@ import os
 import sys
 import subprocess
 import time
-import shutil
-import psutil
+from pathlib import Path
 from huggingface_hub import snapshot_download
 
+# --------------------
+# CONFIGURATION
+# --------------------
 MODEL_REPO = "tencent/HunyuanVideo-Avatar"
-…
-…
-OUTPUT_BASEPATH = os.path.join(BASE_DIR, "results-poor")
-CUSTOM_CACHE_DIR = "/data/hf_cache"  # change if different
-HF_HOME_ENV = {"HF_HOME": CUSTOM_CACHE_DIR}
+HF_CACHE_DIR = Path("/home/user/.cache/huggingface/hf_cache/hunyuan_avatar")
+HF_CACHE_DIR.mkdir(parents=True, exist_ok=True)
 
-…
-…
-CHECKPOINT_FP8_FILE = os.path.join(WEIGHTS_DIR, "ckpts", "hunyuan-video-t2v-720p", "transformers", "mp_rank_00_model_states_fp8.pt")
+CHECKPOINT_FILE = HF_CACHE_DIR / "ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt"
+CHECKPOINT_FP8_FILE = HF_CACHE_DIR / "ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states_fp8.pt"
 
-…
-…
-…
-    print(f"💾 Disk space: {free_gb}GB free")
-    if free_gb < min_free_gb:
-        print(f"❌ Not enough disk space. {free_gb}GB available, {min_free_gb}GB required.")
-        sys.exit(1)
-
-def clear_hf_cache():
-    hf_cache = os.path.expanduser("~/.cache/huggingface")
-    if os.path.exists(hf_cache):
-        print("🧹 Cleaning Hugging Face cache...")
-        shutil.rmtree(hf_cache)
+ASSETS_CSV = "assets/test.csv"
+OUTPUT_DIR = Path("results-poor")
+OUTPUT_DIR.mkdir(exist_ok=True)
 
+# --------------------
+# Download the model (if needed)
+# --------------------
 def download_model():
-…
-…
+    if CHECKPOINT_FILE.exists() and CHECKPOINT_FP8_FILE.exists():
+        print("✅ Model checkpoint already exists. Skipping download.")
+        return
 
+    print("⬇️ Downloading model into HF Space cache...")
     snapshot_download(
         repo_id=MODEL_REPO,
-        local_dir=…
-        local_dir_use_symlinks=False
-        **HF_HOME_ENV
+        local_dir=HF_CACHE_DIR,
+        local_dir_use_symlinks=False
     )
 
-    if not …
-        print(f"❌ …
+    if not CHECKPOINT_FILE.exists():
+        print(f"❌ Missing checkpoint: {CHECKPOINT_FILE}")
         sys.exit(1)
-…
-…
+
+    if not CHECKPOINT_FP8_FILE.exists():
+        print(f"❌ Missing FP8 checkpoint: {CHECKPOINT_FP8_FILE}")
         sys.exit(1)
 
-    print("✅ Model …
-    clear_hf_cache()
+    print("✅ Model download complete.")
 
+# --------------------
+# Run sample_gpu_poor.py
+# --------------------
 def run_sample_gpu_poor():
     print("🎬 Running sample_gpu_poor.py...")
-…
     cmd = [
         "python3", "hymm_sp/sample_gpu_poor.py",
-        "--input", …
-        "--ckpt", CHECKPOINT_FP8_FILE,
+        "--input", ASSETS_CSV,
+        "--ckpt", str(CHECKPOINT_FP8_FILE),
         "--sample-n-frames", "129",
         "--seed", "128",
         "--image-size", "704",
@@ -66,74 +60,38 @@ def run_sample_gpu_poor():
         "--infer-steps", "50",
         "--use-deepcache", "1",
         "--flow-shift-eval-video", "5.0",
-        "--save-path", …
+        "--save-path", str(OUTPUT_DIR),
         "--use-fp8",
         "--cpu-offload",
         "--infer-min"
     ]
 
     env = os.environ.copy()
-    env.update(HF_HOME_ENV)
     env["PYTHONPATH"] = "./"
-    env["MODEL_BASE"] = …
+    env["MODEL_BASE"] = str(HF_CACHE_DIR)
     env["CPU_OFFLOAD"] = "1"
     env["CUDA_VISIBLE_DEVICES"] = "0"
 
-…
-    if …
+    proc = subprocess.run(cmd, env=env)
+    if proc.returncode != 0:
         print("❌ sample_gpu_poor.py failed.")
         sys.exit(1)
+    print("✅ sample_gpu_poor.py completed successfully.")
 
-…
-…
-…
-    print("🚀 Starting flask_audio.py...")
-    cmd = [
-        "torchrun",
-        "--nnodes=1",
-        "--nproc_per_node=8",
-        "--master_port=29605",
-        "hymm_gradio/flask_audio.py",
-        "--input", "assets/test.csv",
-        "--ckpt", CHECKPOINT_FILE,
-        "--sample-n-frames", "129",
-        "--seed", "128",
-        "--image-size", "704",
-        "--cfg-scale", "7.5",
-        "--infer-steps", "50",
-        "--use-deepcache", "1",
-        "--flow-shift-eval-video", "5.0"
-    ]
-    subprocess.Popen(cmd)
-
+# --------------------
+# Optional: Start UI
+# --------------------
 def run_gradio_ui():
-    print("💚 Launching Gradio …
+    print("💚 Launching Gradio interface...")
     cmd = ["python3", "hymm_gradio/gradio_audio.py"]
     subprocess.Popen(cmd)
 
-…
-…
-…
-    with open(gitignore_path, "a+") as f:
-        f.seek(0)
-        existing = f.read().splitlines()
-        for line in lines_to_add:
-            if line not in existing:
-                f.write(f"{line}\n")
-
+# --------------------
+# Entry point
+# --------------------
 def main():
-…
-    check_disk_space(min_free_gb=10)
-
-    if os.path.isfile(CHECKPOINT_FILE) and os.path.isfile(CHECKPOINT_FP8_FILE):
-        print("✅ Checkpoints exist. Skipping download.")
-    else:
-        download_model()
-
+    download_model()
     run_sample_gpu_poor()
-
-    # Optional: Launch UIs
-    run_flask_audio()
     time.sleep(5)
     run_gradio_ui()
 
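The core of the new download_model() is a check-then-fetch pattern: skip the download when both checkpoints are already in the Space cache, otherwise call snapshot_download and verify the expected files landed. Below is a minimal standalone sketch of that pattern, using the repo id and cache path from the diff; it assumes huggingface_hub >= 0.23, where local_dir_use_symlinks is deprecated and ignored, and passing local_dir alone writes real files to that directory.

# Sketch of the download-and-verify pattern from download_model().
# Assumes huggingface_hub >= 0.23 (local_dir_use_symlinks deprecated;
# real files are written directly to local_dir).
from pathlib import Path
from huggingface_hub import snapshot_download

cache_dir = Path("/home/user/.cache/huggingface/hf_cache/hunyuan_avatar")  # path from the diff
cache_dir.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="tencent/HunyuanVideo-Avatar", local_dir=cache_dir)

# Verify the checkpoint landed where the rest of the app expects it.
ckpt = cache_dir / "ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt"
if not ckpt.exists():
    raise SystemExit(f"Missing checkpoint: {ckpt}")

Verifying after the download matters on Spaces because snapshot_download can succeed while the repo layout differs from what the inference script expects; failing fast here gives a clearer error than a crash deep inside sample_gpu_poor.py.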
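The inference step relies on the env parameter of subprocess.run, which replaces the child's entire environment rather than extending it; that is why run_sample_gpu_poor() copies os.environ first and only then overlays its own variables. A minimal sketch of that pattern, with the variable names taken from the diff (the echoed child command is illustrative, not the real launcher):

import os
import subprocess
import sys

# env= replaces the whole child environment, so copy the parent's first,
# then overlay the script-specific variables from the diff.
env = os.environ.copy()
env["PYTHONPATH"] = "./"
env["MODEL_BASE"] = "/home/user/.cache/huggingface/hf_cache/hunyuan_avatar"
env["CPU_OFFLOAD"] = "1"
env["CUDA_VISIBLE_DEVICES"] = "0"

# Illustrative child process; the app launches hymm_sp/sample_gpu_poor.py instead.
proc = subprocess.run(
    [sys.executable, "-c", "import os; print(os.environ['MODEL_BASE'])"],
    env=env,
)
if proc.returncode != 0:
    sys.exit(1)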
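Note also the split between subprocess.run and subprocess.Popen in the new code: run blocks until sample_gpu_poor.py finishes (so its return code can be checked), while run_gradio_ui() uses Popen, which returns immediately and leaves the UI running in the background. A tiny sketch of that fire-and-forget launch, with the script name from the diff and the same five-second pause main() uses:

import subprocess
import time

# Popen returns immediately; the Gradio process keeps running in the background.
ui = subprocess.Popen(["python3", "hymm_gradio/gradio_audio.py"])
time.sleep(5)  # brief pause, as in main(), to let the server come up
print(f"Gradio UI started with PID {ui.pid}")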