Bils committed on
Commit 4588e7b · verified · 1 Parent(s): b29db40

Update app.py

Files changed (1): app.py +112 -199
app.py CHANGED
@@ -2,10 +2,9 @@ import os, sys, json, tempfile, subprocess, shutil, uuid, glob, traceback, datet
 from pathlib import Path
 from typing import Tuple, List
 
-# ================= Crash trap & verbose logs =================
+# ========= Crash trap & env =========
 import faulthandler
 faulthandler.enable()
-
 os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "false")
 os.environ.setdefault("GRADIO_NUM_PORTS", "1")
 os.environ.setdefault("HF_HUB_VERBOSE", "1")
@@ -17,35 +16,40 @@ def _crash_trap(exctype, value, tb):
     print(f"\n===== FATAL ({ts}Z) =====================================")
     traceback.print_exception(exctype, value, tb)
     print("=========================================================\n", flush=True)
-
 sys.excepthook = _crash_trap
-# ============================================================
 
+# ========= Minimal imports for startup =========
 import gradio as gr
-from spaces import GPU  # <-- explicit import so startup checker can see it
-from huggingface_hub import snapshot_download
+from spaces import GPU  # ensure checker can see decorator
 from loguru import logger
-import torch, torchaudio
 
-# ========= Paths & Config =========
+# ---- ZeroGPU marker FIRST (so startup detector finds it) ----
+@GPU(duration=5)
+def _zgpu_marker(_: int = 0) -> int:
+    """No-op; only to advertise a GPU-decorated function at import-time."""
+    return _
+
+# ========= Paths & Configs =========
 ROOT = Path(__file__).parent.resolve()
 REPO_DIR = ROOT / "HunyuanVideo-Foley"
 WEIGHTS_DIR = ROOT / "weights"
 CACHE_DIR = ROOT / "cache"
 OUT_DIR = ROOT / "outputs"
 ASSETS = ROOT / "assets"
-ASSETS.mkdir(exist_ok=True)
+for p in (ASSETS, WEIGHTS_DIR, CACHE_DIR, OUT_DIR):
+    p.mkdir(parents=True, exist_ok=True)
 
 APP_TITLE = os.environ.get("APP_TITLE", "Foley Studio · ZeroGPU")
 APP_TAGLINE = os.environ.get("APP_TAGLINE", "Generate scene-true foley for short clips (ZeroGPU-ready).")
 PRIMARY_COLOR = os.environ.get("PRIMARY_COLOR", "#6B5BFF")
 
-# ZeroGPU-safe defaults (tweak in Space Secrets if needed)
+# ZeroGPU-friendly defaults
 MAX_SECS = int(os.environ.get("MAX_SECS", "15"))
 TARGET_H = int(os.environ.get("TARGET_H", "480"))
 SR = int(os.environ.get("TARGET_SR", "48000"))
 ZEROGPU_DURATION = int(os.environ.get("ZEROGPU_DURATION", "110"))
 
+# ========= Light utils (safe at import) =========
 def sh(cmd: str):
     print(">>", cmd)
     subprocess.run(cmd, shell=True, check=True)
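Note: the hunk above moves the `@GPU` marker to import time so the ZeroGPU startup detector finds it. For running the same file outside Spaces, a no-op stand-in for `spaces.GPU` keeps the decorator path intact — a minimal sketch, assuming only that the `spaces` package is absent locally (the shim is illustrative, not part of this commit):

```python
# Sketch: use the real spaces.GPU on a Space, a no-op decorator elsewhere.
try:
    from spaces import GPU
except ImportError:
    def GPU(duration: int = 60):
        def wrap(fn):
            return fn  # no ZeroGPU scheduling when running locally
        return wrap

@GPU(duration=5)
def _zgpu_marker(_: int = 0) -> int:
    return _
```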
@@ -61,10 +65,6 @@ def ffprobe_duration(path: str) -> float:
     return 0.0
 
 def _clone_without_lfs():
-    """
-    Clone repo while skipping LFS smudge to avoid huge demo assets.
-    Falls back to sparse checkout with only essential paths.
-    """
     if REPO_DIR.exists():
         return
     try:
@@ -104,14 +104,11 @@ def _clone_without_lfs():
     sh(f"git -C {REPO_DIR} fetch --depth 1 origin master")
     sh(f"git -C {REPO_DIR} checkout master")
 
-def prepare_once():
-    """Clone code (skip LFS), download weights, set env, prepare dirs."""
+def prepare_code_and_weights():
+    from huggingface_hub import snapshot_download
     _clone_without_lfs()
-
     if str(REPO_DIR) not in sys.path:
         sys.path.insert(0, str(REPO_DIR))
-
-    WEIGHTS_DIR.mkdir(parents=True, exist_ok=True)
     snapshot_download(
         repo_id="tencent/HunyuanVideo-Foley",
         local_dir=str(WEIGHTS_DIR),
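For reference, the weights pull in this hunk boils down to a single `snapshot_download` call; a standalone sketch (the `allow_patterns` filter shown here is illustrative — the commit's exact patterns are not visible in the diff):

```python
from huggingface_hub import snapshot_download

# Fetch only inference-relevant files into ./weights (patterns are assumptions).
path = snapshot_download(
    repo_id="tencent/HunyuanVideo-Foley",
    local_dir="weights",
    allow_patterns=["*.safetensors", "*.json", "*.yaml"],
)
print("weights at:", path)
```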
@@ -121,20 +118,45 @@
     )
     os.environ["HIFI_FOLEY_MODEL_PATH"] = str(WEIGHTS_DIR)
 
-    CACHE_DIR.mkdir(exist_ok=True)
-    OUT_DIR.mkdir(exist_ok=True)
-
-prepare_once()
+# Do lightweight prep (no model init) at import-time
+prepare_code_and_weights()
 
-# Prefer safetensors & fast transfer
+# Prefer safetensors & fast transfer for later downloads
 os.environ["TRANSFORMERS_PREFER_SAFETENSORS"] = "1"
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
-def ensure_clap_safetensors():
-    """
-    Pre-cache ONLY safetensors for laion/larger_clap_general so
-    Transformers never selects a stale/corrupt *.bin.
-    """
+# ========= Heavy deps & model utilities (deferred import) =========
+_model_dict = None
+_cfg = None
+_device = None
+
+def _lazy_heavy_imports():
+    global torch, torchaudio
+    import torch, torchaudio  # noqa
+    try:
+        import audiotools  # provided by 'descript-audiotools'
+    except Exception as e:
+        raise RuntimeError(
+            "Missing 'audiotools'. Add 'descript-audiotools>=0.7.2' to requirements.txt."
+        ) from e
+    try:
+        import omegaconf  # noqa
+        import yaml  # noqa
+        import easydict  # noqa
+    except Exception as e:
+        raise RuntimeError(
+            "Missing config deps. Add: omegaconf>=2.3.0, pyyaml, easydict."
+        ) from e
+
+    # Tencent internals
+    from hunyuanvideo_foley.utils.model_utils import load_model, denoise_process  # noqa
+    from hunyuanvideo_foley.utils.feature_utils import feature_process  # noqa
+    from hunyuanvideo_foley.utils.media_utils import merge_audio_video  # noqa
+    return torch, torchaudio
+
+def _ensure_clap_safetensors_only():
+    from huggingface_hub import snapshot_download
+    # Pre-cache only safetensors; block .bin selection
     snapshot_download(
         repo_id="laion/larger_clap_general",
         allow_patterns=[
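The deferred-import block above keeps Space startup light by paying for torch/torchaudio only on the first real call. A stripped-down sketch of the same pattern (names are illustrative):

```python
# Heavy module bound on first use instead of at import time.
_torch = None

def get_torch():
    global _torch
    if _torch is None:
        import torch  # deferred: imported only when actually needed
        _torch = torch
    return _torch
```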
@@ -146,13 +168,9 @@ def ensure_clap_safetensors():
         local_dir=None,
         local_dir_use_symlinks=False,
     )
-
-def _purge_clap_pt_bins():
-    """Remove any cached .bin for laion/larger_clap_general."""
+    # Purge any cached .bin for the model
     cache_root = Path.home() / ".cache" / "huggingface" / "hub"
-    for pat in [
-        cache_root / "models--laion--larger_clap_general" / "snapshots" / "*" / "*.bin",
-    ]:
+    for pat in [cache_root / "models--laion--larger_clap_general" / "snapshots" / "*" / "*.bin"]:
         for f in glob.glob(str(pat)):
             try:
                 Path(f).unlink()
@@ -160,39 +178,8 @@ def _purge_clap_pt_bins():
             except Exception:
                 pass
 
-# ---- Dependency guards (early / clear errors) -------------------------------
-try:
-    import audiotools  # provided by PyPI package 'descript-audiotools'
-except Exception as e:
-    raise RuntimeError(
-        "Missing module 'audiotools'. Install via PyPI package "
-        "'descript-audiotools' (add 'descript-audiotools>=0.7.2' to requirements.txt)."
-    ) from e
-
-try:
-    import omegaconf  # noqa: F401
-    import yaml  # from pyyaml
-    import easydict  # noqa: F401
-except Exception as e:
-    raise RuntimeError(
-        "Missing config deps. Add to requirements.txt: "
-        "'omegaconf>=2.3.0', 'pyyaml', 'easydict'."
-    ) from e
-
-# Import Tencent internals after guards
-from hunyuanvideo_foley.utils.model_utils import load_model, denoise_process
-from hunyuanvideo_foley.utils.feature_utils import feature_process
-from hunyuanvideo_foley.utils.media_utils import merge_audio_video
-
-# ========= Native Model Setup =========
-MODEL_PATH = os.environ.get("HIFI_FOLEY_MODEL_PATH", str(WEIGHTS_DIR))
-CONFIG_PATH = str(REPO_DIR / "configs" / "hunyuanvideo-foley-xxl.yaml")
-
-_model_dict = None
-_cfg = None
-_device = None
-
-def _setup_device(device_str: str = "auto", gpu_id: int = 0) -> torch.device:
+def _setup_device(device_str: str = "auto", gpu_id: int = 0):
+    import torch
     if device_str == "auto":
         if torch.cuda.is_available():
             d = torch.device(f"cuda:{gpu_id}")
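The diff truncates `_setup_device` after the CUDA branch. A plausible completion of the auto-selection logic, with the MPS and CPU fallbacks as assumptions:

```python
import torch

def pick_device(gpu_id: int = 0) -> torch.device:
    # CUDA if present; the MPS/CPU fallbacks are assumed, since the
    # diff does not show the rest of _setup_device.
    if torch.cuda.is_available():
        return torch.device(f"cuda:{gpu_id}")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

print(pick_device())
```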
@@ -209,11 +196,16 @@ def _setup_device(device_str: str = "auto", gpu_id: int = 0) -> torch.device:
     return d
 
 def auto_load_models() -> str:
-    """Load model natively (weights already downloaded to MODEL_PATH)."""
+    """Load the full Tencent pipeline (lazy; call when needed)."""
     global _model_dict, _cfg, _device
+    if _model_dict is not None:
+        return "✅ Model already loaded"
+
+    # Imports & guards
+    torch, _ = _lazy_heavy_imports()
 
-    if not os.path.exists(MODEL_PATH):
-        os.makedirs(MODEL_PATH, exist_ok=True)
+    MODEL_PATH = os.environ.get("HIFI_FOLEY_MODEL_PATH", str(WEIGHTS_DIR))
+    CONFIG_PATH = str(REPO_DIR / "configs" / "hunyuanvideo-foley-xxl.yaml")
     if not os.path.exists(CONFIG_PATH):
         return f"❌ Config file not found: {CONFIG_PATH}"
 
@@ -222,40 +214,18 @@ def auto_load_models() -> str:
     logger.info(f"MODEL_PATH: {MODEL_PATH}")
     logger.info(f"CONFIG_PATH: {CONFIG_PATH}")
 
-    # Ensure CLAP uses safetensors; nuke any .bin first
-    ensure_clap_safetensors()
-    _purge_clap_pt_bins()
-
-    # Lock HF Hub to offline so Transformers can't fetch a fresh .bin again
+    # Force CLAP to safetensors path
+    _ensure_clap_safetensors_only()
     os.environ["HF_HUB_OFFLINE"] = "1"
     os.environ["TRANSFORMERS_OFFLINE"] = "1"
 
+    from hunyuanvideo_foley.utils.model_utils import load_model
     _model_dict, _cfg = load_model(MODEL_PATH, CONFIG_PATH, _device)
     logger.info("✅ Model loaded")
     return "✅ Model loaded"
 
-# Init logger and load model once (with explicit crash surface)
-logger.remove()
-logger.add(lambda msg: print(msg, end=''), level="INFO")
-
-try:
-    msg = auto_load_models()
-    logger.info(msg)
-except Exception as e:
-    print("\n[BOOT][ERROR] auto_load_models() failed:")
-    traceback.print_exc()
-    with gr.Blocks(title="Foley Studio · Boot Error") as demo:
-        gr.Markdown("### ❌ Boot failure\n```\n" + "".join(traceback.format_exc()) + "\n```")
-        demo.launch(server_name="0.0.0.0")
-    raise
-
-# ========= Preprocessing =========
+# ========= Pre/Post-processing =========
 def preprocess_video(in_path: str) -> Tuple[str, float]:
-    """
-    - Trim to <= MAX_SECS
-    - Downscale to TARGET_H (keep AR), strip audio
-    - Return processed mp4 path and final duration
-    """
     dur = ffprobe_duration(in_path)
     if dur == 0:
         raise RuntimeError("Unable to read the video duration.")
@@ -265,68 +235,61 @@ def preprocess_video(in_path: str) -> Tuple[str, float]:
     processed = temp_dir / "proc.mp4"
     trim_args = ["-t", str(MAX_SECS)] if dur > MAX_SECS else []
 
-    # Normalize & remove audio
     sh(" ".join([
-        "ffmpeg", "-y", "-i", f"\"{in_path}\"",
-        *trim_args,
-        "-an",
-        "-vcodec", "libx264", "-preset", "veryfast", "-crf", "23",
-        "-movflags", "+faststart",
-        f"\"{trimmed}\""
+        "ffmpeg", "-y", "-i", f"\"{in_path}\"", *trim_args,
+        "-an", "-vcodec", "libx264", "-preset", "veryfast", "-crf", "23",
+        "-movflags", "+faststart", f"\"{trimmed}\""
     ]))
-
-    # Downscale to TARGET_H; ensure mod2 width
     vf = f"scale=-2:{TARGET_H}:flags=bicubic"
     sh(" ".join([
         "ffmpeg", "-y", "-i", f"\"{trimmed}\"",
-        "-vf", f"\"{vf}\"",
-        "-an",
+        "-vf", f"\"{vf}\"", "-an",
         "-vcodec", "libx264", "-profile:v", "baseline", "-level", "3.1",
-        "-pix_fmt", "yuv420p",
-        "-preset", "veryfast", "-crf", "24",
-        "-movflags", "+faststart",
-        f"\"{processed}\""
+        "-pix_fmt", "yuv420p", "-preset", "veryfast", "-crf", "24",
+        "-movflags", "+faststart", f"\"{processed}\""
     ]))
-
-    final_dur = min(dur, float(MAX_SECS))
-    return str(processed), final_dur
+    return str(processed), min(dur, float(MAX_SECS))
 
-# ========= ZeroGPU marker (so startup checker is happy) =========
-@GPU(duration=5)
-def _zgpu_marker(_: int = 0) -> int:
-    """No-op; only to advertise that this Space has GPU-decorated functions."""
-    return _
+def mux_audio_with_video(video_path: str, audio_path: str) -> str:
+    out_path = Path(tempfile.mkdtemp(prefix="mux_")) / "with_foley.mp4"
+    sh(" ".join([
+        "ffmpeg", "-y", "-i", f"\"{video_path}\"", "-i", f"\"{audio_path}\"",
+        "-map", "0:v:0", "-map", "1:a:0", "-c:v", "copy", "-c:a", "aac", "-b:a", "192k",
+        "-shortest", f"\"{out_path}\""
+    ]))
+    return str(out_path)
 
-# ========= Inference (ZeroGPU) =========
+# ========= Inference (GPU-decorated) =========
 @GPU(duration=ZEROGPU_DURATION)
-@torch.inference_mode()
 def run_model(video_path: str, prompt_text: str,
               guidance_scale: float = 4.5,
               num_inference_steps: int = 50,
               sample_nums: int = 1):
     """
-    Native inference (no shell). Returns ([wav_paths], sample_rate).
+    ZeroGPU-safe native pipeline. Returns ([wav_paths], sample_rate).
     """
-    if _model_dict is None or _cfg is None:
-        raise RuntimeError("Model not loaded yet.")
+    # Lazy-load the model the first time this runs
+    if _model_dict is None:
+        msg = auto_load_models()
+        logger.info(msg)
+
+    # Heavy imports (after model load is prepared)
+    import torchaudio
+    from hunyuanvideo_foley.utils.feature_utils import feature_process
+    from hunyuanvideo_foley.utils.model_utils import denoise_process
 
     text_prompt = (prompt_text or "").strip()
 
-    # Extract features
     visual_feats, text_feats, audio_len_s = feature_process(
         video_path, text_prompt, _model_dict, _cfg
    )
-
-    # Generate audio (B x C x T)
     logger.info(f"Generating {sample_nums} sample(s)...")
     audio_batch, sr = denoise_process(
         visual_feats, text_feats, audio_len_s, _model_dict, _cfg,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale, num_inference_steps=num_inference_steps,
         batch_size=sample_nums
     )
 
-    # Save each sample as WAV
     out_dir = OUT_DIR / f"job_{uuid.uuid4().hex[:8]}"
     out_dir.mkdir(parents=True, exist_ok=True)
     wav_paths = []
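Because `sh()` joins a shell string, the ffmpeg arguments above need manual quoting. An argv-list equivalent of the trim-and-scale step avoids that entirely — a sketch, not part of this commit:

```python
import subprocess

def trim_and_scale(src: str, dst: str, max_secs: int = 15, target_h: int = 480) -> None:
    # Same recipe as the hunk: trim, strip audio, downscale to an even width.
    subprocess.run([
        "ffmpeg", "-y", "-i", src,
        "-t", str(max_secs), "-an",
        "-vf", f"scale=-2:{target_h}:flags=bicubic",
        "-vcodec", "libx264", "-pix_fmt", "yuv420p",
        "-preset", "veryfast", "-crf", "24",
        "-movflags", "+faststart", dst,
    ], check=True)
```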
@@ -334,23 +297,8 @@ def run_model(video_path: str, prompt_text: str,
         wav_p = out_dir / f"generated_audio_{i+1}.wav"
         torchaudio.save(str(wav_p), audio_batch[i], sr)
         wav_paths.append(str(wav_p))
-
     return wav_paths, sr
 
-# ========= Optional: Mux Foley back to video =========
-def mux_audio_with_video(video_path: str, audio_path: str) -> str:
-    out_path = Path(tempfile.mkdtemp(prefix="mux_")) / "with_foley.mp4"
-    sh(" ".join([
-        "ffmpeg", "-y",
-        "-i", f"\"{video_path}\"",
-        "-i", f"\"{audio_path}\"",
-        "-map", "0:v:0", "-map", "1:a:0",
-        "-c:v", "copy", "-c:a", "aac", "-b:a", "192k",
-        "-shortest",
-        f"\"{out_path}\""
-    ]))
-    return str(out_path)
-
 # ========= UI Handlers =========
 def single_generate(video: str, prompt: str, want_mux: bool, project_name: str):
     history = []
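The save loop assumes `denoise_process` returns `audio_batch` shaped (batch, channels, samples). A minimal reproduction of the write path with silent audio:

```python
import torch, torchaudio

sr = 48000
audio_batch = torch.zeros(2, 1, sr * 3)  # two silent 3 s mono clips, (B, C, T)
for i in range(audio_batch.shape[0]):
    torchaudio.save(f"generated_audio_{i+1}.wav", audio_batch[i], sr)
```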
@@ -361,18 +309,12 @@ def single_generate(video: str, prompt: str, want_mux: bool, project_name: str):
         pre_path, final_dur = preprocess_video(video)
 
         history.append(["Inference", "ZeroGPU native pipeline"])
-        wav_list, sr = run_model(
-            pre_path, prompt or "", guidance_scale=4.5, num_inference_steps=50, sample_nums=1
-        )
+        wav_list, sr = run_model(pre_path, prompt or "", guidance_scale=4.5, num_inference_steps=50, sample_nums=1)
         if not wav_list:
             raise RuntimeError("No audio produced.")
         wav = wav_list[0]
 
-        muxed = None
-        if want_mux:
-            history.append(["Mux", "Merging foley with video"])
-            muxed = mux_audio_with_video(pre_path, wav)
-
+        muxed = mux_audio_with_video(pre_path, wav) if want_mux else None
         history.append(["Done", f"OK · ~{final_dur:.1f}s"])
         return wav, muxed, f"✅ Completed (~{final_dur:.1f}s)", history
     except Exception as e:
@@ -431,35 +373,19 @@ THEME_CSS = f"""
   color: white;
   box-shadow: 0 10px 30px rgba(0,0,0,.35);
 }}
-#hero h1 {{
-  margin: 0 0 6px 0;
-  font-size: 20px;
-  font-weight: 700;
-  letter-spacing: .2px;
-}}
-#hero p {{
-  margin: 0;
-  opacity: .95;
-}}
+#hero h1 {{ margin: 0 0 6px 0; font-size: 20px; font-weight: 700; letter-spacing: .2px; }}
+#hero p {{ margin: 0; opacity: .95; }}
 .gr-tabitem, .gr-block.gr-group, .gr-panel {{
   background: var(--panel);
   border-radius: 16px !important;
   box-shadow: 0 6px 18px rgba(0,0,0,.28);
   border: 1px solid rgba(255,255,255,.04);
 }}
-.gr-button {{
-  border-radius: 12px !important;
-  border: 1px solid rgba(255,255,255,.08) !important;
-}}
+.gr-button {{ border-radius: 12px !important; border: 1px solid rgba(255,255,255,.08) !important; }}
 .gradio-container .tabs .tab-nav button.selected {{
-  background: rgba(255,255,255,.06);
-  border-radius: 12px;
-  border: 1px solid rgba(255,255,255,.08);
-}}
-.badge {{
-  display:inline-block; padding:2px 8px; border-radius:999px;
-  background: rgba(255,255,255,.12); color:#fff; font-size:12px
+  background: rgba(255,255,255,.06); border-radius: 12px; border: 1px solid rgba(255,255,255,.08);
 }}
+.badge {{ display:inline-block; padding:2px 8px; border-radius:999px; background: rgba(255,255,255,.12); color:#fff; font-size:12px }}
 """
 
 with gr.Blocks(css=THEME_CSS, title=APP_TITLE, analytics_enabled=False) as demo:
@@ -475,16 +401,10 @@ with gr.Blocks(css=THEME_CSS, title=APP_TITLE, analytics_enabled=False) as demo:
     with gr.Tabs():
         with gr.Tab("🎬 Single Clip"):
             with gr.Group():
-                project_name = gr.Textbox(
-                    label="Project name (optional)",
-                    placeholder="Enter a short label for this clip"
-                )
+                project_name = gr.Textbox(label="Project name (optional)", placeholder="Enter a short label for this clip")
                 with gr.Row():
                     v_single = gr.Video(label=f"Video (≤ ~{MAX_SECS}s recommended)")
-                    p_single = gr.Textbox(
-                        label="Sound prompt (optional)",
-                        placeholder="e.g., soft footsteps on wood, light rain, indoor reverb"
-                    )
+                    p_single = gr.Textbox(label="Sound prompt (optional)", placeholder="e.g., soft footsteps on wood, light rain, indoor reverb")
                 with gr.Row():
                     want_mux_single = gr.Checkbox(value=True, label="Mux foley into MP4 output")
                     run_btn = gr.Button("Generate", variant="primary")
@@ -492,10 +412,7 @@ with gr.Blocks(css=THEME_CSS, title=APP_TITLE, analytics_enabled=False) as demo:
                 out_audio = gr.Audio(label=f"Generated Foley ({SR//1000} kHz WAV)", type="filepath")
                 out_mux = gr.Video(label="Video + Foley (MP4)", visible=True)
                 status_md = gr.Markdown()
-                history_table = gr.Dataframe(
-                    headers=["Step", "Note"], datatype=["str","str"],
-                    interactive=False, wrap=True, label="Activity"
-                )
+                history_table = gr.Dataframe(headers=["Step", "Note"], datatype=["str","str"], interactive=False, wrap=True, label="Activity")
 
                 run_btn.click(
                     single_generate,
@@ -509,16 +426,9 @@ with gr.Blocks(css=THEME_CSS, title=APP_TITLE, analytics_enabled=False) as demo:
                 want_mux_b = gr.Checkbox(value=True, label="Mux each output")
                 go_b = gr.Button("Run batch-lite")
                 batch_status = gr.Markdown()
-                batch_log = gr.Dataframe(
-                    headers=["Step","Note"], datatype=["str","str"],
-                    interactive=False, wrap=True, label="Batch Log"
-                )
+                batch_log = gr.Dataframe(headers=["Step","Note"], datatype=["str","str"], interactive=False, wrap=True, label="Batch Log")
 
-                go_b.click(
-                    batch_lite_generate,
-                    inputs=[files, prompt_b, want_mux_b],
-                    outputs=[batch_status, batch_log]
-                )
+                go_b.click(batch_lite_generate, inputs=[files, prompt_b, want_mux_b], outputs=[batch_status, batch_log])
 
         with gr.Tab("ℹ️ Tips"):
             gr.Markdown(f"""
@@ -532,16 +442,19 @@ with gr.Blocks(css=THEME_CSS, title=APP_TITLE, analytics_enabled=False) as demo:
     - Enable **Mux** to get a ready MP4 with the generated foley track.
     """)
 
-# ---- Health endpoint & guarded launch ---------------------------------------
+# Health endpoint
 try:
     from fastapi import FastAPI
-    fastapi_app = demo.app  # Gradio's FastAPI app
+    fastapi_app = demo.app
     @fastapi_app.get("/health")
     def _health():
-        return {"ok": True, "model_loaded": _model_dict is not None, "device": str(_device)}
+        return {"ok": True, "model_loaded": _model_dict is not None, "device": str(_device) if _device else None}
 except Exception:
     pass
 
+# Launch
+logger.remove()
+logger.add(lambda msg: print(msg, end=''), level="INFO")
 try:
     demo.queue(max_size=24).launch(server_name="0.0.0.0")
 except Exception:
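Once the Space is up, the new /health route can be probed directly; a quick check, assuming Gradio's default local port:

```python
import json, urllib.request

# Expects {"ok": true, "model_loaded": ..., "device": ...}
with urllib.request.urlopen("http://localhost:7860/health", timeout=5) as r:
    print(json.load(r))
```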
 