James Zhou committed on
Commit
4ab7964
·
1 Parent(s): 9867d34

[update] app

Browse files
Files changed (3) hide show
  1. .gitignore +155 -0
  2. app.py +53 -9
  3. requirements.txt +1 -0
.gitignore ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # ==========================================
132
+ # Custom settings
133
+ # ==========================================
134
+
135
+ # For MacOS
136
+ .DS_Store
137
+
138
+ # For IDEs
139
+ .idea/
140
+ .vscode/
141
+ pyrightconfig.json
142
+ .cursorignore
143
+
144
+ # For global settings
145
+ __*/
146
+ **/my_*
147
+ tmp*.*
148
+ .my*
149
+ # Model checkpoints
150
+ *.pt
151
+ *.ckpt
152
+ *.pth
153
+ *.safetensors
154
+
155
+ CLAUDE.md
app.py CHANGED
@@ -7,6 +7,8 @@ from loguru import logger
7
  from typing import Optional, Tuple
8
  import random
9
  import numpy as np
 
 
10
 
11
  from hunyuanvideo_foley.utils.model_utils import load_model
12
  from hunyuanvideo_foley.utils.feature_utils import feature_process
@@ -22,6 +24,31 @@ device = None
22
  MODEL_PATH = os.environ.get("HIFI_FOLEY_MODEL_PATH", "./pretrained_models/")
23
  CONFIG_PATH = "configs/hunyuanvideo-foley-xxl.yaml"
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  def setup_device(device_str: str = "auto", gpu_id: int = 0) -> torch.device:
26
  """Setup computing device"""
27
  if device_str == "auto":
@@ -48,27 +75,44 @@ def auto_load_models() -> str:
48
  global model_dict, cfg, device
49
 
50
  try:
 
51
  if not os.path.exists(MODEL_PATH):
52
- return f" Model file not found: {MODEL_PATH}"
 
 
 
 
 
53
  if not os.path.exists(CONFIG_PATH):
54
- return f" Config file not found: {CONFIG_PATH}"
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  # Use GPU by default
57
  device = setup_device("auto", 0)
58
 
59
  # Load model
60
- logger.info("Auto-loading model...")
61
- logger.info(f"Model path: {MODEL_PATH}")
62
- logger.info(f"Config path: {CONFIG_PATH}")
63
 
64
  model_dict, cfg = load_model(MODEL_PATH, CONFIG_PATH, device)
65
 
66
- logger.info("✅ Model loaded successfully!")
67
- return "✅ Model loaded successfully!"
68
 
69
  except Exception as e:
70
- logger.error(f"Model loading failed: {str(e)}")
71
- return f"❌ Model loading failed: {str(e)}"
72
 
73
  def infer_single_video(
74
  video_file,
 
7
  from typing import Optional, Tuple
8
  import random
9
  import numpy as np
10
+ from huggingface_hub import snapshot_download
11
+ import shutil
12
 
13
  from hunyuanvideo_foley.utils.model_utils import load_model
14
  from hunyuanvideo_foley.utils.feature_utils import feature_process
 
24
  MODEL_PATH = os.environ.get("HIFI_FOLEY_MODEL_PATH", "./pretrained_models/")
25
  CONFIG_PATH = "configs/hunyuanvideo-foley-xxl.yaml"
26
 
27
+ def download_model_from_hf(repo_id: str = "tencent/HunyuanVideo-Foley", local_dir: str = "./pretrained_models") -> str:
28
+ """从HuggingFace自动下载模型到本地目录"""
29
+ try:
30
+ logger.info(f"开始从HuggingFace下载模型:{repo_id}")
31
+ logger.info(f"下载目标目录:{local_dir}")
32
+
33
+ # 确保本地目录存在
34
+ os.makedirs(local_dir, exist_ok=True)
35
+
36
+ # 下载整个仓库
37
+ snapshot_download(
38
+ repo_id=repo_id,
39
+ local_dir=local_dir,
40
+ resume_download=True, # 支持断点续传
41
+ local_files_only=False, # 允许从网络下载
42
+ )
43
+
44
+ logger.info(f"✅ 模型下载成功!保存在:{local_dir}")
45
+ return f"✅ 模型从 {repo_id} 下载成功!"
46
+
47
+ except Exception as e:
48
+ error_msg = f"❌ 模型下载失败:{str(e)}"
49
+ logger.error(error_msg)
50
+ return error_msg
51
+
52
  def setup_device(device_str: str = "auto", gpu_id: int = 0) -> torch.device:
53
  """Setup computing device"""
54
  if device_str == "auto":
 
75
  global model_dict, cfg, device
76
 
77
  try:
78
+ # 如果模型路径不存在,尝试从HuggingFace下载
79
  if not os.path.exists(MODEL_PATH):
80
+ logger.info(f"模型路径 {MODEL_PATH} 不存在,开始从HuggingFace下载...")
81
+ download_result = download_model_from_hf(local_dir=MODEL_PATH.rstrip('/'))
82
+ if "失败" in download_result:
83
+ return download_result
84
+
85
+ # 如果配置文件不存在,也尝试从HuggingFace下载
86
  if not os.path.exists(CONFIG_PATH):
87
+ logger.info(f"配置文件 {CONFIG_PATH} 不存在,尝试从HuggingFace下载...")
88
+ # 如果是从pretrained_models/配置路径,也尝试下载
89
+ if CONFIG_PATH.startswith("configs/"):
90
+ config_dir = os.path.dirname(CONFIG_PATH)
91
+ if not os.path.exists(config_dir):
92
+ download_result = download_model_from_hf(local_dir="./")
93
+ if "失败" in download_result:
94
+ return download_result
95
+
96
+ # 最后检查配置文件是否存在
97
+ if not os.path.exists(CONFIG_PATH):
98
+ return f"❌ 配置文件未找到: {CONFIG_PATH}"
99
 
100
  # Use GPU by default
101
  device = setup_device("auto", 0)
102
 
103
  # Load model
104
+ logger.info("正在加载模型...")
105
+ logger.info(f"模型路径: {MODEL_PATH}")
106
+ logger.info(f"配置路径: {CONFIG_PATH}")
107
 
108
  model_dict, cfg = load_model(MODEL_PATH, CONFIG_PATH, device)
109
 
110
+ logger.info("✅ 模型加载成功!")
111
+ return "✅ 模型加载成功!"
112
 
113
  except Exception as e:
114
+ logger.error(f"模型加载失败: {str(e)}")
115
+ return f"❌ 模型加载失败: {str(e)}"
116
 
117
  def infer_single_video(
118
  video_file,
requirements.txt CHANGED
@@ -13,6 +13,7 @@ accelerate
13
  # Transformers and NLP
14
  git+https://github.com/huggingface/[email protected]
15
  sentencepiece
 
16
 
17
  # Audio processing
18
  git+https://github.com/descriptinc/audiotools
 
13
  # Transformers and NLP
14
  git+https://github.com/huggingface/[email protected]
15
  sentencepiece
16
+ huggingface_hub
17
 
18
  # Audio processing
19
  git+https://github.com/descriptinc/audiotools