harry900000 committed
Commit 17c2360 · 1 Parent(s): 6d7fc1c

change checkpoints path

Files changed (3)
  1. app.py +22 -15
  2. download_checkpoints.py +12 -0
  3. requirements.txt +1 -6
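
In short, the checkpoint directory moves from a path relative to the app code to a fixed location under /data, which on a Hugging Face Space is normally the persistent-storage mount. A rough sketch of the before/after (illustration only; only PWD and CHECKPOINTS_PATH come from the diff below):

import os

PWD = os.path.dirname(__file__)
old_checkpoint_dir = os.path.join(PWD, "checkpoints")  # before: lives inside the Space repo
CHECKPOINTS_PATH = "/data/checkpoints"                  # after: assumed persistent /data volume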
app.py CHANGED
@@ -5,26 +5,33 @@ import gradio as gr
 import spaces
 
 PWD = os.path.dirname(__file__)
+CHECKPOINTS_PATH = "/data/checkpoints"
 
-import subprocess
+# import subprocess
 
 # copy cudnn files
-subprocess.run("cp /usr/local/lib/python3.10/site-packages/nvidia/cudnn/include/*.h /usr/local/cuda/include", env={}, shell=True)
-subprocess.run("cp /usr/local/lib/python3.10/site-packages/nvidia/cudnn/lib/*.so* /usr/local/cuda/lib64", env={}, shell=True)
+# subprocess.run("cp /usr/local/lib/python3.10/site-packages/nvidia/cudnn/include/*.h /usr/local/cuda/include", env={}, shell=True)
+# subprocess.run("cp /usr/local/lib/python3.10/site-packages/nvidia/cudnn/lib/*.so* /usr/local/cuda/lib64", env={}, shell=True)
 
 # setup env
-os.environ["CUDA_HOME"] = "/usr/local/cuda"
-os.environ["LD_LIBRARY_PATH"] = "$CUDA_HOME/lib:$CUDA_HOME/lib64:$LD_LIBRARY_PATH"
-os.environ["PATH"] = "$CUDA_HOME/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:$PATH"
+# os.environ["CUDA_HOME"] = "/usr/local/cuda"
+# os.environ["LD_LIBRARY_PATH"] = "$CUDA_HOME/lib:$CUDA_HOME/lib64:$LD_LIBRARY_PATH"
+# os.environ["PATH"] = "$CUDA_HOME/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:$PATH"
+
+os.system("apt-get update && apt-get install -qqy libmagickwand-dev")
 
 # install packages
-subprocess.run("pip install flash-attn --no-build-isolation", env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"}, shell=True)
-subprocess.run("pip install vllm==0.9.0", env={"VLLM_ATTENTION_BACKEND": "FLASHINFER"}, shell=True)
-subprocess.run(
-    "pip install transformer-engine[pytorch] --no-build-isolation",
-    env={"PATH": os.environ["PATH"], "LD_LIBRARY_PATH": os.environ["LD_LIBRARY_PATH"], "CUDA_HOME": os.environ["CUDA_HOME"]},
-    shell=True,
+os.system('export FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE && pip install "flash-attn<=2.7.4.post1" --no-build-isolation')
+os.system(
+    "pip install https://download.pytorch.org/whl/cu128/flashinfer/flashinfer_python-0.2.5%2Bcu128torch2.7-cp38-abi3-linux_x86_64.whl"
 )
+os.system('export VLLM_ATTENTION_BACKEND=FLASHINFER && pip install "vllm==0.9.0"')
+os.system('pip install "decord==0.6.0"')
+
+os.system("ln -sf $CONDA_PREFIX/lib/python3.10/site-packages/nvidia/*/include/* $CONDA_PREFIX/include/")
+os.system("ln -sf $CONDA_PREFIX/lib/python3.10/site-packages/nvidia/*/include/* $CONDA_PREFIX/include/python3.10")
+
+os.system('pip install "transformer-engine[pytorch]"')
 
 from test_environment import main as check_environment
 
@@ -48,8 +55,8 @@ except Exception as e:
 # download checkpoints
 from download_checkpoints import main as download_checkpoints
 
-os.makedirs(os.path.join(PWD, "checkpoints"), exist_ok=True)
-download_checkpoints(hf_token="", output_dir=os.path.join(PWD, "checkpoints"), model="7b_av")
+os.makedirs(CHECKPOINTS_PATH, exist_ok=True)
+download_checkpoints(hf_token="", output_dir=CHECKPOINTS_PATH, model="7b_av")
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false"  # Workaround to suppress MP warning
 
@@ -292,7 +299,7 @@ def generate_video(
             "hdmap": {"control_weight": 0.3, "input_control": hdmap_video_input},
             "lidar": {"control_weight": 0.7, "input_control": lidar_video_input},
         },
-        checkpoint_dir=os.path.join(PWD, "checkpoints"),
+        checkpoint_dir=CHECKPOINTS_PATH,
         prompt=prompt,
         negative_prompt=negative_prompt,
         sigma_max=80,
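
A side note on the install commands: the commit swaps subprocess.run(..., env={...}) for os.system with inline exports, so each pip call now inherits the full process environment rather than a mostly empty one. If the env-dict style were kept instead, an equivalent call might look like this sketch (assumption only, not part of the commit):

import os
import subprocess

# Same effect as `export FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE && pip install ...`,
# but passing the current environment plus the extra flag explicitly.
env = dict(os.environ, FLASH_ATTENTION_SKIP_CUDA_BUILD="TRUE")
subprocess.run(
    'pip install "flash-attn<=2.7.4.post1" --no-build-isolation',
    shell=True,
    check=True,  # raise if the install fails instead of continuing silently
    env=env,
)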
download_checkpoints.py CHANGED
@@ -115,6 +115,18 @@ def main(hf_token: str = os.environ.get("HF_TOKEN"), output_dir: str = "./checkp
 
     print(f"Found {len(checkpoint_vars)} checkpoints to download")
     print(checkpoint_vars)
+    checkpoint_vars = [
+        # "nvidia/Cosmos-Guardrail1",
+        # "nvidia/Cosmos-Tokenize1-CV8x8x8-720p",
+        # "nvidia/Cosmos-Transfer1-7B-Sample-AV-Single2MultiView",
+        # "nvidia/Cosmos-Transfer1-7B-Sample-AV",
+        # "nvidia/Cosmos-UpsamplePrompt1-12B-Transfer",
+        # "depth-anything/Depth-Anything-V2-Small-hf",
+        # "IDEA-Research/grounding-dino-tiny",
+        # "meta-llama/Llama-Guard-3-8B",
+        # "facebook/sam2-hiera-large",
+        # "google-t5/t5-11b",
+    ]
 
     # Download each checkpoint
     for checkpoint in checkpoint_vars:
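
Worth noting: every entry in the injected checkpoint_vars list is commented out, so the assignment replaces the discovered list with an empty one and the download loop below it becomes a no-op. If that is meant to be temporary, one switchable form could be the following sketch (the SKIP_CHECKPOINT_DOWNLOAD flag is hypothetical, not in the repo):

import os

# Hypothetical guard: only blank the list when explicitly requested,
# otherwise keep the checkpoints discovered by main() above.
if os.environ.get("SKIP_CHECKPOINT_DOWNLOAD") == "1":
    checkpoint_vars = []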
requirements.txt CHANGED
@@ -1,12 +1,7 @@
 # essentials
+wheel
 ninja
 
-# torch
-# torch==2.7.0
-# torchvision==0.22.0
-# torch==2.5.1
-# torchvision==0.20.1
-
 # cosmos-transfer1
 git+https://github.com/nvidia-cosmos/cosmos-transfer1
 