Commit: class Arguments:
app.py CHANGED
@@ -16,10 +16,28 @@ from hyvideo.constants import NEGATIVE_PROMPT
 
 from huggingface_hub import snapshot_download
 
+if torch.cuda.device_count() == 0:
+    class Arguments:
+        def __init__(self, input_dir, output_dir):
+            self.input_dir = input_dir
+            self.output_dir = output_dir
+
+    # Create the object
+    args = Arguments("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
+    preprocess_text_encoder_tokenizer(args)
+
 if torch.cuda.device_count() > 0:
     snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
     snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)
-
+
+    class Args:
+        def __init__(self, input_dir, output_dir):
+            self.input_dir = input_dir
+            self.output_dir = output_dir
+
+    # Create the object
+    args = Args("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
+    preprocess_text_encoder_tokenizer(args)
     snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
 
 def initialize_model(model_path):
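For reference: the `Arguments`/`Args` classes added in the two branches exist only to mimic an argparse-style namespace, so that `preprocess_text_encoder_tokenizer(args)` can read `input_dir` and `output_dir` as attributes. A minimal sketch of the same pattern using the standard library instead of an ad-hoc class, assuming the function touches only those two attributes:

    # Hypothetical equivalent using types.SimpleNamespace; assumes
    # preprocess_text_encoder_tokenizer reads only args.input_dir and args.output_dir.
    from types import SimpleNamespace

    args = SimpleNamespace(
        input_dir="ckpts/llava-llama-3-8b-v1_1-transformers",  # LLaVA checkpoint downloaded above
        output_dir="ckpts/text_encoder",                       # destination for the extracted text encoder
    )
    preprocess_text_encoder_tokenizer(args)

A single shared definition like this would also avoid declaring two identical classes (`Arguments` and `Args`) in the two mutually exclusive branches.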