add use_auth_token
app.py CHANGED
@@ -54,9 +54,8 @@ def load_model():
     tokenizer = AutoTokenizer.from_pretrained(
         MODEL_NAME,
         trust_remote_code=True,
-        token=hf_token
-
-    )
+        token=hf_token
+    )

     print("Loading model...")
     # Load model with token
@@ -65,9 +64,8 @@ def load_model():
         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
         device_map="auto" if torch.cuda.is_available() else None,
         trust_remote_code=True,
-        token=hf_token
-
-    )
+        token=hf_token
+    )

     print("✅ Osmosis Structure model loaded successfully!")
     return True
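
For context, a minimal sketch of what load_model() plausibly looks like with these two hunks applied. MODEL_NAME, hf_token, and the model class are defined elsewhere in app.py and are not shown in this diff; the repo id, the HF_TOKEN secret lookup, the use of AutoModelForCausalLM, and the module-level assignment below are assumptions for illustration, not the Space's confirmed code.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumptions for illustration: the real repo id and token handling live elsewhere in app.py.
MODEL_NAME = "osmosis-ai/Osmosis-Structure-0.6B"  # assumed repo id, not shown in the diff
hf_token = os.environ.get("HF_TOKEN")             # assumed: token provided as a Space secret

model = None
tokenizer = None

def load_model():
    global model, tokenizer

    # Pass the token so the tokenizer can be fetched from a gated or private repo.
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        token=hf_token
    )

    print("Loading model...")
    # Load model with token
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
        token=hf_token
    )

    print("✅ Osmosis Structure model loaded successfully!")
    return True

Note that although the commit title says use_auth_token, the code passes token=, which is the current name of the parameter in recent transformers releases; use_auth_token= is the older, deprecated spelling, and both forward the value to the Hugging Face Hub download.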