Spaces:
Running
on
A10G
Running
on
A10G
MekkCyber
committed on
Commit
·
00dfc3d
1
Parent(s):
47d6fc0
small update
Browse files
app.py
CHANGED
|
@@ -107,7 +107,7 @@ It's quantized using the BitsAndBytes library to 4-bit using the [bnb-my-repo](h
|
|
| 107 |
|
| 108 |
# Append original README content if available
|
| 109 |
if original_readme and not original_readme.isspace():
|
| 110 |
-
model_card += "\n\n# Original Model Information\n" + original_readme
|
| 111 |
|
| 112 |
return model_card
|
| 113 |
|
|
@@ -147,7 +147,7 @@ def quantize_model(
|
|
| 147 |
quantization_config=quantization_config,
|
| 148 |
device_map="cpu",
|
| 149 |
use_auth_token=auth_token.token,
|
| 150 |
-
torch_dtype=
|
| 151 |
)
|
| 152 |
progress(0.33, desc="Quantizing")
|
| 153 |
|
|
|
|
| 107 |
|
| 108 |
# Append original README content if available
|
| 109 |
if original_readme and not original_readme.isspace():
|
| 110 |
+
model_card += "\n\n# 📄 Original Model Information\n\n" + original_readme
|
| 111 |
|
| 112 |
return model_card
|
| 113 |
|
|
|
|
| 147 |
quantization_config=quantization_config,
|
| 148 |
device_map="cpu",
|
| 149 |
use_auth_token=auth_token.token,
|
| 150 |
+
torch_dtype="auto",
|
| 151 |
)
|
| 152 |
progress(0.33, desc="Quantizing")
|
| 153 |
|