Commit "Update convert.py": the file convert.py was changed (+7 lines, −0 lines).
|
@@ -14,11 +14,18 @@ from huggingface_hub import CommitInfo, Discussion, HfApi, hf_hub_download
|
|
| 14 |
from huggingface_hub.file_download import repo_folder_name
|
| 15 |
from diffusers import StableDiffusionXLPipeline
|
| 16 |
from transformers import CONFIG_MAPPING
|
|
|
|
| 17 |
|
| 18 |
|
| 19 |
# Template for the auto-opened PR's commit message; the `{}` placeholder is
# filled with the target model repo id at call time (via str.format).
# NOTE(review): the leading space before "This" looks unintentional — confirm
# before changing, since it is part of the emitted commit message.
COMMIT_MESSAGE = " This PR adds fp32 and fp16 weights in safetensors format to {}"
|
|
|
|
| 20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
|
|
|
| 22 |
def convert_single(model_id: str, filename: str, folder: str, progress: Any, token: str):
|
| 23 |
progress(0, desc="Downloading model")
|
| 24 |
local_file = os.path.join(model_id, filename)
|
|
|
|
| 14 |
from huggingface_hub.file_download import repo_folder_name
|
| 15 |
from diffusers import StableDiffusionXLPipeline
|
| 16 |
from transformers import CONFIG_MAPPING
|
| 17 |
+
import spaces
|
| 18 |
|
| 19 |
|
| 20 |
COMMIT_MESSAGE = " This PR adds fp32 and fp16 weights in safetensors format to {}"
|
| 21 |
+
# Pick the compute device and a matching default dtype at import time:
# bfloat16 on CUDA GPUs, float32 on CPU.
if torch.cuda.is_available():
    device = "cuda"
    torch_dtype = torch.bfloat16
else:
    device = "cpu"
    torch_dtype = torch.float32
|
| 27 |
|
| 28 |
+
@spaces.GPU
|
| 29 |
def convert_single(model_id: str, filename: str, folder: str, progress: Any, token: str):
|
| 30 |
progress(0, desc="Downloading model")
|
| 31 |
local_file = os.path.join(model_id, filename)
|