Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,8 +1,10 @@
 import spaces
 import os
 
+os.putenv('TORCH_LINALG_PREFER_CUSOLVER','1')
+os.putenv('PYTORCH_CUDA_ALLOC_CONF','max_split_size_mb:128')
 os.environ["SAFETENSORS_FAST_GPU"] = "1"
-os.putenv(
+os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
 
 import gradio as gr
 import numpy as np
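A note on the environment-variable lines added in this hunk: `os.putenv()` writes to the C-level process environment but does not update `os.environ`, so pure-Python readers of `os.environ` (for example `huggingface_hub` when it checks `HF_HUB_ENABLE_HF_TRANSFER`) may never see these values. Below is a minimal sketch of the same settings routed through `os.environ`, which updates both sides because assignments to it call `putenv()` internally; it assumes the assignments happen before `torch`, `gradio`, and `diffusers` are imported so the CUDA allocator and hub client pick them up at init time.

```python
import os

# Assigning to os.environ updates the Python mapping and, via putenv(),
# the C-level environment, so both C++ readers (CUDA caching allocator)
# and Python readers (huggingface_hub) see the values.
os.environ["TORCH_LINALG_PREFER_CUSOLVER"] = "1"                 # prefer cuSOLVER for torch.linalg routines
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"  # limit allocator block splitting to reduce fragmentation
os.environ["SAFETENSORS_FAST_GPU"] = "1"                         # unchanged from the original script
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"                    # accelerated downloads; needs the hf_transfer package installed

# Heavy imports only after the environment is in place.
import gradio as gr
import numpy as np
import torch
```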
@@ -15,8 +17,8 @@ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
-
-
+torch.backends.cuda.preferred_blas_library="cublas"
+torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
 
 from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, AutoencoderKL
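One caveat on the two `+` lines in the second hunk, assuming a recent PyTorch release: `torch.backends.cuda.preferred_blas_library` and `torch.backends.cuda.preferred_linalg_library` are exposed there as functions, so assigning a string to them only rebinds the module attribute and does not change any backend preference. A sketch of the call form as those releases document it (string arguments assumed to be accepted):

```python
import torch

# In recent PyTorch these are functions: call them with a backend name to
# record a preference, and with no argument to read back the current one.
# Assigning a string to the attribute would merely shadow the function object.
torch.backends.cuda.preferred_blas_library("cublas")      # BLAS backend: cuBLAS (alternative: "cublaslt")
torch.backends.cuda.preferred_linalg_library("cusolver")  # linalg backend: cuSOLVER (alternatives: "default", "magma")

print(torch.backends.cuda.preferred_blas_library())       # currently preferred BLAS backend
print(torch.backends.cuda.preferred_linalg_library())     # currently preferred linalg backend
```

The `TORCH_LINALG_PREFER_CUSOLVER=1` variable added in the first hunk appears to express the same cuSOLVER preference through the environment, so the linalg side is likely covered either way.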