no need to save GPU mem
- app-img2img.py +3 -1
- app-txt2img.py +2 -1
app-img2img.py CHANGED
@@ -31,7 +31,9 @@ HEIGHT = 512
 mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 torch_device = device
-
+
+# change to torch.float16 to save GPU memory
+torch_dtype = torch.float32
 
 print(f"TIMEOUT: {TIMEOUT}")
 print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
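The device and torch_dtype configured above are normally what the rest of the app passes to the diffusers pipeline when it is loaded. The snippet below is only a minimal sketch of that pattern, under the assumption that a DiffusionPipeline and the SimianLuo/LCM_Dreamshaper_v7 checkpoint are used; the actual pipeline setup is not part of this diff.

import torch
from diffusers import DiffusionPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch_dtype = torch.float32  # or torch.float16 to save GPU memory

# Model id is assumed for illustration; substitute whatever the app actually loads.
pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    torch_dtype=torch_dtype,
)
pipe.to(device)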
app-txt2img.py CHANGED
@@ -31,7 +31,8 @@ HEIGHT = 512
 mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 torch_device = device
-
+# change to torch.float16 to save GPU memory
+torch_dtype = torch.float32
 
 print(f"TIMEOUT: {TIMEOUT}")
 print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
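As the added comment points out, the dtype is the main GPU-memory knob here: torch.float16 roughly halves the memory footprint of the weights compared with torch.float32, at some cost in numerical range. A hedged variant (not part of this commit) that only uses half precision when CUDA is available could look like:

import torch

mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumption: use float16 only on CUDA, where it saves GPU memory;
# keep float32 on CPU/MPS, where half precision is less dependable.
torch_dtype = torch.float16 if device.type == "cuda" else torch.float32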