Update app.py
Browse files
app.py
CHANGED
@@ -16,12 +16,12 @@ import numpy as np
|
|
16 |
import random
|
17 |
|
18 |
import torch
|
19 |
-
torch.backends.cuda.matmul.allow_tf32 =
|
20 |
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
21 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
22 |
-
torch.backends.cudnn.allow_tf32 =
|
23 |
torch.backends.cudnn.deterministic = False
|
24 |
-
torch.backends.cudnn.benchmark =
|
25 |
torch.backends.cuda.preferred_blas_library="cublas"
|
26 |
torch.backends.cuda.preferred_linalg_library="cusolver"
|
27 |
torch.set_float32_matmul_precision("highest")
|
@@ -150,7 +150,7 @@ def infer_60(
|
|
150 |
sd35_path = f"sd35ll_{timestamp}.png"
|
151 |
sd_image.save(sd35_path,optimize=False,compress_level=0)
|
152 |
pyx.upload_to_ftp(sd35_path)
|
153 |
-
upscaler_2.to(torch.device('cuda'))
|
154 |
with torch.no_grad():
|
155 |
upscale = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
|
156 |
upscale2 = upscaler_2(upscale, tiling=True, tile_width=256, tile_height=256)
|
@@ -200,7 +200,7 @@ def infer_90(
|
|
200 |
sd35_path = f"sd35ll_{timestamp}.png"
|
201 |
sd_image.save(sd35_path,optimize=False,compress_level=0)
|
202 |
pyx.upload_to_ftp(sd35_path)
|
203 |
-
upscaler_2.to(torch.device('cuda'))
|
204 |
with torch.no_grad():
|
205 |
upscale = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
|
206 |
upscale2 = upscaler_2(upscale, tiling=True, tile_width=256, tile_height=256)
|
@@ -250,7 +250,7 @@ def infer_110(
|
|
250 |
sd35_path = f"sd35ll_{timestamp}.png"
|
251 |
sd_image.save(sd35_path,optimize=False,compress_level=0)
|
252 |
pyx.upload_to_ftp(sd35_path)
|
253 |
-
upscaler_2.to(torch.device('cuda'))
|
254 |
with torch.no_grad():
|
255 |
upscale = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
|
256 |
upscale2 = upscaler_2(upscale, tiling=True, tile_width=256, tile_height=256)
|
|
|
16 |
import random
|
17 |
|
18 |
import torch
|
19 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
20 |
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
21 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
22 |
+
torch.backends.cudnn.allow_tf32 = True
|
23 |
torch.backends.cudnn.deterministic = False
|
24 |
+
torch.backends.cudnn.benchmark = True
|
25 |
torch.backends.cuda.preferred_blas_library="cublas"
|
26 |
torch.backends.cuda.preferred_linalg_library="cusolver"
|
27 |
torch.set_float32_matmul_precision("highest")
|
|
|
150 |
sd35_path = f"sd35ll_{timestamp}.png"
|
151 |
sd_image.save(sd35_path,optimize=False,compress_level=0)
|
152 |
pyx.upload_to_ftp(sd35_path)
|
153 |
+
upscaler_2.to(torch.device('cuda'), non_blocking=True)
|
154 |
with torch.no_grad():
|
155 |
upscale = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
|
156 |
upscale2 = upscaler_2(upscale, tiling=True, tile_width=256, tile_height=256)
|
|
|
200 |
sd35_path = f"sd35ll_{timestamp}.png"
|
201 |
sd_image.save(sd35_path,optimize=False,compress_level=0)
|
202 |
pyx.upload_to_ftp(sd35_path)
|
203 |
+
upscaler_2.to(torch.device('cuda'), non_blocking=True)
|
204 |
with torch.no_grad():
|
205 |
upscale = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
|
206 |
upscale2 = upscaler_2(upscale, tiling=True, tile_width=256, tile_height=256)
|
|
|
250 |
sd35_path = f"sd35ll_{timestamp}.png"
|
251 |
sd_image.save(sd35_path,optimize=False,compress_level=0)
|
252 |
pyx.upload_to_ftp(sd35_path)
|
253 |
+
upscaler_2.to(torch.device('cuda'), non_blocking=True)
|
254 |
with torch.no_grad():
|
255 |
upscale = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
|
256 |
upscale2 = upscaler_2(upscale, tiling=True, tile_width=256, tile_height=256)
|