Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -29,7 +29,7 @@ sampler_list = [
|
|
29 |
torch.backends.cudnn.deterministic = True
|
30 |
torch.backends.cudnn.benchmark = False
|
31 |
|
32 |
-
device = torch.device("cuda
|
33 |
|
34 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
35 |
if randomize_seed:
|
@@ -72,7 +72,7 @@ def load_pipeline(model_name):
|
|
72 |
add_watermarker=False,
|
73 |
use_auth_token=HF_TOKEN
|
74 |
)
|
75 |
-
pipe.to(
|
76 |
return pipe
|
77 |
|
78 |
@spaces.GPU
|
@@ -118,6 +118,7 @@ def generate(
|
|
118 |
|
119 |
if torch.cuda.is_available():
|
120 |
pipe = load_pipeline(MODEL)
|
|
|
121 |
print("Loaded on Device!")
|
122 |
else:
|
123 |
pipe = None
|
|
|
29 |
torch.backends.cudnn.deterministic = True
|
30 |
torch.backends.cudnn.benchmark = False
|
31 |
|
32 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
33 |
|
34 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
35 |
if randomize_seed:
|
|
|
72 |
add_watermarker=False,
|
73 |
use_auth_token=HF_TOKEN
|
74 |
)
|
75 |
+
pipe.to(device)
|
76 |
return pipe
|
77 |
|
78 |
@spaces.GPU
|
|
|
118 |
|
119 |
if torch.cuda.is_available():
|
120 |
pipe = load_pipeline(MODEL)
|
121 |
+
pipe.to(device)
|
122 |
print("Loaded on Device!")
|
123 |
else:
|
124 |
pipe = None
|