Optimize GPU device handling in generate function
- Simplify model device transfer method
- Remove redundant `.to("cuda")` assignment
- Ensure model is correctly moved to GPU when available
webui.py
CHANGED
@@ -59,7 +59,7 @@ def generate(text,
 
     # if gpu available, move model to gpu
     if torch.cuda.is_available():
-        model
+        model.to("cuda")
 
     with torch.no_grad():
         wav = model.inference(