Update app.py
Browse files
app.py
CHANGED
@@ -8,10 +8,6 @@ from pipeline import StableDiffusionControlLoraV3Pipeline
 8       from PIL import Image
 9       import os
10       from huggingface_hub import login
11     - import huggingface_hub
12     -
13     - # Initialize ZeroGPU environment
14     - huggingface_hub.initialize_zerogpu()
15
16       # Login using the token
17       login(token=os.environ.get("HF_TOKEN"))

@@ -20,11 +16,18 @@ login(token=os.environ.get("HF_TOKEN"))
20       base_model = "runwayml/stable-diffusion-v1-5"
21       dtype = torch.float16  # A100 works better with float16
22
23       # Load the custom UNet
24       unet = UNet2DConditionModelEx.from_pretrained(
25           base_model,
26           subfolder="unet",
27     -     torch_dtype=dtype
28       )
29
30       # Add conditioning with ow-gbi-control-lora

@@ -34,7 +37,8 @@ unet = unet.add_extra_conditions("ow-gbi-control-lora")
34       pipe = StableDiffusionControlLoraV3Pipeline.from_pretrained(
35           base_model,
36           unet=unet,
37     -     torch_dtype=dtype
38       )
39
40       # Use a faster scheduler
 8       from PIL import Image
 9       import os
10       from huggingface_hub import login
11
12       # Login using the token
13       login(token=os.environ.get("HF_TOKEN"))

16       base_model = "runwayml/stable-diffusion-v1-5"
17       dtype = torch.float16  # A100 works better with float16
18
19     + try:
20     +     # Check if CUDA is available
21     +     device = "cuda" if torch.cuda.is_available() else "cpu"
22     + except:
23     +     device = "cpu"
24     +
25       # Load the custom UNet
26       unet = UNet2DConditionModelEx.from_pretrained(
27           base_model,
28           subfolder="unet",
29     +     torch_dtype=dtype,
30     +     device_map="auto"  # Let the model handle device placement
31       )
32
33       # Add conditioning with ow-gbi-control-lora

37       pipe = StableDiffusionControlLoraV3Pipeline.from_pretrained(
38           base_model,
39           unet=unet,
40     +     torch_dtype=dtype,
41     +     device_map="auto"  # Let the model handle device placement
42       )
43
44       # Use a faster scheduler