naonauno committed
Commit e14967b · verified · 1 Parent(s): a84d2fb

Update app.py

Files changed (1)
  app.py +4 -10
app.py CHANGED
@@ -8,6 +8,7 @@ from pipeline import StableDiffusionControlLoraV3Pipeline
  from PIL import Image
  import os
  from huggingface_hub import login
+ import spaces

  # Login using the token
  login(token=os.environ.get("HF_TOKEN"))
@@ -16,18 +17,11 @@ login(token=os.environ.get("HF_TOKEN"))
  base_model = "runwayml/stable-diffusion-v1-5"
  dtype = torch.float16  # A100 works better with float16

- try:
-     # Check if CUDA is available
-     device = "cuda" if torch.cuda.is_available() else "cpu"
- except:
-     device = "cpu"
-
  # Load the custom UNet
  unet = UNet2DConditionModelEx.from_pretrained(
      base_model,
      subfolder="unet",
-     torch_dtype=dtype,
-     device_map="auto"  # Let the model handle device placement
+     torch_dtype=dtype
  )

  # Add conditioning with ow-gbi-control-lora
@@ -37,8 +31,7 @@ unet = unet.add_extra_conditions("ow-gbi-control-lora")
  pipe = StableDiffusionControlLoraV3Pipeline.from_pretrained(
      base_model,
      unet=unet,
-     torch_dtype=dtype,
-     device_map="auto"  # Let the model handle device placement
+     torch_dtype=dtype
  )

  # Use a faster scheduler
@@ -61,6 +54,7 @@ def get_canny_image(image, low_threshold=100, high_threshold=200):
      canny_image = np.stack([canny_image] * 3, axis=-1)
      return Image.fromarray(canny_image)

+ @spaces.GPU(duration=120)  # Set GPU allocation duration to 120 seconds
  def generate_image(input_image, prompt, negative_prompt, guidance_scale, steps, low_threshold, high_threshold):
      canny_image = get_canny_image(input_image, low_threshold, high_threshold)

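
Context for the change: the commit swaps manual device handling (the CUDA try/except and device_map="auto") for Hugging Face's ZeroGPU decorator from the spaces package, which attaches a GPU only while the decorated function runs. Below is a minimal sketch of that usage pattern, not the app's actual code: the stock StableDiffusionPipeline stands in for the repo's custom pipeline, and the module-level .to("cuda") call and generate() helper are illustrative assumptions.

import spaces
import torch
from diffusers import StableDiffusionPipeline

# Illustrative stand-in for the app's custom pipeline: load once at import time.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
)
pipe.to("cuda")  # assumption: on a ZeroGPU Space the move takes effect once a GPU is attached

@spaces.GPU(duration=120)  # request a GPU for up to 120 seconds per call
def generate(prompt: str):
    # The GPU is only guaranteed to be available inside this decorated function.
    return pipe(prompt, num_inference_steps=30).images[0]

Dropping device_map="auto" is consistent with this model: on ZeroGPU hardware no GPU is attached at import time, so per-call allocation via @spaces.GPU is the intended placement mechanism.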