multimodalart (HF Staff) committed
Commit 9de7855 · verified · Parent(s): b22da46

Update app.py

Files changed (1):
  app.py (+5, -9)
app.py CHANGED
@@ -11,7 +11,7 @@ from PIL import Image
 from diffusers import QwenImageControlNetPipeline, QwenImageControlNetModel
 
 # --- Preprocessor Imports ---
-from controlnet_aux import OpenposeDetector #, AnylineDetector
+from controlnet_aux import OpenposeDetector, AnylineDetector
 from depth_anything_v2.dpt import DepthAnythingV2
 
 # --- Prompt Enhancement Imports ---
@@ -99,7 +99,7 @@ depth_anything = depth_anything.to(device).eval()
 # Load Pose and Soft Edge Detectors
 print("Loading other detectors...")
 openpose_detector = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
-#anyline_detector = AnylineDetector.from_pretrained("lllyasviel/Annotators", filename="anyline.pth").to(device)
+anyline_detector = AnylineDetector.from_pretrained("lllyasviel/Annotators", filename="anyline.pth").to(device)
 
 print("All models loaded.")
 
@@ -107,8 +107,8 @@ def get_control_image(input_image, control_mode):
     """A master function to select and run the correct preprocessor."""
     if control_mode == "Canny":
         return extract_canny(input_image)
-    #elif control_mode == "Soft Edge":
-    #    return anyline_detector(input_image, to_pil=True)
+    elif control_mode == "Soft Edge":
+        return anyline_detector(input_image, to_pil=True)
     elif control_mode == "Depth":
         image_np = np.array(input_image)
         with torch.no_grad():
@@ -118,10 +118,6 @@ def get_control_image(input_image, control_mode):
         return Image.fromarray(depth).convert('RGB')
     elif control_mode == "Pose":
         return openpose_detector(input_image, hand_and_face=True)
-    elif control_mode == "Recolor":
-        return convert_to_grayscale(input_image)
-    elif control_mode == "Tile":
-        return tile_image(input_image, 16)
     else:
         raise ValueError(f"Unknown control mode: {control_mode}")
 
@@ -185,7 +181,7 @@ with gr.Blocks(css="footer {display: none !important;}") as demo:
     input_image = gr.Image(type="pil", label="Input Image", height=512)
     prompt = gr.Textbox(label="Prompt", placeholder="A detailed description of the desired image...")
     conditioning = gr.Radio(
-        choices=["Canny", "Soft Edge", "Depth", "Pose", "Recolor", "Tile"],
+        choices=["Canny", "Soft Edge", "Depth", "Pose"],
         value="Pose",
         label="Conditioning Type"
     )
 
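For anyone wanting to try the re-enabled Soft Edge path outside the Space, below is a minimal standalone sketch that mirrors the loading and call pattern in app.py above. The checkpoint, filename, and to_pil arguments are copied verbatim from the diff rather than verified against controlnet_aux documentation, and "example.jpg" is a hypothetical local test image.

# Minimal sketch of the Soft Edge preprocessing path, mirroring app.py above.
# Assumptions: controlnet_aux exposes AnylineDetector as imported in the diff,
# and "example.jpg" is a hypothetical local test image.
from PIL import Image
from controlnet_aux import AnylineDetector

# Same checkpoint and filename as used in app.py (device placement omitted here).
anyline_detector = AnylineDetector.from_pretrained(
    "lllyasviel/Annotators", filename="anyline.pth"
)

input_image = Image.open("example.jpg").convert("RGB")

# Same call that get_control_image() makes for the "Soft Edge" mode.
control_image = anyline_detector(input_image, to_pil=True)
control_image.save("soft_edge_control.png")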