multimodalart (HF Staff) committed · verified
Commit c0d0ae5 · Parent(s): 6a2dbc2

Update app.py

Files changed (1): app.py (+15, -13)
app.py CHANGED
@@ -68,27 +68,21 @@ print("Loading models and preprocessors...")
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.bfloat16
 
-# Load the base and ControlNet models
 base_model = "Qwen/Qwen-Image"
 controlnet_model = "InstantX/Qwen-Image-ControlNet-Union"
 controlnet = QwenImageControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch_dtype)
 
-# Use the lightning-fast scheduler
 scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(base_model, subfolder="scheduler")
 
 pipe = QwenImageControlNetPipeline.from_pretrained(
     base_model, controlnet=controlnet, scheduler=scheduler, torch_dtype=torch_dtype
 ).to(device)
 
-# Load the preprocessors from controlnet_aux
-# We create a dictionary to easily access them by name.
-# Note: "depth-anything" is not yet available in controlnet_aux, so we use MiDaS as a strong alternative.
-processors = {
-    "Canny": CannyDetector(),
-    "Soft Edge": AnylineDetector.from_pretrained("TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"),
-    "Depth": MidasDetector.from_pretrained("lllyasviel/Annotators").to(device),
-    "Pose": DWposeDetector().to(device),
-}
+canny = CannyDetector()
+soft = AnylineDetector.from_pretrained("TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline").to(device)
+depth = MidasDetector.from_pretrained("lllyasviel/Annotators").to(device)
+pose = DWposeDetector().to(device)
+
 print("Loading complete.")
 
 
@@ -125,8 +119,16 @@ def generate(
     prompt = enhanced_prompt
 
     # Select and run the appropriate preprocessor
-    processor = processors[conditioning]
-    control_image = processor(image, to_pil=True)
+    if(conditioning == "Canny"):
+        processor = canny
+    if(conditioning == "Soft Edge"):
+        processor = soft
+    if(conditioning == "Depth"):
+        processor = depth
+    if(conditioning == "Pose"):
+        processor = pose
+
+    control_image = processor(image)
 
     generator = torch.Generator(device=device).manual_seed(int(seed))
 
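
The control_image and generator produced in the second hunk feed the pipe() call later in generate(). That call is outside this diff, so the following is only a sketch with assumed, diffusers-style keyword arguments and illustrative values:

output = pipe(
    prompt=prompt,
    control_image=control_image,          # control map selected above
    controlnet_conditioning_scale=1.0,    # assumed parameter name; illustrative value
    num_inference_steps=30,               # illustrative step count
    generator=generator,                  # seeded for reproducibility
).images[0]
output.save("result.png")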