Spaces:
Runtime error
Runtime error
zhiweili
committed on
Commit
·
3a274a9
1
Parent(s):
598f3c2
test single adapter
Browse files
app_haircolor_inpaint_adapter_15.py
CHANGED
@@ -41,21 +41,26 @@ lineart_detector = lineart_detector.to(DEVICE)
|
|
41 |
pidiNet_detector = PidiNetDetector.from_pretrained('lllyasviel/Annotators')
|
42 |
pidiNet_detector = pidiNet_detector.to(DEVICE)
|
43 |
|
44 |
-
adapters = MultiAdapter(
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
)
|
58 |
-
adapters = adapters.to(torch.float16)
|
59 |
|
60 |
basepipeline = DiffusionPipeline.from_pretrained(
|
61 |
BASE_MODEL,
|
@@ -80,8 +85,8 @@ def image_to_image(
|
|
80 |
num_steps: int,
|
81 |
guidance_scale: float,
|
82 |
generate_size: int,
|
83 |
-
cond_scale1: float =
|
84 |
-
cond_scale2: float =
|
85 |
):
|
86 |
run_task_time = 0
|
87 |
time_cost_str = ''
|
@@ -92,6 +97,9 @@ def image_to_image(
|
|
92 |
pidiNet_image = pidiNet_detector(input_image, int(generate_size*1), generate_size)
|
93 |
cond_image = [canny_image, pidiNet_image]
|
94 |
cond_scale = [cond_scale1, cond_scale2]
|
|
|
|
|
|
|
95 |
|
96 |
generator = torch.Generator(device=DEVICE).manual_seed(seed)
|
97 |
generated_image = basepipeline(
|
|
|
41 |
pidiNet_detector = PidiNetDetector.from_pretrained('lllyasviel/Annotators')
|
42 |
pidiNet_detector = pidiNet_detector.to(DEVICE)
|
43 |
|
44 |
+
# adapters = MultiAdapter(
|
45 |
+
# [
|
46 |
+
# T2IAdapter.from_pretrained(
|
47 |
+
# "TencentARC/t2iadapter_canny_sd15v2",
|
48 |
+
# torch_dtype=torch.float16,
|
49 |
+
# varient="fp16",
|
50 |
+
# ),
|
51 |
+
# T2IAdapter.from_pretrained(
|
52 |
+
# "TencentARC/t2iadapter_sketch_sd15v2",
|
53 |
+
# torch_dtype=torch.float16,
|
54 |
+
# varient="fp16",
|
55 |
+
# ),
|
56 |
+
# ]
|
57 |
+
# )
|
58 |
+
# adapters = adapters.to(torch.float16)
|
59 |
+
adapters = T2IAdapter.from_pretrained(
|
60 |
+
"TencentARC/t2iadapter_canny_sd15v2",
|
61 |
+
torch_dtype=torch.float16,
|
62 |
+
varient="fp16",
|
63 |
)
|
|
|
64 |
|
65 |
basepipeline = DiffusionPipeline.from_pretrained(
|
66 |
BASE_MODEL,
|
|
|
85 |
num_steps: int,
|
86 |
guidance_scale: float,
|
87 |
generate_size: int,
|
88 |
+
cond_scale1: float = 1.2,
|
89 |
+
cond_scale2: float = 1.2,
|
90 |
):
|
91 |
run_task_time = 0
|
92 |
time_cost_str = ''
|
|
|
97 |
pidiNet_image = pidiNet_detector(input_image, int(generate_size*1), generate_size)
|
98 |
cond_image = [canny_image, pidiNet_image]
|
99 |
cond_scale = [cond_scale1, cond_scale2]
|
100 |
+
|
101 |
+
cond_image = canny_image
|
102 |
+
cond_scale = cond_scale1
|
103 |
|
104 |
generator = torch.Generator(device=DEVICE).manual_seed(seed)
|
105 |
generated_image = basepipeline(
|