Ming Li committed · Commit cce8954
Parent(s): 1f3a0a5

update demo
Files changed:
- app.py +2 -24
- app_canny.py +21 -2
- app_depth.py +22 -2
- app_lineart.py +40 -4
- app_segmentation.py +25 -2
- app_softedge.py +21 -2
- images/{canny_demo.jpg → canny/canny_demo.jpg} +0 -0
- images/{depth_demo.png → depth/depth_demo.png} +0 -0
- images/{hed_demo.jpeg → hed/hed_demo.jpeg} +0 -0
- images/lineart/Ryan Reynolds_1.png +0 -0
- images/lineart/Ryan Reynolds_2.png +0 -0
- images/lineart/Ryan Reynolds_3.png +0 -0
- images/{lineart_demo.jpg → lineart/tube.jpg} +0 -0
- images/seg/33.png +0 -0
- images/{seg_demo.png → seg/seg_demo.png} +0 -0
- model.py +6 -1
app.py
CHANGED

@@ -14,6 +14,8 @@ from app_segmentation import create_demo as create_demo_segmentation
 from app_softedge import create_demo as create_demo_softedge
 from model import Model
 from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON
+from transformers.utils.hub import move_cache
+move_cache()
 
 DESCRIPTION = "# [ControlNet++: Improving Conditional Controls with Efficient Consistency Feedback](https://arxiv.org/abs/2404.07987)"
 
@@ -23,30 +25,6 @@ if not torch.cuda.is_available():
 model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")
 
 
-# examples = [
-#     [
-#         "images/canny_demo.jpg",
-#         "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER",
-#     ],
-#     [
-#         "images/depth_demo.jpg",
-#         "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER",
-#     ],
-#     [
-#         "images/hed_demo.jpg",
-#         "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER",
-#     ],
-#     [
-#         "images/lineart_demo.jpg",
-#         "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER",
-#     ],
-#     [
-#         "images/seg_demo.jpg",
-#         "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER",
-#     ],
-# ]
-
-
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     gr.DuplicateButton(
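Aside from dropping the commented-out examples, the only change to app.py is the transformers cache migration at startup. A hedged sketch of the same call, with a guard added purely for illustration (the try/except is not part of the commit), so a failed migration cannot keep the Space from importing:

# Sketch only: guard the one-time Hugging Face cache migration.
try:
    from transformers.utils.hub import move_cache

    move_cache()  # migrates the legacy transformers cache layout; safe to re-run
except Exception as err:  # assumption: the demo only needs the cache, not the migration
    print(f"transformers cache migration skipped: {err}")
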
app_canny.py
CHANGED

@@ -11,13 +11,19 @@ from settings import (
 )
 from utils import randomize_seed_fn
 
+examples = [
+    [
+        "images/canny/canny_demo.jpg",
+        "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER",
+    ],
+]
 
 def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image(
-                prompt = gr.Textbox(label="Prompt"
+                image = gr.Image()
+                prompt = gr.Textbox(label="Prompt")
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     num_samples = gr.Slider(
@@ -47,6 +53,19 @@ def create_demo(process):
                     )
             with gr.Column():
                 result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[
+                        image,
+                        prompt,
+                        guidance_scale,
+                        seed,
+                    ],
+                    outputs=result,
+                    fn=process,
+                )
+
         inputs = [
             image,
             prompt,
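The remaining app_*.py diffs below repeat this wiring: a module-level examples list plus a gr.Examples block bound to the existing input components and the shared process callback (app_lineart.py additionally passes preprocessor_name as a third example value). A minimal, self-contained sketch of the pattern, trimmed to two inputs so the example rows and the inputs list line up; fake_process and the slider bounds are placeholders for this sketch, not code from the repo:

# Sketch of the gr.Examples wiring used across the app_*.py demos.
import gradio as gr

examples = [
    ["images/canny/canny_demo.jpg", "BEAUTIFUL PORTRAIT PAINTINGS BY EMMA UBER"],
]

def fake_process(image, prompt):
    # Placeholder for the real ControlNet callback, which returns generated images.
    return [image]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image = gr.Image()
            prompt = gr.Textbox(label="Prompt")
        with gr.Column():
            result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
            gr.Examples(
                examples=examples,
                inputs=[image, prompt],  # each example row maps positionally to these inputs
                outputs=result,
                fn=fake_process,  # with example caching off, clicking a row only fills the inputs
            )

if __name__ == "__main__":
    demo.launch()
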
app_depth.py
CHANGED

@@ -11,13 +11,20 @@ from settings import (
 )
 from utils import randomize_seed_fn
 
+examples = [
+    [
+        "images/depth/depth_demo.png",
+        "heart, mountains, and nature image",
+    ],
+]
+
 
 def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image(
-                prompt = gr.Textbox(label="Prompt"
+                image = gr.Image()
+                prompt = gr.Textbox(label="Prompt")
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -47,6 +54,19 @@ def create_demo(process):
                     )
             with gr.Column():
                 result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[
+                        image,
+                        prompt,
+                        guidance_scale,
+                        seed,
+                    ],
+                    outputs=result,
+                    fn=process,
+                )
+
         inputs = [
             image,
             prompt,
app_lineart.py
CHANGED

@@ -12,12 +12,36 @@ from settings import (
 from utils import randomize_seed_fn
 
 
+examples = [
+    [
+        "images/lineart/Ryan Reynolds_1.png",
+        "Ryan Reynolds",
+        "None",
+    ],
+    [
+        "images/lineart/Ryan Reynolds_2.png",
+        "Ryan Reynolds",
+        "None",
+    ],
+    [
+        "images/lineart/Ryan Reynolds_3.png",
+        "Ryan Reynolds",
+        "None",
+    ],
+    [
+        "images/lineart/tube.jpg",
+        "Picture Of Looking Through A View Finder",
+        "Lineart",
+    ],
+]
+
+
 def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image(
-                prompt = gr.Textbox(label="Prompt"
+                image = gr.Image()
+                prompt = gr.Textbox(label="Prompt")
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -50,13 +74,25 @@ def create_demo(process):
                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    a_prompt = gr.Textbox(label="Additional prompt"
+                    a_prompt = gr.Textbox(label="Additional prompt")
                     n_prompt = gr.Textbox(
                         label="Negative prompt",
-                        value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                     )
             with gr.Column():
                 result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[
+                        image,
+                        prompt,
+                        preprocessor_name
+                    ],
+                    outputs=result,
+                    fn=process,
+                )
+
         inputs = [
             image,
             prompt,
app_segmentation.py
CHANGED

@@ -11,13 +11,23 @@ from settings import (
 )
 from utils import randomize_seed_fn
 
+examples = [
+    [
+        "images/seg/33.png",
+        "A man standing in front of a wall with several framed artworks hanging on it",
+    ],
+    [
+        "images/seg/seg_demo.png",
+        "A large building with a pointed roof and several chimneys",
+    ],
+]
 
 def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image(
-                prompt = gr.Textbox(label="Prompt"
+                image = gr.Image()
+                prompt = gr.Textbox(label="Prompt")
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -47,6 +57,19 @@ def create_demo(process):
                     )
             with gr.Column():
                 result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[
+                        image,
+                        prompt,
+                        guidance_scale,
+                        seed,
+                    ],
+                    outputs=result,
+                    fn=process,
+                )
+
         inputs = [
             image,
             prompt,
app_softedge.py
CHANGED

@@ -11,13 +11,19 @@ from settings import (
 )
 from utils import randomize_seed_fn
 
+examples = [
+    [
+        "images/hed/hed_demo.jpeg",
+        "Language trip to Laon",
+    ],
+]
 
 def create_demo(process):
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                image = gr.Image(
-                prompt = gr.Textbox(label="Prompt"
+                image = gr.Image()
+                prompt = gr.Textbox(label="Prompt")
                 run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
@@ -56,6 +62,19 @@ def create_demo(process):
                     )
             with gr.Column():
                 result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[
+                        image,
+                        prompt,
+                        guidance_scale,
+                        seed,
+                    ],
+                    outputs=result,
+                    fn=process,
+                )
+
         inputs = [
             image,
             prompt,
images/{canny_demo.jpg → canny/canny_demo.jpg}
RENAMED
File without changes

images/{depth_demo.png → depth/depth_demo.png}
RENAMED
File without changes

images/{hed_demo.jpeg → hed/hed_demo.jpeg}
RENAMED
File without changes

images/lineart/Ryan Reynolds_1.png
ADDED

images/lineart/Ryan Reynolds_2.png
ADDED

images/lineart/Ryan Reynolds_3.png
ADDED

images/{lineart_demo.jpg → lineart/tube.jpg}
RENAMED
File without changes

images/seg/33.png
ADDED

images/{seg_demo.png → seg/seg_demo.png}
RENAMED
File without changes
model.py
CHANGED

@@ -585,7 +585,7 @@ class Model:
             raise ValueError
 
         if preprocessor_name in ["None", "None (anime)"]:
-            image = HWC3(image)
+            image = 255 - HWC3(image)
             image = resize_image(image, resolution=image_resolution)
             control_image = PIL.Image.fromarray(image)
         elif preprocessor_name in ["Lineart", "Lineart coarse"]:
@@ -618,6 +618,7 @@
             guidance_scale=guidance_scale,
             seed=seed,
         )
+        self.preprocessor.load("Lineart")
         conditions_of_generated_imgs = [
             self.preprocessor(
                 image=x,
@@ -625,6 +626,10 @@
                 detect_resolution=preprocess_resolution,
             ) for x in results
         ]
+
+        control_image = PIL.Image.fromarray((255 - np.array(control_image)).astype(np.uint8))
+        conditions_of_generated_imgs = [PIL.Image.fromarray((255 - np.array(x)).astype(np.uint8)) for x in conditions_of_generated_imgs]
+
         return [control_image] * num_images + results + conditions_of_generated_imgs
 
     @torch.inference_mode()
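The model.py changes all touch the lineart path: when no preprocessor is selected the control image is inverted (255 - HWC3(image)), the "Lineart" preprocessor is reloaded before extracting conditions from the generated results, and both the control image and those extracted conditions are inverted again before being returned, presumably to keep a consistent line/background polarity in the displayed gallery. A small sketch of that inversion; the invert helper is illustrative and does not exist in the repo:

# Sketch: flip a uint8 image (black-on-white <-> white-on-black), as done
# inline by the new model.py lines before the return statement.
import numpy as np
import PIL.Image

def invert(image: PIL.Image.Image) -> PIL.Image.Image:
    return PIL.Image.fromarray((255 - np.array(image)).astype(np.uint8))

# Usage mirroring the two added lines before the return:
# control_image = invert(control_image)
# conditions_of_generated_imgs = [invert(x) for x in conditions_of_generated_imgs]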