Spaces: Running on Zero
Update app.py #1
by Nush88 - opened

app.py CHANGED
@@ -9,28 +9,22 @@ from translatepy import Translator
 import numpy as np
 import random
 
+# Initialize Translator
 translator = Translator()
 
 # Constants
 model = "Shakker-Labs/AWPortrait-FL"
-
-CSS = """
-.gradio-container {
-    max-width: 690px !important;
-}
-footer {
-    visibility: hidden;
-}
-"""
-
 MAX_SEED = np.iinfo(np.int32).max
 
-#
-if torch.cuda.is_available()
-
-
+# Device & dtype setup
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.bfloat16 if device == "cuda" else torch.float32
+
+# Load model and move to device
+pipe = FluxPipeline.from_pretrained(model, torch_dtype=dtype)
+pipe.to(device)
 
-#
+# Image generation function
 @spaces.GPU()
 def generate_image(
     prompt,
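A note on the hunk above: the dangling `if torch.cuda.is_available()` fragment is replaced by module-level device/dtype selection and a single pipeline load, and only the inference function carries the ZeroGPU decorator. A minimal sketch of that pattern, assuming the import block in lines 1-8 of app.py (not shown in this diff) pulls in spaces, torch, and FluxPipeline from diffusers:

    import spaces                       # assumed: Hugging Face ZeroGPU helper, works only on Spaces
    import torch                        # assumed
    from diffusers import FluxPipeline  # assumed: AWPortrait-FL is a FLUX checkpoint

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.bfloat16 if device == "cuda" else torch.float32

    # Load the pipeline once at import time; ZeroGPU attaches a GPU only while
    # the decorated function below is actually running.
    pipe = FluxPipeline.from_pretrained("Shakker-Labs/AWPortrait-FL", torch_dtype=dtype)
    pipe.to(device)

    @spaces.GPU()
    def generate_image(prompt):
        return pipe(prompt, num_inference_steps=24, guidance_scale=3.5).images

Loading the pipeline outside the decorated function means the slow weight download and CPU-side setup happen once at startup, while the GPU is only held for the duration of each generate_image call.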
@@ -46,11 +40,10 @@ def generate_image(
     seed = random.randint(0, MAX_SEED)
     seed = int(seed)
 
-    generator = torch.Generator().manual_seed(seed)
-
-    prompt = str(translator.translate(prompt, 'English'))
+    generator = torch.Generator(device=device).manual_seed(seed)
 
-
+    prompt = str(translator.translate(prompt, 'English'))
+    print(f'Prompt: {prompt}')
 
     image = pipe(
         prompt,
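Two details in this hunk are easy to miss: the generator is now created on the active device before seeding, and the translatepy result still has to be converted with str(), because translate() returns a result object rather than a plain string. A small self-contained sketch (the German sample phrase and the seed value are only illustrations):

    import torch
    from translatepy import Translator

    translator = Translator()

    # translate() returns a TranslationResult, not a str, which is why the app
    # wraps it in str() before handing the prompt to the pipeline.
    result = translator.translate("Ein Porträt bei Kerzenlicht", "English")
    prompt = str(result)  # e.g. "A portrait by candlelight"

    # Creating the generator on the same device the pipeline runs on keeps a
    # given seed reproducible for that device.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device=device).manual_seed(42)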
@@ -63,106 +56,58 @@
         max_sequence_length=512,
         num_images_per_prompt=nums,
     ).images
-
+
     return image, seed
 
+# Example prompts
 examples = [
-    "close up portrait, Amidst the interplay of light and shadows in a photography studio,a soft spotlight traces the contours of a face,highlighting a figure clad in a sleek black turtleneck. The garment,hugging the skin with subtle luxury,complements the Caucasian model's understated makeup,embodying minimalist elegance.
-    "Caucasian,The image features a young woman of European descent standing in an studio setting,surrounded by silk. (She is wearing a silk dress),paired with a bold. Her brown hair is wet and tousled,falling naturally around her face,giving her a raw and edgy look.
-    "A black and white portrait of a young woman with a captivating gaze. She's bundled up in a cozy black sweater,hands gently cupped near her face.
+    "close up portrait, Amidst the interplay of light and shadows in a photography studio,a soft spotlight traces the contours of a face,highlighting a figure clad in a sleek black turtleneck. The garment,hugging the skin with subtle luxury,complements the Caucasian model's understated makeup,embodying minimalist elegance.",
+    "Caucasian,The image features a young woman of European descent standing in an studio setting,surrounded by silk. (She is wearing a silk dress),paired with a bold. Her brown hair is wet and tousled,falling naturally around her face,giving her a raw and edgy look.",
+    "A black and white portrait of a young woman with a captivating gaze. She's bundled up in a cozy black sweater,hands gently cupped near her face.",
     "Fashion photography portrait,close up portrait,(a woman of European descent is surrounded by lava rock and magma from head to neck, red magma hair, wear volcanic lava rock magma outfit coat lava rock magma fashion costume with ruffled layers"
 ]
 
-
 # Gradio Interface
-
 css = """
 footer {
     visibility: hidden;
 }
 """
 
-
 with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
 
     with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(label='Enter Your Prompt(multilingual)', scale=6)
+            prompt = gr.Textbox(label='Enter Your Prompt (multilingual)', scale=6)
             submit = gr.Button(scale=1, variant='primary')
-
+
+    img = gr.Gallery(label="Gallery", columns=1, preview=True)
+
     with gr.Accordion("Advanced Options", open=False):
         with gr.Row():
-            width = gr.Slider(
-                label="Width",
-                minimum=512,
-                maximum=1280,
-                step=8,
-                value=768,
-            )
-            height = gr.Slider(
-                label="Height",
-                minimum=512,
-                maximum=1280,
-                step=8,
-                value=1024,
-            )
+            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=768)
+            height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024)
         with gr.Row():
-            scale = gr.Slider(
-                label="Guidance Scale",
-                minimum=0,
-                maximum=50,
-                step=0.1,
-                value=3.5,
-            )
-            steps = gr.Slider(
-                label="Steps",
-                minimum=1,
-                maximum=50,
-                step=1,
-                value=24,
-            )
+            scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, step=0.1, value=3.5)
+            steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=24)
         with gr.Row():
-            seed = gr.Slider(
-                label="Seed (-1 = Random)",
-                minimum=-1,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-                visible=True
-            )
-            nums = gr.Slider(
-                label="Image Numbers",
-                minimum=1,
-                maximum=4,
-                step=1,
-                value=1,
-                scale=1,
-            )
+            seed = gr.Slider(label="Seed (-1 = Random)", minimum=-1, maximum=MAX_SEED, step=1, value=0, visible=True)
+            nums = gr.Slider(label="Image Numbers", minimum=1, maximum=4, step=1, value=1, scale=1)
+
     gr.Examples(
         examples=examples,
         inputs=prompt,
-        outputs=[img,seed],
+        outputs=[img, seed],
         fn=generate_image,
         cache_examples="lazy",
     )
 
     gr.on(
-        triggers=[
-            prompt.submit,
-            submit.click,
-        ],
+        triggers=[prompt.submit, submit.click],
         fn=generate_image,
-        inputs=[
-            prompt,
-            width,
-            height,
-            scale,
-            steps,
-            seed,
-            nums
-        ],
+        inputs=[prompt, width, height, scale, steps, seed, nums],
         outputs=[img, seed],
         api_name="run",
     )
-
-demo.queue().launch()
+
+demo.queue().launch()
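Most of the Blocks changes in the hunk above are cosmetic (each slider collapsed to one line, the gr.Gallery output declared explicitly), but the event wiring is worth calling out: gr.on binds both the textbox submit and the button click to the same handler and exposes it under api_name="run". A minimal runnable sketch of that wiring (Gradio 4.x; echo stands in for generate_image):

    import gradio as gr

    def echo(text):
        return text

    with gr.Blocks() as demo:
        box = gr.Textbox(label="Prompt")
        btn = gr.Button("Run", variant="primary")
        out = gr.Textbox(label="Result")

        # One handler serves both triggers: pressing Enter in the textbox and
        # clicking the button; api_name exposes it as /run on the Space's API.
        gr.on(
            triggers=[box.submit, btn.click],
            fn=echo,
            inputs=[box],
            outputs=[out],
            api_name="run",
        )

    demo.queue().launch()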