Update app.py

app.py CHANGED
@@ -20,7 +20,7 @@ pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev
 
 # Load LoRA data (you'll need to create this JSON file or modify to load your LoRAs)
 
-with open("flux_loras.json", "r") as file:
+with open("flux_loras-v2.json", "r") as file:
     data = json.load(file)
     flux_loras_raw = [
         {
@@ -30,6 +30,7 @@ with open("flux_loras.json", "r") as file:
             "trigger_word": item.get("trigger_word", ""),
             "trigger_position": item.get("trigger_position", "prepend"),
             "weights": item.get("weights", "pytorch_lora_weights.safetensors"),
+            "lora_type": item.get("lora_type", "flux"),
         }
         for item in data
     ]
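For context, here is a minimal sketch of what a `flux_loras-v2.json` entry might look like after this change. Every value below is illustrative rather than taken from the Space; the only new key is `"lora_type"`, and entries that omit it fall back to `"flux"`:

```python
import json

# Hypothetical flux_loras-v2.json contents -- repos, titles, and image paths
# are made up for illustration. item.get("lora_type", "flux") in the loader
# means older entries without the key keep behaving as plain FLUX LoRAs.
data = json.loads("""
[
  {
    "image": "images/sketch_style.png",
    "title": "Sketch Style",
    "repo": "some-user/some-flux-lora",
    "trigger_word": ", How2Draw",
    "weights": "pytorch_lora_weights.safetensors",
    "lora_type": "flux"
  },
  {
    "image": "images/kontext_style.png",
    "title": "Kontext Style",
    "repo": "some-user/some-kontext-lora",
    "trigger_word": "convert the photo to a watercolor painting",
    "lora_type": "kontext"
  }
]
""")
assert all(item.get("lora_type", "flux") in ("flux", "kontext") for item in data)
```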
@@ -126,12 +127,12 @@ def classify_gallery(flux_loras):
     sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
     return [(item["image"], item["title"]) for item in sorted_gallery], sorted_gallery
 
-def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.75, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
+def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.75, portrait_mode=False, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
     """Wrapper function to handle state serialization"""
-    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, flux_loras, progress)
+    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, portrait_mode, flux_loras, progress)
 
 @spaces.GPU
-def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
+def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, portrait_mode=False, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
     """Generate image with selected LoRA"""
     global current_lora, pipe
 
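Note that the new portrait_mode parameter sits between lora_scale and flux_loras in both signatures, so the wrapper's positional pass-through must forward it in exactly that slot or flux_loras and progress silently shift into the wrong parameters. A sketch of the same wrapper forwarding by keyword instead, which stays correct even if the order changes again:

```python
import gradio as gr

# Sketch only: same wrapper as in the diff above (infer_with_lora assumed
# defined as shown there), but forwarding by keyword so the call cannot
# silently misalign if the parameter order changes again.
def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora,
                            seed=42, randomize_seed=False, guidance_scale=2.5,
                            lora_scale=1.75, portrait_mode=False, flux_loras=None,
                            progress=gr.Progress(track_tqdm=True)):
    """Wrapper function to handle state serialization"""
    return infer_with_lora(
        input_image, prompt, selected_index, custom_lora,
        seed=seed, randomize_seed=randomize_seed, guidance_scale=guidance_scale,
        lora_scale=lora_scale, portrait_mode=portrait_mode,
        flux_loras=flux_loras, progress=progress,
    )
```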
@@ -169,13 +170,19 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
     input_image = input_image.convert("RGB")
     # Add trigger word to prompt
     trigger_word = lora_to_use["trigger_word"]
-    if trigger_word == ", How2Draw":
-        prompt = f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
-    elif trigger_word == ", video game screenshot in the style of THSMS":
-        prompt = f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
-    else:
-        prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
-
+    is_kontext_lora = lora_to_use["lora_type"] == "kontext"
+    if not is_kontext_lora:
+        if trigger_word == ", How2Draw":
+            prompt = f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
+        elif trigger_word == ", video game screenshot in the style of THSMS":
+            prompt = f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
+        else:
+            prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
+    else:
+        if portrait_mode:
+            prompt = f"{trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features."
+        else:
+            prompt = f"{trigger_word}. {prompt}."
     try:
         image = pipe(
             image=input_image,
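The new branching is easier to read pulled out into a pure function. build_prompt is a hypothetical name, not one used in the Space, but the strings and branches mirror the added lines above:

```python
def build_prompt(prompt: str, trigger_word: str, lora_type: str, portrait_mode: bool) -> str:
    """Hypothetical extraction of the prompt-construction branches in the diff above."""
    if lora_type != "kontext":
        # Classic FLUX LoRAs: rewrite the prompt around the trigger word.
        if trigger_word == ", How2Draw":
            return f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
        elif trigger_word == ", video game screenshot in the style of THSMS":
            return f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
        return f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
    # Kontext-trained LoRAs: the trigger word is already an edit instruction,
    # optionally reinforced with identity preservation in portrait mode.
    if portrait_mode:
        return f"{trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features."
    return f"{trigger_word}. {prompt}."

# Illustrative call, not from the Space:
print(build_prompt("wearing a red hat", "convert the photo to a watercolor painting", "kontext", True))
```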
@@ -242,7 +249,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Column(scale=4, elem_id="box_column"):
             with gr.Group(elem_id="gallery_box"):
                 input_image = gr.Image(label="Upload a picture of yourself", type="pil", height=300)
-
+                portrait_mode = gr.Checkbox(label="portrait mode", value=True)
                 gallery = gr.Gallery(
                     label="Pick a LoRA",
                     allow_preview=False,
@@ -328,7 +335,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer_with_lora_wrapper,
-        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, gr_flux_loras],
+        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, portrait_mode, gr_flux_loras],
         outputs=[result, seed, reuse_button]
     )
 
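On the UI side, the checkbox only reaches the handler because it is listed in inputs: gr.on passes each component's current value to fn positionally, in the order given, so portrait_mode must occupy the same slot as the portrait_mode parameter in the wrapper's signature. A stripped-down sketch of that wiring pattern (component names reused from the diff, everything else minimal):

```python
import gradio as gr

# Minimal sketch of the wiring used above: the Checkbox value is delivered
# to the handler in the position it occupies in `inputs`.
def handler(image, prompt, portrait_mode):
    return f"portrait_mode={portrait_mode}: {prompt}"

with gr.Blocks() as demo:
    image = gr.Image(type="pil")
    prompt = gr.Textbox(label="Prompt")
    portrait_mode = gr.Checkbox(label="portrait mode", value=True)
    out = gr.Textbox(label="Result")
    run_button = gr.Button("Run")
    gr.on(triggers=[run_button.click, prompt.submit], fn=handler,
          inputs=[image, prompt, portrait_mode], outputs=[out])

# demo.launch()  # uncomment to serve the sketch locally
```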