# FLUX-REALISM / app.py
import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
import uuid
from typing import Tuple
import numpy as np
DESCRIPTION = """##
"""
# Function to save an image with a unique name
def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name
# Function to handle seed randomization
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
# Maximum seed value for 32-bit integer
MAX_SEED = np.iinfo(np.int32).max
# Load the diffusion model
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
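# Attach the Super Realism LoRA adapter on top of the base FLUX.1-dev weights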
lora_repo = "strangerzonehf/Flux-Super-Realism-LoRA"
trigger_word = "Super Realism" # Leave blank if not used
pipe.load_lora_weights(lora_repo)
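# Optional (an assumption, not part of the original Space): the LoRA could be fused
# into the base weights to avoid adapter overhead on every forward pass, e.g.:
#   pipe.fuse_lora(lora_scale=1.0)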
pipe.to("cuda")
# Define style options with negative prompts
style_list = [
    {
        "name": "3840 x 2160",
        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "",
    },
    {
        "name": "2560 x 1440",
        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "",
    },
    {
        "name": "HD+",
        "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "",
    },
    {
        "name": "Style Zero",
        "prompt": "{prompt}",
        "negative_prompt": "",
    },
]
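# Map each style name to its (prompt template, negative prompt) pair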
styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
DEFAULT_STYLE_NAME = "3840 x 2160"
STYLE_NAMES = list(styles.keys())
# Apply selected style to the prompt
def apply_style(style_name: str, positive: str) -> Tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n
# Image generation function with Spaces GPU support
@spaces.GPU(duration=60, enable_queue=True)
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    num_inference_steps: int = 30,
    progress=gr.Progress(track_tqdm=True),
):
    positive_prompt, style_negative_prompt = apply_style(style_name, prompt)
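    # Combine the style's negative prompt with the user-supplied one only when enabled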
    if use_negative_prompt:
        final_negative_prompt = style_negative_prompt + " " + negative_prompt
    else:
        final_negative_prompt = style_negative_prompt
    final_negative_prompt = final_negative_prompt.strip()
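    # Prepend the LoRA trigger word so the adapter's style is activated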
    if trigger_word:
        positive_prompt = f"{trigger_word} {positive_prompt}"
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator(device="cuda").manual_seed(seed)
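    # Run the pipeline; the negative prompt is only passed when it is non-empty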
    images = pipe(
        prompt=positive_prompt,
        negative_prompt=final_negative_prompt if final_negative_prompt else None,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=generator,
        output_type="pil",
    ).images
    image_paths = [save_image(img) for img in images]
    return image_paths, seed
# Example prompts
examples = [
"Super Realism, High-resolution photograph, woman, UHD, photorealistic, shot on a Sony A7III --chaos 20 --ar 1:2 --style raw --stylize 250",
"Woman in a red jacket, snowy, in the style of hyper-realistic portraiture, caninecore, mountainous vistas, timeless beauty, palewave, iconic, distinctive noses --ar 72:101 --stylize 750 --v 6",
"Super Realism, Headshot of handsome young man, wearing dark gray sweater with buttons and big shawl collar, brown hair and short beard, serious look on his face, black background, soft studio lighting, portrait photography --ar 85:128 --v 6.0 --style",
"Super-realism, Purple Dreamy, a medium-angle shot of a young woman with long brown hair, wearing a pair of eye-level glasses, stands in front of a backdrop of purple and white lights. The womans eyes are closed, her lips are slightly parted, as if she is looking up at the sky. Her hair is cascading over her shoulders, framing her face. She is wearing a sleeveless top, adorned with tiny white dots, and a gold chain necklace around her neck. Her left earrings are dangling from her ears, adding a pop of color to the scene."
]
# CSS to center the UI and style components
css = '''
.gradio-container {
  max-width: 590px !important;
  margin: 0 auto !important;
}
h1 {
  text-align: center;
}
footer {
  visibility: hidden;
}
'''
# Gradio interface
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        prompt = gr.Text(
            label="Prompt",
            show_label=False,
            max_lines=1,
            placeholder="Enter your prompt",
            container=False,
        )
        run_button = gr.Button("Run", scale=0, variant="primary")
    result = gr.Gallery(label="Result", columns=1, show_label=False, preview=True)
    with gr.Accordion("Advanced options", open=False):
        style_selection = gr.Dropdown(
            label="Quality Style",
            choices=STYLE_NAMES,
            value=DEFAULT_STYLE_NAME,
            interactive=True,
        )
        use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=False,
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=2048,
                step=64,
                value=1280,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=2048,
                step=64,
                value=832,
            )
        guidance_scale = gr.Slider(
            label="Guidance Scale",
            minimum=0.1,
            maximum=20.0,
            step=0.1,
            value=3.0,
        )
        num_inference_steps = gr.Slider(
            label="Number of inference steps",
            minimum=1,
            maximum=40,
            step=1,
            value=30,
        )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=False,
    )
    # Handle visibility of negative prompt
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )
    # Trigger generate on prompt submit or run button click
    gr.on(
        triggers=[
            prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
            style_selection,
            num_inference_steps,
        ],
        outputs=[result, seed],
        api_name="run",
    )
if __name__ == "__main__":
    demo.queue(max_size=40).launch(ssr_mode=False)
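# A minimal sketch (not part of the original Space) of calling generate() directly on a
# CUDA machine, bypassing the Gradio UI; the prompt below is only an illustrative placeholder:
#
#   paths, used_seed = generate(
#       prompt="portrait photograph of a young woman, golden hour",
#       randomize_seed=True,
#       style_name=DEFAULT_STYLE_NAME,
#   )
#   print(paths[0], used_seed)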