# Hugging Face Space: FLUX.1-dev LoRA demo
# (The "Spaces / Sleeping" lines here were status-banner residue from a web
#  scrape of the Space page, not part of the program; removed so the file parses.)
import gradio as gr
import spaces
import torch
from diffusers import FluxPipeline

# Base model and LoRA adapter on the Hugging Face Hub.
# NOTE: the canonical repo id is "FLUX.1-dev" (with a dot); "FLUX-1-dev"
# does not exist and from_pretrained() would fail with a 404.
BASE_MODEL = "black-forest-labs/FLUX.1-dev"
LORA_MODEL = "MegaTronX/MetartLoRA"  # Replace with your repo

# Load the base pipeline in half precision to fit GPU memory.
pipe = FluxPipeline.from_pretrained(BASE_MODEL, torch_dtype=torch.float16)

# FluxPipeline has no `.unet` attribute (FLUX is transformer-based), so
# `pipe.unet.load_attn_procs(...)` raises AttributeError. The supported
# entry point for LoRA adapters is the pipeline-level loader:
pipe.load_lora_weights(LORA_MODEL)
pipe = pipe.to("cuda")
def generate_image(prompt, num_inference_steps=25, guidance_scale=7.5, seed=None):
    """Generate one image with the FLUX.1-dev + LoRA pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        num_inference_steps: Number of denoising steps (more = slower, finer).
        guidance_scale: Classifier-free guidance strength.
        seed: Optional integer for reproducible output. ``None`` (or a blank
            Gradio Number field) means a fresh random seed each call.

    Returns:
        A PIL.Image.Image produced by the pipeline.
    """
    # Compare against None explicitly: a truthiness test (`if seed`) would
    # silently discard the perfectly valid seed value 0.
    generator = (
        torch.Generator("cuda").manual_seed(int(seed)) if seed is not None else None
    )
    image = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    return image
# Gradio UI: one textbox prompt in, one image out, with tunable sampling knobs.
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(lines=3, label="Prompt"),
        # Defaults mirror generate_image's keyword defaults (25 steps, CFG 7.5).
        gr.Slider(minimum=10, maximum=100, value=25, label="Inference Steps"),
        gr.Slider(minimum=1, maximum=15, value=7.5, label="Guidance Scale"),
        gr.Number(label="Seed (Optional)"),  # left blank -> seed=None -> random
    ],
    outputs=gr.Image(label="Generated Image"),
    title="FLUX.1-dev LoRA Demo",
    description="A demo of your FLUX.1-dev LoRA model.",
)

iface.launch()