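"""Gradio front end for the diffusion demo.

Exposes two tabs: text-to-image generation through DiffusionInference and
image-to-image transformation, optionally routed through a depth ControlNet
pipeline.
"""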
import gradio as gr

import config
from inference import DiffusionInference
from controlnet_pipeline import ControlNetPipeline

# Instantiate the inference backends once at import time so they are shared
# across all Gradio requests.
inference = DiffusionInference()
controlnet = ControlNetPipeline()


def text_to_image_fn(prompt, model, negative_prompt=None, guidance_scale=7.5, num_inference_steps=50, seed=None):
    """
    Handle a text-to-image generation request.
    """
    try:
        # Fall back to the configured default model when the field is left blank.
        if not model or model.strip() == '':
            model = config.DEFAULT_TEXT2IMG_MODEL

        # Parse the optional seed; non-integer input falls back to a random seed.
        seed_value = None
        if seed and seed.strip() != '':
            try:
                seed_value = int(seed)
            except (ValueError, TypeError):
                pass

        kwargs = {
            "prompt": prompt,
            "model_name": model,
            "guidance_scale": float(guidance_scale),
            "num_inference_steps": int(num_inference_steps),
            "seed": seed_value
        }

        if negative_prompt is not None:
            kwargs["negative_prompt"] = negative_prompt

        image = inference.text_to_image(**kwargs)

        if image is None:
            return None, "No image was generated. Check the model and parameters."

        return image, None
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        print(error_msg)
        return None, error_msg


def image_to_image_fn(image, prompt, model, use_controlnet=False, negative_prompt=None, guidance_scale=7.5, num_inference_steps=50):
    """
    Handle an image-to-image transformation request.
    """
    if image is None:
        return None, "No input image provided."

    # Fall back to the configured default prompt when the field is left blank.
    if prompt is None or prompt.strip() == "":
        prompt = config.DEFAULT_IMG2IMG_PROMPT

    try:
        if use_controlnet:
            # Depth-conditioned ControlNet path; the model textbox is ignored here.
            result = controlnet.generate(
                prompt=prompt,
                image=image,
                negative_prompt=negative_prompt,
                guidance_scale=float(guidance_scale),
                num_inference_steps=int(num_inference_steps)
            )
            return result, None
        else:
            # Plain image-to-image path using the selected (or default) model.
            if not model or model.strip() == '':
                model = config.DEFAULT_IMG2IMG_MODEL

            result = inference.image_to_image(
                image=image,
                prompt=prompt,
                model_name=model,
                negative_prompt=negative_prompt,
                guidance_scale=float(guidance_scale) if guidance_scale is not None else None,
                num_inference_steps=int(num_inference_steps) if num_inference_steps is not None else None
            )

            if result is None:
                return None, "No image was generated. Check the model and parameters."

            return result, None
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        print(error_msg)
        print(f"Input image type: {type(image)}")
        print(f"Prompt: {prompt}")
        print(f"Model: {model}")
        return None, error_msg


with gr.Blocks(title="Diffusion Models") as app:
    gr.Markdown("# Hugging Face Diffusion Models")

    with gr.Tab("Text to Image"):
        with gr.Row():
            with gr.Column():
                txt2img_prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", value=config.DEFAULT_TEXT2IMG_PROMPT)
                txt2img_negative = gr.Textbox(label="Negative Prompt (Optional)", placeholder="What to exclude from the image", value=config.DEFAULT_NEGATIVE_PROMPT)
                txt2img_model = gr.Textbox(label="Model", placeholder="Enter model name", value=config.DEFAULT_TEXT2IMG_MODEL)
                txt2img_guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale")
                txt2img_steps = gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Inference Steps")
                txt2img_seed = gr.Textbox(label="Seed (Optional)", placeholder="Leave empty for random seed", value="")
                txt2img_button = gr.Button("Generate Image")

            with gr.Column():
                txt2img_output = gr.Image(type="pil", label="Generated Image")
                txt2img_error = gr.Textbox(label="Error", visible=True)

        txt2img_button.click(
            fn=text_to_image_fn,
            inputs=[txt2img_prompt, txt2img_model, txt2img_negative, txt2img_guidance, txt2img_steps, txt2img_seed],
            outputs=[txt2img_output, txt2img_error]
        )

    with gr.Tab("Image to Image"):
        with gr.Row():
            with gr.Column():
                img2img_input = gr.Image(type="pil", label="Input Image")
                img2img_prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", value=config.DEFAULT_IMG2IMG_PROMPT)
                img2img_negative = gr.Textbox(label="Negative Prompt (Optional)", placeholder="What to exclude from the image", value=config.DEFAULT_NEGATIVE_PROMPT)

                with gr.Row():
                    with gr.Column(scale=1):
                        img2img_controlnet = gr.Checkbox(label="Use ControlNet (Depth)", value=False)
                    with gr.Column(scale=2):
                        img2img_model = gr.Textbox(label="Model (used only if ControlNet is disabled)", placeholder="Enter model name", value=config.DEFAULT_IMG2IMG_MODEL, visible=True)

                img2img_guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale")
                img2img_steps = gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Inference Steps")
                img2img_button = gr.Button("Transform Image")

            with gr.Column():
                img2img_output = gr.Image(type="pil", label="Generated Image")
                img2img_error = gr.Textbox(label="Error", visible=True)

        img2img_button.click(
            fn=image_to_image_fn,
            inputs=[img2img_input, img2img_prompt, img2img_model, img2img_controlnet, img2img_negative, img2img_guidance, img2img_steps],
            outputs=[img2img_output, img2img_error]
        )

    def toggle_model_visibility(use_controlnet):
        # Return a component update: returning a bare boolean would overwrite
        # the textbox's value instead of toggling its visibility.
        return gr.update(visible=not use_controlnet)

    img2img_controlnet.change(
        fn=toggle_model_visibility,
        inputs=[img2img_controlnet],
        outputs=[img2img_model]
    )


if __name__ == "__main__":
    app.launch(server_name=config.GRADIO_HOST, server_port=config.GRADIO_PORT)