import gradio as gr
import config
from inference import DiffusionInference

# Initialize the inference class
inference = DiffusionInference()
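
# The constants and helper class referenced below come from the local `config`
# and `inference` modules, which are not shown in this file. The summary here
# is inferred purely from how they are used and may not match their actual
# definitions:
#   config.DEFAULT_TEXT2IMG_MODEL / DEFAULT_IMG2IMG_MODEL    - fallback model names
#   config.DEFAULT_TEXT2IMG_PROMPT / DEFAULT_IMG2IMG_PROMPT  - fallback prompts
#   config.DEFAULT_NEGATIVE_PROMPT                           - fallback negative prompt
#   config.GRADIO_HOST / config.GRADIO_PORT                  - server bind address and port
#   DiffusionInference.text_to_image(prompt, model_name, negative_prompt,
#                                    guidance_scale, num_inference_steps) -> PIL image or None
#   DiffusionInference.image_to_image(image, prompt, model_name, negative_prompt,
#                                     guidance_scale, num_inference_steps) -> PIL image or None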

def text_to_image_fn(prompt, model, negative_prompt=None, guidance_scale=7.5, num_inference_steps=50):
    """
    Handle text to image generation request
    """
    try:
        # Model validation - fall back to the default if empty
        if not model or model.strip() == '':
            model = config.DEFAULT_TEXT2IMG_MODEL
            
        # Call the inference module
        image = inference.text_to_image(
            prompt=prompt,
            model_name=model,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps
        )
        
        if image is None:
            return None, "No image was generated. Check the model and parameters."
        
        return image, None
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        print(error_msg)
        return None, error_msg

def image_to_image_fn(image, prompt, model, negative_prompt=None, guidance_scale=7.5, num_inference_steps=50):
    """
    Handle image to image transformation request
    """
    if image is None:
        return None, "No input image provided."
    
    # Model validation - fall back to the default if empty
    if not model or model.strip() == '':
        model = config.DEFAULT_IMG2IMG_MODEL
    
    # Prompt validation - fall back to the default if empty
    if prompt is None or prompt.strip() == "":
        prompt = config.DEFAULT_IMG2IMG_PROMPT
    
    try:
        # Call the inference module with explicit parameters
        result = inference.image_to_image(
            image=image,
            prompt=prompt,  # Guaranteed non-empty by the fallback above
            model_name=model,
            negative_prompt=negative_prompt,
            guidance_scale=float(guidance_scale) if guidance_scale is not None else None,
            num_inference_steps=int(num_inference_steps) if num_inference_steps is not None else None
        )
        
        if result is None:
            return None, "No image was generated. Check the model and parameters."
        
        return result, None
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        print(error_msg)
        print(f"Input image type: {type(image)}")
        print(f"Prompt: {prompt}")
        print(f"Model: {model}")
        return None, error_msg

# Create Gradio UI
with gr.Blocks(title="Diffusion Models") as app:
    gr.Markdown("# Hugging Face Diffusion Models")
    
    with gr.Tab("Text to Image"):
        with gr.Row():
            with gr.Column():
                txt2img_prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", value=config.DEFAULT_TEXT2IMG_PROMPT)
                txt2img_negative = gr.Textbox(label="Negative Prompt (Optional)", placeholder="What to exclude from the image", value=config.DEFAULT_NEGATIVE_PROMPT)
                txt2img_model = gr.Textbox(label="Model", placeholder="Enter model name", value=config.DEFAULT_TEXT2IMG_MODEL)
                txt2img_guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale")
                txt2img_steps = gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Inference Steps")
                txt2img_button = gr.Button("Generate Image")
            
            with gr.Column():
                txt2img_output = gr.Image(type="pil", label="Generated Image")
                txt2img_error = gr.Textbox(label="Error", visible=True)
        
        txt2img_button.click(
            fn=text_to_image_fn,
            inputs=[txt2img_prompt, txt2img_model, txt2img_negative, txt2img_guidance, txt2img_steps],
            outputs=[txt2img_output, txt2img_error]
        )
    
    with gr.Tab("Image to Image"):
        with gr.Row():
            with gr.Column():
                img2img_input = gr.Image(type="pil", label="Input Image")
                img2img_prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", value=config.DEFAULT_IMG2IMG_PROMPT)
                img2img_negative = gr.Textbox(label="Negative Prompt (Optional)", placeholder="What to exclude from the image", value=config.DEFAULT_NEGATIVE_PROMPT)
                img2img_model = gr.Textbox(label="Model", placeholder="Enter model name", value=config.DEFAULT_IMG2IMG_MODEL)
                img2img_guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale")
                img2img_steps = gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Inference Steps")
                img2img_button = gr.Button("Transform Image")
            
            with gr.Column():
                img2img_output = gr.Image(type="pil", label="Generated Image")
                img2img_error = gr.Textbox(label="Error", visible=True)
        
        img2img_button.click(
            fn=image_to_image_fn,
            inputs=[img2img_input, img2img_prompt, img2img_model, img2img_negative, img2img_guidance, img2img_steps],
            outputs=[img2img_output, img2img_error]
        )

# Launch the Gradio app
if __name__ == "__main__":
    app.launch(server_name=config.GRADIO_HOST, server_port=config.GRADIO_PORT)
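
# A minimal config.py that would satisfy this app could look like the sketch
# below. The values are illustrative assumptions only; the actual project may
# use different models, prompts, and server settings:
#
#   DEFAULT_TEXT2IMG_MODEL = "stabilityai/stable-diffusion-2-1"
#   DEFAULT_IMG2IMG_MODEL = "stabilityai/stable-diffusion-2-1"
#   DEFAULT_TEXT2IMG_PROMPT = "A scenic mountain landscape at sunset"
#   DEFAULT_IMG2IMG_PROMPT = "Enhance the detail and color of this image"
#   DEFAULT_NEGATIVE_PROMPT = "blurry, low quality, distorted"
#   GRADIO_HOST = "0.0.0.0"
#   GRADIO_PORT = 7860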