import gradio as gr
from PIL import Image
from utils import generate_thumbnail


def process(image, text, font_size, position, text_color):
    """Render the given text onto the uploaded image and return the finished thumbnail."""
    result = generate_thumbnail(
        image, text, font_size=font_size, position=position, text_color=text_color
    )
    return result


demo = gr.Interface(
    fn=process,
    inputs=[
        gr.Image(type="pil", label="Upload Background Image"),
        gr.Textbox(label="Thumbnail Text"),
        gr.Slider(20, 100, step=5, value=60, label="Font Size"),
        gr.Radio(["top", "center", "bottom"], label="Text Position", value="bottom"),
        gr.ColorPicker(label="Text Color", value="#FFFFFF"),
    ],
    outputs=gr.Image(label="Generated Thumbnail"),
    title="🖼️ AI Thumbnail Generator",
    description="Upload an image and generate a custom thumbnail with your text.",
    allow_flagging="never",
)


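# Launching below starts a local Gradio server (Gradio's default is
# http://127.0.0.1:7860); pass share=True to demo.launch() if you want a
# temporary public link.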
if __name__ == "__main__":
    demo.launch()


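# ---------------------------------------------------------------------------
# Separate service: a FastAPI endpoint that generates images with Stable
# Diffusion. It is independent of the Gradio thumbnail app above and would
# normally live in its own file.
# ---------------------------------------------------------------------------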
from diffusers import StableDiffusionPipeline
import torch
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class Prompt(BaseModel):
    prompt: str


# Choose the device first so the dtype can match it: fp16 weights are only
# practical on a GPU, so fall back to full precision on CPU. The deprecated
# revision="fp16" branch is not needed when torch_dtype is set explicitly.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=dtype,
    use_safetensors=True,
).to(device)


@app.post("/generate")
def generate_image(data: Prompt):
    # Run the diffusion pipeline on the prompt and take the first generated image.
    image = pipe(data.prompt).images[0]
    # Save to the working directory; each request overwrites the previous file.
    image.save("output.png")
    return {"message": "Image saved as output.png"}
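

# A quick way to try the endpoint locally, assuming this file is saved as
# main.py (the module name here is an assumption) and uvicorn is installed:
#   uvicorn main:app
#   curl -X POST http://127.0.0.1:8000/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "a watercolor fox in a forest"}'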