import inspect
import math

import gradio as gr
import torch
from PIL import Image
from diffusers import ShapEImg2ImgPipeline

# Debug aid: show which keyword arguments the pipeline's __call__ accepts.
print(inspect.signature(ShapEImg2ImgPipeline.__call__))

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Shap-E image-to-3D pipeline; fp16 only makes sense on a GPU.
pipe = ShapEImg2ImgPipeline.from_pretrained(
    "openai/shap-e-img2img",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
pipe = pipe.to(device)
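
# Optional sketch, not part of the original app: if GPU memory is tight and
# accelerate is installed, the weights could instead be offloaded to the CPU
# between forward passes (in that case, skip the explicit .to(device) above).
# pipe.enable_model_cpu_offload()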


def resize(value, img_path):
    img = Image.open(img_path)
    img = img.resize((value, value))
    return img


def infer(source_img, prompt, steps, seed, strength):
    # `prompt` comes from the UI but is not passed on: the Shap-E image-to-image
    # pipeline is conditioned on the input image only, not on text.
    # Cast in case the sliders deliver float values.
    generator = torch.Generator(device).manual_seed(int(seed))
    # Guarantee at least one effective denoising step for small strength values.
    if int(steps * strength) < 1:
        steps = math.ceil(1 / max(0.10, strength))
    source_image = resize(512, source_img)
    source_image.save('source.png')

    # ShapEImg2ImgPipeline.__call__ expects `image` (not `images`) and has no
    # `strength` argument, so strength only feeds the step heuristic above.
    result = pipe(image=source_image, guidance_scale=7.5, num_inference_steps=int(steps), generator=generator)
    # `.images[0]` holds the rendered views of the (single) input image;
    # return the first view so the Gradio image output can display it.
    return result.images[0][0]
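

# A possible extension (a sketch, not wired into the interface below): Shap-E
# renders a full turnaround of views per input, so the frames could be exported
# as an animated GIF with diffusers' export_to_gif helper instead of returning
# only the first view. The `infer_gif` name and "shape_e.gif" path are
# illustrative choices, not part of the original app.
from diffusers.utils import export_to_gif


def infer_gif(source_img, steps, seed):
    generator = torch.Generator(device).manual_seed(int(seed))
    frames = pipe(
        image=resize(512, source_img),
        guidance_scale=7.5,
        num_inference_steps=int(steps),
        generator=generator,
    ).images[0]
    # export_to_gif writes the frames to disk and returns the file path.
    return export_to_gif(frames, "shape_e.gif")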


gr.Interface(
    fn=infer,
    inputs=[
        gr.Image(sources=["upload", "webcam", "clipboard"], type="filepath", label="Raw Image"),
        gr.Textbox(label='Creative Touch (prompt)'),
        gr.Slider(1, 50, value=25, step=1, label='Number of Iterations'),
        gr.Slider(minimum=0, maximum=987654321987654321, step=1, randomize=True, label='Seed'),
        gr.Slider(minimum=0.1, maximum=1, step=0.05, value=0.5, label='Strength'),
    ],
    outputs='image',
    title="Creative Touch",
).queue(max_size=10).launch()