# Shap-E image-to-3D Gradio demo script.
"""Gradio demo: image-conditioned generation with the Shap-E img2img pipeline."""
import math

import gradio as gr
import torch
from PIL import Image

from diffusers import ShapEImg2ImgPipeline

# NOTE(review): the original file inspected ShapEImg2ImgPipeline.__call__ with
# `inspect.signature` BEFORE importing the class, which raises NameError at
# startup; that debug print has been removed.

# Prefer GPU when available; fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline once at module import. fp16 halves GPU memory; CPU
# kernels require fp32.
pipe = ShapEImg2ImgPipeline.from_pretrained(
    "openai/shap-e-img2img",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
pipe = pipe.to(device)
# Resize function
def resize(value, img_path):
    """Open the image at *img_path* and return a (value, value) copy.

    Aspect ratio is NOT preserved — the image is stretched to a square.

    Args:
        value: target edge length in pixels.
        img_path: filesystem path to the source image.

    Returns:
        A new PIL Image of size (value, value).
    """
    # Context manager releases the underlying file handle promptly;
    # the original left it open for the garbage collector to reclaim.
    with Image.open(img_path) as img:
        return img.resize((value, value))
# Inference function
def infer(source_img, prompt, steps, seed, strength):
    """Run the Shap-E img2img pipeline on an uploaded image.

    Args:
        source_img: path to the uploaded image (gradio ``type="filepath"``).
        prompt: text from the UI. NOTE(review): ShapEImg2ImgPipeline is
            image-conditioned and accepts no text prompt, so this value is
            currently unused — confirm whether the textbox should be removed.
        steps: number of denoising iterations.
        seed: RNG seed for reproducible generation.
        strength: UI slider value; only used below to guarantee at least
            one effective step.

    Returns:
        A single PIL image for the gradio ``'image'`` output.
    """
    generator = torch.Generator(device).manual_seed(seed)
    # Guarantee at least one effective denoising step for small strengths.
    if int(steps * strength) < 1:
        steps = math.ceil(1 / max(0.10, strength))
    source_image = resize(512, source_img)
    source_image.save('source.png')
    # ShapEImg2ImgPipeline.__call__ takes `image` (not `images`) and has no
    # `strength` parameter — TODO confirm against the installed diffusers
    # version; the original keyword raised TypeError.
    result = pipe(
        image=source_image,
        guidance_scale=7.5,
        num_inference_steps=steps,
        generator=generator,
    )
    # With output_type="pil" the pipeline may return a list of frames per
    # input; unwrap defensively to one image for the gradio output.
    output = result.images[0]
    return output[0] if isinstance(output, list) else output
# Gradio interface
# Assemble the web UI: raw image + prompt + sliders in, a single image out.
demo = gr.Interface(
    fn=infer,
    inputs=[
        gr.Image(sources=["upload", "webcam", "clipboard"], type="filepath", label="Raw Image"),
        gr.Textbox(label='Creative Touch (prompt)'),
        gr.Slider(1, 50, value=25, step=1, label='Number of Iterations'),
        gr.Slider(minimum=0, maximum=987654321987654321, step=1, randomize=True, label='Seed'),
        gr.Slider(minimum=0.1, maximum=1, step=0.05, value=0.5, label='Strength'),
    ],
    outputs='image',
    title="Creative Touch",
)
# Queue limits concurrent jobs so the GPU is not oversubscribed.
demo.queue(max_size=10).launch()