# Duplicated from adpro/Stable-Diffusion-Side-by-Side (commit db3636d).
import grequests
import os
import gradio as gr
import numpy as np
import random
import torch
import subprocess
import time
import json
import base64
from io import BytesIO
from PIL import Image
# Backend inference endpoint (hard-coded IP; TODO(review): move to an env var / config).
url = "http://54.91.63.201:80"

print('==' * 20)
# os.system() streams the command's stdout directly to our stdout and RETURNS
# the exit status; wrapping it in print() would emit a stray "0" after the
# hostname, so the return value is deliberately discarded.
os.system("hostname -i")
def url_requests(req_list, sizeImg=None):
    """Fire the prepared requests in parallel and decode the returned images.

    Args:
        req_list: list of unsent ``grequests`` request objects (each response
            body is expected to be JSON with ``img_str`` — a base64-encoded
            image — and ``ip`` — the responding host, which is logged).
        sizeImg: optional ``(width, height)`` tuple; when given, each decoded
            image is resized to it.  Defaults to ``None`` (no resize) so
            callers without a reference size — e.g. ``txt2img_generate``,
            which passes only ``req_list`` — no longer raise TypeError.

    Returns:
        list[PIL.Image.Image]: one decoded image per response.
    """
    img_list = []
    for resp in grequests.map(req_list):
        # Parse the JSON body once instead of once per field.
        payload = json.loads(resp.text)
        print("location: ", payload["ip"])
        img_bytes = base64.b64decode(payload["img_str"])
        # Wrap the raw bytes in a file-like object for PIL.
        img = Image.open(BytesIO(img_bytes))
        if sizeImg is not None:
            img = img.resize(sizeImg)
        img_list.append(img)
    return img_list
def img2img_generate(source_img, prompt, steps=25, strength=0.25, seed=42, guidance_scale=15):
    """Run text-guided image-to-image generation on the remote backend.

    The source image is JPEG-encoded and base64-wrapped, then POSTed twice to
    ``url`` (two parallel requests; the UI shows the two results side by side).

    Args:
        source_img: PIL image to transform.
        prompt: text prompt guiding the transformation.
        steps: number of diffusion inference steps.
        strength: noise strength added to the source image (0.0-1.0).
        seed: RNG seed for reproducibility.
        guidance_scale: how strongly the prompt influences the result.

    Returns:
        list[PIL.Image.Image]: generated images, resized to the source size.
    """
    print('=*' * 20)
    print(type(source_img))
    print("prompt: ", prompt)

    # Serialize the source image as base64-encoded JPEG for the JSON payload.
    buffered = BytesIO()
    source_img.save(buffered, format="JPEG")
    print(source_img.size)
    img_b64 = base64.b64encode(buffered.getvalue())

    data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
            "guidance_scale": guidance_scale, "seed": seed, "strength": strength}
    # Two identical requests so the UI can compare two backend responses.
    req_list = [
        grequests.post(url, data=json.dumps(data)),
        grequests.post(url, data=json.dumps(data)),
    ]
    return url_requests(req_list, source_img.size)
def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
    """Run text-to-image generation on the remote backend.

    POSTs the generation parameters twice to ``url`` (two parallel requests;
    the UI shows the two results side by side).

    Args:
        prompt: text prompt to render.
        steps: number of diffusion inference steps.
        seed: RNG seed for reproducibility.
        guidance_scale: how strongly the prompt influences the result.

    Returns:
        list[PIL.Image.Image]: the generated images at the backend's native size.
    """
    print("prompt: ", prompt)
    print("steps: ", steps)
    data = {"prompt": prompt,
            "steps": steps, "guidance_scale": guidance_scale, "seed": seed}
    # Two identical requests so the UI can compare two backend responses.
    req_list = [
        grequests.post(url, data=json.dumps(data)),
        grequests.post(url, data=json.dumps(data)),
    ]
    return url_requests(req_list)
# Markdown shown under the page title (currently empty placeholder).
md = '''
'''

# Custom CSS for the demo layout.  The image-height rules must be *id*
# selectors (``#img_1`` etc.) to match the components created with
# ``elem_id="img_1"`` — without the ``#`` prefix they would be treated as
# (nonexistent) element selectors and never apply.
css = '''
.instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
.arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
#component-4, #component-3, #component-10{min-height: 0}
.duplicate-button img{margin: 0}
#img_1{height:5rem}
#img_2{height:15rem}
#img_3{height:15rem}
#img_4{height:15rem}
#img_5{height:15rem}
#img_6{height:15rem}
'''

# One random seed chosen at startup and shared by both tabs' Seed sliders.
random_seed = random.randint(0, 2147483647)
# ---- Gradio UI -------------------------------------------------------------
# NOTE(review): this uses the legacy gradio API (`gr.inputs.*`, `.style(...)`)
# and so is tied to an old pinned gradio version — confirm before upgrading.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Stable Diffusion Inference Demo Side-by-Side")
    gr.Markdown(md)
    # Tab 1: plain text-to-image generation.
    with gr.Tab("Text-to-Image"):
        with gr.Row() as text_to_image:
            with gr.Column():
                prompt = gr.inputs.Textbox(label='Prompt', default='a photo of an astronaut riding a horse on mars')
                inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
                seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
                guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
                txt2img_button = gr.Button("Generate Image")
            with gr.Column():
                # Two result slots — the labels claim different CPU generations,
                # but both requests go to the same `url`; verify backend routing.
                result_image_1 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)").style(height='1', rounded=True)
                result_image_2 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)").style(height='100', rounded=False)
    # Tab 2: image-to-image generation guided by a text prompt.
    with gr.Tab("Image-to-Image text-guided generation"):
        with gr.Row() as image_to_image:
            with gr.Column():
                # Default source image: the upstream CompVis img2img sample sketch.
                source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
                prompt_2 = gr.inputs.Textbox(label='Prompt', default='A fantasy landscape, trending on artstation')
                inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
                seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
                guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
                strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.25, step=0.01)
                img2img_button = gr.Button("Generate Image")
            with gr.Column():
                # NOTE(review): both outputs share elem_id="img_1"; the css also
                # defines img_2..img_6 which nothing here uses — confirm intent.
                result_image_3 = gr.Image(label="Result01", elem_id="img_1")
                result_image_4 = gr.Image(label="Result02", elem_id="img_1")
    # Wire buttons to the generator functions; each returns two images,
    # matching the two output slots.  queue=False bypasses the gradio queue.
    txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image_1, result_image_2], queue=False)
    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=[result_image_3, result_image_4], queue=False)

# debug=True blocks and surfaces errors in the console (Spaces-style launch).
demo.queue(default_enabled=False).launch(debug=True)