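"""Gradio demo that shows Stable Diffusion results side by side.

Every generate click fans the same request out (via grequests) to a remote
HTTP inference backend and displays the two returned images next to each
other, with separate tabs for text-to-image and text-guided image-to-image.
"""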
# grequests monkey-patches the socket layer via gevent, so import it before
# any other module that does network I/O.
import grequests
import gradio as gr
import random
import subprocess
import time
import json
import base64
from io import BytesIO
from PIL import Image


# Remote inference backend that serves the Stable Diffusion requests.
url = "http://54.91.63.201:80"

print('==' * 20)
# os.system() returns the exit status, not the command output, so capture the
# host IP with subprocess instead of printing a bare 0.
print(subprocess.check_output(["hostname", "-i"]).decode().strip())

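
# Each backend response is assumed to be JSON of the form
#   {"img_str": "<base64-encoded image>", "ip": "<worker address>"}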
def url_requests(req_list, sizeImg=None):
    """Run the prepared requests concurrently and decode each returned image.

    sizeImg, if given, is a (width, height) tuple the decoded images are
    resized to.
    """
    img_list = []
    res_list = grequests.map(req_list)
    for resp in res_list:
        resp_json = json.loads(resp.text)
        print("location: ", resp_json["ip"])

        img_byte = base64.b64decode(resp_json["img_str"])
        img_io = BytesIO(img_byte)  # wrap the raw bytes in a file-like object
        img = Image.open(img_io)    # decode into a PIL Image
        if sizeImg is not None:
            img = img.resize(sizeImg)
        img_list.append(img)

    return img_list


def img2img_generate(source_img, prompt, steps=25, strength=0.25, seed=42, guidance_scale=15):
    print('=*' * 20)
    print(type(source_img))
    print("prompt: ", prompt)

    # Encode the uploaded PIL image as a base64 JPEG for the JSON payload.
    buffered = BytesIO()
    source_img.save(buffered, format="JPEG")
    print(source_img.size)
    img_b64 = base64.b64encode(buffered.getvalue())

    data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
            "guidance_scale": guidance_scale, "seed": seed, "strength": strength}

    # Send the same request to the backend twice so the two results can be
    # compared side by side.
    start_time = time.time()
    req_list = [
        grequests.post(url, data=json.dumps(data)),
        grequests.post(url, data=json.dumps(data)),
    ]
    img_list = url_requests(req_list, source_img.size)
    print("img2img latency: %.2fs" % (time.time() - start_time))

    return img_list


def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
    print("prompt: ", prompt)
    print("steps: ", steps)
    data = {"prompt": prompt,
            "steps": steps, "guidance_scale": guidance_scale, "seed": seed}

    # Same fan-out as img2img: two identical requests, one result per column.
    start_time = time.time()
    req_list = [
        grequests.post(url, data=json.dumps(data)),
        grequests.post(url, data=json.dumps(data)),
    ]
    img_list = url_requests(req_list)  # no resize target for text-to-image
    print("txt2img latency: %.2fs" % (time.time() - start_time))

    return img_list


md = '''
'''

css = '''
    .instruction{position: absolute; top: 0; right: 0; margin-top: 0px !important}
    .arrow{position: absolute; top: 0; right: -110px; margin-top: -8px !important}
    #component-4, #component-3, #component-10{min-height: 0}
    .duplicate-button img{margin: 0}
    #img_1{height:15rem}
    #img_2{height:15rem}
'''

# One random seed shared as the default for both tabs' Seed sliders.
random_seed = random.randint(0, 2147483647)

# Two-tab UI: each Generate button calls the matching fan-out function and
# fills the two side-by-side result images in its tab.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Stable Diffusion Inference Demo Side-by-Side")
    gr.Markdown(md)

    with gr.Tab("Text-to-Image"):
        with gr.Row() as text_to_image:
            with gr.Column():
                prompt = gr.Textbox(label='Prompt', value='a photo of an astronaut riding a horse on mars')
                inference_steps = gr.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., to avoid a black image)', value=20, step=1)
                seed = gr.Slider(0, 2147483647, label='Seed', value=random_seed, step=1)
                guidance_scale = gr.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt influences the result', value=7.5, step=0.1)
                txt2img_button = gr.Button("Generate Image")

            with gr.Column():
                result_image_1 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)").style(rounded=True)
                result_image_2 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)").style(rounded=True)

    with gr.Tab("Image-to-Image text-guided generation"):
        with gr.Row() as image_to_image:
            with gr.Column():
                source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
                prompt_2 = gr.Textbox(label='Prompt', value='A fantasy landscape, trending on artstation')
                inference_steps_2 = gr.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., to avoid a black image)', value=20, step=1)
                seed_2 = gr.Slider(0, 2147483647, label='Seed', value=random_seed, step=1)
                guidance_scale_2 = gr.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt influences the result', value=7.5, step=0.1)
                strength = gr.Slider(0.0, 1.0, label='Strength - the higher the strength, the more noise is added to the source image', value=0.25, step=0.01)
                img2img_button = gr.Button("Generate Image")

            with gr.Column():
                result_image_3 = gr.Image(label="Result01", elem_id="img_1")
                result_image_4 = gr.Image(label="Result02", elem_id="img_2")
                
    txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image_1, result_image_2], queue=False)
    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=[result_image_3, result_image_4], queue=False)
   
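# queue(default_enabled=False) together with queue=False on the click handlers
# keeps requests synchronous; debug=True blocks and streams server logs.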
demo.queue(default_enabled=False).launch(debug=True)