Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -7,30 +7,17 @@ import subprocess
 import time
 import requests
 import json
-import threading
 
 import base64
 from io import BytesIO
 from PIL import Image
 
-
-url_ICX = "http://54.221.56.4:80"
+url = "http://54.91.63.201:80"
 
 print('=='*20)
 print(os.system("hostname -i"))
 
-
-def url_requests(url, data):
-    resp = requests.post(url, data=json.dumps(data))
-    img_str = json.loads(resp.text)["img_str"]
-
-    img_byte = base64.b64decode(img_str)
-    img_io = BytesIO(img_byte)  # convert image to file-like object
-    img = Image.open(img_io)  # img is now PIL Image object
-
-    return img
-
-def img2img_generate(url, source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5):
+def img2img_generate(source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5):
     # cpu info
     # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
     print('=*'*20)
@@ -45,12 +32,18 @@ def img2img_generate(url, source_img, prompt, steps=25, strength=0.75, seed=42,
             "token": os.environ["access_token"]}
 
     start_time = time.time()
-    img = url_requests(url, data)
+    resp = requests.post(url, data=json.dumps(data))
+    img_str = json.loads(resp.text)["img_str"]
+    print("location: ", json.loads(resp.text)["ip"])
 
+    img_byte = base64.b64decode(img_str)
+    img_io = BytesIO(img_byte)  # convert image to file-like object
+    img = Image.open(img_io)  # img is now PIL Image object
+    print("cost: ", time.time() - start_time)
     return img
 
 
-def txt2img_generate(url, prompt, steps=25, seed=42, guidance_scale=7.5):
+def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
     # cpu info
     # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
     print("prompt: ", prompt)
@@ -59,12 +52,20 @@ def txt2img_generate(url, prompt, steps=25, seed=42, guidance_scale=7.5):
             "steps": steps, "guidance_scale": guidance_scale, "seed": seed,
             "token": os.environ["access_token"]}
     start_time = time.time()
-    img = url_requests(url, data)
+    resp = requests.post(url, data=json.dumps(data))
 
+    img_str = json.loads(resp.text)["img_str"]
+    print("location: ", json.loads(resp.text)["ip"])
+
+    img_byte = base64.b64decode(img_str)
+    img_io = BytesIO(img_byte)  # convert image to file-like object
+    img = Image.open(img_io)  # img is now PIL Image object
+    print("cost: ", time.time() - start_time)
     return img
 
 md = """
-This demo shows the accelerated inference performance of a Stable Diffusion model on **4th Gen Intel Xeon Scalable Processors (Sapphire Rapids)**
+This demo shows the accelerated inference performance of a Stable Diffusion model on **4th Gen Intel Xeon Scalable Processors (Sapphire Rapids)** on Amazon Web Services. Try it and generate photorealistic images from text!
+You may also want to try creating your own Stable Diffusion model with few-shot fine-tuning. Please refer to our <a href="https://medium.com/intel-analytics-software/personalized-stable-diffusion-with-few-shot-fine-tuning-on-a-single-cpu-f01a3316b13">blog</a> and <a href="https://github.com/intel/neural-compressor/tree/master/examples/pytorch/diffusion_model/diffusers/textual_inversion">code</a> available in <a href="https://github.com/intel/neural-compressor">Intel Neural Compressor</a> and <a href="https://github.com/huggingface/diffusers">Hugging Face Diffusers</a>.
 """
 
 css = '''
@@ -72,13 +73,12 @@ css = '''
 .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
 #component-4, #component-3, #component-10{min-height: 0}
 .duplicate-button img{margin: 0}
-#img_1, #img_2, #img_3, #img_4{height:15rem}
 '''
 
 random_seed = random.randint(0, 2147483647)
 
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("# Stable Diffusion Inference Demo")
+    gr.Markdown("# Stable Diffusion Inference Demo on 4th Gen Intel Xeon Scalable Processors")
     gr.Markdown(md)
 
     with gr.Tab("Text-to-Image"):
@@ -89,13 +89,11 @@ with gr.Blocks(css=css) as demo:
                 seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
                 guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
                 txt2img_button = gr.Button("Generate Image")
-                url_SPR_txt = gr.Textbox(label='url_SPR_txt', value="http://34.229.166.42:80", visible=False)
-                url_CLX_txt = gr.Textbox(label='url_CLX_txt', value="http://54.221.56.4:80", visible=False)
 
             with gr.Column():
-
-
-
+                result_image = gr.Image()
+
+
     with gr.Tab("Image-to-Image text-guided generation"):
         with gr.Row() as image_to_image:
             with gr.Column():
@@ -106,17 +104,13 @@ with gr.Blocks(css=css) as demo:
                 guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
                 strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01)
                 img2img_button = gr.Button("Generate Image")
-                url_SPR = gr.Textbox(label='url_SPR', value="http://34.229.166.42:80", visible=False)
-                url_CLX = gr.Textbox(label='url_CLX', value="http://54.221.56.4:80", visible=False)
 
-
             with gr.Column():
-
-
-
-    txt2img_button.click(fn=txt2img_generate, inputs=[
-
-
-    img2img_button.click(fn=img2img_generate, inputs=[url_CLX, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_4, queue=False)
+                result_image_2 = gr.Image()
+
+
+    txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=result_image, queue=False)
+    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_2, queue=False)
+
 
 demo.queue(default_enabled=False).launch(debug=True)
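
For context, the client code added in this commit assumes the endpoint at url answers a raw-JSON POST with a body of the form {"img_str": <base64-encoded image>, "ip": <serving host>}. Below is a minimal sketch of such a handler; Flask, the route, and the stub pipeline are assumptions, and only the request fields and the img_str/ip response shape are taken from the diff.

# Hypothetical server-side counterpart to the client code in this commit.
# Flask and run_pipeline are assumptions; the request fields and the
# {"img_str": ..., "ip": ...} response shape come from the diff above.
import base64
import json
import socket
from io import BytesIO

from flask import Flask, jsonify, request
from PIL import Image

app = Flask(__name__)

def run_pipeline(prompt, steps, guidance_scale, seed):
    # Stand-in for the real Stable Diffusion pipeline running on the Xeon host.
    return Image.new("RGB", (512, 512), "gray")

@app.route("/", methods=["POST"])
def generate():
    # The client posts json.dumps(...) as the raw body (no JSON content type),
    # so parse request.data directly; a real handler would also check data["token"].
    data = json.loads(request.data)
    img = run_pipeline(data["prompt"], data["steps"], data["guidance_scale"], data["seed"])
    buf = BytesIO()
    img.save(buf, format="PNG")  # PIL Image -> PNG bytes
    img_str = base64.b64encode(buf.getvalue()).decode("utf-8")
    return jsonify({"img_str": img_str,
                    "ip": socket.gethostbyname(socket.gethostname())})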
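The request/decode sequence inlined by this commit parses resp.text twice, sets no timeout, and never checks the HTTP status, so a server-side failure surfaces in the UI as a KeyError or JSONDecodeError. A more defensive variant of the same steps might look like the sketch below; the helper name is illustrative and the endpoint contract is unchanged.

# Illustrative hardening of the request/decode sequence inlined above.
import base64
import json
from io import BytesIO

import requests
from PIL import Image

def fetch_image(url, payload, timeout=300):
    resp = requests.post(url, data=json.dumps(payload), timeout=timeout)
    resp.raise_for_status()                # surface HTTP errors explicitly
    body = resp.json()                     # parse the response body once
    print("location: ", body["ip"])
    img_bytes = base64.b64decode(body["img_str"])
    return Image.open(BytesIO(img_bytes))  # file-like object -> PIL Image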
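A caveat beyond the scope of this commit: the surrounding UI code still uses the legacy gr.inputs.Slider(..., default=...) API, which recent Gradio releases have removed; if the Space resolves to a new enough Gradio at build time, that alone can produce the runtime-error status shown in the header. A sketch of the equivalent sliders under the current API (assuming Gradio 3.x or later, which this diff does not pin):

# Current-API equivalents of the gr.inputs.* sliders used above
# (assumes Gradio >= 3.x; `default=` becomes `value=`).
import random

import gradio as gr

random_seed = random.randint(0, 2147483647)

with gr.Blocks() as demo:
    seed = gr.Slider(0, 2147483647, label='Seed', value=random_seed, step=1)
    guidance_scale = gr.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', value=7.5, step=0.1)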