adpro wenjiao committed on
Commit
e2fdfc6
·
0 Parent(s):

Duplicate from wenjiao/Stable-Diffusion-Side-by-Side

Browse files

Co-authored-by: Wenjiao Yue <[email protected]>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +123 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Stable Diffusion On Intel CPUs
3
+ emoji: 🏢
4
+ colorFrom: pink
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 3.15.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: wenjiao/Stable-Diffusion-Side-by-Side
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import grequests
2
+ import os
3
+ import gradio as gr
4
+ import numpy as np
5
+ import random
6
+ import torch
7
+ import subprocess
8
+ import time
9
+ import json
10
+ import base64
11
+ from io import BytesIO
12
+ from PIL import Image
13
+
14
+
15
# Backend inference service endpoint (plain HTTP, hard-coded EC2-style address).
# Both tabs POST their JSON payloads here; there is no TLS and no auth.
url = "http://54.91.63.201:80"

# Startup debug output: a visual separator, then the container's IP.
# NOTE(review): os.system() returns the command's exit status, so the second
# print emits 0 on success — the IP itself reaches stdout from the shell
# command, not from print. Probably intentional for Spaces logs; confirm.
print('=='*20)
print(os.system("hostname -i"))
+
20
def url_requests(req_list):
    """Send the prepared requests concurrently and decode each response image.

    Parameters
    ----------
    req_list : list
        Unsent ``grequests`` request objects targeting the inference backend.

    Returns
    -------
    list[PIL.Image.Image]
        One decoded image per response, in request order.

    Raises
    ------
    RuntimeError
        If any request failed (``grequests.map`` returns ``None`` for it).
    """
    img_list = []
    res_list = grequests.map(req_list)
    for resp in res_list:
        # grequests.map() yields None for a request that raised; fail with a
        # clear message instead of an opaque AttributeError on resp.text.
        if resp is None:
            raise RuntimeError("inference backend request failed: no response")

        # Parse the JSON body once and reuse it (original parsed it twice).
        payload = json.loads(resp.text)
        print("location: ", payload["ip"])

        img_byte = base64.b64decode(payload["img_str"])
        img_io = BytesIO(img_byte)  # wrap raw bytes in a file-like object
        img = Image.open(img_io)    # decode into a PIL Image
        img_list.append(img)

    return img_list
33
+
34
+
35
def img2img_generate(source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5):
    """Run image-to-image generation against the backend, twice in parallel.

    Parameters
    ----------
    source_img : PIL.Image.Image
        Input image from the Gradio upload widget.
    prompt : str
        Text prompt guiding the generation.
    steps, strength, seed, guidance_scale
        Diffusion parameters forwarded verbatim to the backend.

    Returns
    -------
    list[PIL.Image.Image]
        Two images, one per request, bound to the two output widgets.
        NOTE(review): both requests hit the same URL; presumably the backend
        routes them to the SPR/ICX machines the labels claim — confirm.
    """
    print('=*'*20)
    print(type(source_img))
    print("prompt: ", prompt)

    # JPEG-encode the input image and ship it as base64 text inside the JSON.
    buffered = BytesIO()
    source_img.save(buffered, format="JPEG")
    img_b64 = base64.b64encode(buffered.getvalue())

    data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
            "guidance_scale": guidance_scale, "seed": seed, "strength": strength}

    # Serialize once and reuse for both requests (original called json.dumps
    # twice and assigned a start_time that was never used).
    payload = json.dumps(data)
    req_list = [
        grequests.post(url, data=payload),
        grequests.post(url, data=payload),
    ]
    return url_requests(req_list)
56
+
57
+
58
def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
    """Run text-to-image generation against the backend, twice in parallel.

    Parameters
    ----------
    prompt : str
        Text prompt guiding the generation.
    steps, seed, guidance_scale
        Diffusion parameters forwarded verbatim to the backend.

    Returns
    -------
    list[PIL.Image.Image]
        Two images, one per request, bound to the two output widgets.
        NOTE(review): both requests hit the same URL; presumably the backend
        routes them to the SPR/ICX machines the labels claim — confirm.
    """
    print("prompt: ", prompt)
    print("steps: ", steps)

    data = {"prompt": prompt,
            "steps": steps, "guidance_scale": guidance_scale, "seed": seed}

    # Serialize once and reuse for both requests (original called json.dumps
    # twice and assigned a start_time that was never used).
    payload = json.dumps(data)
    req_list = [
        grequests.post(url, data=payload),
        grequests.post(url, data=payload),
    ]
    return url_requests(req_list)
73
+
74
# Intro text rendered below the page title.
md = """
This Spaces app is to demonstrate the inference of Stable Diffusion on **4th Gen Intel Xeon Scalable Processors (Sapphire Rapids)** vs. **3rd Gen Intel Xeon Scalable Processors (Ice Lake)** on Amazon Web Services. Now, you can have a try and see the significant performance speedup on 4th Gen Intel Xeon Scalable Processors!
"""

# Custom CSS. #img_1 (15rem tall) is referenced by result_image_3 below;
# the #component-N selectors depend on gradio's auto-generated element ids.
css = '''
.instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
.arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
#component-4, #component-3, #component-10{min-height: 0}
.duplicate-button img{margin: 0}
#img_1{height:15rem}
'''

# One seed drawn at startup; shared as the default of both tabs' Seed sliders.
random_seed = random.randint(0, 2147483647)

# NOTE(review): the gr.inputs.* namespace and .style() are the gradio 3.x
# API (sdk_version 3.15.0 in README.md); both are removed in gradio 4.x.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Stable Diffusion Inference Demo Side-by-Side")
    gr.Markdown(md)

    # --- Tab 1: text-to-image -------------------------------------------
    with gr.Tab("Text-to-Image"):
        with gr.Row() as text_to_image:
            with gr.Column():
                prompt = gr.inputs.Textbox(label='Prompt', default='a photo of an astronaut riding a horse on mars')
                inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
                seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
                guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
                txt2img_button = gr.Button("Generate Image")

            with gr.Column():
                # NOTE(review): height='1' vs height='100' and the mixed
                # rounded flags look accidental — confirm intended sizing.
                result_image_1 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)").style(height='1', rounded=True)
                result_image_2 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)").style(height='100', rounded=False)

    # --- Tab 2: image-to-image ------------------------------------------
    with gr.Tab("Image-to-Image text-guided generation"):
        with gr.Row() as image_to_image:
            with gr.Column():
                # Default input image fetched from the CompVis repo.
                source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
                prompt_2 = gr.inputs.Textbox(label='Prompt', default='A fantasy landscape, trending on artstation')
                inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
                seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
                guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
                strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01)
                img2img_button = gr.Button("Generate Image")

            with gr.Column():
                # result_image_3 is sized via the #img_1 CSS rule above.
                result_image_3 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)", elem_id="img_1")
                result_image_4 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)").style(height='24', rounded=False)

    # Wire buttons to the handlers; each returns [img, img] matching the two
    # output widgets. Input order matches the handler signatures exactly.
    txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image_1, result_image_2], queue=False)
    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=[result_image_3, result_image_4], queue=False)

# Blocking launch; debug=True streams errors to the Spaces log.
demo.queue(default_enabled=False).launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ torch
2
+ diffusers==0.8.1
3
+ transformers==4.21.2
4
+ requests
5
+ gevent
6
+ grequests