adpro committed
Commit db3636d · 0 Parent(s)

Duplicate from adpro/Stable-Diffusion-Side-by-Side

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +129 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Stable Diffusion On Intel CPUs
+ emoji: 🏢
+ colorFrom: pink
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 3.15.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: adpro/Stable-Diffusion-Side-by-Side
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,129 @@
+ import grequests  # gevent-based async HTTP; imported first so its monkey-patching runs before other imports
+ import os
+ import gradio as gr
+ import numpy as np
+ import random
+ import torch
+ import subprocess
+ import time
+ import json
+ import base64
+ from io import BytesIO
+ from PIL import Image
+
+
+ url = "http://54.91.63.201:80"
+
+ print('==' * 20)
+ print(os.system("hostname -i"))  # the command's output goes to stdout; os.system returns the exit code
+
+ def url_requests(req_list, sizeImg=None):
+     img_list = []
+     res_list = grequests.map(req_list)  # fire both backend requests concurrently
+     for resp in res_list:
+         img_str = json.loads(resp.text)["img_str"]
+         print("location: ", json.loads(resp.text)["ip"])
+
+         img_byte = base64.b64decode(img_str)
+         img_io = BytesIO(img_byte)  # convert image to a file-like object
+         img = Image.open(img_io)  # img is now a PIL Image object
+         _img = img.resize(sizeImg) if sizeImg else img  # resize only when a target size is given
+         img_list.append(_img)
+
+     return img_list
+
+
+ def img2img_generate(source_img, prompt, steps=25, strength=0.25, seed=42, guidance_scale=15):
+     # cpu info
+     # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
+     print('=*' * 20)
+     print(type(source_img))
+     print("prompt: ", prompt)
+     buffered = BytesIO()
+
+     source_img.save(buffered, format="JPEG")
+     print(source_img.size)
+     img_b64 = base64.b64encode(buffered.getvalue())
+
+     data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
+             "guidance_scale": guidance_scale, "seed": seed, "strength": strength}
+
+     start_time = time.time()
+     req_list = [
+         grequests.post(url, data=json.dumps(data)),
+         grequests.post(url, data=json.dumps(data)),
+     ]
+     img_list = url_requests(req_list, source_img.size)
+
+     return img_list
+
+
+ def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
+     # cpu info
+     # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
+     print("prompt: ", prompt)
+     print("steps: ", steps)
+     data = {"prompt": prompt,
+             "steps": steps, "guidance_scale": guidance_scale, "seed": seed}
+     start_time = time.time()
+     req_list = [
+         grequests.post(url, data=json.dumps(data)),
+         grequests.post(url, data=json.dumps(data)),
+     ]
+     img_list = url_requests(req_list)  # no resize target for text-to-image results
+
+     return img_list
+ md = '''
+ '''
+
+ css = '''
+ .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
+ .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
+ #component-4, #component-3, #component-10{min-height: 0}
+ .duplicate-button img{margin: 0}
+ img_1{height:5rem}
+ img_2{height:15rem}
+ img_3{height:15rem}
+ img_4{height:15rem}
+ img_5{height:15rem}
+ img_6{height:15rem}
+ '''
+
+ random_seed = random.randint(0, 2147483647)
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown("# Stable Diffusion Inference Demo Side-by-Side")
+     gr.Markdown(md)
+
+     with gr.Tab("Text-to-Image"):
+         with gr.Row() as text_to_image:
+             with gr.Column():
+                 prompt = gr.inputs.Textbox(label='Prompt', default='a photo of an astronaut riding a horse on mars')
+                 inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding a black image)', default=20, step=1)
+                 seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
+                 guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
+                 txt2img_button = gr.Button("Generate Image")
+
+             with gr.Column():
+                 result_image_1 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)").style(height='1', rounded=True)
+                 result_image_2 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)").style(height='100', rounded=False)
+
+     with gr.Tab("Image-to-Image text-guided generation"):
+         with gr.Row() as image_to_image:
+             with gr.Column():
+                 source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
+                 prompt_2 = gr.inputs.Textbox(label='Prompt', default='A fantasy landscape, trending on artstation')
+                 inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding a black image)', default=20, step=1)
+                 seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
+                 guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
+                 strength = gr.inputs.Slider(0.0, 1.0, label='Strength - the larger the strength, the more noise is added to the source image', default=0.25, step=0.01)
+                 img2img_button = gr.Button("Generate Image")
+
+             with gr.Column():
+                 result_image_3 = gr.Image(label="Result01", elem_id="img_1")
+                 result_image_4 = gr.Image(label="Result02", elem_id="img_1")
+
+     txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image_1, result_image_2], queue=False)
+     img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=[result_image_3, result_image_4], queue=False)
+
+ demo.queue(default_enabled=False).launch(debug=True)
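
The inference backend that app.py calls at `url` is not part of this commit; only the JSON contract is visible from the client side (a payload with `prompt`, `steps`, `guidance_scale`, `seed`, and optionally `source_img`/`strength`, answered by `{"img_str": <base64 image>, "ip": <worker address>}`). The sketch below is a minimal, hypothetical text-to-image endpoint compatible with that contract; Flask, the model id, and the port are assumptions, not the actual server behind the Space.

```python
# Hypothetical backend sketch (not part of this commit): a Flask endpoint that
# accepts the JSON body app.py posts and returns the {"img_str", "ip"} JSON
# that url_requests() expects. Model id, route, and port are assumptions.
import base64
import json
import socket
from io import BytesIO

import torch
from diffusers import StableDiffusionPipeline
from flask import Flask, request, jsonify

app = Flask(__name__)
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # assumed model

@app.route("/", methods=["POST"])
def generate():
    payload = json.loads(request.data)  # app.py sends json.dumps(...) as a raw body
    generator = torch.Generator().manual_seed(int(payload.get("seed", 42)))
    image = pipe(
        payload["prompt"],
        num_inference_steps=int(payload.get("steps", 20)),
        guidance_scale=float(payload.get("guidance_scale", 7.5)),
        generator=generator,
    ).images[0]
    buf = BytesIO()
    image.save(buf, format="JPEG")
    return jsonify({
        "img_str": base64.b64encode(buf.getvalue()).decode(),  # decoded in url_requests()
        "ip": socket.gethostbyname(socket.gethostname()),      # printed as "location" by app.py
    })

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=80)
```

The image-to-image path would additionally decode the base64 `source_img` field and feed it, together with `strength`, to an image-to-image pipeline; that branch is omitted here.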
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ diffusers==0.8.1
+ transformers==4.21.2
+ requests
+ gevent
+ grequests
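
As a quick sanity check of the same protocol outside Gradio, a single synchronous request can be sent with plain `requests`. This is a hypothetical usage sketch that mirrors the payload built in `txt2img_generate` and the base64 decode in `url_requests`; it only works if the hard-coded backend address is reachable.

```python
# Hypothetical protocol check: post one txt2img payload and decode the response,
# using the same fields and endpoint URL that app.py hard-codes.
import base64
import json
from io import BytesIO

import requests
from PIL import Image

url = "http://54.91.63.201:80"
data = {"prompt": "a photo of an astronaut riding a horse on mars",
        "steps": 20, "guidance_scale": 7.5, "seed": 42}
resp = requests.post(url, data=json.dumps(data))
img = Image.open(BytesIO(base64.b64decode(resp.json()["img_str"])))
img.save("txt2img_result.jpg")
```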