adpro committed on
Commit
4d4bf00
·
1 Parent(s): 1888963

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -17
app.py CHANGED
@@ -12,17 +12,19 @@ import base64
12
  from io import BytesIO
13
  from PIL import Image
14
 
15
- url = "http://54.91.63.201:80"
 
 
 
 
 
16
 
17
- print('=='*20)
18
- print(os.system("hostname -i"))
19
 
20
- def img2img_generate(source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5):
21
  # cpu info
22
  # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
23
- print('=*'*20)
24
- print(type(source_img))
25
  print("prompt: ", prompt)
 
26
  buffered = BytesIO()
27
  source_img.save(buffered, format="JPEG")
28
  img_b64 = base64.b64encode(buffered.getvalue())
@@ -33,19 +35,29 @@ def img2img_generate(source_img, prompt, steps=25, strength=0.75, seed=42, guida
33
 
34
  start_time = time.time()
35
  resp = requests.post(url, data=json.dumps(data))
36
- img_str = json.loads(resp.text)["img_str"]
37
- print("location: ", json.loads(resp.text)["ip"])
 
 
 
 
 
38
 
39
  img_byte = base64.b64decode(img_str)
40
  img_io = BytesIO(img_byte) # convert image to file-like object
41
  img = Image.open(img_io) # img is now PIL Image object
42
- print("cost: ", time.time() - start_time)
43
  return img
44
 
45
 
46
- def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
 
 
 
 
47
  # cpu info
48
  # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
 
49
  print("prompt: ", prompt)
50
  print("steps: ", steps)
51
  data = {"prompt": prompt,
@@ -53,21 +65,33 @@ def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
53
  "token": os.environ["access_token"]}
54
  start_time = time.time()
55
  resp = requests.post(url, data=json.dumps(data))
56
-
57
- img_str = json.loads(resp.text)["img_str"]
58
- print("location: ", json.loads(resp.text)["ip"])
 
 
 
59
 
60
  img_byte = base64.b64decode(img_str)
61
  img_io = BytesIO(img_byte) # convert image to file-like object
62
  img = Image.open(img_io) # img is now PIL Image object
63
- print("cost: ", time.time() - start_time)
64
  return img
65
 
66
  md = """
67
- This demo shows the accelerated inference performance of a Stable Diffusion model on **4th Gen Intel Xeon Scalable Processors (Sapphire Rapids)** on Amazon Web Services. Try it and generate photorealistic images from text!
68
  You may also want to try creating your own Stable Diffusion model with few-shot fine-tuning. Please refer to our <a href=\"https://medium.com/intel-analytics-software/personalized-stable-diffusion-with-few-shot-fine-tuning-on-a-single-cpu-f01a3316b13\">blog</a> and <a href=\"https://github.com/intel/neural-compressor/tree/master/examples/pytorch/diffusion_model/diffusers/textual_inversion\">code</a> available in <a href=\"https://github.com/intel/neural-compressor\">Intel Neural Compressor</a> and <a href=\"https://github.com/huggingface/diffusers\">Hugging Face Diffusers</a>.
69
  """
70
 
 
 
 
 
 
 
 
 
 
71
  css = '''
72
  .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
73
  .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
@@ -88,6 +112,7 @@ with gr.Blocks(css=css) as demo:
88
  inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
89
  seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
90
  guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
 
91
  txt2img_button = gr.Button("Generate Image")
92
 
93
  with gr.Column():
@@ -98,19 +123,26 @@ with gr.Blocks(css=css) as demo:
98
  with gr.Row() as image_to_image:
99
  with gr.Column():
100
  source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
 
101
  prompt_2 = gr.inputs.Textbox(label='Prompt', default='A fantasy landscape, trending on artstation')
102
  inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
103
  seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
104
  guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
105
  strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01)
 
106
  img2img_button = gr.Button("Generate Image")
107
 
108
  with gr.Column():
109
  result_image_2 = gr.Image()
110
 
111
 
112
- txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=result_image, queue=False)
113
- img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_2, queue=False)
 
 
 
114
 
 
 
115
 
116
  demo.queue(default_enabled=False).launch(debug=True)
 
12
  from io import BytesIO
13
  from PIL import Image
14
 
15
+ url = "http://107.23.90.209:80"
16
+
17
+ # print('=='*20)
18
+ # print(os.system("hostname -i"))
19
+
20
+ def img2img_generate(source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5, hidden=""):
21
 
 
 
22
 
 
23
  # cpu info
24
  # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
25
+ print('image-to-image')
 
26
  print("prompt: ", prompt)
27
+ print("steps: ", steps)
28
  buffered = BytesIO()
29
  source_img.save(buffered, format="JPEG")
30
  img_b64 = base64.b64encode(buffered.getvalue())
 
35
 
36
  start_time = time.time()
37
  resp = requests.post(url, data=json.dumps(data))
38
+
39
+ try:
40
+ img_str = json.loads(resp.text)["img_str"]
41
+ print("compute node: ", json.loads(resp.text)["ip"])
42
+ except:
43
+ print('no inference result. please check server connection')
44
+ return None
45
 
46
  img_byte = base64.b64decode(img_str)
47
  img_io = BytesIO(img_byte) # convert image to file-like object
48
  img = Image.open(img_io) # img is now PIL Image object
49
+ print("elapsed time: ", time.time() - start_time)
50
  return img
51
 
52
 
53
+ def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5, hidden=""):
54
+
55
+ if hidden != os.environ["front_token"]:
56
+ return None
57
+
58
  # cpu info
59
  # print(subprocess.check_output(["cat /proc/cpuinfo | grep 'model name' |uniq"], stderr=subprocess.STDOUT).decode("utf8"))
60
+ print('text-to-image')
61
  print("prompt: ", prompt)
62
  print("steps: ", steps)
63
  data = {"prompt": prompt,
 
65
  "token": os.environ["access_token"]}
66
  start_time = time.time()
67
  resp = requests.post(url, data=json.dumps(data))
68
+ try:
69
+ img_str = json.loads(resp.text)["img_str"]
70
+ print("compute node: ", json.loads(resp.text)["ip"])
71
+ except:
72
+ print('no inference result. please check server connection')
73
+ return None
74
 
75
  img_byte = base64.b64decode(img_str)
76
  img_io = BytesIO(img_byte) # convert image to file-like object
77
  img = Image.open(img_io) # img is now PIL Image object
78
+ print("elapsed time: ", time.time() - start_time)
79
  return img
80
 
81
  md = """
82
+ This demo shows the accelerated inference performance of a Stable Diffusion model on **Intel Xeon Gold 64xx (4th Gen Intel Xeon Scalable Processors codenamed Sapphire Rapids)**. Try it and generate photorealistic images from text!
83
  You may also want to try creating your own Stable Diffusion model with few-shot fine-tuning. Please refer to our <a href=\"https://medium.com/intel-analytics-software/personalized-stable-diffusion-with-few-shot-fine-tuning-on-a-single-cpu-f01a3316b13\">blog</a> and <a href=\"https://github.com/intel/neural-compressor/tree/master/examples/pytorch/diffusion_model/diffusers/textual_inversion\">code</a> available in <a href=\"https://github.com/intel/neural-compressor\">Intel Neural Compressor</a> and <a href=\"https://github.com/huggingface/diffusers\">Hugging Face Diffusers</a>.
84
  """
85
 
86
+ legal = """
87
+ Performance varies by use, configuration and other factors. Learn more at www.Intel.com/PerformanceIndex. Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure.
88
+ © Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others.
89
+ """
90
+
91
+ details = """
92
+ 4th Gen Intel Xeon Scalable Processor Inference. Test by Intel on 01/06/2023. 1 node, 1S, Intel(R) Xeon(R) Gold 64xx CPU @ 3.0GHz 32 cores and software with 512GB (8x64GB DDR5 4800 MT/s [4800 MT/s]), microcode 0x2a000080, HT on, Turbo on, Ubuntu 22.04.1 LTS, 5.15.0-1026-aws, 200G Amazon Elastic Block Store. Multiple nodes connected with Elastic Network Adapter (ENA). PyTorch Nightly build (2.0.0.dev20230105+cpu), Transformers 4.25.1, Diffusers 0.11.1, oneDNN v2.7.2.
93
+ """
94
+
95
  css = '''
96
  .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
97
  .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
 
112
  inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
113
  seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
114
  guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
115
+ hidden = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False)
116
  txt2img_button = gr.Button("Generate Image")
117
 
118
  with gr.Column():
 
123
  with gr.Row() as image_to_image:
124
  with gr.Column():
125
  source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
126
+ # source_img = gr.Image(source="upload", type="pil")
127
  prompt_2 = gr.inputs.Textbox(label='Prompt', default='A fantasy landscape, trending on artstation')
128
  inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
129
  seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
130
  guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
131
  strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01)
132
+ hidden_2 = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False)
133
  img2img_button = gr.Button("Generate Image")
134
 
135
  with gr.Column():
136
  result_image_2 = gr.Image()
137
 
138
 
139
+ txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale, hidden], outputs=result_image, queue=False)
140
+ img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2, hidden_2], outputs=result_image_2, queue=False)
141
+
142
+ gr.Markdown("**Additional Test Configuration Details:**")
143
+ gr.Markdown(details)
144
 
145
+ gr.Markdown("**Notices and Disclaimers:**")
146
+ gr.Markdown(legal)
147
 
148
  demo.queue(default_enabled=False).launch(debug=True)