adpro committed on
Commit f9f328b · 1 Parent(s): 97fa09c

Update app.py

Files changed (1)
  1. app.py +21 -20
app.py CHANGED
@@ -1,4 +1,3 @@
-import grequests
 import os
 import gradio as gr
 import numpy as np
@@ -6,31 +5,32 @@ import random
 import torch
 import subprocess
 import time
+import requests
 import json
+import threading
+
 import base64
 from io import BytesIO
 from PIL import Image


-url = "http://54.91.63.201:80"
+url_SPR = "http://34.229.166.42:80"
+url_ICX = "http://54.221.56.4:80"

 print('=='*20)
 print(os.system("hostname -i"))

-def url_requests(req_list,sizeImg):
-    img_list = []
-    res_list = grequests.map(req_list)
-    for resp in res_list:
-        img_str = json.loads(resp.text)["img_str"]
-        print("location: ", json.loads(resp.text)["ip"])
-
-        img_byte = base64.b64decode(img_str)
-        img_io = BytesIO(img_byte) # convert image to file-like object
-        img = Image.open(img_io) # img is now PIL Image object
-        _img = img.resize((sizeImg))
-        img_list.append(_img)
+def url_requests(url,sizeImg):
+    resp = requests.post(url, data=json.dumps(data))
+    img_str = json.loads(resp.text)["img_str"]
+
+    img_byte = base64.b64decode(img_str)
+    img_io = BytesIO(img_byte) # convert image to file-like object
+    img = Image.open(img_io) # img is now PIL Image object
+    _img = img.resize((sizeImg))
+    return _img
+

-    return img_list


 def img2img_generate(source_img, prompt, steps=25, strength=0.25, seed=42, guidance_scale=15):
@@ -46,16 +46,17 @@ def img2img_generate(source_img, prompt, steps=25, strength=0.25, seed=42, guida
     img_b64 = base64.b64encode(buffered.getvalue())

     data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
-            "guidance_scale": guidance_scale, "seed": seed, "strength": strength}
+            "guidance_scale": guidance_scale, "seed": seed, "strength": strength,
+            "token": os.environ["access_token"]}

     start_time = time.time()
     req_list = [
        grequests.post(url, data=json.dumps(data)),
        grequests.post(url, data=json.dumps(data)),
     ]
-    img_list = url_requests(req_list,source_img.size)
+    img = url_requests(url,source_img.size)

-    return img_list
+    return img


 def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
@@ -124,6 +125,6 @@ with gr.Blocks(css=css) as demo:
         result_image_4 = gr.Image(label="Result02", elem_id="img_1")

     txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image_1, result_image_2], queue=False)
-    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=[result_image_3, result_image_4], queue=False)
-
+    img2img_button.click(fn=img2img_generate, inputs=[url_SPR, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_3, queue=False)
+    img2img_button.click(fn=img2img_generate, inputs=[url_CLX, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_4, queue=False)
 demo.queue(default_enabled=False).launch(debug=True)
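
For reference, the round trip that the updated url_requests() performs is: serialize the request parameters as JSON, POST them to one of the backend endpoints, and decode the base64 image string ("img_str") in the JSON response. Below is a minimal, self-contained sketch of that flow, assuming a backend at one of the URLs from the diff that accepts the payload fields built in img2img_generate() and answers with JSON containing "img_str"; the helper names encode_image and request_image are illustrative, not part of app.py.

import base64
import json
from io import BytesIO

import requests
from PIL import Image

URL = "http://34.229.166.42:80"  # url_SPR in the diff; url_ICX would be used the same way


def encode_image(img):
    # Illustrative helper: encode a PIL image as a base64 string for the JSON payload.
    buffered = BytesIO()
    img.save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode()


def request_image(url, payload, size):
    # Illustrative helper: POST the payload and return the decoded, resized PIL image.
    resp = requests.post(url, data=json.dumps(payload))
    img_str = json.loads(resp.text)["img_str"]  # backend is assumed to return a base64-encoded image here
    img = Image.open(BytesIO(base64.b64decode(img_str)))
    return img.resize(size)


# Hypothetical usage mirroring img2img_generate():
# src = Image.open("input.jpg")
# payload = {"source_img": encode_image(src), "prompt": "a watercolor landscape",
#            "steps": 25, "guidance_scale": 15, "seed": 42, "strength": 0.25,
#            "token": os.environ["access_token"]}  # requires "import os" and the access_token env var
# result = request_image(URL, payload, src.size)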