import threading
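
# Cross-thread queues: a producer (the Gradio UI) appends task tuples to
# `buffer`; this worker consumes them and appends ['preview', ...] and
# ['results', ...] entries to `outputs` for the UI to poll.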
buffer = []
outputs = []
def worker():
    global buffer, outputs

    import time
    import shared
    import random
    import modules.default_pipeline as pipeline
    import modules.path
    import modules.patch

    from modules.sdxl_styles import apply_style, aspect_ratios
    from modules.private_logger import log
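
    # Print the local (and, if sharing is enabled, public) URL once the Gradio
    # app is up; failures here are non-fatal and only logged.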
    try:
        async_gradio_app = shared.gradio_root
        flag = f'''App started successfully. Use the app with {str(async_gradio_app.local_url)} or {str(async_gradio_app.server_name)}:{str(async_gradio_app.server_port)}'''
        if async_gradio_app.share:
            flag += f''' or {async_gradio_app.share_url}'''
        print(flag)
    except Exception as e:
        print(e)
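
    # handler() consumes one task tuple from the UI, runs the text-to-image
    # pipeline for the requested number of images, and reports progress and
    # final results through `outputs`.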
def handler(task):
prompt, negative_prompt, style_selction, performance_selction, \
aspect_ratios_selction, image_number, image_seed, sharpness, base_model_name, refiner_model_name, \
l1, w1, l2, w2, l3, w3, l4, w4, l5, w5 = task
loras = [(l1, w1), (l2, w2), (l3, w3), (l4, w4), (l5, w5)]
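
        # Push the sharpness setting into the patch module, (re)load the base
        # model, refiner and LoRAs, then clear cached prompt conditioning.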
        modules.patch.sharpness = sharpness

        pipeline.refresh_base_model(base_model_name)
        pipeline.refresh_refiner_model(refiner_model_name)
        pipeline.refresh_loras(loras)
        pipeline.clean_prompt_cond_caches()

        p_txt, n_txt = apply_style(style_selection, prompt, negative_prompt)
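
        # Performance presets: 'Speed' samples 30 steps and switches to the
        # refiner at step 15; any other choice uses 60 steps with the switch at 30.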
        if performance_selection == 'Speed':
            steps = 30
            switch = 15
        else:
            steps = 60
            switch = 30

        width, height = aspect_ratios[aspect_ratios_selection]

        results = []
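
        # Normalise the seed: draw a random one if it is not an integer, take
        # the absolute value, and reduce it modulo max_seed.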
        seed = image_seed
        max_seed = int(1024 * 1024 * 1024)
        if not isinstance(seed, int):
            seed = random.randint(1, max_seed)
        if seed < 0:
            seed = -seed
        seed = seed % max_seed

        all_steps = steps * image_number
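
        # Per-step sampler callback: converts progress across all requested
        # images into an overall percentage and pushes a live preview frame to
        # `outputs`. It reads the loop index `i` from the enclosing scope.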
        def callback(step, x0, x, total_steps, y):
            done_steps = i * steps + step
            outputs.append(['preview', (
                int(100.0 * float(done_steps) / float(all_steps)),
                f'Step {step}/{total_steps} in the {i}-th Sampling',
                y)])
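
        # Generate the images one by one, bumping the seed after each, and log
        # every result together with its generation parameters.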
        for i in range(image_number):
            imgs = pipeline.process(p_txt, n_txt, steps, switch, width, height, seed, callback=callback)

            for x in imgs:
                d = [
                    ('Prompt', prompt),
                    ('Negative Prompt', negative_prompt),
                    ('Style', style_selection),
                    ('Performance', performance_selection),
                    ('Resolution', str((width, height))),
                    ('Sharpness', sharpness),
                    ('Base Model', base_model_name),
                    ('Refiner Model', refiner_model_name),
                    ('Seed', seed)
                ]
                for n, w in loras:
                    if n != 'None':
                        d.append((f'LoRA [{n}] weight', w))
                log(x, d)

            seed += 1
            results += imgs

        outputs.append(['results', results])
        return
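
    # Main worker loop: poll `buffer` every 10 ms and process queued tasks in
    # FIFO order.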
    while True:
        time.sleep(0.01)
        if len(buffer) > 0:
            task = buffer.pop(0)
            handler(task)
    pass


threading.Thread(target=worker, daemon=True).start()
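
# A minimal usage sketch (not from the original project) of how a caller such
# as the Gradio UI is assumed to drive this worker: append one task tuple to
# `buffer`, then poll `outputs` for ['preview', ...] and ['results', ...]
# entries. The tuple layout must match the unpacking in handler(); every value
# below (style name, aspect ratio key, model filenames, LoRA slots) is a
# placeholder, not a verified key from the real UI.
if __name__ == '__main__':
    import time

    example_task = (
        'a photo of a cat', '',                # prompt, negative prompt
        'cinematic-default', 'Speed',          # style, performance preset
        '1152x896', 1, -1, 2.0,                # aspect ratio key, image count, seed, sharpness
        'sd_xl_base_1.0.safetensors',          # base model (placeholder filename)
        'sd_xl_refiner_1.0.safetensors',       # refiner model (placeholder filename)
        'None', 0.5, 'None', 0.5, 'None', 0.5, 'None', 0.5, 'None', 0.5)  # five LoRA (name, weight) slots

    buffer.append(example_task)
    finished = False
    while not finished:
        time.sleep(0.1)
        while len(outputs) > 0:
            kind, payload = outputs.pop(0)
            print('worker message:', kind)
            finished = finished or kind == 'results'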