# built-in
import os
import subprocess
import logging
import re
import random
import string
import sys
import warnings
# external
import requests
#import spaces
import torch
import gradio as gr
import numpy as np
from lxml.html import fromstring
#from transformers import pipeline
#from diffusers.pipelines.flux import FluxPipeline
from diffusers.utils import export_to_gif, load_image
from diffusers.models.modeling_utils import ModelMixin
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file, save_file
from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
#import jax
#import jax.numpy as jnp
#from numba import cuda, njit, void
#from numba.cuda import jit, grid
#from numba.types import unicode_type
# logging
warnings.filterwarnings("ignore")
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('\n >>> [%(levelname)s] %(asctime)s %(name)s: %(message)s\n')
handler.setFormatter(formatter)
root.addHandler(handler)
handler2 = logging.StreamHandler(sys.stderr)
handler2.setLevel(logging.ERROR)
formatter = logging.Formatter('\n >>> [%(levelname)s] %(asctime)s %(name)s: %(message)s\n')
handler2.setFormatter(formatter)
root.addHandler(handler2)
# data
last_motion=None
dtype = torch.float16
device = "cuda"
#repo = "ByteDance/AnimateDiff-Lightning"
#ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"
#base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
#unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype).to(device)
fast=True
fps=10
time=1
width=384
height=768
step=40
accu=10
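# output settings: 384x768 frames, 40 denoising steps, guidance scale 10, 10 fps for 1 second of animation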
css="""
input, input::placeholder {
text-align: center !important;
}
*, *::placeholder {
font-family: Suez One !important;
}
h1,h2,h3,h4,h5,h6 {
width: 100%;
text-align: center;
}
footer {
display: none !important;
}
#col-container {
margin: 0 auto;
max-width: 15cm;
}
.image-container {
aspect-ratio: """+str(width)+"/"+str(height)+""" !important;
}
.dropdown-arrow {
display: none !important;
}
*:has(>.btn) {
display: flex;
justify-content: space-evenly;
align-items: center;
}
.btn {
display: flex;
}
"""
js="""
function custom(){
document.querySelector("div#prompt input").setAttribute("maxlength","38")
document.querySelector("div#prompt2 input").setAttribute("maxlength","38")
}
"""
# functionality
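# helper: run a shell command and return its captured output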
def run(cmd):
    return subprocess.run(cmd, shell=True, capture_output=True, text=True, env=None).stdout
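# extract the first match of an XPath expression from an HTML string, lowercased and stripped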
def xpath_finder(html, pattern):
    try:
        return fromstring(html).xpath(pattern)[0].text_content().lower().strip()
    except Exception:
        return ""
def translate(text, lang):
    if text is None or lang is None:
        return ""
    text = re.sub(f'[{string.punctuation}]', '', re.sub(r'\s+', ' ', text)).lower().strip()
    lang = re.sub(f'[{string.punctuation}]', '', re.sub(r'\s+', ' ', lang)).lower().strip()
    if text == "" or lang == "":
        return ""
    if len(text) > 38:
        raise Exception("Translation Error: Too long text!")
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15'
    ]
    padded_chars = re.sub(r'^-|-$', '', text.replace("", "-").replace("- -", " ")).strip()
    query_text = f'Please translate {padded_chars}, into {lang}'
    url = f'https://www.google.com/search?q={query_text}'
    content = requests.get(
        url=url,
        headers={
            'User-Agent': random.choice(user_agents)
        }
    ).text
    translated = text
    src_lang = xpath_finder(content, '//*[@class="source-language"]')
    trgt_lang = xpath_finder(content, '//*[@class="target-language"]')
    src_text = xpath_finder(content, '//*[@id="tw-source-text"]/*')
    trgt_text = xpath_finder(content, '//*[@id="tw-target-text"]/*')
    if trgt_lang == lang:
        translated = trgt_text
    ret = re.sub(f'[{string.punctuation}]', '', re.sub(r'\s+', ' ', translated)).lower().strip()
    print(ret)
    return ret
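# build a random alphanumeric string, used to name the exported animation files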
def generate_random_string(length):
    characters = string.ascii_letters + string.digits
    return ''.join(random.choice(characters) for _ in range(length))
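# run the AnimateDiff pipeline once per call; the motion LoRA is swapped only when the selected camera movement changes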
def Piper():
    global last_motion
    global out
    if last_motion != pinp["motion"]:
        pipe.unload_lora_weights()
        if pinp["motion"] != "":
            pipe.load_lora_weights(pinp["motion"], adapter_name="motion")
            pipe.fuse_lora()
            pipe.set_adapters(["motion"], [0.7])
        last_motion = pinp["motion"]
    pipe.to(device, dtype)
    if pinp["negative"] == "":
        out.append(pipe(
            prompt=pinp["positive"],
            height=height,
            width=width,
            ip_adapter_image=pinp["image"].convert("RGB").resize((width, height)),
            num_inference_steps=step,
            guidance_scale=accu,
            num_frames=(fps*time)
        ))
    else:
        out.append(pipe(
            prompt=pinp["positive"],
            negative_prompt=pinp["negative"],
            height=height,
            width=width,
            ip_adapter_image=pinp["image"].convert("RGB").resize((width, height)),
            num_inference_steps=step,
            guidance_scale=accu,
            num_frames=(fps*time)
        ))
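# compose the positive/negative prompts, then generate and export one animation per result slot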
def infer():
    global pinp
    global out
    out = []
    p1 = str(pm["p"])
    neg = str(pm["n"])
    if neg != "":
        neg = f"{neg} where in the image"
    _do = ['photographed', 'realistic', 'dynamic pose', 'deep field', 'reasonable', 'natural', 'rough', 'best quality', 'focused', 'highly detailed']
    if p1 != "":
        _do.append(f"a new {p1} content in the image")
    posi = ", ".join(_do)
    if pm["i"] is None:
        return None
    pinp = {"image": pm["i"], "positive": posi, "negative": neg, "motion": pm["m"]}
    ln = len(result)
    for _ in range(ln):
        Piper()
    for i in range(ln):
        name = generate_random_string(12) + ".png"
        export_to_gif(out[i].frames[0], name, fps=fps)
        out[i] = name
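# Gradio callback: translate both prompts to English, then run inference and return the exported files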
def handle(p1, p2, m, i):
    global pm
    p1_en = translate(p1, "english")
    p2_en = translate(p2, "english")
    pm = {"p": p1_en, "n": p2_en, "m": m, "i": i}
    infer()
    return out
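# build the Gradio interface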
def ui():
with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(f"""
# MULTI-LANGUAGE IMAGE GENERATOR
""")
with gr.Row():
global img
img = gr.Image(label="STATIC PHOTO",show_label=True,container=True,type="pil")
with gr.Row():
global prompt
prompt = gr.Textbox(
elem_id="prompt",
placeholder="INCLUDE",
container=False,
max_lines=1
)
with gr.Row():
global prompt2
prompt2 = gr.Textbox(
elem_id="prompt2",
placeholder="EXCLUDE",
container=False,
max_lines=1
)
with gr.Row():
global motion
motion = gr.Dropdown(
label='CAMERA',
show_label=True,
container=True,
choices=[
("(No Effect)", ""),
("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
],
value="",
interactive=True
)
with gr.Row():
global run_button
run_button = gr.Button("START",elem_classes="btn",scale=0)
with gr.Row():
global result
result = []
result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
        # event wiring must happen while the Blocks context is active
        events()
    demo.queue().launch()
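# assemble the AnimateDiff pipeline: base model, VAE, motion adapter, DDIM scheduler and IP-Adapter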
def pre():
    global pipe
    pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
    pipe.scheduler = DDIMScheduler(
        clip_sample=False,
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="linear",
        timestep_spacing="trailing",
        steps_offset=1
    )
    #pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
    pipe.enable_vae_slicing()
    pipe.enable_free_init(method="butterworth", use_fast_sampling=fast)
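# wire the start button and both prompt submissions to the handler (called from ui() inside the Blocks context)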
def events():
    gr.on(
        triggers=[
            run_button.click,
            prompt.submit,
            prompt2.submit
        ],
        fn=handle,
        inputs=[prompt, prompt2, motion, img],
        outputs=result
    )
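# switch to the script's directory, build the pipeline and start the UI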
def entry():
    os.chdir(os.path.abspath(os.path.dirname(__file__)))
    pre()
    ui()
# entry
entry()
# end