# Kokoro-API-5 / app.py — Yaron Koresh
# (Hugging Face file-viewer header preserved as a comment: "Update app.py",
#  commit 48e1ac1 verified, raw / history / blame, 3.83 kB — viewer chrome,
#  not part of the program source.)
import gradio as gr
#from tempfile import NamedTemporaryFile
import numpy as np
import spaces
import random
import string
from diffusers import StableDiffusionPipeline as DiffusionPipeline
import torch
from pathos.multiprocessing import ProcessingPool as ProcessPoolExecutor
import requests
from lxml.html import fromstring
pool = ProcessPoolExecutor(16)
pool.__enter__()
model_id = "runwayml/stable-diffusion-v1-5"
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.cuda.is_available():
torch.cuda.max_memory_allocated(device=device)
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
pipe = pipe.to(device)
else:
pipe = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True)
pipe = pipe.to(device)
def translate(text,lang):
resp = requests.post(
url = "https://www.bing.com/ttranslatev3?isVertical=1&&IG=13172331D0494B12ABFA8F4454EEB479&IID=translator.5026",
"referrer": "https://www.bing.com/translator?to=en",
"referrerPolicy": "origin-when-cross-origin",
"data": f"&fromLang=auto-detect&to={lang}}&token=cdkbEXg93_iQE28MFPv9ScrPY_fs2OAw&key=1722124106496&text={text}&tryFetchingGenderDebiasedTranslations=true",
"method": "POST",
"mode": "cors",
"credentials": "include"
"headers": {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9,he;q=0.8,ha;q=0.7",
"content-type": "application/x-www-form-urlencoded",
"priority": "u=1, i",
"sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
"sec-ch-ua-arch": "\"x86\"",
"sec-ch-ua-bitness": "\"64\"",
"sec-ch-ua-full-version": "\"126.0.6478.185\"",
"sec-ch-ua-full-version-list": "\"Not/A)Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"126.0.6478.185\", \"Google Chrome\";v=\"126.0.6478.185\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-model": "\"\"",
"sec-ch-ua-platform": "\"Windows\"",
"sec-ch-ua-platform-version": "\"10.0.0\"",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin"
}
)
print(resp)
jsn = resp.json()
print(jsn)
translated = jsn[0]["translations"][0]["text"]
return translated
def generate_random_string(length):
characters = string.ascii_letters + string.digits
return ''.join(random.choice(characters) for _ in range(length))
@spaces.GPU(duration=20)
def infer(prompt):
name = generate_random_string(12)+".png"
english_prompt = "Generate the most true and authentic and real and genuine single photograph, for " + translate(prompt,"en")
print(f'Final prompt: {english_prompt}')
image = pipe(english_prompt).images[0].save(name)
return name
css="""
#col-container {
margin: 0 auto;
max-width: 12cm;
}
#image-container {
aspect-ratio: 1 / 1;
}
"""
if torch.cuda.is_available():
power_device = "GPU"
else:
power_device = "CPU"
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(f"""
# Image Generator
Currently running on {power_device}.
""")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=0)
result = gr.Image(elem_id="image-container", label="Result", show_label=False, type='filepath')
run_button.click(
fn = infer,
inputs = [prompt],
outputs = [result]
)
demo.queue().launch()