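"""Gradio demo: prompt-to-image generation with Stable Diffusion v1.5.

The user's prompt is first translated to English by scraping the Google
Translate web page, then fed to the diffusion pipeline; the resulting image
is saved under a random filename and displayed in the UI.
"""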
import random
import string

import gradio as gr
import requests
import torch
from diffusers import StableDiffusionPipeline
from lxml.html.soupparser import fromstring
model_id = "runwayml/stable-diffusion-v1-5"
device = "cuda" if torch.cuda.is_available() else "cpu"
# Browser User-Agent strings; one is picked at random per translation request.
# Note the commas: without them Python would silently concatenate these
# adjacent string literals into a single element.
user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15',
]
# Load the pipeline in fp16 on GPU when available, otherwise in full precision on CPU.
if torch.cuda.is_available():
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
    )
else:
    pipe = StableDiffusionPipeline.from_pretrained(model_id, use_safetensors=True)
pipe = pipe.to(device)
def translate(text, lang):
    # Translate `text` into `lang` by scraping the Google Translate web page.
    # This relies on Google's unofficial HTML structure and may break without notice.
    html_str = requests.get(
        url="http://translate.google.com",
        params={"sl": "auto", "tl": lang, "op": "translate", "text": text},
        headers={"User-Agent": random.choice(user_agents)},
    ).text
    tree = fromstring(html_str)
    # xpath() returns elements, not strings, so extract the text of the first match.
    translated = tree.xpath('//span[@lang="' + lang + '"]/span/span[text()]')[0]
    return translated.text_content()
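# Rough usage sketch: translate("Bonjour tout le monde", "en") is expected to
# return something like "Hello everyone", assuming Google still serves the
# markup this XPath targets; being unofficial, that markup can change at any time.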
def generate_random_string(length):
    # Random alphanumeric string, used to build unique output filenames.
    characters = string.ascii_letters + string.digits
    return ''.join(random.choice(characters) for _ in range(length))
def infer(prompt):
    # Translate the prompt to English, run the pipeline, and save the image to disk.
    # Note: Image.save() returns None, so save and return the filename separately.
    name = generate_random_string(12) + ".png"
    image = pipe(translate(prompt, "en")).images[0]
    image.save(name)
    return name
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Image Generator
        Currently running on {power_device}.
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False, type='filepath')
        run_button.click(
            fn=infer,
            inputs=[prompt],
            outputs=[result],
        )

demo.queue().launch()
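# To try it locally (assuming the dependencies above are installed):
#   $ python app.py
# then open the printed URL (by default http://127.0.0.1:7860) in a browser.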