import os
import re
import spaces
import random
import string
import torch
import requests
import gradio as gr
import numpy as np
from lxml.html import fromstring
from diffusers import AutoPipelineForText2Image
#from tempfile import NamedTemporaryFile
from pathos.threading import ThreadPool as Pool

#model_id = "runwayml/stable-diffusion-v1-5"
model_id = "kandinsky-community/kandinsky-3"
#model_id = "stabilityai/stable-diffusion-3-medium-diffusers"

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the text-to-image pipeline in fp16 on GPU, or in full precision on CPU.
if torch.cuda.is_available():
    torch.cuda.max_memory_allocated(device=device)
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
        token=os.getenv('hf_token')
    )
else:
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id,
        use_safetensors=True,
        token=os.getenv('hf_token')
    )
pipe = pipe.to(device)


def translate(text, lang):
    """Translate a short text into `lang` by scraping Google's translation widget."""
    if text is None or lang is None:
        return ""
    # Normalize whitespace, strip punctuation, and lowercase both inputs.
    text = re.sub(f'[{string.punctuation}]', '', re.sub(r'[\s+]', ' ', text)).lower().strip()
    lang = re.sub(f'[{string.punctuation}]', '', re.sub(r'[\s+]', ' ', lang)).lower().strip()
    if text == "" or lang == "":
        return ""
    if len(text) > 38:
        raise Exception("Translation Error: text is too long!")
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15'
    ]
    # Hyphenate the characters so the query text survives Google's spell correction.
    padded_chars = re.sub(r"[(^\-)(\-$)]", "", text.replace("", "-").replace("- -", " ")).strip()
    query_text = f'Please translate {padded_chars}, into {lang}'
    url = f'https://www.google.com/search?q={query_text}'
    print(url)
    resp = requests.get(
        url=url,
        headers={
            'User-Agent': random.choice(user_agents)
        }
    )
    content = resp.content
    html = fromstring(content)
    translated = text
    try:
        src_lang = html.xpath('//*[@class="source-language"]')[0].text_content().lower().strip()
        trgt_lang = html.xpath('//*[@class="target-language"]')[0].text_content().lower().strip()
        src_text = html.xpath('//*[@id="tw-source-text"]/*')[0].text_content().lower().strip()
        trgt_text = html.xpath('//*[@id="tw-target-text"]/*')[0].text_content().lower().strip()
        if trgt_lang == lang:
            translated = trgt_text
    except Exception:
        print('Translation Warning: failed to translate!')
    ret = re.sub(f'[{string.punctuation}]', '', re.sub(r'[\s+]', ' ', translated)).lower().strip()
    print(ret)
    return ret


def generate_random_string(length):
    """Return a random alphanumeric string, used for unique output file names."""
    characters = string.ascii_letters + string.digits
    return ''.join(random.choice(characters) for _ in range(length))


@spaces.GPU(duration=45)
def Piper(_do, _dont):
    """Run the diffusion pipeline on the GPU with positive and negative prompts."""
    return pipe(
        _do,
        height=256,
        width=768,
        negative_prompt=_dont,
        num_inference_steps=50,
        guidance_scale=7
    )


def infer(prompt_en, prompt2_en):
    """Generate one image from the (already translated) prompts and save it to disk."""
    name = generate_random_string(12) + ".png"
    if prompt_en == "":
        _do = 'photograph'
    else:
        _do = f'photographed {prompt_en}'
    if prompt2_en == "":
        _dont = 'complex scene, ugly human body, partial human body, smooth texture, fictional content, blurred content, amputated human body, distorted palm fingers, missing legs, unreal eyes, squinting eyes, text anywhere, prints anywhere'
    else:
        _dont = f'{prompt2_en} anywhere, complex scene, ugly human body, partial human body, smooth texture, fictional content, blurred content, amputated human body, distorted palm fingers, missing legs, unreal eyes, squinting eyes, text anywhere, prints anywhere'
    Piper(_do, _dont).images[0].save(name)
    return name


css = """
input, input::placeholder {
    text-align: center !important;
}
*, *::placeholder {
    direction: rtl !important;
    font-family: Suez One !important;
}
h1, h2, h3, h4, h5, h6, span, p, pre {
    width: 100% !important;
    text-align: center !important;
    display: block !important;
}
footer {
    display: none !important;
}
#col-container {
    margin: 0 auto !important;
    max-width: 15cm !important;
}
.image-container {
    aspect-ratio: 768 / 256 !important;
}
.dropdown-arrow {
    display: none !important;
}
*:has(.btn), .btn {
    width: 100% !important;
    margin: 0 auto !important;
}
"""

js = """
function custom(){
    document.querySelector("div#prompt input").setAttribute("maxlength","27");
    document.querySelector("div#prompt2 input").setAttribute("maxlength","27");
}
"""

if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"

with gr.Blocks(theme=gr.themes.Soft(), css=css, js=js) as demo:
    result = []
    with gr.Column(elem_id="col-container"):
        # Hebrew title: "Image Generator"
        gr.Markdown(f"""
        # מחולל תמונות {power_device}
        """)
        with gr.Row():
            prompt = gr.Textbox(
                elem_id="prompt",
                placeholder="מה *כן* להוסיף",  # "what *to* add"
                container=False,
                rtl=True,
                max_lines=1
            )
        with gr.Row():
            prompt2 = gr.Textbox(
                elem_id="prompt2",
                placeholder="מה *לא* להוסיף",  # "what *not* to add"
                container=False,
                rtl=True,
                max_lines=1
            )
        with gr.Row():
            run_button = gr.Button("התחלה", elem_classes="btn", scale=0)  # "Start"
        with gr.Row():
            result.append(gr.Image(interactive=False, elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
        with gr.Row():
            result.append(gr.Image(interactive=False, elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))

    def _ret(idx, p1, p2):
        print(f'Starting {idx}: {p1} {p2}')
        v = infer(p1, p2)
        print(f'Finished {idx}: {v}')
        return v

    def _rets(p1, p2):
        # Translate both prompts to English once, then generate one image per output slot in parallel.
        p1_en = translate(p1, "english")
        p2_en = translate(p2, "english")
        ln = len(result)
        idxs = list(range(ln))
        p1s = [p1_en for _ in idxs]
        p2s = [p2_en for _ in idxs]
        return list(Pool(ln).imap(
            _ret,
            idxs,
            p1s,
            p2s
        ))

    run_button.click(fn=_rets, inputs=[prompt, prompt2], outputs=result)

demo.queue().launch()