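# Gradio Space "Flux Lab Light": text-to-image generation with FLUX.1-dev plus an
# optional LoRA, multi-language prompts (translated to English via translatepy),
# and a final upscaling pass through the finegrain image enhancer Space.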
import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient
# gradio_client is needed to call the upscaler Space (see upscale_image below).
from gradio_client import Client, handle_file
from translatepy import Translator
import requests
import re
import asyncio
import tempfile
from PIL import Image
translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN", None)

basemodel = "black-forest-labs/FLUX.1-dev"
MAX_SEED = np.iinfo(np.int32).max

CSS = """
footer {
    visibility: hidden;
}
"""

JS = """function () {
    gradioURL = window.location.href
    if (!gradioURL.endsWith('?__theme=dark')) {
        window.location.replace(gradioURL + '?__theme=dark');
    }
}"""
def enable_lora(lora_add):
    if not lora_add:
        return basemodel
    else:
        return lora_add
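
# Translate the prompt, append the LoRA trigger word, and generate an image
# through the Hugging Face Inference API.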
async def generate_image(
    prompt: str,
    model: str,
    lora_word: str,
    width: int = 768,
    height: int = 1024,
    scales: float = 3.5,
    steps: int = 24,
    seed: int = -1):
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    print(f'prompt: {prompt}')
    text = str(translator.translate(prompt, 'English')) + "," + lora_word

    # HF_TOKEN is read above but was never passed to the client; use it here so
    # gated models such as FLUX.1-dev can be reached.
    client = AsyncInferenceClient(token=HF_TOKEN)
    try:
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model,
        )
    except Exception as e:
        raise gr.Error(f"Image generation failed: {e}")
    return image, seed
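
# Upscaling is delegated to the "finegrain/finegrain-image-enhancer" Space.
# AsyncInferenceClient has no predict() method, so the call below assumes the
# Space is reached through gradio_client (Client / handle_file); the /process
# parameters are the ones used in the original call.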
async def upscale_image(image, upscale_factor):
    client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN)
    # The Space expects a file path, so persist the generated PIL image first.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        image.save(tmp.name)
        image_path = tmp.name
    try:
        # Client.predict is synchronous; run it in a worker thread to keep the
        # event loop responsive.
        result = await asyncio.to_thread(
            client.predict,
            input_image=handle_file(image_path),
            prompt="",
            negative_prompt="",
            seed=42,
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            tile_width=112,
            tile_height=144,
            denoise_strength=0.35,
            num_inference_steps=18,
            solver="DDIM",
            api_name="/process",
        )
    except Exception as e:
        raise gr.Error(f"Upscaling failed: {e}")
    # The endpoint returns several outputs; the enhanced image is the second one.
    return result[1]
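
# End-to-end pipeline used by the UI: resolve the model, generate, then upscale.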
async def gen(
    prompt: str,
    lora_add: str = "XLabs-AI/flux-RealismLora",
    lora_word: str = "",
    width: int = 768,
    height: int = 1024,
    scales: float = 3.5,
    steps: int = 24,
    seed: int = -1,
    upscale_factor: int = 2,
    progress=gr.Progress(track_tqdm=True)
):
    model = enable_lora(lora_add)
    print(model)
    image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
    upscaled_image = await upscale_image(image, upscale_factor)
    return upscaled_image, seed
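
# Gradio UI: image preview, prompt box, and advanced generation controls.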
with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
    gr.HTML("<h1><center>Flux Lab Light</center></h1>")
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Row():
                img = gr.Image(type="filepath", label='Image generated by Flux', height=600)
            with gr.Row():
                prompt = gr.Textbox(label='Enter your prompt (multi-language)', placeholder="Enter a prompt...", scale=6)
                sendBtn = gr.Button(scale=1, variant='primary')
            with gr.Accordion("Advanced options", open=True):
                with gr.Column(scale=1):
                    width = gr.Slider(
                        label="Width",
                        minimum=512,
                        maximum=1280,
                        step=8,
                        value=768,
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=512,
                        maximum=1280,
                        step=8,
                        value=1024,
                    )
                    scales = gr.Slider(
                        label="Guidance scale",
                        minimum=3.5,
                        maximum=7,
                        step=0.1,
                        value=3.5,
                    )
                    steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=100,
                        step=1,
                        value=24,
                    )
                    seed = gr.Slider(
                        label="Seed",
                        minimum=-1,
                        maximum=MAX_SEED,
                        step=1,
                        value=-1,
                    )
                    lora_add = gr.Textbox(
                        label="Add Flux LoRA",
                        info="LoRA model to add",
                        lines=1,
                        value="XLabs-AI/flux-RealismLora",
                    )
                    lora_word = gr.Textbox(
                        label="LoRA trigger word",
                        info="Trigger word that activates the LoRA model",
                        lines=1,
                        value="",
                    )
                    upscale_factor = gr.Radio(
                        label="Upscale factor",
                        choices=[2, 3, 4],
                        value=2,
                    )

    gr.on(
        triggers=[
            prompt.submit,
            sendBtn.click,
        ],
        fn=gen,
        inputs=[
            prompt,
            lora_add,
            lora_word,
            width,
            height,
            scales,
            steps,
            seed,
            upscale_factor
        ],
        outputs=[img, seed]
    )
if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)
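
# To run locally (assuming this file is saved as app.py and the dependencies
# are installed, e.g. gradio, huggingface_hub, gradio_client, translatepy,
# pillow, numpy):
#   HF_TOKEN=<your token> python app.py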