import random
import gradio as gr
import numpy as np
import torch
import spaces
from diffusers import FluxPipeline
from PIL import Image
from diffusers.utils import export_to_gif
from transformers import pipeline

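# The model renders a single 1024x256 strip containing four 256x256 frames
# side by side; the strip is later sliced into individual GIF frames.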
HEIGHT = 256
WIDTH = 1024
MAX_SEED = np.iinfo(np.int32).max

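# Load FLUX.1-dev once at startup; bfloat16 roughly halves memory use
# compared with fp32 while keeping generation quality on supported GPUs.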
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
).to(device)

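# MarianMT Korean-to-English model; FLUX prompts generally work best in English.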
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def split_image(input_image, num_splits=4):
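    """Slice a horizontal strip into `num_splits` equal-width frames, left to right."""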
    output_images = []
    frame_width = input_image.width // num_splits
    for i in range(num_splits):
        left = i * frame_width
        box = (left, 0, left + frame_width, input_image.height)
        output_images.append(input_image.crop(box))
    return output_images

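# The translation pipeline returns a list of dicts; take the first result's text.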
def translate_to_english(text):
    return translator(text)[0]['translation_text']

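# @spaces.GPU() requests a GPU from the ZeroGPU pool for the duration of the call.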
@spaces.GPU()
def predict(prompt, seed=42, randomize_seed=False, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
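    # Detect Hangul (compatibility jamo U+3131-U+318E or syllables U+AC00-U+D7A3)
    # and translate Korean prompts to English before generation.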
    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
        prompt = translate_to_english(prompt)
    
    prompt_template = (
        f"A side by side 4 frame image showing consecutive stills from a looped gif "
        f"moving from left to right. The gif is of {prompt}."
    )
    
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    
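    # Generate the full 1024x256 strip in one diffusion pass; seeding the
    # generator on the CPU helps keep results reproducible across GPUs.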
    image = pipe(
        prompt=prompt_template,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=torch.Generator("cpu").manual_seed(seed),
        height=HEIGHT,
        width=WIDTH
    ).images[0]
    
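    # Slice the strip into four frames and assemble them into a 4 fps looping GIF.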
    return export_to_gif(split_image(image, 4), "flux.gif", fps=4), image, seed

css = """
footer { visibility: hidden;}
"""

# Example prompts are in Korean; they exercise the automatic ko->en translation path.
examples = [
    "고양이가 공중에서 발을 흔드는 모습",   # a cat waving its paws in the air
    "팬더가 엉덩이를 좌우로 흔드는 모습",   # a panda shaking its hips side to side
    "꽃이 피어나는 과정",                    # a flower blooming
]

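# UI layout: prompt row, GIF output, the sliced stills, and advanced sampling controls.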
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter a prompt")
            submit = gr.Button("Submit", scale=0)
        output = gr.Image(label="GIF", show_label=False)
        output_stills = gr.Image(label="Still images", show_label=False, elem_id="stills")
        
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )
        
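        # "lazy" caching renders each example the first time it is clicked,
        # rather than precomputing all of them at startup.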
        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt],
            outputs=[output, output_stills, seed],
            cache_examples="lazy"
        )
        
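        # Run predict on either the button click or Enter in the prompt box.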
        gr.on(
            triggers=[submit.click, prompt.submit],
            fn=predict,
            inputs=[prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
            outputs=[output, output_stills, seed]
        )

demo.launch()