import gradio as gr
from urllib.parse import quote
import os
import openai
from gradio_client import Client

OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY  # the key was read from the environment but never passed to the openai module
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")

def generate_image_url(keywords):
    return f"https://image.pollinations.ai/prompt/{quote(keywords)}"

def infer(image_input, audience, keyword, protagonist):
    # Describe the uploaded image with CLIP Interrogator, then ask ChatGPT to
    # write a Korean-language chapter around that description.
    # Note: `audience` is collected in the UI but not yet folded into the prompt.
    gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
    clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]

    story_intro = f"""
    # Illustrated Tales
    ## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
    Keyword: {keyword}
    Protagonist: {protagonist}
    ํ•œ๊ตญ์–ด๋กœ ๋‹ต๋ณ€ํ•ด์ค˜.
    STORY : "{{ {clipi_result} }}"
    Let's begin with Chapter 1!
    """

    chapters = []
    images = []
    current_image_url = generate_image_url(clipi_result)
    for chapter_num in range(1, 2):  # generate a single chapter
        gr.Info(f'Chapter {chapter_num}๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค...')
        chapter_prompt = f"{story_intro}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} ๋‚ด์šฉ์„ ๋งŒ๋“ค์–ด์ค˜."
        chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
        chapter_story = chat_completion.choices[0].message.content
        chapters.append(chapter_story)
        images.append(current_image_url)

    # Return values in the same order as the outputs wired to submit_btn.click:
    # the chapter text for the Markdown component, the image URL for the Image component.
    return chapters[0], images[0]

css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            <h1 style="text-align: center">Illustrated Tales - Korean</h1>
            <p style="text-align: center">์ด๋ฏธ์ง€๋ฅผ ์—…๋กœ๋“œํ•˜์„ธ์š”, ChatGPT๋ฅผ ํ†ตํ•ด ํ•œ๊ตญ์–ด๋กœ ์ด์•ผ๊ธฐ์™€ ๊ทธ๋ฆผ์„ ๋งŒ๋“ค์–ด ์ค๋‹ˆ๋‹ค!</p>
            """
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="์ด๋ฏธ์ง€ ์ž…๋ ฅ", type="filepath", elem_id="image-in", height=420)
                audience = gr.Radio(label="๋Œ€์ƒ", choices=["Children", "Adult"], value="Children")
                keyword_in = gr.Textbox(label="ํ•ต์‹ฌ ํ‚ค์›Œ๋“œ")
                protagonist_in = gr.Textbox(label="์ฃผ์ธ๊ณต")
                submit_btn = gr.Button('์ด์•ผ๊ธฐ์™€ ๊ทธ๋ฆผ์„ ๋งŒ๋“ค์–ด ์ฃผ์„ธ์š”')
            with gr.Column():
                chapter1_story = gr.Markdown(label="Chapter 1: ์ด์•ผ๊ธฐ", elem_id="chapter1_story")
                chapter1_image = gr.Image(label="Chapter 1: ๊ทธ๋ฆผ", elem_id="chapter1_image")

    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
                     outputs=[chapter1_story, chapter1_image])

demo.queue(max_size=12).launch()
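# Local run sketch, under these assumptions: the file is saved as app.py
# (hypothetical name), gradio and gradio_client are installed, and a pre-1.0
# openai package (which still exposes ChatCompletion) is used:
#   OPENAI_API_KEY=<your key> python app.py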