import gradio as gr
from urllib.parse import quote
import re
import os
import openai
from gradio_client import Client

OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY  # the pre-1.0 openai SDK reads the key from this module attribute
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")

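# Module-level story state: reset in infer(), advanced in next_chapter()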
chapter_num = 1
story_intro = ""
current_story = ""
current_image_url = ""
chapters = []

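# Build a Pollinations image URL from the first 20 characters of the prompt text (URL-encoded)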
def generate_image_url(keywords):
    truncated_keywords = keywords[:20]
    return f"https://image.pollinations.ai/prompt/{quote(truncated_keywords)}"

def next_chapter(audience, keyword, protagonist):
    global chapter_num, current_story, current_image_url
    # Illustrate this chapter from the previous chapter's text (the CLIP caption for chapter 1)
    current_image_url = generate_image_url(current_story)
    gr.Info(f'Chapter {chapter_num}를 생성하고 있습니다...')  # progress toast: "Generating Chapter N..."
    # Fold the audience choice into the prompt along with the keyword and protagonist
    chapter_prompt = f"{story_intro}\n\nAudience: {audience}\nKeyword: {keyword}\nProtagonist: {protagonist}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} 내용을 만들어줘."
    chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
    current_story = chat_completion.choices[0].message.content
    chapters.append({"story": current_story, "image": current_image_url})
    chapter_num += 1
    # Replace newlines inside story cells with <br> so multi-paragraph stories don't break the Markdown table
    chapter_table = "| 챕터 | 이야기 | 그림 |\n|------|--------|------|\n" + "\n".join(
        [f"| {i+1} | {c['story'].replace(chr(10), '<br>')} | ![image]({c['image']}) |" for i, c in enumerate(chapters)]
    )
    return current_story, current_image_url, chapter_table

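# First call: caption the uploaded image with CLIP Interrogator, build the framing prompt, then write chapter 1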
def infer(image_input, audience, keyword, protagonist):
    global story_intro, current_story, current_image_url, chapter_num, chapters
    chapter_num = 1
    chapters = []
    gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
    clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]

    story_intro = f"""
    # Illustrated Tales
    ## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
    Keyword: {keyword}
    Protagonist: {protagonist}
    한국어로 답변해줘.
    STORY : "{{ {clipi_result} }}"
    Let's begin with Chapter 1!
    """

    current_story = clipi_result
    return next_chapter(audience, keyword, protagonist)

css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

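# UI: image/keyword/protagonist inputs on the left; generated chapter, illustration and chapter history on the right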
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            <h1 style="text-align: center">Illustrated Tales - Korean</h1>
            <p style="text-align: center">이미지를 업로드하세요, ChatGPT를 통해 한국어로 이야기와 그림을 만들어 줍니다!</p>
            """
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="이미지 입력", type="filepath", elem_id="image-in", height=420)
                audience = gr.Radio(label="대상", choices=["Children", "Adult"], value="Children")
                keyword_in = gr.Textbox(label="핵심 키워드")
                protagonist_in = gr.Textbox(label="주인공")
                submit_btn = gr.Button('이야기와 그림을 만들어 주세요')
                next_chapter_btn = gr.Button('다음 이야기')
            with gr.Column():
                chapter_story = gr.Markdown(label="이야기", elem_id="chapter_story")
                chapter_image = gr.Image(label="그림", elem_id="chapter_image")
                chapter_table_md = gr.Markdown(label="모든 챕터")

    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in], 
                     outputs=[chapter_story, chapter_image, chapter_table_md])
    # Pass the components as `inputs` so the handler receives their current values; reading `.value` in a lambda only sees the initial defaults
    next_chapter_btn.click(fn=next_chapter, inputs=[audience, keyword_in, protagonist_in],
                           outputs=[chapter_story, chapter_image, chapter_table_md])

demo.queue(max_size=12).launch()