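# "Illustrated Tales - Korean" Gradio app:
# an uploaded image is captioned with CLIP Interrogator, GPT-3.5 expands the caption plus a
# keyword and a protagonist into Korean story chapters, Pollinations renders an illustration
# for each chapter, and the chapters can be exported as a PDF.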
from fpdf import FPDF
import gradio as gr
from urllib.parse import quote
import os
import openai
from gradio_client import Client

def generate_image_url(keywords):
    # Pollinations renders an image on the fly from the prompt embedded in the URL;
    # the prompt is truncated to the first 20 characters to keep the URL short.
    truncated_keywords = keywords[:20]
    return f"https://image.pollinations.ai/prompt/{quote(truncated_keywords)}"

class PDF(FPDF):
    def chapter_title(self, num, label):
        self.set_font('Arial', 'B', 12)
        self.cell(0, 10, f"Chapter {num} : {label}", 0, 1, 'C')
        self.ln(10)

    def chapter_body(self, body, image_url):
        self.set_font('Arial', '', 12)
        self.multi_cell(0, 10, body)
        self.image(image_url, x=10, w=100)
        self.ln(60)

    def add_chapter(self, num, title, body, image_url):
        self.add_page()
        self.chapter_title(num, title)
        self.chapter_body(body, image_url)
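# Note on the PDF export: the built-in 'Arial' core font only covers Latin-1, so rendering the
# Korean story text would likely require registering a Unicode TTF (e.g. via add_font(..., uni=True))
# and selecting it in chapter_title/chapter_body. Passing an image URL straight to self.image()
# assumes the fpdf2 package, which can fetch remote images.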

def save_as_pdf():
    global chapters
    # Write the file next to the app; an absolute path such as /mnt/data is not guaranteed to exist.
    pdf_path = "chapters.pdf"
    pdf = PDF()
    for i, chapter in enumerate(chapters, start=1):
        pdf.add_chapter(i, f"Chapter {i}", chapter["story"], chapter["image"])
    pdf.output(pdf_path)
    return pdf_path

def create_markdown_table():
    global chapters
    markdown_table = "| 챕터 | 이야기 | 그림 |\n|-------|-------|------|\n"
    for i, chapter in enumerate(chapters, start=1):
        # Newlines inside a table cell break Markdown rendering, so flatten them.
        story_cell = chapter["story"].replace("\n", "<br>")
        markdown_table += f"| Chapter {i} | {story_cell} | ![Chapter {i} Image]({chapter['image']}) |\n"
    return markdown_table

OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY  # the key was read but never passed to the openai client
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")

chapter_num = 1
story_intro = ""
current_story = ""
current_image_url = ""
chapters = []
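# Caution: these module-level globals are shared by every visitor to the running app, so
# concurrent sessions will overwrite each other's story; gr.State would be needed for
# per-session data.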

def next_chapter(audience, keyword, protagonist):
    # `audience` is accepted to keep the Gradio call signature uniform but is not yet used in the prompt.
    global chapter_num, current_story, current_image_url, chapters
    # The illustration prompt comes from the text generated so far, so the image reflects
    # the previous chapter (or the CLIP caption on the very first call).
    current_image_url = generate_image_url(current_story)
    gr.Info(f'Chapter {chapter_num}를 생성하고 있습니다...')
    chapter_prompt = f"{story_intro}\n\nKeyword: {keyword}\nProtagonist: {protagonist}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} 내용을 만들어줘."
    # Legacy (pre-1.0) openai SDK call style; openai>=1.0 renamed this to openai.chat.completions.create.
    chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
    current_story = chat_completion.choices[0].message.content
    chapters.append({"story": current_story, "image": current_image_url})
    chapter_num += 1
    return current_story, current_image_url, create_markdown_table()

def infer(image_input, audience, keyword, protagonist):
    global story_intro, current_story, current_image_url, chapter_num, chapters
    chapter_num = 1
    chapters = []
    gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
    # CLIP Interrogator 2 (called through gradio_client) turns the uploaded image into a text
    # caption, which seeds the story prompt.
    clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
    story_intro = f"""
    # Illustrated Tales
    ## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
    Keyword: {keyword}
    Protagonist: {protagonist}
    한국어로 답변해줘.
    STORY : "{{ {clipi_result} }}"
    Let's begin with Chapter 1!
    """
    current_story = clipi_result
    return next_chapter(audience, keyword, protagonist)

css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            <h1 style="text-align: center">Illustrated Tales - Korean</h1>
            <p style="text-align: center">이미지를 업로드하세요, ChatGPT를 통해 한국어로 이야기와 그림을 만들어 줍니다!</p>
            """
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="이미지 입력", type="filepath", elem_id="image-in", height=420)
                audience = gr.Radio(label="대상", choices=["Children", "Adult"], value="Children")
                keyword_in = gr.Textbox(label="핵심 키워드")
                protagonist_in = gr.Textbox(label="주인공")
                submit_btn = gr.Button('이야기와 그림을 만들어 주세요')
                next_chapter_btn = gr.Button('다음 이야기')
                save_pdf_btn = gr.Button('PDF로 저장하기')
            with gr.Column():
                chapter_story = gr.Markdown(label="이야기", elem_id="chapter_story")
                chapter_image = gr.Image(label="그림", elem_id="chapter_image")
                table_markdown = gr.Markdown(label="이야기와 그림 정리", elem_id="table_markdown")
                pdf_file = gr.File(label="PDF 다운로드")

    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in], outputs=[chapter_story, chapter_image, table_markdown])
    # Pass the components as inputs so the handler receives the live values; reading .value in a
    # lambda only returns each component's initial (default) value.
    next_chapter_btn.click(fn=next_chapter, inputs=[audience, keyword_in, protagonist_in], outputs=[chapter_story, chapter_image, table_markdown])
    save_pdf_btn.click(fn=save_as_pdf, outputs=pdf_file)

demo.queue(max_size=12).launch()