# Hugging Face Hub page chrome captured with the source (not code):
# JUNGU's picture / Update app.py / bfa801f / raw / history blame / 3.08 kB
import os
import re
from urllib.parse import quote

import gradio as gr
import openai
from gradio_client import Client

# Legacy (pre-1.0) openai SDK usage below (openai.ChatCompletion): set the
# API key explicitly instead of relying on the SDK silently picking up the
# OPENAI_API_KEY environment variable at import time.
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY

# Remote CLIP Interrogator Space: turns an uploaded image into a text
# description used both as the story seed and as the illustration prompt.
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
def generate_image_url(keywords):
    """Build a pollinations.ai image-generation URL for the given prompt text.

    The prompt is percent-encoded so spaces and non-ASCII characters survive
    being embedded in the URL path.
    """
    encoded_prompt = quote(keywords)
    return "https://image.pollinations.ai/prompt/" + encoded_prompt
def infer(image_input, audience, keyword, protagonist):
    """Generate a one-chapter illustrated story (answered in Korean) from an image.

    Pipeline: CLIP Interrogator describes the image -> the description seeds
    both the ChatGPT story prompt and the pollinations.ai illustration URL.

    Parameters:
        image_input: filepath of the uploaded image (from gr.Image type="filepath").
        audience: target audience radio value — currently unused in the prompt
            (NOTE(review): accepted for interface compatibility; confirm intent).
        keyword: core keyword injected into the story prompt.
        protagonist: protagonist name injected into the story prompt.

    Returns:
        (chapter_story, chapter_image_url) — matched positionally to the
        click handler's outputs=[chapter1_story, chapter1_image].
    """
    gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
    clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
    story_intro = f"""
# Illustrated Tales
## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
Keyword: {keyword}
Protagonist: {protagonist}
한국어로 답변해줘.
STORY : "{{ {clipi_result} }}"
Let's begin with Chapter 1!
"""
    chapters = []
    images = []
    # One illustration derived from the CLIP description; reused for the chapter.
    current_image_url = generate_image_url(clipi_result)
    for chapter_num in range(1, 2):  # generate a single chapter
        gr.Info(f'Chapter {chapter_num}를 생성하고 있습니다...')
        chapter_prompt = f"{story_intro}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} 내용을 만들어줘."
        chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
        chapter_story = chat_completion.choices[0].message.content
        chapters.append(chapter_story)
        images.append(current_image_url)
    # BUGFIX: the click handler declares outputs=[chapter1_story, chapter1_image],
    # so return the two values positionally. The previous code returned a
    # string-keyed dict plus an update for a component not listed in outputs,
    # which Gradio cannot map to the declared components.
    return chapters[0], images[0]
# Page-level CSS for the Blocks UI: center the main column and emphasize links.
css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""
# UI layout: left column takes the image + story parameters, right column
# shows the generated chapter text and illustration.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
<h1 style="text-align: center">Illustrated Tales - Korean</h1>
<p style="text-align: center">이미지를 업로드하세요, ChatGPT를 통해 한국어로 이야기와 그림을 만들어 줍니다!</p>
"""
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="이미지 입력", type="filepath", elem_id="image-in", height=420)
                audience = gr.Radio(label="대상", choices=["Children", "Adult"], value="Children")
                keyword_in = gr.Textbox(label="핵심 키워드")
                protagonist_in = gr.Textbox(label="주인공")
                submit_btn = gr.Button('이야기와 그림을 만들어 주세요')
            with gr.Column():
                chapter1_story = gr.Markdown(label="Chapter 1: 이야기", elem_id="chapter1_story")
                chapter1_image = gr.Image(label="Chapter 1: 그림", elem_id="chapter1_image")
    # infer returns (story, image_url) matched positionally to these outputs.
    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
                     outputs=[chapter1_story, chapter1_image])

demo.queue(max_size=12).launch()