# Illustrated Tales (Korean) — Hugging Face Spaces Gradio app.
# (Removed scraped page residue: "Spaces:" header and "Build error" status lines.)
import os
import re
from urllib.parse import quote

import gradio as gr
import openai
from gradio_client import Client

# The legacy openai SDK (<1.0, ChatCompletion API used below) falls back to the
# OPENAI_API_KEY env var on its own, but assign it explicitly so the dependency
# is visible — previously this variable was read and never used.
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY

# Remote CLIP Interrogator Space used to caption the uploaded image.
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
def generate_image_url(keywords):
    """Build a pollinations.ai image URL for a free-text prompt.

    Parameters
    ----------
    keywords : str
        Prompt text; percent-encoded so spaces and special characters
        survive inside the URL path.

    Returns
    -------
    str
        Fully-formed image generation URL.
    """
    return f"https://image.pollinations.ai/prompt/{quote(keywords)}"
def infer(image_input, audience, keyword, protagonist):
    """Generate a one-chapter illustrated story from an uploaded image.

    Parameters
    ----------
    image_input : str
        Filepath of the uploaded image (the gr.Image uses type="filepath").
    audience : str
        "Children" or "Adult". NOTE(review): currently not used in the
        prompt — confirm whether it should influence the story.
    keyword : str
        Core keyword injected into the story prompt.
    protagonist : str
        Protagonist name injected into the story prompt.

    Returns
    -------
    tuple[str, str]
        (chapter 1 story text, chapter 1 image URL) — exactly the two
        values expected by submit_btn.click's outputs.
    """
    gr.Info('Calling CLIP Interrogator, ์ด๋ฏธ์ง๋ฅผ ํด์ํ๊ณ ์์ต๋๋ค...')
    # Caption the image via the remote CLIP Interrogator Space; the first
    # element of the result is the caption text.
    clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
    story_intro = f"""
# Illustrated Tales
## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
Keyword: {keyword}
Protagonist: {protagonist}
ํ๊ตญ์ด๋ก ๋ต๋ณํด์ค.
STORY : "{{ {clipi_result} }}"
Let's begin with Chapter 1!
"""
    chapters = []
    images = []
    # The illustration reuses the CLIP caption as the image prompt.
    current_image_url = generate_image_url(clipi_result)
    for chapter_num in range(1, 2):  # Generates a single chapter.
        gr.Info(f'Chapter {chapter_num}๋ฅผ ์์ฑํ๊ณ ์์ต๋๋ค...')
        chapter_prompt = f"{story_intro}\n\n\n\nChapter {chapter_num} ๋ด์ฉ์ ๋ง๋ค์ด์ค."
        # Legacy openai<1.0 ChatCompletion API.
        chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
        chapter_story = chat_completion.choices[0].message.content
        chapters.append(chapter_story)
        images.append(current_image_url)
    # BUGFIX: the original returned ({"chapter1_story": ..., "chapter1_image": ...},
    # gr.Group.update(visible=True)) — but the click handler wires exactly two
    # plain output components, so the dict would land on the Markdown output and
    # the Group update on the Image output. Return the two values directly.
    return chapters[0], images[0]
# Page styling: center the main column and emphasize links.
css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""
# UI layout: image + options on the left, generated story and picture on the
# right. The click handler returns exactly (story, image_url), matching the
# two wired output components.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            <h1 style="text-align: center">Illustrated Tales - Korean</h1>
            <p style="text-align: center">์ด๋ฏธ์ง๋ฅผ ์ ๋ก๋ํ์ธ์, ChatGPT๋ฅผ ํตํด ํ๊ตญ์ด๋ก ์ด์ผ๊ธฐ์ ๊ทธ๋ฆผ์ ๋ง๋ค์ด ์ค๋๋ค!</p>
            """
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="์ด๋ฏธ์ง ์ ๋ ฅ", type="filepath", elem_id="image-in", height=420)
                audience = gr.Radio(label="๋์", choices=["Children", "Adult"], value="Children")
                keyword_in = gr.Textbox(label="ํต์ฌ ํค์๋")
                protagonist_in = gr.Textbox(label="์ฃผ์ธ๊ณต")
                submit_btn = gr.Button('์ด์ผ๊ธฐ์ ๊ทธ๋ฆผ์ ๋ง๋ค์ด ์ฃผ์ธ์')
            with gr.Column():
                chapter1_story = gr.Markdown(label="Chapter 1: ์ด์ผ๊ธฐ", elem_id="chapter1_story")
                chapter1_image = gr.Image(label="Chapter 1: ๊ทธ๋ฆผ", elem_id="chapter1_image")
        submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
                         outputs=[chapter1_story, chapter1_image])

demo.queue(max_size=12).launch()