# NOTE(review): the following lines are Hugging Face Spaces page artifacts
# (build status, git blame commit hashes, a column ruler) that were captured
# along with the source. They are commented out so the file parses as Python;
# original text preserved below for provenance.
# Spaces:
# Build error
# Build error
# File size: 3,084 Bytes
# 9390c83 aefa600 9977408 9390c83 531a00a 71f80a1 9390c83 09f390b aefa600 ...
# 1 2 3 4 5 6 7 8 9 10 ... 71 |
import gradio as gr
from urllib.parse import quote
import re
import os
import openai
from gradio_client import Client
# API key is read from the environment.
# NOTE(review): the key is fetched but never assigned to openai.api_key in the
# visible code — presumably the openai library reads OPENAI_API_KEY itself;
# confirm, otherwise ChatCompletion calls in infer() will fail to authenticate.
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
# Remote CLIP Interrogator Space used to caption the uploaded image.
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
def generate_image_url(keywords):
    """Build a pollinations.ai image-generation URL for the given prompt text.

    The prompt is percent-encoded so spaces and non-ASCII characters survive
    inside the URL path.
    """
    encoded_prompt = quote(keywords)
    return "https://image.pollinations.ai/prompt/" + encoded_prompt
def infer(image_input, audience, keyword, protagonist):
    """Caption an image, then generate a one-chapter story (in Korean) for it.

    Parameters:
        image_input: filesystem path of the uploaded image (gr.Image type="filepath").
        audience: target audience ("Children" or "Adult").
            NOTE(review): currently unused by the prompt; kept for interface
            compatibility with the click() wiring — consider feeding it into
            story_intro.
        keyword: core keyword inserted into the story prompt.
        protagonist: protagonist description inserted into the story prompt.

    Returns:
        (chapter_text, image_url) — exactly two values, matching the
        click() outputs [chapter1_story, chapter1_image].

    BUG FIX: the original returned {"chapter1_story": ..., "chapter1_image": ...}
    plus gr.Group.update(visible=True). Gradio dict returns must be keyed by
    component objects (not strings), and the event wiring declares only two
    component outputs with no Group — so the return shape did not match the
    outputs list. Return a plain 2-tuple instead.
    """
    gr.Info('Calling CLIP Interrogator, ์ด๋ฏธ์ง๋ฅผ ํด์ํ๊ณ ์์ต๋๋ค...')
    # First element of the Space's result is the caption text.
    clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
    story_intro = f"""
# Illustrated Tales
## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
Keyword: {keyword}
Protagonist: {protagonist}
ํ๊ตญ์ด๋ก ๋ต๋ณํด์ค.
STORY : "{{ {clipi_result} }}"
Let's begin with Chapter 1!
"""
    chapters = []
    images = []
    # Illustration is generated from the CLIP caption, not the chapter text.
    current_image_url = generate_image_url(clipi_result)
    for chapter_num in range(1, 2):  # generate a single chapter
        gr.Info(f'Chapter {chapter_num}๋ฅผ ์์ฑํ๊ณ ์์ต๋๋ค...')
        chapter_prompt = f"{story_intro}\n\n\n\nChapter {chapter_num} ๋ด์ฉ์ ๋ง๋ค์ด์ค."
        chat_completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=[{"role": "user", "content": chapter_prompt}],
        )
        chapter_story = chat_completion.choices[0].message.content
        chapters.append(chapter_story)
        images.append(current_image_url)
    return chapters[0], images[0]
# Page styling: cap and center the main column; make links stand out.
css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""
# --- Gradio UI ---------------------------------------------------------------
# BUG FIX: the gr.Image label string was split across a physical line break
# (label="์ด๋ฏธ์ง ์ / ๋ ฅ") — a syntax error for a plain string literal; the
# fragments are rejoined on one line. A stray trailing "|" scrape artifact
# after launch() is dropped.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
<h1 style="text-align: center">Illustrated Tales - Korean</h1>
<p style="text-align: center">์ด๋ฏธ์ง๋ฅผ ์
๋ก๋ํ์ธ์, ChatGPT๋ฅผ ํตํด ํ๊ตญ์ด๋ก ์ด์ผ๊ธฐ์ ๊ทธ๋ฆผ์ ๋ง๋ค์ด ์ค๋๋ค!</p>
"""
        )
        with gr.Row():
            with gr.Column():
                # Input controls: image, audience, keyword, protagonist.
                image_in = gr.Image(label="์ด๋ฏธ์ง ์๋ ฅ", type="filepath", elem_id="image-in", height=420)
                audience = gr.Radio(label="๋์", choices=["Children", "Adult"], value="Children")
                keyword_in = gr.Textbox(label="ํต์ฌ ํค์๋")
                protagonist_in = gr.Textbox(label="์ฃผ์ธ๊ณต")
                submit_btn = gr.Button('์ด์ผ๊ธฐ์ ๊ทธ๋ฆผ์ ๋ง๋ค์ด ์ฃผ์ธ์')
            with gr.Column():
                # Output components filled in by infer().
                chapter1_story = gr.Markdown(label="Chapter 1: ์ด์ผ๊ธฐ", elem_id="chapter1_story")
                chapter1_image = gr.Image(label="Chapter 1: ๊ทธ๋ฆผ", elem_id="chapter1_image")
    # Wire the button to the story generator; infer returns (story, image_url).
    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
                     outputs=[chapter1_story, chapter1_image])

demo.queue(max_size=12).launch()