Build error
Update app.py
make it simple for speed
app.py
CHANGED
@@ -18,41 +18,32 @@ def infer(image_input, audience, keyword, protagonist):
     story_intro = f"""
 # Illustrated Tales
 ## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
-
 Keyword: {keyword}
 Protagonist: {protagonist}
 한국어로 답변해줘.
-
 STORY : "{{ {clipi_result} }}"
-
 Let's begin with Chapter 1!
 """
 
     chapters = []
     images = []
     current_image_url = generate_image_url(clipi_result)
-    for chapter_num in range(1, 4):
+    for chapter_num in range(1, 2):  # 1개의 장을 생성합니다.
         gr.Info(f'Chapter {chapter_num}를 생성하고 있습니다...')
         chapter_prompt = f"{story_intro}\n\n\n\nChapter {chapter_num} 내용을 만들어줘."
         chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
         chapter_story = chat_completion.choices[0].message.content
         chapters.append(chapter_story)
         images.append(current_image_url)
-        current_image_url = generate_image_url(chapter_story)  # 다음 장의 이미지 URL을 생성합니다.
 
     return {
         "chapter1_story": chapters[0],
         "chapter1_image": images[0],
-        "chapter2_story": chapters[1],
-        "chapter2_image": images[1],
-        "chapter3_story": chapters[2],
-        "chapter3_image": images[2],
     }, gr.Group.update(visible=True)
 
 css = """
 #col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
 a {text-decoration-line: underline; font-weight: 600;}
-a {text-decoration-line: underline; font-weight: 600;}
 """
 
 with gr.Blocks(css=css) as demo:
@@ -73,11 +64,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Column():
             chapter1_story = gr.Markdown(label="Chapter 1: 이야기", elem_id="chapter1_story")
             chapter1_image = gr.Image(label="Chapter 1: 그림", elem_id="chapter1_image")
-            chapter2_story = gr.Markdown(label="Chapter 2: 이야기", elem_id="chapter2_story")
-            chapter2_image = gr.Image(label="Chapter 2: 그림", elem_id="chapter2_image")
-            chapter3_story = gr.Markdown(label="Chapter 3: 이야기", elem_id="chapter3_story")
-            chapter3_image = gr.Image(label="Chapter 3: 그림", elem_id="chapter3_image")
 
     submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
-                     outputs=[chapter1_story, chapter1_image, chapter2_story, chapter2_image, chapter3_story, chapter3_image])
+                     outputs=[chapter1_story, chapter1_image])
 demo.queue(max_size=12).launch()
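Both hunks call a generate_image_url helper that is defined elsewhere in app.py and is not part of this diff. Purely as a hedged sketch of how such a helper could be built on the same pre-1.0 openai client the loop already uses for ChatCompletion, it might look roughly like this; the prompt truncation and the 512x512 size are assumptions, not taken from the Space:

import openai

def generate_image_url(prompt_text):
    # Sketch only: request a single picture from the pre-1.0 OpenAI Images API
    # and return its URL so gr.Image can display it directly.
    # Truncating the prompt and the 512x512 size are assumptions of this sketch.
    response = openai.Image.create(
        prompt=prompt_text[:1000],
        n=1,
        size="512x512",
    )
    return response["data"][0]["url"]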
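After the change, infer still returns a string-keyed dict together with gr.Group.update(visible=True), while the trimmed click wiring lists only chapter1_story and chapter1_image. If that mismatch needs resolving, one hedged sketch of a wiring where the return values line up positionally with the outputs list, and where the result widgets sit in an initially hidden gr.Group that the update reveals, is shown below. The hidden group, the simplified inputs, and the placeholder values are assumptions for illustration, not the Space's actual layout:

import gradio as gr

def infer_sketch(keyword, protagonist):
    # Stand-ins for the values the real handler builds with ChatCompletion
    # and generate_image_url.
    chapter_story = f"# Chapter 1\nA tale about {protagonist} and {keyword}."
    chapter_image_url = "https://example.com/placeholder.png"
    # Return one value per output component, in the same order as `outputs`,
    # plus the update that makes the hidden result group visible.
    return chapter_story, chapter_image_url, gr.Group.update(visible=True)

with gr.Blocks() as demo:
    keyword_in = gr.Textbox(label="Keyword")
    protagonist_in = gr.Textbox(label="Protagonist")
    submit_btn = gr.Button("Submit")
    with gr.Group(visible=False) as result_group:
        chapter1_story = gr.Markdown(elem_id="chapter1_story")
        chapter1_image = gr.Image(elem_id="chapter1_image")
    submit_btn.click(fn=infer_sketch,
                     inputs=[keyword_in, protagonist_in],
                     outputs=[chapter1_story, chapter1_image, result_group])

demo.queue(max_size=12).launch()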
|