Update app.py
app.py CHANGED
@@ -8,24 +8,27 @@ from gradio_client import Client
 OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
 clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
 
-chapter_num =
-
-
+chapter_num = 1
+story_intro = ""
+current_story = ""
+current_image_url = ""
 
 def generate_image_url(keywords):
     return f"https://image.pollinations.ai/prompt/{quote(keywords)}"
 
 def next_chapter():
-    global chapter_num
-
-
-
-
-
-
+    global chapter_num, current_story, current_image_url
+    current_image_url = generate_image_url(current_story)  # generate the image URL for the next chapter
+    gr.Info(f'Generating Chapter {chapter_num}...')
+    chapter_prompt = f"{story_intro}\n\n\n\nWrite the content of Chapter {chapter_num}."
+    chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
+    current_story = chat_completion.choices[0].message.content
+    chapter_num += 1
+    return current_story, current_image_url
 
 def infer(image_input, audience, keyword, protagonist):
-    global
+    global story_intro, current_story, current_image_url, chapter_num
+    chapter_num = 1
     gr.Info('Calling CLIP Interrogator, interpreting the image...')
     clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
 
@@ -39,18 +42,7 @@ def infer(image_input, audience, keyword, protagonist):
     Let's begin with Chapter 1!
     """
 
-
-    images.clear()
-    current_image_url = generate_image_url(clipi_result)
-    for chapter_num in range(1, 4):  # generate 3 chapters
-        gr.Info(f'Generating Chapter {chapter_num}...')
-        chapter_prompt = f"{story_intro}\n\n\n\nWrite the content of Chapter {chapter_num}."
-        chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
-        chapter_story = chat_completion.choices[0].message.content
-        chapters.append(chapter_story)
-        images.append(current_image_url)
-        current_image_url = generate_image_url(chapter_story)  # generate the image URL for the next chapter
-
+    current_story = clipi_result
     return next_chapter()
 
 css = """
@@ -75,13 +67,13 @@ with gr.Blocks(css=css) as demo:
     submit_btn = gr.Button('Please create the story and pictures')
    next_chapter_btn = gr.Button('Next story')
    with gr.Column():
-
-
+        chapter_story = gr.Markdown(label="Story", elem_id="chapter_story")
+        chapter_image = gr.Image(label="Picture", elem_id="chapter_image")
 
    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
-                     outputs=[
+                     outputs=[chapter_story, chapter_image])
 
    next_chapter_btn.click(fn=next_chapter,
-                           outputs=[
+                           outputs=[chapter_story, chapter_image])
 
 demo.queue(max_size=12).launch()
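
For reference, below is a minimal, self-contained sketch of the chapter-by-chapter flow this commit introduces. The generate_chapter_text stub and the one-line story_intro are stand-ins (the Space builds story_intro from the audience, keyword and protagonist inputs and gets the text from openai.ChatCompletion.create), and the Gradio wiring is omitted so the script runs without an API key; the state handling in next_chapter and infer mirrors the diff above.

from urllib.parse import quote

# Module-level state, mirroring the new globals in app.py.
chapter_num = 1
story_intro = ""
current_story = ""
current_image_url = ""

def generate_image_url(keywords):
    # Same helper as in the Space: turn free-form text into a pollinations.ai image URL.
    return f"https://image.pollinations.ai/prompt/{quote(keywords)}"

def generate_chapter_text(prompt):
    # Stand-in for openai.ChatCompletion.create(...); returns canned text so the sketch runs offline.
    return f"(model output for a prompt of {len(prompt)} characters)"

def next_chapter():
    # Produce one chapter and one illustration URL, then advance the counter.
    global chapter_num, current_story, current_image_url
    # The illustration is derived from the previous chapter's text
    # (or from the CLIP Interrogator caption on the very first call).
    current_image_url = generate_image_url(current_story)
    prompt = f"{story_intro}\n\n\n\nWrite the content of Chapter {chapter_num}."
    current_story = generate_chapter_text(prompt)
    chapter_num += 1
    return current_story, current_image_url

def infer(image_caption):
    # Reset the story state from an image caption and return Chapter 1.
    global story_intro, current_story, current_image_url, chapter_num
    chapter_num = 1
    story_intro = f"Write a children's story inspired by: {image_caption}"  # simplified stand-in
    current_story = image_caption  # seeds the first illustration
    return next_chapter()

if __name__ == "__main__":
    story, image_url = infer("a watercolor painting of a fox in a forest")
    print(story)
    print(image_url)
    # Another click of the 'Next story' button maps to another call:
    story, image_url = next_chapter()
    print(story)
    print(image_url)

Both functions return the same (story text, image URL) pair, which is why the two buttons in the diff can share the outputs=[chapter_story, chapter_image] wiring.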