JUNGU committed on
Commit
5d01521
·
1 Parent(s): 4998db3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -9
app.py CHANGED
@@ -8,10 +8,24 @@ from gradio_client import Client
8
  OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
9
  clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
10
 
 
 
 
 
11
  def generate_image_url(keywords):
12
  return f"https://image.pollinations.ai/prompt/{quote(keywords)}"
13
 
 
 
 
 
 
 
 
 
 
14
  def infer(image_input, audience, keyword, protagonist):
 
15
  gr.Info('Calling CLIP Interrogator, ์ด๋ฏธ์ง€๋ฅผ ํ•ด์„ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค...')
16
  clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
17
 
@@ -25,22 +39,19 @@ def infer(image_input, audience, keyword, protagonist):
25
  Let's begin with Chapter 1!
26
  """
27
 
28
- chapters = []
29
- images = []
30
  current_image_url = generate_image_url(clipi_result)
31
- for chapter_num in range(1, 2): # 1๊ฐœ์˜ ์žฅ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.
32
  gr.Info(f'Chapter {chapter_num}๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค...')
33
  chapter_prompt = f"{story_intro}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} ๋‚ด์šฉ์„ ๋งŒ๋“ค์–ด์ค˜."
34
  chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
35
  chapter_story = chat_completion.choices[0].message.content
36
  chapters.append(chapter_story)
37
  images.append(current_image_url)
38
-
39
- return chapters[0], images[0]
40
- # return {
41
- # "chapter1_story": chapters[0],
42
- # "chapter1_image": images[0],
43
- # }, gr.Group.update(visible=True)
44
 
45
  css = """
46
  #col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
@@ -62,9 +73,15 @@ with gr.Blocks(css=css) as demo:
62
  keyword_in = gr.Textbox(label="ํ•ต์‹ฌ ํ‚ค์›Œ๋“œ")
63
  protagonist_in = gr.Textbox(label="์ฃผ์ธ๊ณต")
64
  submit_btn = gr.Button('์ด์•ผ๊ธฐ์™€ ๊ทธ๋ฆผ์„ ๋งŒ๋“ค์–ด ์ฃผ์„ธ์š”')
 
65
  with gr.Column():
66
  chapter1_story = gr.Markdown(label="Chapter 1: ์ด์•ผ๊ธฐ", elem_id="chapter1_story")
67
  chapter1_image = gr.Image(label="Chapter 1: ๊ทธ๋ฆผ", elem_id="chapter1_image")
 
68
  submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
69
  outputs=[chapter1_story, chapter1_image])
 
 
 
 
70
  demo.queue(max_size=12).launch()
 
8
  OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
9
  clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
10
 
11
chapter_num = 0  # read cursor: index of the next chapter that next_chapter() will return
chapters = []    # generated chapter texts, appended by infer()
images = []      # image URLs, one per entry in `chapters`, kept in lockstep
# NOTE(review): module-level mutable state is shared by every concurrent Gradio
# session — two simultaneous users would interleave stories. Confirm this app
# is intended for single-user use, or move this state into gr.State.
def generate_image_url(keywords):
    """Build a pollinations.ai image-generation URL for the given prompt text."""
    encoded_prompt = quote(keywords)
    return "https://image.pollinations.ai/prompt/" + encoded_prompt
 
18
def next_chapter():
    """Return the next unread (story_text, image_url) pair and advance the cursor.

    Reads the module-level ``chapters`` / ``images`` lists populated by
    ``infer()`` and the global ``chapter_num`` read cursor.

    Returns:
        tuple[str, str | None]: the chapter text and its image URL, or a
        "all chapters read" notice and ``None`` once the story is exhausted.
        ``None`` (not ``""``) clears the ``gr.Image`` output — an empty
        string would be interpreted as a file path and fail to render.

    NOTE(review): ``infer()`` clears ``chapters``/``images`` for a new story
    but never resets the global ``chapter_num``, so a second story may appear
    to start mid-way or already exhausted — confirm and reset the cursor
    where the lists are rebuilt.
    """
    global chapter_num
    if chapter_num < len(chapters):
        result = chapters[chapter_num], images[chapter_num]
        chapter_num += 1
        return result
    # All chapters consumed: user-facing notice (runtime string kept as shipped).
    return "์ด๋ฏธ ๋ชจ๋“  ์žฅ์„ ์ฝ์—ˆ์Šต๋‹ˆ๋‹ค.", None
27
  def infer(image_input, audience, keyword, protagonist):
28
+ global chapters, images
29
  gr.Info('Calling CLIP Interrogator, ์ด๋ฏธ์ง€๋ฅผ ํ•ด์„ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค...')
30
  clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
31
 
 
39
  Let's begin with Chapter 1!
40
  """
41
 
42
+ chapters.clear()
43
+ images.clear()
44
  current_image_url = generate_image_url(clipi_result)
45
+ for chapter_num in range(1, 4): # 3๊ฐœ์˜ ์žฅ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.
46
  gr.Info(f'Chapter {chapter_num}๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค...')
47
  chapter_prompt = f"{story_intro}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} ๋‚ด์šฉ์„ ๋งŒ๋“ค์–ด์ค˜."
48
  chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
49
  chapter_story = chat_completion.choices[0].message.content
50
  chapters.append(chapter_story)
51
  images.append(current_image_url)
52
+ current_image_url = generate_image_url(chapter_story) # ๋‹ค์Œ ์žฅ์˜ ์ด๋ฏธ์ง€ URL์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.
53
+
54
+ return next_chapter()
 
 
 
55
 
56
  css = """
57
  #col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
 
73
  keyword_in = gr.Textbox(label="ํ•ต์‹ฌ ํ‚ค์›Œ๋“œ")
74
  protagonist_in = gr.Textbox(label="์ฃผ์ธ๊ณต")
75
  submit_btn = gr.Button('์ด์•ผ๊ธฐ์™€ ๊ทธ๋ฆผ์„ ๋งŒ๋“ค์–ด ์ฃผ์„ธ์š”')
76
+ next_chapter_btn = gr.Button('๋‹ค์Œ ์ด์•ผ๊ธฐ')
77
  with gr.Column():
78
  chapter1_story = gr.Markdown(label="Chapter 1: ์ด์•ผ๊ธฐ", elem_id="chapter1_story")
79
  chapter1_image = gr.Image(label="Chapter 1: ๊ทธ๋ฆผ", elem_id="chapter1_image")
80
+
81
  submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
82
  outputs=[chapter1_story, chapter1_image])
83
+
84
+ next_chapter_btn.click(fn=next_chapter,
85
+ outputs=[chapter1_story, chapter1_image])
86
+
87
  demo.queue(max_size=12).launch()