JUNGU committed
Commit e09895d · 1 Parent(s): 0ae568e

Update app.py

Files changed (1):
  1. app.py (+26 -6)
app.py CHANGED

@@ -4,6 +4,7 @@ import re
 import os
 import openai
 from gradio_client import Client
+from fpdf import FPDF
 
 OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
 clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
@@ -12,22 +13,40 @@ chapter_num = 1
 story_intro = ""
 current_story = ""
 current_image_url = ""
+chapters = []  # stores each chapter's story and image
 
 def generate_image_url(keywords):
     truncated_keywords = keywords[:20]
     return f"https://image.pollinations.ai/prompt/{quote(truncated_keywords)}"
+
+def save_as_pdf():
+    pdf = FPDF()
+    pdf.set_auto_page_break(auto=True, margin=15)
+    pdf.add_page()
+
+    for chapter in chapters:
+        pdf.set_font("Arial", size=12)
+        pdf.multi_cell(0, 10, chapter['story'])
+        pdf.image(chapter['image'], x=10, y=None, w=90)
+
+    pdf.output("illustrated_tales.pdf")
+    return "PDF 저장 완료!"
+
 def next_chapter(audience, keyword, protagonist):
     global chapter_num, current_story, current_image_url
-    current_image_url = generate_image_url(current_story)  # generate the image URL for the next chapter
+    current_image_url = generate_image_url(current_story)
    gr.Info(f'Chapter {chapter_num}를 생성하고 있습니다...')
     chapter_prompt = f"{story_intro}\n\nKeyword: {keyword}\nProtagonist: {protagonist}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} 내용을 만들어줘."
     chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
     current_story = chat_completion.choices[0].message.content
+    chapters.append({"story": current_story, "image": current_image_url})  # save the chapter record
     chapter_num += 1
-    return current_story, current_image_url
+    chapter_table = [[c['story'], c['image']] for c in chapters]
+    return current_story, current_image_url, chapter_table
 
 def infer(image_input, audience, keyword, protagonist):
     global story_intro, current_story, current_image_url, chapter_num
+    chapters.clear()  # reset when a new story starts
     chapter_num = 1
     gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
     clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
@@ -66,15 +85,16 @@ with gr.Blocks(css=css) as demo:
             protagonist_in = gr.Textbox(label="주인공")
             submit_btn = gr.Button('이야기와 그림을 만들어 주세요')
             next_chapter_btn = gr.Button('다음 이야기')
+            save_pdf_btn = gr.Button('PDF로 저장하기')
         with gr.Column():
             chapter_story = gr.Markdown(label="이야기", elem_id="chapter_story")
             chapter_image = gr.Image(label="그림", elem_id="chapter_image")
+            chapter_table = gr.Table(label="모든 챕터")
 
     submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
-                     outputs=[chapter_story, chapter_image])
+                     outputs=[chapter_story, chapter_image, chapter_table])
     next_chapter_btn.click(fn=lambda: next_chapter(audience=audience.value, keyword=keyword_in.value, protagonist=protagonist_in.value),
-                           outputs=[chapter_story, chapter_image])
-    # next_chapter_btn.click(fn=next_chapter,
-    #                        outputs=[chapter_story, chapter_image])
+                           outputs=[chapter_story, chapter_image, chapter_table])
+    save_pdf_btn.click(fn=save_as_pdf, outputs="text")
 
 demo.queue(max_size=12).launch()
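A note on the new save_as_pdf: FPDF's built-in core fonts (Arial/Helvetica) only cover Latin-1, so writing Korean story text through multi_cell fails to encode, and classic PyFPDF's image() expects a local file path, while chapter['image'] holds a pollinations.ai URL. A minimal sketch of a more robust variant, assuming the fpdf2 package, the requests library, and a locally downloaded NanumGothic.ttf (a hypothetical path, not part of this commit):

import tempfile

import requests
from fpdf import FPDF  # assumes fpdf2, not classic PyFPDF

chapters = []  # in app.py this is the module-level chapter store


def save_as_pdf():
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()
    # Register a Unicode TTF so Korean text renders instead of failing to encode.
    pdf.add_font("Nanum", fname="NanumGothic.ttf")
    pdf.set_font("Nanum", size=12)

    for chapter in chapters:
        pdf.multi_cell(0, 10, chapter['story'])
        # Fetch the remote illustration to a temp file before embedding it;
        # the .jpg suffix assumes pollinations.ai serves JPEGs.
        resp = requests.get(chapter['image'], timeout=30)
        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
            tmp.write(resp.content)
        pdf.image(tmp.name, x=10, w=90)

    pdf.output("illustrated_tales.pdf")
    return "PDF 저장 완료!"

Recent fpdf2 releases can also accept the URL passed directly to image(), but fetching it explicitly keeps timeouts and HTTP errors visible.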
 
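The event wiring has similar rough edges: Gradio has no gr.Table component (tabular output is gr.Dataframe), outputs="text" is not a valid output spec, and reading audience.value inside the lambda yields each component's initial value rather than what the user entered, because live values only reach a callback through inputs=. A minimal self-contained sketch of the idiomatic wiring, assuming Gradio 3.x; the stub callbacks, the input component choices, and the pdf_status textbox are illustrative, since those definitions fall outside the hunks shown:

import gradio as gr


def next_chapter(audience, keyword, protagonist):
    # Stub standing in for app.py's real next_chapter.
    story = f"A {audience} story about {protagonist} and {keyword}."
    return story, None, [[story, "(image url)"]]


def save_as_pdf():
    # Stub standing in for app.py's real save_as_pdf.
    return "PDF 저장 완료!"


with gr.Blocks() as demo:
    audience = gr.Radio(["children", "adults"], value="children", label="Audience")
    keyword_in = gr.Textbox(label="Keyword")
    protagonist_in = gr.Textbox(label="Protagonist")
    next_chapter_btn = gr.Button("다음 이야기")
    save_pdf_btn = gr.Button("PDF로 저장하기")
    chapter_story = gr.Markdown(elem_id="chapter_story")
    chapter_image = gr.Image(label="그림", elem_id="chapter_image")
    chapter_table = gr.Dataframe(headers=["story", "image"], label="모든 챕터")
    pdf_status = gr.Textbox(label="PDF")  # added so save_pdf_btn has a real output

    # inputs= hands the current component values to the callback;
    # audience.value in a lambda only ever sees the initial value.
    next_chapter_btn.click(fn=next_chapter,
                           inputs=[audience, keyword_in, protagonist_in],
                           outputs=[chapter_story, chapter_image, chapter_table])
    save_pdf_btn.click(fn=save_as_pdf, outputs=[pdf_status])

demo.launch()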
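One more detail worth knowing: keywords[:20] keeps the first 20 characters, not the first 20 keywords, so the prompt is usually cut mid-word, and since next_chapter passes current_story, the image prompt is just the opening of the previous chapter's text, percent-encoded by urllib.parse.quote. A quick check with an illustrative prompt:

from urllib.parse import quote

keywords = "a watercolor fox in a snowy forest"
truncated = keywords[:20]  # 'a watercolor fox in ' (20 characters, cut mid-phrase)
print(f"https://image.pollinations.ai/prompt/{quote(truncated)}")
# https://image.pollinations.ai/prompt/a%20watercolor%20fox%20in%20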