JUNGU committed
Commit 84fc905 · 1 Parent(s): 46078f9

Update app.py: replace the FPDF export with a Markdown chapter table

Drops the fpdf dependency and the save_as_pdf() helper, renders the accumulated chapters as a Markdown table through a gr.Markdown component (chapter_table_md) instead of gr.Table, and resets the chapters list whenever a new story starts.

Files changed (1)
  1. app.py +8 -24
app.py CHANGED
@@ -4,7 +4,6 @@ import re
 import os
 import openai
 from gradio_client import Client
-from fpdf import FPDF
 
 OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
 clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
@@ -13,25 +12,12 @@ chapter_num = 1
 story_intro = ""
 current_story = ""
 current_image_url = ""
-chapters = []  # story and image for each chapter
+chapters = []
 
 def generate_image_url(keywords):
     truncated_keywords = keywords[:20]
     return f"https://image.pollinations.ai/prompt/{quote(truncated_keywords)}"
 
-def save_as_pdf():
-    pdf = FPDF()
-    pdf.set_auto_page_break(auto=True, margin=15)
-    pdf.add_page()
-
-    for chapter in chapters:
-        pdf.set_font("Arial", size=12)
-        pdf.multi_cell(0, 10, chapter['story'])
-        pdf.image(chapter['image'], x=10, y=None, w=90)
-
-    pdf.output("illustrated_tales.pdf")
-    return "PDF 저장 완료!"
-
 def next_chapter(audience, keyword, protagonist):
     global chapter_num, current_story, current_image_url
     current_image_url = generate_image_url(current_story)
@@ -39,15 +25,15 @@ def next_chapter(audience, keyword, protagonist):
     chapter_prompt = f"{story_intro}\n\nKeyword: {keyword}\nProtagonist: {protagonist}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} 내용을 만들어줘."
     chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
     current_story = chat_completion.choices[0].message.content
-    chapters.append({"story": current_story, "image": current_image_url})  # save the chapter
+    chapters.append({"story": current_story, "image": current_image_url})
     chapter_num += 1
-    chapter_table = [[c['story'], c['image']] for c in chapters]
+    chapter_table = "| 챕터 | 이야기 | 그림 |\n|------|--------|------|\n" + "\n".join([f"| {i+1} | {c['story']} | ![image]({c['image']}) |" for i, c in enumerate(chapters)])
     return current_story, current_image_url, chapter_table
 
 def infer(image_input, audience, keyword, protagonist):
-    global story_intro, current_story, current_image_url, chapter_num
-    chapters.clear()  # reset when a new story starts
+    global story_intro, current_story, current_image_url, chapter_num, chapters
     chapter_num = 1
+    chapters = []
     gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
     clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
 
@@ -85,16 +71,14 @@ with gr.Blocks(css=css) as demo:
             protagonist_in = gr.Textbox(label="주인공")
             submit_btn = gr.Button('이야기와 그림을 만들어 주세요')
             next_chapter_btn = gr.Button('다음 이야기')
-            save_pdf_btn = gr.Button('PDF로 저장하기')
         with gr.Column():
            chapter_story = gr.Markdown(label="이야기", elem_id="chapter_story")
            chapter_image = gr.Image(label="그림", elem_id="chapter_image")
-           chapter_table = gr.Table(label="모든 챕터")
+           chapter_table_md = gr.Markdown(label="모든 챕터")
 
    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
-                    outputs=[chapter_story, chapter_image, chapter_table])
+                    outputs=[chapter_story, chapter_image, chapter_table_md])
    next_chapter_btn.click(fn=lambda: next_chapter(audience=audience.value, keyword=keyword_in.value, protagonist=protagonist_in.value),
-                          outputs=[chapter_story, chapter_image, chapter_table])
-   save_pdf_btn.click(fn=save_as_pdf, outputs="text")
+                          outputs=[chapter_story, chapter_image, chapter_table_md])
 
demo.queue(max_size=12).launch()
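A note on the new chapter table: instead of feeding a gr.Table, the commit assembles a Markdown table string and renders it through the chapter_table_md gr.Markdown component. A minimal standalone sketch of that assembly (the chapters data is made up, and the <br> escaping is our addition, not in the commit — a raw newline inside a cell would end the table row early):

```python
# Hypothetical chapter data, mirroring the {"story", "image"} dicts in app.py.
chapters = [
    {"story": "Once upon a time...\nA castle appeared.", "image": "https://image.pollinations.ai/prompt/castle"},
    {"story": "The forest grew dark.", "image": "https://image.pollinations.ai/prompt/forest"},
]

header = "| 챕터 | 이야기 | 그림 |\n|------|--------|------|\n"
rows = []
for i, c in enumerate(chapters):
    story = c["story"].replace("\n", "<br>")  # our addition: keep each row on one line
    rows.append(f"| {i + 1} | {story} | ![image]({c['image']}) |")
chapter_table = header + "\n".join(rows)
print(chapter_table)  # gr.Markdown renders this string as a table
```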
 
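One behavior worth flagging in the button wiring: the next_chapter_btn lambda reads keyword_in.value and protagonist_in.value, but a Gradio component's .value attribute holds the value the component was constructed with, not what the user has currently typed; handlers receive live values only through the event's inputs= list. A possible follow-up, sketched under the assumption that next_chapter keeps its (audience, keyword, protagonist) signature:

```python
# Sketch: let Gradio pass the components' current values into the handler
# instead of reading construction-time defaults via .value in a lambda.
next_chapter_btn.click(
    fn=next_chapter,
    inputs=[audience, keyword_in, protagonist_in],
    outputs=[chapter_story, chapter_image, chapter_table_md],
)
```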
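Also note that next_chapter passes the whole of current_story into generate_image_url, which keeps only the first 20 characters before URL-encoding them, so the image prompt is effectively the opening words of the previous chapter. A quick illustration (the sample string is invented):

```python
from urllib.parse import quote

def generate_image_url(keywords):
    truncated_keywords = keywords[:20]  # only the first 20 characters survive
    return f"https://image.pollinations.ai/prompt/{quote(truncated_keywords)}"

print(generate_image_url("a cozy watercolor village at dusk"))
# https://image.pollinations.ai/prompt/a%20cozy%20watercolor%20vi
```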