JUNGU committed
Commit 970aa64 · 1 Parent(s): 84fc905

Update app.py

Files changed (1)
  1. app.py +29 -11
app.py CHANGED
@@ -1,10 +1,17 @@
 import gradio as gr
 from urllib.parse import quote
+from reportlab.lib.pagesizes import letter
+from reportlab.platypus import SimpleDocTemplate, Paragraph, Image
+from reportlab.lib.styles import getSampleStyleSheet
+import urllib.request
 import re
 import os
 import openai
 from gradio_client import Client
 
+def download_image(image_url, image_path):
+    urllib.request.urlretrieve(image_url, image_path)
+
 OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
 clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
 
@@ -18,6 +25,21 @@ def generate_image_url(keywords):
     truncated_keywords = keywords[:20]
     return f"https://image.pollinations.ai/prompt/{quote(truncated_keywords)}"
 
+def save_as_pdf():
+    global chapters
+    pdf_path = "/mnt/data/chapters.pdf"
+    pdf = SimpleDocTemplate(pdf_path, pagesize=letter)
+    story = []
+    styles = getSampleStyleSheet()
+    for i, chapter in enumerate(chapters):
+        story.append(Paragraph(f"Chapter {i + 1}", styles["Heading1"]))
+        story.append(Paragraph(chapter["story"], styles["BodyText"]))
+        image_path = f"/mnt/data/image_{i}.png"
+        download_image(chapter["image"], image_path)
+        story.append(Image(image_path))
+    pdf.build(story)
+    return pdf_path
+
 def next_chapter(audience, keyword, protagonist):
     global chapter_num, current_story, current_image_url
     current_image_url = generate_image_url(current_story)
@@ -25,18 +47,16 @@ def next_chapter(audience, keyword, protagonist):
     chapter_prompt = f"{story_intro}\n\nKeyword: {keyword}\nProtagonist: {protagonist}\n\n![Chapter {chapter_num} Image]({current_image_url})\n\nChapter {chapter_num} 내용을 만들어줘."
     chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
     current_story = chat_completion.choices[0].message.content
-    chapters.append({"story": current_story, "image": current_image_url})
     chapter_num += 1
-    chapter_table = "| 챕터 | 이야기 | 그림 |\n|------|--------|------|\n" + "\n".join([f"| {i+1} | {c['story']} | ![image]({c['image']}) |" for i, c in enumerate(chapters)])
-    return current_story, current_image_url, chapter_table
+    chapters.append({"story": current_story, "image": current_image_url})
+    return current_story, current_image_url
 
 def infer(image_input, audience, keyword, protagonist):
     global story_intro, current_story, current_image_url, chapter_num, chapters
-    chapter_num = 1
     chapters = []
+    chapter_num = 1
     gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
     clipi_result = clipi_client.predict(image_input, "best", 4, api_name="/clipi2")[0]
-
     story_intro = f"""
 # Illustrated Tales
 ## Created by [Sigkawat Pengnoo](https://flowgpt.com/prompt/qzv2D3OvHkzkfSE4rQCqv) at FlowGPT
@@ -46,7 +66,6 @@ def infer(image_input, audience, keyword, protagonist):
 STORY : "{{ {clipi_result} }}"
 Let's begin with Chapter 1!
 """
-
     current_story = clipi_result
     return next_chapter(audience, keyword, protagonist)
 
@@ -71,14 +90,13 @@ with gr.Blocks(css=css) as demo:
             protagonist_in = gr.Textbox(label="주인공")
             submit_btn = gr.Button('이야기와 그림을 만들어 주세요')
            next_chapter_btn = gr.Button('다음 이야기')
+            save_pdf_btn = gr.Button('PDF로 저장하기')
         with gr.Column():
             chapter_story = gr.Markdown(label="이야기", elem_id="chapter_story")
             chapter_image = gr.Image(label="그림", elem_id="chapter_image")
-            chapter_table_md = gr.Markdown(label="모든 챕터")
 
-    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
-                     outputs=[chapter_story, chapter_image, chapter_table_md])
-    next_chapter_btn.click(fn=lambda: next_chapter(audience=audience.value, keyword=keyword_in.value, protagonist=protagonist_in.value),
-                           outputs=[chapter_story, chapter_image, chapter_table_md])
+    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in], outputs=[chapter_story, chapter_image])
+    next_chapter_btn.click(fn=lambda: next_chapter(audience=audience.value, keyword=keyword_in.value, protagonist=protagonist_in.value), outputs=[chapter_story, chapter_image])
+    save_pdf_btn.click(fn=save_as_pdf, outputs=gr.File(label="PDF 다운로드"))
 
 demo.queue(max_size=12).launch()
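
For reference, a minimal standalone sketch of the reportlab flow that the new save_as_pdf() follows; the sample chapter data and the output filename below are illustrative assumptions, not part of the commit.

# Sketch only: mirrors the Heading1/BodyText/Image flowable pattern used by save_as_pdf().
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Image
from reportlab.lib.styles import getSampleStyleSheet

chapters = [{"story": "Once upon a time...", "image": "chapter_1.png"}]  # assumed sample data

styles = getSampleStyleSheet()
flowables = []
for i, chapter in enumerate(chapters):
    flowables.append(Paragraph(f"Chapter {i + 1}", styles["Heading1"]))   # chapter heading
    flowables.append(Paragraph(chapter["story"], styles["BodyText"]))     # chapter text
    flowables.append(Image(chapter["image"]))                             # locally saved chapter image

SimpleDocTemplate("chapters.pdf", pagesize=letter).build(flowables)       # write the PDF to disk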
 
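A usage note on the new save_pdf_btn wiring: in Gradio, a click handler that returns a file path can feed a gr.File output, which then offers the file for download. A minimal sketch of that pattern, with hypothetical handler and component names:

import gradio as gr

def export_file():
    # Hypothetical handler: write a file and return its path for the gr.File output.
    path = "export.txt"
    with open(path, "w") as f:
        f.write("sample content")
    return path

with gr.Blocks() as demo:
    export_btn = gr.Button("Export")
    download_out = gr.File(label="Download")
    export_btn.click(fn=export_file, outputs=download_out)

demo.launch()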