JUNGU committed
Commit aefa600 · 1 Parent(s): ee3a788

Update app.py

Files changed (1):
  1. app.py  +20 -59
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from share_btn import community_icon_html, loading_icon_html, share_js
+from urllib.parse import quote
 import re
 import os
 import openai
@@ -9,7 +9,7 @@ OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
 clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
 
 def generate_image_url(keywords):
-    return f"https://image.pollinations.ai/prompt/{keywords}"
+    return f"https://image.pollinations.ai/prompt/{quote(keywords)}"
 
 def infer(image_input, audience, keyword, protagonist):
     gr.Info('Calling CLIP Interrogator, interpreting the image...')
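The switch to quote() matters because pollinations.ai takes the whole prompt as a URL path segment, so spaces, commas, and non-ASCII text (such as Korean) must be percent-encoded or the request URL is malformed. A minimal standalone sketch of the encoded URL this helper now produces (the sample prompt is made up):

from urllib.parse import quote

def generate_image_url(keywords):
    # Percent-encode the prompt so spaces and non-ASCII characters
    # survive as a single URL path segment.
    return f"https://image.pollinations.ai/prompt/{quote(keywords)}"

print(generate_image_url("a brave rabbit, 동화 스타일"))
# -> https://image.pollinations.ai/prompt/a%20brave%20rabbit%2C%20%EB%8F%99...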
@@ -29,6 +29,7 @@ def infer(image_input, audience, keyword, protagonist):
     """
 
     chapters = []
+    images = []
     current_image_url = generate_image_url(clipi_result)
     for chapter_num in range(1, 4):  # Generate three chapters.
         gr.Info(f'Generating Chapter {chapter_num}...')
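The new images list records which illustration belongs to which chapter: the CLIP Interrogator caption seeds the first image, and each chapter's text becomes the image prompt for the next one. A minimal sketch of that chaining with the OpenAI call stubbed out (write_chapter is a placeholder, not part of the app):

from urllib.parse import quote

def generate_image_url(keywords):
    return f"https://image.pollinations.ai/prompt/{quote(keywords)}"

def write_chapter(prompt):
    # Stand-in for the openai.ChatCompletion.create(...) call.
    return f"Story text produced for: {prompt}"

chapters, images = [], []
current_image_url = generate_image_url("caption from CLIP Interrogator")
for chapter_num in range(1, 4):  # three chapters
    chapter_story = write_chapter(f"chapter {chapter_num}")
    chapters.append(chapter_story)
    images.append(current_image_url)  # the image that was seeded before this chapter
    current_image_url = generate_image_url(chapter_story)  # seeds the next image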
@@ -36,68 +37,22 @@ def infer(image_input, audience, keyword, protagonist):
         chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=[{"role": "user", "content": chapter_prompt}])
         chapter_story = chat_completion.choices[0].message.content
         chapters.append(chapter_story)
+        images.append(current_image_url)
         current_image_url = generate_image_url(chapter_story)  # Generate the image URL for the next chapter.
 
-    formatted_text = '\n'.join(chapters)
-    return formatted_text, gr.Group.update(visible=True)
+    return {
+        "chapter1_story": chapters[0],
+        "chapter1_image": images[0],
+        "chapter2_story": chapters[1],
+        "chapter2_image": images[1],
+        "chapter3_story": chapters[2],
+        "chapter3_image": images[2],
+    }, gr.Group.update(visible=True)
 
 css = """
 #col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
 a {text-decoration-line: underline; font-weight: 600;}
 a {text-decoration-line: underline; font-weight: 600;}
-.animate-spin {
-    animation: spin 1s linear infinite;
-}
-@keyframes spin {
-    from {
-        transform: rotate(0deg);
-    }
-    to {
-        transform: rotate(360deg);
-    }
-}
-#share-btn-container {
-    display: flex;
-    padding-left: 0.5rem !important;
-    padding-right: 0.5rem !important;
-    background-color: #000000;
-    justify-content: center;
-    align-items: center;
-    border-radius: 9999px !important;
-    max-width: 13rem;
-}
-div#share-btn-container > div {
-    flex-direction: row;
-    background: black;
-    align-items: center;
-}
-#share-btn-container:hover {
-    background-color: #060606;
-}
-#share-btn {
-    all: initial;
-    color: #ffffff;
-    font-weight: 600;
-    cursor: pointer;
-    font-family: 'IBM Plex Sans', sans-serif;
-    margin-left: 0.5rem !important;
-    padding-top: 0.5rem !important;
-    padding-bottom: 0.5rem !important;
-    right: 0;
-}
-#share-btn * {
-    all: unset;
-}
-#share-btn-container div:nth-child(-n+2) {
-    width: auto !important;
-    min-height: 0px !important;
-}
-#share-btn-container .wrap {
-    display: none !important;
-}
-#share-btn-container.hidden {
-    display: none !important;
-}
 """
 
 with gr.Blocks(css=css) as demo:
@@ -116,7 +71,13 @@ with gr.Blocks(css=css) as demo:
         protagonist_in = gr.Textbox(label="Protagonist")
         submit_btn = gr.Button('Create a story and pictures')
     with gr.Column():
-        story = gr.Markdown(label="Generated story and pictures", elem_id="story")
+        chapter1_story = gr.Markdown(label="Chapter 1: Story", elem_id="chapter1_story")
+        chapter1_image = gr.Image(label="Chapter 1: Image", elem_id="chapter1_image")
+        chapter2_story = gr.Markdown(label="Chapter 2: Story", elem_id="chapter2_story")
+        chapter2_image = gr.Image(label="Chapter 2: Image", elem_id="chapter2_image")
+        chapter3_story = gr.Markdown(label="Chapter 3: Story", elem_id="chapter3_story")
+        chapter3_image = gr.Image(label="Chapter 3: Image", elem_id="chapter3_image")
 
-    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in], outputs=[story])
+    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in],
+                     outputs=[chapter1_story, chapter1_image, chapter2_story, chapter2_image, chapter3_story, chapter3_image])
 demo.queue(max_size=12).launch()
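One caveat when reading this hunk: Gradio matches a handler's return values to outputs positionally (or via a dict keyed by the component objects themselves, not by strings), so the string-keyed dict plus gr.Group.update returned by infer will not line up with the six components listed here. A minimal sketch of the positional wiring, under the assumption that each chapter yields one Markdown value and one image URL:

import gradio as gr

def infer_sketch(prompt):
    # Hypothetical stand-in for infer: six return values, one per
    # output component, matched strictly by position.
    chapters = [f"Chapter {i}: a story about {prompt}" for i in (1, 2, 3)]
    images = [f"https://image.pollinations.ai/prompt/chapter{i}" for i in (1, 2, 3)]
    return chapters[0], images[0], chapters[1], images[1], chapters[2], images[2]

with gr.Blocks() as sketch:
    prompt_in = gr.Textbox(label="Prompt")
    go_btn = gr.Button("Go")
    c1s, c1i = gr.Markdown(), gr.Image()
    c2s, c2i = gr.Markdown(), gr.Image()
    c3s, c3i = gr.Markdown(), gr.Image()
    # Six outputs, consumed in the order the function returns them.
    go_btn.click(fn=infer_sketch, inputs=[prompt_in],
                 outputs=[c1s, c1i, c2s, c2i, c3s, c3i])

# sketch.launch()  # uncomment to try it locally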
 