JUNGU committed
Commit 39f294d · 1 Parent(s): b065915

Update app.py

Files changed (1): app.py (+9, -4)
app.py CHANGED

@@ -22,7 +22,7 @@ def get_text_after_colon(input_text):
     # Return the original text if ":" is not found
     return input_text
 
-def infer(image_input, audience):
+def infer(image_input, audience, keyword, protagonist):
     gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')
     clipi_result = clipi_client.predict(
         image_input,  # str (filepath or URL to image) in 'parameter_3' Image component
@@ -37,8 +37,9 @@ def infer(image_input, audience):
     I'll give you a simple image caption, please provide a fictional story for a {audience} audience that would fit well with the image. Please be creative, do not worry and only generate a cool fictional story.
     Here's the image description:
     '{clipi_result[0]}'
+    Keyword: {keyword}
+    Protagonist: {protagonist}
     한국어로 답변해줘.
-
     """
     gr.Info('Calling ChatGPT, 이야기를 만들고 있습니다...')
     #result = client.predict(
@@ -131,10 +132,13 @@ with gr.Blocks(css=css) as demo:
         <p style="text-align: center">ChatGPT 응답이 오래 지연되거나 사용제한으로 안될 때가 있습니다.</p>
         """
     )
+
     with gr.Row():
         with gr.Column():
             image_in = gr.Image(label="이미지 입력", type="filepath", elem_id="image-in", height=420)
             audience = gr.Radio(label="대상", choices=["Children", "Adult"], value="Children")
+            keyword_in = gr.Textbox(label="핵심 키워드")  # 핵심 키워드 입력 상자
+            protagonist_in = gr.Textbox(label="주인공")  # 주인공 입력 상자
             submit_btn = gr.Button('글을 만들어 주세요')
         with gr.Column():
             #caption = gr.Textbox(label="Generated Caption")
@@ -151,8 +155,9 @@ with gr.Blocks(css=css) as demo:
         outputs=[story, share_group],
         cache_examples=True
     )
-
-    submit_btn.click(fn=infer, inputs=[image_in, audience], outputs=[story, share_group])
+
+    submit_btn.click(fn=infer, inputs=[image_in, audience, keyword_in, protagonist_in], outputs=[story, share_group])
+    # submit_btn.click(fn=infer, inputs=[image_in, audience], outputs=[story, share_group])
     share_button.click(None, [], [], _js=share_js)
 
 demo.queue(max_size=12).launch()
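
For reference, a minimal sketch of how the new keyword and protagonist values end up in the prompt that infer() sends on to ChatGPT. The helper name build_prompt and the sample values are illustrative only; the actual app interpolates these fields directly inside the f-string in infer(), as the diff above shows.

# Minimal sketch (not part of app.py): shows how the new keyword/protagonist
# inputs are interpolated into the story prompt.
# `build_prompt` is a hypothetical helper; the real code builds this
# f-string inline inside infer().

def build_prompt(caption: str, audience: str, keyword: str, protagonist: str) -> str:
    # Mirrors the template in infer(): caption first, then the two new
    # fields, then the instruction to answer in Korean.
    return f"""
    I'll give you a simple image caption, please provide a fictional story for a {audience} audience that would fit well with the image. Please be creative, do not worry and only generate a cool fictional story.
    Here's the image description:
    '{caption}'
    Keyword: {keyword}
    Protagonist: {protagonist}
    한국어로 답변해줘.
    """

if __name__ == "__main__":
    # Example values are made up for illustration.
    print(build_prompt("a cat sitting on a windowsill", "Children", "friendship", "a brave kitten"))

In the Gradio UI, the same two values come from keyword_in and protagonist_in, which is why submit_btn.click now passes four inputs to infer instead of two.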