seawolf2357 committed
Commit 3e6b150 · verified · 1 Parent(s): 324ddc8

Update app.py

Files changed (1)
  1. app.py +54 -35
app.py CHANGED
@@ -2,22 +2,50 @@ import gradio as gr
 import requests
 import json
 
-# Set the API URL (example only; use an appropriate API in real use)
-API_URL = "https://api.openai.com/v1/chat/completions"
+# Set the API URL (OpenAI GPT-3 API)
+API_URL = "https://api.openai.com/v1/engines/davinci-codex/completions"
 
-# Function that converts the script into image-generation prompts
-def script_to_prompts(script):
-    lines = script.split('\n')  # split the script into lines
-    prompts = []
+# Function that converts the script into image-generation prompts (including translation)
+def script_to_prompts(script, openai_api_key):
+    lines = script.split('\n')
+    translated_prompts = []
     for line in lines:
-        # Build a prompt from the characters, actions, setting, etc.
-        prompt = f"3d style, like Harry Potter, {line}, 4k"
-        prompts.append(prompt)
-    return prompts
+        # Translate the Korean script line into English
+        translated_line = translate_to_english(line, openai_api_key)
+        # Build the image-generation prompt from the translated English line
+        prompt = f"3d style, like Harry Potter, {translated_line}, 4k"
+        translated_prompts.append(prompt)
+    return translated_prompts
+
+# Function that translates text into English using the OpenAI API
+def translate_to_english(korean_text, openai_api_key):
+    translation_prompt = f"Translate the following Korean text to English: '{korean_text}'"
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {openai_api_key}"
+    }
+
+    payload = {
+        "prompt": translation_prompt,
+        "temperature": 0.5,  # moderate temperature for the translation task
+        "max_tokens": 60,  # limit the length of the translation
+        "top_p": 1.0,
+        "frequency_penalty": 0.0,
+        "presence_penalty": 0.0,
+    }
+
+    response = requests.post(API_URL, headers=headers, json=payload)
+    if response.status_code == 200:
+        response_data = response.json()
+        generated_text = response_data['choices'][0]['text'].strip()
+        return generated_text
+    else:
+        return "Error: Unable to generate response."
 
 # Function that generates the script using the OpenAI API
 def predict(inputs, top_p, temperature, openai_api_key):
-    narration_prompt = f"Write a script to be used in a children's animation video. It must be written in Korean. Input: '{inputs}'"
+    narration_prompt = f"Write a script to be used in a children's animation video. It must be written in Korean. Do not reveal or print any stage directions, instructions, or background descriptions; following an introduction-development-climax-resolution structure that covers the reason for the adventure, the crisis, the challenge, the resolution, and the lesson, output only pure narration, one line at a time and no more than 10 lines in total. Input: '{inputs}'"
 
     headers = {
         "Content-Type": "application/json",
@@ -25,18 +53,17 @@ def predict(inputs, top_p, temperature, openai_api_key):
     }
 
     payload = {
-        "model": "gpt-4-1106-preview",
-        "messages": [{"role": "system", "content": narration_prompt}],
+        "model": "gpt-3.5-turbo",
+        "prompt": narration_prompt,
         "temperature": temperature,
         "top_p": top_p,
-        "n": 1,
-        "max_tokens": 1000
+        "max_tokens": 150
     }
 
-    response = requests.post(API_URL, headers=headers, json=payload)
+    response = requests.post(API_URL, headers=headers, data=json.dumps(payload))
     if response.status_code == 200:
-        response_data = response.json()
-        generated_text = response_data['choices'][0]['message']['content']
+        response_data = json.loads(response.text)
+        generated_text = response_data['choices'][0]['text'].strip()
         return generated_text
     else:
         return "Error: Unable to generate response."
@@ -45,28 +72,20 @@ def predict(inputs, top_p, temperature, openai_api_key):
 with gr.Blocks() as demo:
     gr.Markdown("<h1 align='center'>Tori's Adventure: 3D Animation Generator</h1>")
     with gr.Row():
-        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
-        inputs = gr.Textbox(placeholder="Type here.", label="Enter a topic or sentence for the children's animation script you want to generate.")
+        openai_api_key = gr.Textbox(label="Enter your OpenAI API key here", type='password')
+        inputs = gr.Textbox(label="Enter a topic or sentence for the children's animation script you want to generate.", placeholder="Type here.")
     top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Top-p (nucleus sampling)")
-    temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, label="Temperature")
+    temperature = gr.Slider(minimum=0, maximum=1.0, value=0.5, step=0.01, label="Temperature")
     script_output = gr.Textbox(label="Generated Script", multiline=True)
     prompt_output = gr.Textbox(label="Image Generation Prompts", multiline=True)
-    with gr.Row():
-        submit_button = gr.Button("Generate")
+    generate_button = gr.Button("Generate")
 
     # Generate the script and convert it into prompts when the button is clicked
-    def generate_and_convert(inputs, top_p, temperature, openai_api_key):
-        script = predict(inputs, top_p, temperature, openai_api_key)
-        prompts = script_to_prompts(script)
-        return script, '\n'.join(prompts)
-
-    submit_button.click(fn=generate_and_convert,
-                        inputs=[inputs, top_p, temperature, openai_api_key],
-                        outputs=[script_output, prompt_output])
+    def generate_and_convert(input_text, top_p, temperature, api_key):
+        generated_script = predict(input_text, top_p, temperature, api_key)
+        prompts = script_to_prompts(generated_script, api_key)
+        return generated_script, "\n".join(prompts)
 
-    examples = gr.Examples(examples=[
-        ["Script: Tori set off on an adventure into the dark forest."],
-        ["Script: Tori set off on an adventure to the sea."]
-    ], inputs=[inputs], fn=predict, outputs=script_output)
+    generate_button.click(fn=generate_and_convert, inputs=[inputs, top_p, temperature, openai_api_key], outputs=[script_output, prompt_output])
 
 demo.launch()
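
Note on the updated request: the new payload mixes completions-style fields ("prompt", choices[0]['text']) with the chat model name "gpt-3.5-turbo", while API_URL still points at the engines/davinci-codex completions endpoint. For comparison, here is a minimal sketch of how predict's request could target the chat completions endpoint that the previous revision used; chat_predict and CHAT_API_URL are illustrative names and are not part of this commit.

import requests

# Chat completions endpoint, as used by the previous revision of app.py
CHAT_API_URL = "https://api.openai.com/v1/chat/completions"

def chat_predict(narration_prompt, top_p, temperature, openai_api_key):
    # Illustrative helper (not part of the commit): send a chat-completions request
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "system", "content": narration_prompt}],
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": 150,
    }
    response = requests.post(CHAT_API_URL, headers=headers, json=payload)
    if response.status_code == 200:
        # Chat responses carry the text under message.content, not choices[0]['text']
        return response.json()["choices"][0]["message"]["content"]
    return "Error: Unable to generate response."

With this shape the model name, the messages format, and the endpoint agree, and the reply is read from message.content rather than from a completions-style text field.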