openfree committed on
Commit
6bdc489
ยท
verified ยท
1 Parent(s): 19dc940

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -356
app.py CHANGED
@@ -1,372 +1,100 @@
1
  import gradio as gr
2
- import websocket
3
- import json
4
- import base64
5
- import numpy as np
6
- import threading
7
- import queue
8
  import os
9
- from datetime import datetime
10
- import pyaudio
11
- import wave
12
- import io
13
 
14
- class RealtimeTranslator:
15
- def __init__(self):
16
- self.ws = None
17
- self.api_key = os.getenv("OPENAI_API_KEY")
18
- self.audio_queue = queue.Queue()
19
- self.transcript_queue = queue.Queue()
20
- self.translation_queue = queue.Queue()
21
- self.is_connected = False
22
- self.is_recording = False
23
- self.source_lang = "ko"
24
- self.target_lang = "en"
25
-
26
- # PyAudio ์„ค์ •
27
- self.p = pyaudio.PyAudio()
28
- self.sample_rate = 24000
29
- self.chunk_size = 1024
30
- self.audio_format = pyaudio.paInt16
31
-
32
- def connect_websocket(self):
33
- """WebSocket ์—ฐ๊ฒฐ ์„ค์ •"""
34
- try:
35
- url = "wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-12-17"
36
- headers = {
37
- "Authorization": f"Bearer {self.api_key}",
38
- "OpenAI-Beta": "realtime=v1"
39
- }
40
-
41
- self.ws = websocket.WebSocketApp(
42
- url,
43
- header=headers,
44
- on_open=self.on_open,
45
- on_message=self.on_message,
46
- on_error=self.on_error,
47
- on_close=self.on_close
48
  )
49
-
50
- # WebSocket์„ ๋ณ„๋„ ์Šค๋ ˆ๋“œ์—์„œ ์‹คํ–‰
51
- wst = threading.Thread(target=self.ws.run_forever)
52
- wst.daemon = True
53
- wst.start()
54
-
55
- return "์—ฐ๊ฒฐ ์„ฑ๊ณต"
56
- except Exception as e:
57
- return f"์—ฐ๊ฒฐ ์‹คํŒจ: {str(e)}"
58
-
59
- def on_open(self, ws):
60
- """WebSocket ์—ฐ๊ฒฐ ์‹œ ํ˜ธ์ถœ"""
61
- self.is_connected = True
62
- print("WebSocket ์—ฐ๊ฒฐ๋จ")
63
-
64
- # ์„ธ์…˜ ์„ค์ •
65
- session_update = {
66
- "type": "session.update",
67
- "session": {
68
- "modalities": ["text", "audio"],
69
- "instructions": f"You are a helpful translator. Translate between {self.get_language_name(self.source_lang)} and {self.get_language_name(self.target_lang)}. Respond with both the transcription and translation.",
70
- "voice": "alloy",
71
- "input_audio_format": "pcm16",
72
- "output_audio_format": "pcm16",
73
- "input_audio_transcription": {
74
- "model": "whisper-1"
75
- },
76
- "turn_detection": {
77
- "type": "server_vad",
78
- "threshold": 0.5,
79
- "prefix_padding_ms": 300,
80
- "silence_duration_ms": 500
81
- }
82
- }
83
- }
84
- ws.send(json.dumps(session_update))
85
-
86
- def on_message(self, ws, message):
87
- """๋ฉ”์‹œ์ง€ ์ˆ˜์‹  ์‹œ ํ˜ธ์ถœ"""
88
- try:
89
- event = json.loads(message)
90
- event_type = event.get("type")
91
-
92
- if event_type == "conversation.item.input_audio_transcription.completed":
93
- # ์Œ์„ฑ ์ „์‚ฌ ์™„๋ฃŒ
94
- transcript = event.get("transcript", "")
95
- self.transcript_queue.put(transcript)
96
-
97
- # ๋ฒˆ์—ญ ์š”์ฒญ
98
- self.request_translation(transcript)
99
-
100
- elif event_type == "response.text.delta":
101
- # ๋ฒˆ์—ญ ๊ฒฐ๊ณผ ์ˆ˜์‹ 
102
- delta = event.get("delta", "")
103
- self.translation_queue.put(delta)
104
-
105
- elif event_type == "response.audio.delta":
106
- # ์˜ค๋””์˜ค ๋ฐ์ดํ„ฐ ์ˆ˜์‹ 
107
- audio_data = base64.b64decode(event.get("delta", ""))
108
- self.audio_queue.put(audio_data)
109
-
110
- elif event_type == "error":
111
- error_msg = event.get("error", {}).get("message", "Unknown error")
112
- print(f"Error: {error_msg}")
113
-
114
- except Exception as e:
115
- print(f"๋ฉ”์‹œ์ง€ ์ฒ˜๋ฆฌ ์˜ค๋ฅ˜: {str(e)}")
116
-
117
- def on_error(self, ws, error):
118
- """์˜ค๋ฅ˜ ๋ฐœ์ƒ ์‹œ ํ˜ธ์ถœ"""
119
- print(f"WebSocket ์˜ค๋ฅ˜: {error}")
120
- self.is_connected = False
121
-
122
- def on_close(self, ws, close_status_code, close_msg):
123
- """์—ฐ๊ฒฐ ์ข…๋ฃŒ ์‹œ ํ˜ธ์ถœ"""
124
- print("WebSocket ์—ฐ๊ฒฐ ์ข…๋ฃŒ")
125
- self.is_connected = False
126
-
127
- def get_language_name(self, lang_code):
128
- """์–ธ์–ด ์ฝ”๋“œ๋ฅผ ์–ธ์–ด ์ด๋ฆ„์œผ๋กœ ๋ณ€ํ™˜"""
129
- languages = {
130
- "ko": "Korean",
131
- "en": "English",
132
- "ja": "Japanese",
133
- "zh": "Chinese",
134
- "es": "Spanish",
135
- "fr": "French"
136
- }
137
- return languages.get(lang_code, lang_code)
138
-
139
- def request_translation(self, text):
140
- """๋ฒˆ์—ญ ์š”์ฒญ"""
141
- if not self.ws or not self.is_connected:
142
- return
143
-
144
- message = {
145
- "type": "conversation.item.create",
146
- "item": {
147
- "type": "message",
148
- "role": "user",
149
- "content": [{
150
- "type": "input_text",
151
- "text": f"Translate this {self.get_language_name(self.source_lang)} text to {self.get_language_name(self.target_lang)}: '{text}'"
152
- }]
153
- }
154
- }
155
-
156
- self.ws.send(json.dumps(message))
157
-
158
- # ์‘๋‹ต ์ƒ์„ฑ ์š”์ฒญ
159
- response_create = {"type": "response.create"}
160
- self.ws.send(json.dumps(response_create))
161
-
162
- def send_audio_chunk(self, audio_data):
163
- """์˜ค๋””์˜ค ์ฒญํฌ ์ „์†ก"""
164
- if not self.ws or not self.is_connected:
165
- return
166
 
167
- # PCM16 ํ˜•์‹์œผ๋กœ ์ธ์ฝ”๋”ฉ
168
- audio_base64 = base64.b64encode(audio_data).decode('utf-8')
 
 
 
 
169
 
170
- message = {
171
- "type": "input_audio_buffer.append",
172
- "audio": audio_base64
173
- }
174
 
175
- self.ws.send(json.dumps(message))
176
-
177
- def process_audio(self, audio_file):
178
- """์˜ค๋””์˜ค ํŒŒ์ผ ์ฒ˜๋ฆฌ ๋ฐ ์ „์†ก"""
179
- if not self.is_connected:
180
- return "WebSocket์ด ์—ฐ๊ฒฐ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค.", ""
181
 
182
- try:
183
- # ์˜ค๋””์˜ค ํŒŒ์ผ ์ฝ๊ธฐ
184
- with wave.open(audio_file, 'rb') as wf:
185
- # ์˜ค๋””์˜ค๋ฅผ 24kHz PCM16์œผ๋กœ ๋ณ€ํ™˜ ํ•„์š”
186
- audio_data = wf.readframes(wf.getnframes())
187
-
188
- # ์˜ค๋””์˜ค ๋ฐ์ดํ„ฐ๋ฅผ ์ฒญํฌ๋กœ ๋‚˜๋ˆ„์–ด ์ „์†ก
189
- chunk_size = 4096
190
- for i in range(0, len(audio_data), chunk_size):
191
- chunk = audio_data[i:i+chunk_size]
192
- self.send_audio_chunk(chunk)
193
-
194
- # ์˜ค๋””์˜ค ๋ฒ„ํผ ์ปค๋ฐ‹
195
- commit_message = {"type": "input_audio_buffer.commit"}
196
- self.ws.send(json.dumps(commit_message))
197
-
198
- # ์ „์‚ฌ ๋ฐ ๋ฒˆ์—ญ ๊ฒฐ๊ณผ ๋Œ€๊ธฐ
199
- transcript = ""
200
- translation = ""
201
-
202
- # ํƒ€์ž„์•„์›ƒ ์„ค์ • (10์ดˆ)
203
- import time
204
- timeout = 10
205
- start_time = time.time()
206
-
207
- while time.time() - start_time < timeout:
208
- # ์ „์‚ฌ ๊ฒฐ๊ณผ ํ™•์ธ
209
- try:
210
- transcript = self.transcript_queue.get(timeout=0.1)
211
- except queue.Empty:
212
- pass
213
-
214
- # ๋ฒˆ์—ญ ๊ฒฐ๊ณผ ํ™•์ธ
215
- try:
216
- while not self.translation_queue.empty():
217
- translation += self.translation_queue.get()
218
- except queue.Empty:
219
- pass
220
-
221
- if transcript and translation:
222
- break
223
-
224
- return transcript, translation
225
-
226
- except Exception as e:
227
- return f"์˜ค๋ฅ˜: {str(e)}", ""
228
-
229
- def disconnect(self):
230
- """WebSocket ์—ฐ๊ฒฐ ์ข…๋ฃŒ"""
231
- if self.ws:
232
- self.ws.close()
233
- self.is_connected = False
234
- return "์—ฐ๊ฒฐ ์ข…๋ฃŒ๋จ"
235
 
236
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ
237
- def create_interface():
238
- translator = RealtimeTranslator()
239
-
240
- def connect():
241
- if not translator.api_key:
242
- return "API ํ‚ค๊ฐ€ ์„ค์ •๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ํ™˜๊ฒฝ ๋ณ€์ˆ˜ OPENAI_API_KEY๋ฅผ ์„ค์ •ํ•˜์„ธ์š”.", gr.update(value=False)
243
- result = translator.connect_websocket()
244
- return result, gr.update(value=translator.is_connected)
 
 
 
 
 
 
 
245
 
246
- def disconnect():
247
- result = translator.disconnect()
248
- return result, gr.update(value=False)
 
 
249
 
250
- def translate_audio(audio_file, source_lang, target_lang):
251
- if not audio_file:
252
- return "์˜ค๋””์˜ค ํŒŒ์ผ์„ ์„ ํƒํ•˜์„ธ์š”.", "", None
253
-
254
- translator.source_lang = source_lang
255
- translator.target_lang = target_lang
256
-
257
- transcript, translation = translator.process_audio(audio_file)
258
-
259
- # ์˜ค๋””์˜ค ์‘๋‹ต ์ฒ˜๋ฆฌ (ํ˜„์žฌ๋Š” ํ…์ŠคํŠธ๋งŒ ๋ฐ˜ํ™˜)
260
- return transcript, translation, None
261
 
262
- def swap_languages(source, target):
263
- return target, source
 
264
 
265
- with gr.Blocks(title="์‹ค์‹œ๊ฐ„ ์Œ์„ฑ ๋ฒˆ์—ญ๊ธฐ") as demo:
266
- gr.Markdown("# ๐ŸŽ™๏ธ OpenAI Realtime API ์Œ์„ฑ ๋ฒˆ์—ญ๊ธฐ")
267
- gr.Markdown("์‹ค์‹œ๊ฐ„์œผ๋กœ ์Œ์„ฑ์„ ์ „์‚ฌํ•˜๊ณ  ๋ฒˆ์—ญํ•ฉ๋‹ˆ๋‹ค.")
268
-
269
- with gr.Row():
270
- with gr.Column(scale=1):
271
- gr.Markdown("### ์—ฐ๊ฒฐ ์ƒํƒœ")
272
- connection_status = gr.Checkbox(label="์—ฐ๊ฒฐ๋จ", value=False, interactive=False)
273
- connect_btn = gr.Button("์—ฐ๊ฒฐ", variant="primary")
274
- disconnect_btn = gr.Button("์—ฐ๊ฒฐ ์ข…๋ฃŒ", variant="secondary")
275
- status_text = gr.Textbox(label="์ƒํƒœ ๋ฉ”์‹œ์ง€", value="์—ฐ๊ฒฐ๋˜์ง€ ์•Š์Œ")
276
-
277
- with gr.Row():
278
- with gr.Column(scale=2):
279
- gr.Markdown("### ์–ธ์–ด ์„ค์ •")
280
- with gr.Row():
281
- source_lang = gr.Dropdown(
282
- choices=[("ํ•œ๊ตญ์–ด", "ko"), ("์˜์–ด", "en"), ("์ผ๋ณธ์–ด", "ja"),
283
- ("์ค‘๊ตญ์–ด", "zh"), ("์ŠคํŽ˜์ธ์–ด", "es"), ("ํ”„๋ž‘์Šค์–ด", "fr")],
284
- value="ko",
285
- label="์ž…๋ ฅ ์–ธ์–ด"
286
- )
287
- swap_btn = gr.Button("โ†”๏ธ", scale=0)
288
- target_lang = gr.Dropdown(
289
- choices=[("ํ•œ๊ตญ์–ด", "ko"), ("์˜์–ด", "en"), ("์ผ๋ณธ์–ด", "ja"),
290
- ("์ค‘๊ตญ์–ด", "zh"), ("์ŠคํŽ˜์ธ์–ด", "es"), ("ํ”„๋ž‘์Šค์–ด", "fr")],
291
- value="en",
292
- label="์ถœ๋ ฅ ์–ธ์–ด"
293
- )
294
-
295
- with gr.Row():
296
- with gr.Column():
297
- gr.Markdown("### ์Œ์„ฑ ์ž…๋ ฅ")
298
- audio_input = gr.Audio(
299
- source="microphone",
300
- type="filepath",
301
- label="๋…น์Œํ•˜๊ธฐ"
302
- )
303
- translate_btn = gr.Button("๋ฒˆ์—ญํ•˜๊ธฐ", variant="primary")
304
-
305
- with gr.Row():
306
- with gr.Column():
307
- gr.Markdown("### ๊ฒฐ๊ณผ")
308
- transcript_output = gr.Textbox(
309
- label="์ „์‚ฌ๋œ ํ…์ŠคํŠธ",
310
- placeholder="์Œ์„ฑ ์ „์‚ฌ ๊ฒฐ๊ณผ๊ฐ€ ์—ฌ๊ธฐ์— ํ‘œ์‹œ๋ฉ๋‹ˆ๋‹ค...",
311
- lines=3
312
- )
313
- translation_output = gr.Textbox(
314
- label="๋ฒˆ์—ญ๋œ ํ…์ŠคํŠธ",
315
- placeholder="๋ฒˆ์—ญ ๊ฒฐ๊ณผ๊ฐ€ ์—ฌ๊ธฐ์— ํ‘œ์‹œ๋ฉ๋‹ˆ๋‹ค...",
316
- lines=3
317
- )
318
- audio_output = gr.Audio(
319
- label="๋ฒˆ์—ญ๋œ ์Œ์„ฑ",
320
- type="filepath"
321
- )
322
-
323
- # ์ด๋ฒคํŠธ ํ•ธ๋“ค๋Ÿฌ
324
- connect_btn.click(
325
- fn=connect,
326
- outputs=[status_text, connection_status]
327
- )
328
-
329
- disconnect_btn.click(
330
- fn=disconnect,
331
- outputs=[status_text, connection_status]
332
- )
333
-
334
- swap_btn.click(
335
- fn=swap_languages,
336
- inputs=[source_lang, target_lang],
337
- outputs=[source_lang, target_lang]
338
- )
339
-
340
- translate_btn.click(
341
- fn=translate_audio,
342
- inputs=[audio_input, source_lang, target_lang],
343
- outputs=[transcript_output, translation_output, audio_output]
344
- )
345
-
346
- gr.Markdown("""
347
- ### ๐Ÿ“ ์‚ฌ์šฉ ๋ฐฉ๋ฒ•
348
- 1. **์—ฐ๊ฒฐ** ๋ฒ„ํŠผ์„ ํด๋ฆญํ•˜์—ฌ OpenAI Realtime API์— ์—ฐ๊ฒฐํ•ฉ๋‹ˆ๋‹ค.
349
- 2. ์ž…๋ ฅ ์–ธ์–ด์™€ ์ถœ๋ ฅ ์–ธ์–ด๋ฅผ ์„ ํƒํ•ฉ๋‹ˆ๋‹ค.
350
- 3. ๋งˆ์ดํฌ ๋ฒ„ํŠผ์„ ํด๋ฆญํ•˜์—ฌ ์Œ์„ฑ์„ ๋…น์Œํ•ฉ๋‹ˆ๋‹ค.
351
- 4. **๋ฒˆ์—ญํ•˜๊ธฐ** ๋ฒ„ํŠผ์„ ํด๋ฆญํ•˜๋ฉด ์ „์‚ฌ ๋ฐ ๋ฒˆ์—ญ์ด ์ง„ํ–‰๋ฉ๋‹ˆ๋‹ค.
352
-
353
- ### โš ๏ธ ์ฃผ์˜์‚ฌํ•ญ
354
- - ํ™˜๊ฒฝ ๋ณ€์ˆ˜ `OPENAI_API_KEY`๊ฐ€ ์„ค์ •๋˜์–ด ์žˆ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค.
355
- - ๊ธด ์˜ค๋””์˜ค์˜ ๊ฒฝ์šฐ ์ฒ˜๋ฆฌ ์‹œ๊ฐ„์ด ์˜ค๋ž˜ ๊ฑธ๋ฆด ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
356
- """)
357
 
358
- return demo
 
 
 
 
359
 
360
- # ์‹คํ–‰
361
  if __name__ == "__main__":
362
- # ํ•„์š”ํ•œ ํŒจํ‚ค์ง€ ์„ค์น˜ ์•ˆ๋‚ด
363
- print("""
364
- ํ•„์š”ํ•œ ํŒจํ‚ค์ง€:
365
- pip install gradio websocket-client pyaudio wave numpy
366
-
367
- ํ™˜๊ฒฝ ๋ณ€์ˆ˜ ์„ค์ •:
368
- export OPENAI_API_KEY="your-api-key-here"
369
- """)
370
-
371
- demo = create_interface()
372
- demo.launch(share=True)
 
1
  import gradio as gr
2
+ import openai
 
 
 
 
 
3
  import os
4
+ from dotenv import load_dotenv
 
 
 
5
 
6
+ # ํ™˜๊ฒฝ๋ณ€์ˆ˜ ๋กœ๋“œ
7
+ load_dotenv()
8
+
9
+ # OpenAI ํด๋ผ์ด์–ธํŠธ ์„ค์ •
10
+ api_key = os.getenv("OPENAI_API_KEY")
11
+ if not api_key:
12
+ print("โš ๏ธ OPENAI_API_KEY๋ฅผ .env ํŒŒ์ผ์— ์„ค์ •ํ•˜์„ธ์š”!")
13
+ print("์˜ˆ: OPENAI_API_KEY=sk-...")
14
+
15
+ client = openai.OpenAI(api_key=api_key)
16
+
17
def translate_audio(audio_file, source_lang, target_lang):
    """Transcribe an audio file, translate the text, and synthesize speech.

    Pipeline: Whisper (speech → text) → chat completion (translation) →
    TTS (text → speech).

    Args:
        audio_file: Path to the recorded/uploaded audio file, or None.
        source_lang: Source language name (e.g. "Korean"); used verbatim in the prompt.
        target_lang: Target language name (e.g. "English").

    Returns:
        (original_text, translated_text, output_audio_path). On failure the
        first element carries an error message and the audio path is None.
    """
    import tempfile

    if not audio_file:
        return "오디오 파일을 업로드하거나 녹음하세요.", "", None

    if not api_key:
        return "API 키가 설정되지 않았습니다.", "", None

    try:
        # 1. Whisper: speech -> text
        with open(audio_file, "rb") as f:
            transcript = client.audio.transcriptions.create(
                model="whisper-1",
                file=f,
            )
        original_text = transcript.text

        # 2. GPT-4: translate the transcription
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": f"Translate from {source_lang} to {target_lang}. Only provide the translation without any explanation."},
                {"role": "user", "content": original_text},
            ],
            temperature=0.3,  # low temperature keeps the translation literal
        )
        translated_text = response.choices[0].message.content

        # 3. TTS: translated text -> speech
        tts_response = client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=translated_text,
        )

        # Fix: write to a unique temp file instead of a fixed
        # "translated_audio.mp3" in the CWD, so concurrent Gradio requests
        # cannot overwrite each other's output.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as out:
            out.write(tts_response.content)
            output_file = out.name

        return original_text, translated_text, output_file

    except Exception as e:
        # Boundary handler: surface the error in the UI instead of crashing.
        return f"오류 발생: {str(e)}", "", None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
# Gradio interface: language selectors, audio input, action button, results.
with gr.Blocks(title="음성 번역기") as app:
    gr.Markdown("# 🎙️ AI 음성 번역기")
    gr.Markdown("음성을 녹음하거나 업로드하면 자동으로 번역합니다.")

    with gr.Row():
        # Language names are passed verbatim into the translation prompt.
        source_lang = gr.Dropdown(
            ["Korean", "English", "Japanese", "Chinese", "Spanish", "French"],
            value="Korean",
            label="입력 언어"
        )
        target_lang = gr.Dropdown(
            ["Korean", "English", "Japanese", "Chinese", "Spanish", "French"],
            value="English",
            label="출력 언어"
        )

    # type="filepath" hands translate_audio a path on disk rather than raw samples.
    audio_input = gr.Audio(
        sources=["microphone", "upload"],
        type="filepath",
        label="음성 입력 (녹음 또는 파일 업로드)"
    )

    translate_btn = gr.Button("번역하기", variant="primary")

    with gr.Row():
        original_text = gr.Textbox(label="원본 텍스트", lines=3)
        translated_text = gr.Textbox(label="번역된 텍스트", lines=3)

    audio_output = gr.Audio(label="번역된 음성", type="filepath")

    # Wire the button to the transcribe→translate→TTS pipeline.
    translate_btn.click(
        translate_audio,
        inputs=[audio_input, source_lang, target_lang],
        outputs=[original_text, translated_text, audio_output]
    )
98
 
 
99
if __name__ == "__main__":
    # Bind on all interfaces (container-friendly) on port 7860 and also
    # create a public Gradio share link.
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)