aliceblue11 committed on
Commit
7f46e0a
·
verified ·
1 Parent(s): 2cd8135

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -440
app.py CHANGED
@@ -40,468 +40,58 @@ def get_client(model_name):
40
  return InferenceClient(model_id, token=hf_token)
41
 
42
 
43
- def respond_hf_qna(
44
- question: str,
45
- model_name: str,
46
- max_tokens: int,
47
- temperature: float,
48
- top_p: float,
49
- system_message: str
50
- ):
51
  """
52
- HuggingFace 모델(Zephyr 등)에 대해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
53
  """
54
  try:
55
- client = get_client(model_name)
56
- except ValueError as e:
57
- return f"오류: {str(e)}"
58
-
59
- messages = [
60
- {"role": "system", "content": system_message},
61
- {"role": "user", "content": question}
62
- ]
63
-
64
- try:
65
- response = client.chat_completion(
66
- messages,
67
- max_tokens=max_tokens,
68
- temperature=temperature,
69
- top_p=top_p,
70
- stream=False,
71
- )
72
- assistant_message = response.choices[0].message.content
73
- return assistant_message
74
-
75
  except Exception as e:
76
  return f"오류가 발생했습니다: {str(e)}"
77
 
78
 
79
- def respond_cohere_qna(
80
- question: str,
81
- system_message: str,
82
- max_tokens: int,
83
- temperature: float,
84
- top_p: float
85
- ):
86
- """
87
- Cohere Command R+ 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
88
- """
89
- model_name = "Cohere Command R+"
90
- try:
91
- client = get_client(model_name)
92
- except ValueError as e:
93
- return f"오류: {str(e)}"
94
-
95
- messages = [
96
- {"role": "system", "content": system_message},
97
- {"role": "user", "content": question}
98
- ]
99
-
100
- try:
101
- response_full = client.chat_completion(
102
- messages,
103
- max_tokens=max_tokens,
104
- temperature=temperature,
105
- top_p=top_p,
106
- )
107
- assistant_message = response_full.choices[0].message.content
108
- return assistant_message
109
- except Exception as e:
110
- return f"오류가 발생했습니다: {str(e)}"
111
-
112
-
113
- def respond_chatgpt_qna(
114
- question: str,
115
- system_message: str,
116
- max_tokens: int,
117
- temperature: float,
118
- top_p: float
119
- ):
120
- """
121
- ChatGPT(OpenAI) 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
122
- """
123
- openai_token = os.getenv("OPENAI_TOKEN")
124
- if not openai_token:
125
- return "OpenAI API 토큰이 필요합니다."
126
-
127
- openai.api_key = openai_token
128
-
129
- messages = [
130
- {"role": "system", "content": system_message},
131
- {"role": "user", "content": question}
132
- ]
133
-
134
- try:
135
- response = openai.ChatCompletion.create(
136
- model="gpt-4o-mini", # 필요한 경우 변경
137
- messages=messages,
138
- max_tokens=max_tokens,
139
- temperature=temperature,
140
- top_p=top_p,
141
- )
142
- assistant_message = response.choices[0].message['content']
143
- return assistant_message
144
- except Exception as e:
145
- return f"오류가 발생했습니다: {str(e)}"
146
-
147
-
148
- def respond_deepseek_qna(
149
- question: str,
150
- system_message: str,
151
- max_tokens: int,
152
- temperature: float,
153
- top_p: float
154
- ):
155
- """
156
- DeepSeek 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
157
- """
158
- deepseek_token = os.getenv("DEEPSEEK_TOKEN")
159
- if not deepseek_token:
160
- return "DeepSeek API 토큰이 필요합니다."
161
-
162
- openai.api_key = deepseek_token
163
- openai.api_base = "https://api.deepseek.com/v1"
164
-
165
- messages = [
166
- {"role": "system", "content": system_message},
167
- {"role": "user", "content": question}
168
- ]
169
-
170
- try:
171
- response = openai.ChatCompletion.create(
172
- model="deepseek-chat",
173
- messages=messages,
174
- max_tokens=max_tokens,
175
- temperature=temperature,
176
- top_p=top_p,
177
- )
178
- assistant_message = response.choices[0].message['content']
179
- return assistant_message
180
- except Exception as e:
181
- return f"오류가 발생했습니다: {str(e)}"
182
-
183
-
184
- def respond_claude_qna(
185
- question: str,
186
- system_message: str,
187
- max_tokens: int,
188
- temperature: float,
189
- top_p: float
190
- ) -> str:
191
- """
192
- Claude API를 사용한 개선된 응답 생성 함수
193
- """
194
- claude_api_key = os.getenv("CLAUDE_TOKEN")
195
- if not claude_api_key:
196
- return "Claude API 토큰이 필요합니다."
197
-
198
- try:
199
- client = anthropic.Anthropic(api_key=claude_api_key)
200
-
201
- # 메시지 생성
202
- message = client.messages.create(
203
- model="claude-3-haiku-20240307",
204
- max_tokens=max_tokens,
205
- temperature=temperature,
206
- system=system_message,
207
- messages=[
208
- {
209
- "role": "user",
210
- "content": question
211
- }
212
- ]
213
- )
214
-
215
- return message.content[0].text
216
-
217
- except anthropic.APIError as ae:
218
- return f"Claude API 오류: {str(ae)}"
219
- except anthropic.RateLimitError:
220
- return "요청 한도를 초과했습니다. 잠시 후 다시 시도해주세요."
221
- except Exception as e:
222
- return f"예상치 못한 오류가 발생했습니다: {str(e)}"
223
-
224
-
225
  #############################
226
  # [기본코드] UI 부분 - 수정/삭제 불가
227
  #############################
228
 
229
  with gr.Blocks() as demo:
230
- gr.Markdown("# LLM 플레이그라운드")
231
 
232
  #################
233
- # 일반 모델
234
  #################
235
- with gr.Tab("일반 모델"):
236
- # 모델명 선택
237
- model_name = gr.Radio(
238
- choices=list(MODELS.keys()),
239
- label="Language Model (HuggingFace)",
240
- value="Zephyr 7B Beta"
241
  )
242
-
243
- # 입력1 ~ 입력5 (세로로 하나씩)
244
- input1 = gr.Textbox(label="입력1", lines=1)
245
- input2 = gr.Textbox(label="입력2", lines=1)
246
- input3 = gr.Textbox(label="입력3", lines=1)
247
- input4 = gr.Textbox(label="입력4", lines=1)
248
- input5 = gr.Textbox(label="입력5", lines=1)
249
-
250
- # 결과
251
- answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
252
-
253
- # 고급 설정 - System Message를 Max Tokens 위로 이동
254
- with gr.Accordion("고급 설정 (일반 모델)", open=False):
255
- system_message = gr.Textbox(
256
- value="""반드시 한글로 답변할 것.
257
- 너는 최고의 비서이다.
258
- 내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
259
- """,
260
- label="System Message",
261
- lines=3
262
- )
263
- max_tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")
264
- temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
265
- top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
266
-
267
- submit_button = gr.Button("전송")
268
-
269
- def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg):
270
- # 입력1~5를 공백 기준으로 합쳐서 question 구성
271
- question = " ".join([i1, i2, i3, i4, i5])
272
- return respond_hf_qna(
273
- question=question,
274
- model_name=m_name,
275
- max_tokens=mt,
276
- temperature=temp,
277
- top_p=top_p_,
278
- system_message=sys_msg
279
- )
280
-
281
- submit_button.click(
282
- fn=merge_and_call_hf,
283
- inputs=[
284
- input1, input2, input3, input4, input5,
285
- model_name,
286
- max_tokens,
287
- temperature,
288
- top_p,
289
- system_message
290
- ],
291
- outputs=answer_output
292
  )
293
 
294
- #################
295
- # Cohere Command R+
296
- #################
297
- with gr.Tab("Cohere Command R+"):
298
- cohere_input1 = gr.Textbox(label="입력1", lines=1)
299
- cohere_input2 = gr.Textbox(label="입력2", lines=1)
300
- cohere_input3 = gr.Textbox(label="입력3", lines=1)
301
- cohere_input4 = gr.Textbox(label="입력4", lines=1)
302
- cohere_input5 = gr.Textbox(label="입력5", lines=1)
303
-
304
- cohere_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
305
-
306
- with gr.Accordion("고급 설정 (Cohere)", open=False):
307
- cohere_system_message = gr.Textbox(
308
- value="""반드시 한글로 답변할 것.
309
- 너는 최고의 비서이다.
310
- 내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
311
- """,
312
- label="System Message",
313
- lines=3
314
- )
315
- cohere_max_tokens = gr.Slider(minimum=100, maximum=10000, value=4000, step=100, label="Max Tokens")
316
- cohere_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
317
- cohere_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
318
-
319
- cohere_submit_button = gr.Button("전송")
320
-
321
- def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
322
- question = " ".join([i1, i2, i3, i4, i5])
323
- return respond_cohere_qna(
324
- question=question,
325
- system_message=sys_msg,
326
- max_tokens=mt,
327
- temperature=temp,
328
- top_p=top_p_
329
- )
330
-
331
- cohere_submit_button.click(
332
- fn=merge_and_call_cohere,
333
- inputs=[
334
- cohere_input1, cohere_input2, cohere_input3, cohere_input4, cohere_input5,
335
- cohere_system_message,
336
- cohere_max_tokens,
337
- cohere_temperature,
338
- cohere_top_p
339
- ],
340
- outputs=cohere_answer_output
341
- )
342
-
343
- #################
344
- # ChatGPT 탭
345
- #################
346
- with gr.Tab("gpt-4o-mini"):
347
- chatgpt_input1 = gr.Textbox(label="입력1", lines=1)
348
- chatgpt_input2 = gr.Textbox(label="입력2", lines=1)
349
- chatgpt_input3 = gr.Textbox(label="입력3", lines=1)
350
- chatgpt_input4 = gr.Textbox(label="입력4", lines=1)
351
- chatgpt_input5 = gr.Textbox(label="입력5", lines=1)
352
-
353
- chatgpt_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
354
-
355
- with gr.Accordion("고급 설정 (ChatGPT)", open=False):
356
- chatgpt_system_message = gr.Textbox(
357
- value="""반드시 한글로 답변할 것.
358
- 너는 ChatGPT, OpenAI에서 개발한 언어 모델이다.
359
- 내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
360
- """,
361
- label="System Message",
362
- lines=3
363
- )
364
- chatgpt_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
365
- chatgpt_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
366
- chatgpt_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
367
-
368
- chatgpt_submit_button = gr.Button("전송")
369
-
370
- def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
371
- question = " ".join([i1, i2, i3, i4, i5])
372
- return respond_chatgpt_qna(
373
- question=question,
374
- system_message=sys_msg,
375
- max_tokens=mt,
376
- temperature=temp,
377
- top_p=top_p_
378
- )
379
-
380
- chatgpt_submit_button.click(
381
- fn=merge_and_call_chatgpt,
382
- inputs=[
383
- chatgpt_input1, chatgpt_input2, chatgpt_input3, chatgpt_input4, chatgpt_input5,
384
- chatgpt_system_message,
385
- chatgpt_max_tokens,
386
- chatgpt_temperature,
387
- chatgpt_top_p
388
- ],
389
- outputs=chatgpt_answer_output
390
- )
391
-
392
- #################
393
- # Claude 탭
394
- #################
395
- with gr.Tab("claude-3-haiku"):
396
- claude_input1 = gr.Textbox(label="입력1", lines=1)
397
- claude_input2 = gr.Textbox(label="입력2", lines=1)
398
- claude_input3 = gr.Textbox(label="입력3", lines=1)
399
- claude_input4 = gr.Textbox(label="입력4", lines=1)
400
- claude_input5 = gr.Textbox(label="입력5", lines=1)
401
-
402
- claude_answer_output = gr.Textbox(label="결과", interactive=False, lines=5)
403
-
404
- with gr.Accordion("고급 설정 (Claude)", open=False):
405
- claude_system_message = gr.Textbox(
406
- label="System Message",
407
- value="""반드시 한글로 답변할 것.
408
- 너는 Anthropic에서 개발한 클로드이다.
409
- 최대한 정확하고 친절하게 답변하라.""",
410
- lines=3
411
- )
412
- claude_max_tokens = gr.Slider(
413
- minimum=100,
414
- maximum=4000,
415
- value=2000,
416
- step=100,
417
- label="Max Tokens"
418
- )
419
- claude_temperature = gr.Slider(
420
- minimum=0.1,
421
- maximum=2.0,
422
- value=0.7,
423
- step=0.05,
424
- label="Temperature"
425
- )
426
- claude_top_p = gr.Slider(
427
- minimum=0.1,
428
- maximum=1.0,
429
- value=0.95,
430
- step=0.05,
431
- label="Top-p"
432
- )
433
-
434
- claude_submit_button = gr.Button("전송")
435
-
436
- def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
437
- question = " ".join([i1, i2, i3, i4, i5])
438
- return respond_claude_qna(
439
- question=question,
440
- system_message=sys_msg,
441
- max_tokens=mt,
442
- temperature=temp,
443
- top_p=top_p_
444
- )
445
-
446
- claude_submit_button.click(
447
- fn=merge_and_call_claude,
448
- inputs=[
449
- claude_input1, claude_input2, claude_input3, claude_input4, claude_input5,
450
- claude_system_message,
451
- claude_max_tokens,
452
- claude_temperature,
453
- claude_top_p
454
- ],
455
- outputs=claude_answer_output
456
- )
457
-
458
- #################
459
- # DeepSeek 탭
460
- #################
461
- with gr.Tab("DeepSeek-V3"):
462
- deepseek_input1 = gr.Textbox(label="입력1", lines=1)
463
- deepseek_input2 = gr.Textbox(label="입력2", lines=1)
464
- deepseek_input3 = gr.Textbox(label="입력3", lines=1)
465
- deepseek_input4 = gr.Textbox(label="입력4", lines=1)
466
- deepseek_input5 = gr.Textbox(label="입력5", lines=1)
467
-
468
- deepseek_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
469
 
470
- with gr.Accordion("고급 설정 (DeepSeek)", open=False):
471
- deepseek_system_message = gr.Textbox(
472
- value="""반드시 한글로 답변할 것.
473
- 너는 DeepSeek-V3, 최고의 언어 모델이다.
474
- 내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
475
- """,
476
- label="System Message",
477
- lines=3
478
- )
479
- deepseek_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
480
- deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
481
- deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
482
 
483
- deepseek_submit_button = gr.Button("전송")
 
484
 
485
- def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_):
486
- question = " ".join([i1, i2, i3, i4, i5])
487
- return respond_deepseek_qna(
488
- question=question,
489
- system_message=sys_msg,
490
- max_tokens=mt,
491
- temperature=temp,
492
- top_p=top_p_
493
- )
494
 
495
- deepseek_submit_button.click(
496
- fn=merge_and_call_deepseek,
497
- inputs=[
498
- deepseek_input1, deepseek_input2, deepseek_input3, deepseek_input4, deepseek_input5,
499
- deepseek_system_message,
500
- deepseek_max_tokens,
501
- deepseek_temperature,
502
- deepseek_top_p
503
- ],
504
- outputs=deepseek_answer_output
505
  )
506
 
507
  #############################
 
40
  return InferenceClient(model_id, token=hf_token)
41
 
42
 
43
+ def translate_text(text, source_lang, target_lang):
 
 
 
 
 
 
 
44
  """
45
+ 텍스트를 번역하는 함수.
46
  """
47
  try:
48
+ client = get_client("Zephyr 7B Beta") # 번역에 사용할 모델 선택
49
+ prompt = f"Translate the following text from {source_lang} to {target_lang}: {text}"
50
+ response = client.text_generation(prompt, max_new_tokens=500)
51
+ return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  except Exception as e:
53
  return f"오류가 발생했습니다: {str(e)}"
54
 
55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  #############################
57
  # [기본코드] UI 부분 - 수정/삭제 불가
58
  #############################
59
 
60
  with gr.Blocks() as demo:
61
+ gr.Markdown("# 번역기")
62
 
63
  #################
64
+ # 번역기
65
  #################
66
+ with gr.Tab("번역기"):
67
+ # 언어 선택
68
+ source_lang = gr.Radio(
69
+ choices=["한국어", "영어", "일본어", "중국어"],
70
+ label="원본 언어",
71
+ value="한국어"
72
  )
73
+ target_lang = gr.Radio(
74
+ choices=["한국어", "영어", "일본어", "중국어"],
75
+ label="목표 언어",
76
+ value="영어"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  )
78
 
79
+ # 입력 텍스트
80
+ input_text = gr.Textbox(label="번역할 텍스트", lines=5)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
+ # 번역 결과
83
+ translation_output = gr.Textbox(label="번역 결과", lines=5, interactive=False)
 
 
 
 
 
 
 
 
 
 
84
 
85
+ # 번역 버튼
86
+ translate_button = gr.Button("번역")
87
 
88
+ def translate(input_text, source_lang, target_lang):
89
+ return translate_text(input_text, source_lang, target_lang)
 
 
 
 
 
 
 
90
 
91
+ translate_button.click(
92
+ fn=translate,
93
+ inputs=[input_text, source_lang, target_lang],
94
+ outputs=translation_output
 
 
 
 
 
 
95
  )
96
 
97
  #############################