Spaces:
Sleeping
Update app.py
app.py
CHANGED
@@ -1,79 +1,21 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-import openai
-import anthropic
 from typing import Optional
 
 #############################
 # [Base code] - do not modify or delete
 #############################
 
-# Models to remove are excluded from the MODELS dict
-MODELS = {
-    "Zephyr 7B Beta": "HuggingFaceH4/zephyr-7b-beta",
-    "Meta Llama 3.1 8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-    "Meta-Llama 3.1 70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-    "Microsoft": "microsoft/Phi-3-mini-4k-instruct",
-    "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
-    "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-    "Aya-23-35B": "CohereForAI/aya-23-35B"}
-
 # Define the Cohere Command R+ model ID
 COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
 
-def get_client(model_name, hf_token):
+def get_client(hf_token):
     """
-
-    Changed to use the hf_token value entered in the UI.
+    Create an InferenceClient for the Cohere Command R+ model.
     """
     if not hf_token:
         raise ValueError("HuggingFace API 토큰이 필요합니다.")
-
-    if model_name in MODELS:
-        model_id = MODELS[model_name]
-    elif model_name == "Cohere Command R+":
-        model_id = COHERE_MODEL
-    else:
-        raise ValueError("유효하지 않은 모델 이름입니다.")
-    return InferenceClient(model_id, token=hf_token)
-
-
-def respond_hf_qna(
-    question: str,
-    model_name: str,
-    max_tokens: int,
-    temperature: float,
-    top_p: float,
-    system_message: str,
-    hf_token: str
-):
-    """
-    Return the answer to a single question for a HuggingFace model (Zephyr, etc.).
-    """
-    try:
-        client = get_client(model_name, hf_token)
-    except ValueError as e:
-        return f"오류: {str(e)}"
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": question}
-    ]
-
-    try:
-        response = client.chat_completion(
-            messages,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-            stream=False,
-        )
-        assistant_message = response.choices[0].message.content
-        return assistant_message
-
-    except Exception as e:
-        return f"오류가 발생했습니다: {str(e)}"
-
+    return InferenceClient(COHERE_MODEL, token=hf_token)
 
 def respond_cohere_qna(
     question: str,
@@ -86,9 +28,8 @@ def respond_cohere_qna(
     """
     Return the answer to a single question using the Cohere Command R+ model.
     """
-    model_name = "Cohere Command R+"
     try:
-        client = get_client(model_name, hf_token)
+        client = get_client(hf_token)
     except ValueError as e:
         return f"오류: {str(e)}"
 
@@ -109,437 +50,52 @@ def respond_cohere_qna(
     except Exception as e:
         return f"오류가 발생했습니다: {str(e)}"
 
-
-def respond_chatgpt_qna(
-    question: str,
-    system_message: str,
-    max_tokens: int,
-    temperature: float,
-    top_p: float,
-    openai_token: str
-):
-    """
-    Return the answer to a single question using the ChatGPT (OpenAI) model.
-    """
-    if not openai_token:
-        return "OpenAI API 토큰이 필요합니다."
-
-    openai.api_key = openai_token
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": question}
-    ]
-
-    try:
-        response = openai.ChatCompletion.create(
-            model="gpt-4o-mini",  # change if needed
-            messages=messages,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-        )
-        assistant_message = response.choices[0].message['content']
-        return assistant_message
-    except Exception as e:
-        return f"오류가 발생했습니다: {str(e)}"
-
-
-def respond_deepseek_qna(
-    question: str,
-    system_message: str,
-    max_tokens: int,
-    temperature: float,
-    top_p: float,
-    deepseek_token: str
-):
-    """
-    Return the answer to a single question using the DeepSeek model.
-    """
-    if not deepseek_token:
-        return "DeepSeek API 토큰이 필요합니다."
-
-    openai.api_key = deepseek_token
-    openai.api_base = "https://api.deepseek.com/v1"
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": question}
-    ]
-
-    try:
-        response = openai.ChatCompletion.create(
-            model="deepseek-chat",
-            messages=messages,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-        )
-        assistant_message = response.choices[0].message['content']
-        return assistant_message
-    except Exception as e:
-        return f"오류가 발생했습니다: {str(e)}"
-
-
-def respond_claude_qna(
-    question: str,
-    system_message: str,
-    max_tokens: int,
-    temperature: float,
-    top_p: float,
-    claude_api_key: str
-) -> str:
-    """
-    Improved response generation using the Claude API.
-    """
-    if not claude_api_key:
-        return "Claude API 토큰이 필요합니다."
-
-    try:
-        client = anthropic.Anthropic(api_key=claude_api_key)
-
-        # Create the message
-        message = client.messages.create(
-            model="claude-3-haiku-20240307",
-            max_tokens=max_tokens,
-            temperature=temperature,
-            system=system_message,
-            messages=[
-                {
-                    "role": "user",
-                    "content": question
-                }
-            ]
-        )
-
-        return message.content[0].text
-
-    except anthropic.APIError as ae:
-        return f"Claude API 오류: {str(ae)}"
-    except anthropic.RateLimitError:
-        return "요청 한도를 초과했습니다. 잠시 후 다시 시도해주세요."
-    except Exception as e:
-        return f"예상치 못한 오류가 발생했습니다: {str(e)}"
-
-
 #############################
-# [
+# [UI section] - modified
 #############################
 
 with gr.Blocks() as demo:
-    gr.Markdown("#
-
-    #
-    [old lines 233-260 are not legible in the source view]
-    model_name = gr.Radio(
-        choices=list(MODELS.keys()),
-        label="Language Model (HuggingFace)",
-        value="Zephyr 7B Beta"
+    gr.Markdown("# 블로그 생성기")
+
+    # Tone selection radio buttons
+    tone_radio = gr.Radio(
+        choices=["친근한", "전문적인", "일반", "상품후기"],
+        label="말투바꾸기",
+        value="일반"
+    )
+
+    # Reference text inputs
+    reference1 = gr.Textbox(label="참조글1", lines=2)
+    reference2 = gr.Textbox(label="참조글2", lines=2)
+    reference3 = gr.Textbox(label="참조글3", lines=2)
+
+    # Generated blog post output
+    generated_blog = gr.Textbox(label="생성된 블로그 글", lines=10, interactive=False)
+
+    # Submit button
+    submit_button = gr.Button("생성")
+
+    def generate_blog(tone, ref1, ref2, ref3, hf_token):
+        # Combine the reference texts into the question
+        question = f"말투: {tone}\n참조글1: {ref1}\n참조글2: {ref2}\n참조글3: {ref3}"
+        system_message = "블로그 글을 생성해주세요. 주어진 참조글을 바탕으로 요청된 말투에 맞게 작성하세요."
+        return respond_cohere_qna(
+            question=question,
+            system_message=system_message,
+            max_tokens=1000,
+            temperature=0.7,
+            top_p=0.95,
+            hf_token=hf_token
     )
 
-    [old lines 267-271 are not legible in the source view]
-    input5 = gr.Textbox(label="입력5", lines=1)
-
-    # Result
-    answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
-
-    # Advanced settings - System Message moved above Max Tokens
-    with gr.Accordion("고급 설정 (일반 모델)", open=False):
-        system_message = gr.Textbox(
-            value="""반드시 한글로 답변할 것.
-너는 최고의 비서이다.
-내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
-""",
-            label="System Message",
-            lines=3
-        )
-        max_tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")
-        temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
-        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
-
-    submit_button = gr.Button("전송")
-
-    def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg, hf_token):
-        # Join inputs 1-5 with spaces to build the question
-        question = " ".join([i1, i2, i3, i4, i5])
-        return respond_hf_qna(
-            question=question,
-            model_name=m_name,
-            max_tokens=mt,
-            temperature=temp,
-            top_p=top_p_,
-            system_message=sys_msg,
-            hf_token=hf_token
-        )
-
-    submit_button.click(
-        fn=merge_and_call_hf,
-        inputs=[
-            input1, input2, input3, input4, input5,
-            model_name,
-            max_tokens,
-            temperature,
-            top_p,
-            system_message,
-            hf_token_box
-        ],
-        outputs=answer_output
-    )
-
-    #################
-    # Cohere Command R+ tab
-    #################
-    with gr.Tab("Cohere Command R+"):
-        cohere_input1 = gr.Textbox(label="입력1", lines=1)
-        cohere_input2 = gr.Textbox(label="입력2", lines=1)
-        cohere_input3 = gr.Textbox(label="입력3", lines=1)
-        cohere_input4 = gr.Textbox(label="입력4", lines=1)
-        cohere_input5 = gr.Textbox(label="입력5", lines=1)
-
-        cohere_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
-
-        with gr.Accordion("고급 설정 (Cohere)", open=False):
-            cohere_system_message = gr.Textbox(
-                value="""반드시 한글로 답변할 것.
-너는 최고의 비서이다.
-내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
-""",
-                label="System Message",
-                lines=3
-            )
-            cohere_max_tokens = gr.Slider(minimum=100, maximum=10000, value=4000, step=100, label="Max Tokens")
-            cohere_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
-            cohere_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
-
-        cohere_submit_button = gr.Button("전송")
-
-        def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, hf_token):
-            question = " ".join([i1, i2, i3, i4, i5])
-            return respond_cohere_qna(
-                question=question,
-                system_message=sys_msg,
-                max_tokens=mt,
-                temperature=temp,
-                top_p=top_p_,
-                hf_token=hf_token
-            )
-
-        cohere_submit_button.click(
-            fn=merge_and_call_cohere,
-            inputs=[
-                cohere_input1, cohere_input2, cohere_input3, cohere_input4, cohere_input5,
-                cohere_system_message,
-                cohere_max_tokens,
-                cohere_temperature,
-                cohere_top_p,
-                hf_token_box
-            ],
-            outputs=cohere_answer_output
-        )
-
-    #################
-    # ChatGPT tab
-    #################
-    with gr.Tab("gpt-4o-mini"):
-        chatgpt_input1 = gr.Textbox(label="입력1", lines=1)
-        chatgpt_input2 = gr.Textbox(label="입력2", lines=1)
-        chatgpt_input3 = gr.Textbox(label="입력3", lines=1)
-        chatgpt_input4 = gr.Textbox(label="입력4", lines=1)
-        chatgpt_input5 = gr.Textbox(label="입력5", lines=1)
-
-        chatgpt_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
-
-        with gr.Accordion("고급 설정 (ChatGPT)", open=False):
-            chatgpt_system_message = gr.Textbox(
-                value="""반드시 한글로 답변할 것.
-너는 ChatGPT, OpenAI에서 개발한 언어 모델이다.
-내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
-""",
-                label="System Message",
-                lines=3
-            )
-            chatgpt_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
-            chatgpt_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
-            chatgpt_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
-
-        chatgpt_submit_button = gr.Button("전송")
-
-        def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, openai_token):
-            question = " ".join([i1, i2, i3, i4, i5])
-            return respond_chatgpt_qna(
-                question=question,
-                system_message=sys_msg,
-                max_tokens=mt,
-                temperature=temp,
-                top_p=top_p_,
-                openai_token=openai_token
-            )
-
-        chatgpt_submit_button.click(
-            fn=merge_and_call_chatgpt,
-            inputs=[
-                chatgpt_input1, chatgpt_input2, chatgpt_input3, chatgpt_input4, chatgpt_input5,
-                chatgpt_system_message,
-                chatgpt_max_tokens,
-                chatgpt_temperature,
-                chatgpt_top_p,
-                openai_token_box
-            ],
-            outputs=chatgpt_answer_output
-        )
-
-    #################
-    # Claude tab
-    #################
-    with gr.Tab("claude-3-haiku"):
-        claude_input1 = gr.Textbox(label="입력1", lines=1)
-        claude_input2 = gr.Textbox(label="입력2", lines=1)
-        claude_input3 = gr.Textbox(label="입력3", lines=1)
-        claude_input4 = gr.Textbox(label="입력4", lines=1)
-        claude_input5 = gr.Textbox(label="입력5", lines=1)
-
-        claude_answer_output = gr.Textbox(label="결과", interactive=False, lines=5)
-
-        with gr.Accordion("고급 설정 (Claude)", open=False):
-            claude_system_message = gr.Textbox(
-                label="System Message",
-                value="""반드시 한글로 답변할 것.
-너는 Anthropic에서 개발한 클로드이다.
-최대한 정확하고 친절하게 답변하라.""",
-                lines=3
-            )
-            claude_max_tokens = gr.Slider(
-                minimum=100,
-                maximum=4000,
-                value=2000,
-                step=100,
-                label="Max Tokens"
-            )
-            claude_temperature = gr.Slider(
-                minimum=0.1,
-                maximum=2.0,
-                value=0.7,
-                step=0.05,
-                label="Temperature"
-            )
-            claude_top_p = gr.Slider(
-                minimum=0.1,
-                maximum=1.0,
-                value=0.95,
-                step=0.05,
-                label="Top-p"
-            )
-
-        claude_submit_button = gr.Button("전송")
-
-        def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, claude_key):
-            question = " ".join([i1, i2, i3, i4, i5])
-            return respond_claude_qna(
-                question=question,
-                system_message=sys_msg,
-                max_tokens=mt,
-                temperature=temp,
-                top_p=top_p_,
-                claude_api_key=claude_key
-            )
-
-        claude_submit_button.click(
-            fn=merge_and_call_claude,
-            inputs=[
-                claude_input1, claude_input2, claude_input3, claude_input4, claude_input5,
-                claude_system_message,
-                claude_max_tokens,
-                claude_temperature,
-                claude_top_p,
-                claude_token_box
-            ],
-            outputs=claude_answer_output
-        )
-
-    #################
-    # DeepSeek tab
-    #################
-    with gr.Tab("DeepSeek-V3"):
-        deepseek_input1 = gr.Textbox(label="입력1", lines=1)
-        deepseek_input2 = gr.Textbox(label="입력2", lines=1)
-        deepseek_input3 = gr.Textbox(label="입력3", lines=1)
-        deepseek_input4 = gr.Textbox(label="입력4", lines=1)
-        deepseek_input5 = gr.Textbox(label="입력5", lines=1)
-
-        deepseek_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
-
-        with gr.Accordion("고급 설정 (DeepSeek)", open=False):
-            deepseek_system_message = gr.Textbox(
-                value="""반드시 한글로 답변할 것.
-너는 DeepSeek-V3, 최고의 언어 모델이다.
-내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
-""",
-                label="System Message",
-                lines=3
-            )
-            deepseek_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
-            deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
-            deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
-
-        deepseek_submit_button = gr.Button("전송")
-
-        def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, deepseek_token):
-            question = " ".join([i1, i2, i3, i4, i5])
-            return respond_deepseek_qna(
-                question=question,
-                system_message=sys_msg,
-                max_tokens=mt,
-                temperature=temp,
-                top_p=top_p_,
-                deepseek_token=deepseek_token
-            )
-
-        deepseek_submit_button.click(
-            fn=merge_and_call_deepseek,
-            inputs=[
-                deepseek_input1, deepseek_input2, deepseek_input3, deepseek_input4, deepseek_input5,
-                deepseek_system_message,
-                deepseek_max_tokens,
-                deepseek_temperature,
-                deepseek_top_p,
-                deepseek_token_box
-            ],
-            outputs=deepseek_answer_output
-        )
+    submit_button.click(
+        fn=generate_blog,
+        inputs=[tone_radio, reference1, reference2, reference3],
+        outputs=generated_blog
+    )
 
 #############################
 # Main entry point
 #############################
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
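
Two notes on the new file, each with an illustrative sketch that is not part of the commit.

First, `generate_blog` takes five parameters (`tone, ref1, ref2, ref3, hf_token`), but the new `submit_button.click` passes only four inputs, and the rewritten UI defines no token field at all, so the handler would fail when the button is pressed. A minimal sketch of one way to close the gap, assuming a hypothetical `hf_token_box` textbox (the real `generate_blog` is stubbed here so the sketch runs standalone):

    import gradio as gr

    def generate_blog(tone, ref1, ref2, ref3, hf_token):
        # Stand-in for app.py's generate_blog, which calls respond_cohere_qna.
        return f"(tone={tone!r}, token {'set' if hf_token else 'missing'})"

    with gr.Blocks() as demo:
        gr.Markdown("# 블로그 생성기")
        tone_radio = gr.Radio(choices=["친근한", "전문적인", "일반", "상품후기"],
                              label="말투바꾸기", value="일반")
        reference1 = gr.Textbox(label="참조글1", lines=2)
        reference2 = gr.Textbox(label="참조글2", lines=2)
        reference3 = gr.Textbox(label="참조글3", lines=2)
        # Hypothetical field, absent from the commit: supplies the fifth argument.
        hf_token_box = gr.Textbox(label="HuggingFace Token", type="password")
        generated_blog = gr.Textbox(label="생성된 블로그 글", lines=10, interactive=False)
        submit_button = gr.Button("생성")

        submit_button.click(
            fn=generate_blog,
            # Five inputs to match generate_blog's five parameters.
            inputs=[tone_radio, reference1, reference2, reference3, hf_token_box],
            outputs=generated_blog,
        )

    if __name__ == "__main__":
        demo.launch()

Second, the body of the retained `respond_cohere_qna` is collapsed in this view. The removed `respond_hf_qna` does show the `chat_completion` call shape the file uses, and the same pattern against the retained `COHERE_MODEL` looks roughly like this (the token value is a placeholder):

    from huggingface_hub import InferenceClient

    # Same call shape as the removed respond_hf_qna, pointed at COHERE_MODEL.
    client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token="hf_...")
    response = client.chat_completion(
        [{"role": "system", "content": "반드시 한글로 답변할 것."},
         {"role": "user", "content": "참조글을 바탕으로 블로그 글을 작성하라."}],
        max_tokens=1000,
        temperature=0.7,
        top_p=0.95,
        stream=False,
    )
    print(response.choices[0].message.content)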