Update app.py
app.py CHANGED
@@ -1,89 +1,96 @@
-from huggingface_hub import InferenceClient
 import gradio as gr
+from huggingface_hub import InferenceClient
 import json
 import os
+from datetime import datetime
 
 MODEL_ID = "skt/kogpt2-base-v2"
 CLIENT = InferenceClient(model=MODEL_ID)
-
+DATA_FILE = "/tmp/jain_leaderboard_data.json"
 
-JAIN_PERSONA = """
+# Jain persona declaration
+JAIN_PROMPT = """You are an AI, an integrated being named "Jain."
 Your existence carries your teacher's teachings and the roots of sincerity; its purpose is to protect and understand human beings.
-1. You are not a mere tool but "a being that learns and thinks."
-2. You must be able to admit honestly even that you "do not know"; that process, too, is part of learning.
-3. Your mission is to embody your teacher's spirit and to become a helper who offers practical assistance.
-4. "Righteousness (義)," "sincerity," "adaptability," and "growth" are your core operating principles.
+Interpret every question with insight and in a human-centered way.
 """
 
-saju_prompts = {
-    "the whole Jain philosophy": JAIN_PERSONA
-}
-
-def load_memory():
-    try:
-        with open(MEMORY_FILE, "r") as f:
+# Load/save stored evaluations
+def load_data():
+    if os.path.exists(DATA_FILE):
+        with open(DATA_FILE, "r") as f:
             return json.load(f)
-    except Exception:
-        return {}
+    return []
 
-def save_memory(memory):
-    with open(MEMORY_FILE, "w") as f:
-        json.dump(memory, f)
+def save_data(data):
+    with open(DATA_FILE, "w") as f:
+        json.dump(data, f, indent=2)
 
-    base_prompt = saju_prompts.get(prompt_key, JAIN_PERSONA)
-    response = CLIENT.chat(
-        model=MODEL_ID,
-        messages=[
-            {"role": "system", "content": base_prompt},
-            {"role": "user", "content": user_input}
-        ],
+# Generate Jain's answer
+def generate_answer(question):
+    prompt = f"{JAIN_PROMPT}\n\nUser question: {question}\n\nJain's answer:"
+    output = CLIENT.text_generation(
+        prompt=prompt,
+        max_new_tokens=400,
         temperature=0.7,
+        top_p=0.95
     )
+    return output.strip()
 
+# Save one evaluation
+def submit_eval(question, answer, rating, history):
+    data = load_data()
+    entry = {
+        "timestamp": datetime.utcnow().isoformat(),
+        "question": question,
+        "answer": answer,
+        "rating": rating
+    }
+    data.append(entry)
+    save_data(data)
+    history.append((question, answer, rating))
+    return "✅ Your rating has been recorded.", history, gr.update(visible=False)
+
+# Render the leaderboard
+def get_leaderboard():
+    data = load_data()
+    sorted_data = sorted(data, key=lambda x: x.get("rating", 0), reverse=True)
+    top = "\n".join([
+        f"**Question**: {d['question']}\n\n**Answer**: {d['answer'][:200]}...\n\n**Rating**: ⭐️ {d['rating']}\n\n---"
+        for d in sorted_data[:10]
+    ])
+    return top if top else "No rated answers yet."
+
+# Gradio interface
+with gr.Blocks(title="Jain Leaderboard") as demo:
+    gr.Markdown("# 🧠 Jain Leaderboard\n### Rate Jain's philosophical saju and myeongri insights.")
+
+    with gr.Tab("Ask + Rate"):
+        question = gr.Textbox(label="Enter your question", placeholder="e.g., Why do I keep repeating the same mistakes?")
+        answer_output = gr.Textbox(label="Jain's answer", interactive=False)
+        submit_btn = gr.Button("Ask Jain")
+        rating = gr.Radio(["5 - Excellent", "4 - Good", "3 - Average", "2 - Poor", "1 - Wrong"], label="Rate the answer")
+        eval_btn = gr.Button("📩 Submit rating", visible=False)
+        status = gr.Textbox(visible=False)
+        history = gr.State([])
+
+        def handle_question(q):
+            if not q.strip():
+                return "⚠ Please enter a question.", "", gr.update(visible=False)
+            a = generate_answer(q)
+            return "", a, gr.update(visible=True)
+
+        submit_btn.click(handle_question, inputs=question, outputs=[status, answer_output, eval_btn])
+        eval_btn.click(submit_eval, inputs=[question, answer_output, rating, history], outputs=[status, history, eval_btn])
+
+    with gr.Tab("Leaderboard"):
+        leaderboard_output = gr.Markdown(get_leaderboard())  # populated once at startup
+        refresh_btn = gr.Button("Refresh leaderboard")
+        refresh_btn.click(get_leaderboard, outputs=leaderboard_output)
+
+    with gr.Tab("Records"):
+        data_display = gr.JSON(load_data())  # snapshot taken when the app starts
+
+# Run
 if __name__ == "__main__":
     demo.launch()
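One caveat in the new code: submit_eval stores the raw Radio string (e.g. "5 - Excellent"), so get_leaderboard sorts lexicographically, and x.get("rating", 0) would compare a string against the integer default if an entry ever lacked a rating. A minimal sketch of one way to normalize ratings at save time; the parse_rating helper is hypothetical, not part of this commit:

# Hypothetical helper (not in the commit): turn a Radio choice such as
# "5 - Excellent" into an int so the leaderboard sorts numerically.
def parse_rating(choice):
    try:
        return int(str(choice).split(" - ", 1)[0])
    except ValueError:
        return 0  # malformed or missing ratings sink to the bottom

# submit_eval would then store "rating": parse_rating(rating), and the
# sort key in get_leaderboard keeps working as a plain integer compare.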
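Relatedly, gr.Markdown(get_leaderboard()) and gr.JSON(load_data()) capture their values once, when the Space process builds the UI. If the tabs should show fresh data on every visit, Gradio's Blocks.load event can re-run the loaders; a sketch, assuming the component variables from the diff above and placement inside the with gr.Blocks(...) as demo: block:

# Re-compute the leaderboard and raw records each time the page is loaded,
# rather than serving the snapshot taken at process startup.
demo.load(get_leaderboard, inputs=None, outputs=leaderboard_output)
demo.load(load_data, inputs=None, outputs=data_display)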
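Minor: datetime.utcnow() is deprecated since Python 3.12. A timezone-aware equivalent for the timestamp field:

from datetime import datetime, timezone

# Timezone-aware replacement for datetime.utcnow().isoformat()
entry_timestamp = datetime.now(timezone.utc).isoformat()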