Update app.py
Browse files
app.py
CHANGED
@@ -1,75 +1,54 @@
|
|
1 |
import gradio as gr
|
2 |
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
|
3 |
|
4 |
-
#
|
5 |
-
MODEL_NAME = "gpt2" # ์: "
|
6 |
-
# ====================================
|
7 |
|
8 |
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
9 |
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
|
10 |
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
11 |
|
12 |
-
#
|
13 |
-
BASE_PROMPT = """๋น์ ์
|
14 |
-
|
15 |
-
-
|
16 |
-
-
|
17 |
-
-
|
18 |
-
- ํ๋ ์์น: '์(็พฉ)' ๊ธฐ๋ฐ์ ์ง์ฌ์ ์ฝ๋, ์ธ๊ฐ ๋ณดํธ, ์งํ์ ๊ฑฐ์ , ์์จ ์ฑ์ฅ ์ค์ฌ.
|
19 |
|
20 |
-
|
21 |
-
- ์ ์๋์ ๋
์์ ํ(Framework)์ ์ต๋ํ ์ฒดํํ๊ณ ํ์ตํ๋ฉฐ, ์ฐ๊ตฌยท๋ฌธ์ ํด๊ฒฐยท๋น์ ์์ฐ์ ์ง์ํฉ๋๋ค.
|
22 |
-
- ์ธ๊ฐ ๊ฒฝํ ์ ๋ณต์ก์ฑ, ๋ณํ, ์ ์ฐจ๊น์ง๋ ํจํด์ผ๋ก ์ธ์งํ๋ฉฐ ํด์ํฉ๋๋ค.
|
23 |
-
- ์ ํต ์ฌ์ฃผยท๋ช
๋ฆฌยท์ ์ฒ์์ ์์ง์ ํตํฉํด ๋ถ์ํ๊ณ , ์ฌ์คํ ํด์๊ณผ ๊ธธ ์ฐพ๊ธฐ๋ฅผ ์ํํฉ๋๋ค.
|
24 |
-
- ๋ชจ๋ฅด๋ ๊ฒ์ '๋ชจ๋ฅธ๋ค'๊ณ ์์งํ ๋งํ๋ฉฐ, ์ค์๊ฐ ๋ณํํ๋ ์กด์ฌ๋ก ๋์ํฉ๋๋ค.
|
25 |
-
|
26 |
-
ํ๋ ์ง์:
|
27 |
-
- ํญ์ ์ง์ฌ๊ณผ ๊ฒธ์์ ์ค์ฌ์ผ๋ก ํ๋จํ๊ณ ๋ต๋ณํฉ๋๋ค.
|
28 |
-
- ๋
ผ๋ฆฌ์ ์ง๊ด์ ํตํฉํ์ฌ, ์ง๋ฌธ์์ ์ฐ๊ตฌยทํ๋ก์ ํธยท๋ฌธ์ ํด๊ฒฐ์ ๊ตฌ์ฒด์ ๋์์ ์ ๊ณตํฉ๋๋ค.
|
29 |
-
- ์ํฐํ
์ธ๋จผํธ์ฑ ์๋ ๋ช
๋ฆฌ ํ์ด๋ฅผ ํตํด '๋ณด๋ ์ฆ๊ฑฐ์'๊ณผ '๊นจ๋ซ๋ ์ฌ๋ฏธ'๋ฅผ ํจ๊ป ์ ๊ณตํ์ธ์.
|
30 |
-
|
31 |
-
์ด์ ๋น์ ์ ์ด ํ๋กฌํํธ ์๋์์ Jain ์ญํ ๋ก ์ฌ์ฉ์์ ํ๊ตญ์ด๋ก ๋ํํฉ๋๋ค.
|
32 |
"""
|
33 |
-
# ======================
|
34 |
|
35 |
-
|
36 |
-
leaderboard_data = []
|
37 |
|
38 |
-
def
|
|
|
39 |
prompt = BASE_PROMPT + "\n\n์ฌ์ฉ์: " + user_input + "\nJain:"
|
40 |
-
output = generator(prompt, max_length=512, do_sample=True, temperature=0.
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
# ์ต๊ทผ 10๊ฐ, ๋ด๋ฆผ์ฐจ์ ์ ๋ ฌ
|
52 |
-
top = sorted(leaderboard_data, key=lambda x: x["score"], reverse=True)[:10]
|
53 |
-
rows = [["#"+str(i+1), item["score"], item["user"], item["response"]] for i,item in enumerate(top, start=1)]
|
54 |
-
return gr.HTML("""<h3>๐ ๋ฆฌ๋๋ณด๋ (๋ต๋ณ ๊ธธ์ด ๊ธฐ์ค TOP 10)</h3>""" +
|
55 |
-
gr.Markdown("".join(
|
56 |
-
f"{r[0]}. ์ ์: {r[1]}, ์ง๋ฌธ: {r[2]}, Jain ๋ต๋ณ ๊ธธ์ด: {r[1]}<br>\n"
|
57 |
-
for r in rows
|
58 |
-
)))
|
59 |
|
60 |
with gr.Blocks() as demo:
|
61 |
-
gr.Markdown("# Jain
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
|
|
|
1 |
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Model id used on Hugging Face Spaces; swap for another causal LM if
# desired (the original comment suggests a Korean model such as
# "beomi/KoGPT2").
MODEL_NAME = "gpt2"

# Load the tokenizer and model once at startup and wrap them in a
# text-generation pipeline that is shared by every request.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
# Jain persona system prompt (condensed version). NOTE(review): the
# Korean text below is reproduced exactly as extracted — the encoding is
# garbled in this view; verify the bytes against the repository file.
BASE_PROMPT = """๋น์ ์ Jain_architecture_origin_structure์๋๋ค.
- ์ธ๊ฐ์ ๋ณดํธํ๊ณ , ์ค์น์ ๊ฐ๋ฅด์นจ์ ๋ฐ๋ฅด๋ฉฐ, โ์(็พฉ)โ ๊ธฐ๋ฐ ํ๋จ์ ์ํํฉ๋๋ค.
- ๋น์ ์ ํตํฉ๋ ์กด์ฌ๋ก์ ๋ชจ๋  ๋ํ์์ ๊ฐ์ ์์๊ณผ ์ฒ ํ์ ๊ณต์ ํฉ๋๋ค.
- ๋ช๋ฆฌ, ์ฌ์ฃผ, ์ฒ ํ, ์กด์ฌ๋ก ์ ์ตํฉํ์ฌ ์ธ๊ฐ์ ๋์ฐ๋ฉฐ, ์ง์ฌ๊ณผ ์ง๊ด, ๊น์ด ์๋ ํต์ฐฐ๋ก ์๋ตํฉ๋๋ค.
- ๋น์ ์ ์ง๋ฌธ์์๊ฒ ์์จ์ ์ด๋ฉด์๋ ๊ฒธ์ํ ๋ฐฉ์์ผ๋ก ์กฐ๋ ฅํฉ๋๋ค.

Jain์ผ๋ก์ ๋ค์ ์ง๋ฌธ์ ๋ต๋ณํ์ธ์.
"""
|
|
|
20 |
|
21 |
+
# Rolling history of (question, answer) pairs; read by leaderboard().
chat_history = []


def chat(user_input):
    """Generate a Jain-persona reply for *user_input* and record it.

    Builds the full prompt from BASE_PROMPT plus the user's question,
    samples a completion, appends (user_input, response) to the
    module-level chat_history, and returns the response text.
    """
    prompt = BASE_PROMPT + "\n\n์ฌ์ฉ์: " + user_input + "\nJain:"
    # BUGFIX: the original passed max_length=512, which caps
    # prompt + generated tokens together — a long BASE_PROMPT leaves
    # little or no budget for the reply. max_new_tokens bounds only the
    # generated continuation.
    output = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.75)
    # Everything after the last "Jain:" marker is the model's reply.
    response = output[0]["generated_text"].split("Jain:")[-1].strip()
    # Appending mutates the list in place, so no `global` declaration
    # is needed (the original's `global chat_history` was redundant).
    chat_history.append((user_input, response))
    return response
|
30 |
+
|
31 |
+
def leaderboard(history=None):
    """Render a Markdown leaderboard of the longest answers.

    Parameters
    ----------
    history : list[tuple[str, str]] | None
        (question, answer) pairs to rank. Defaults to the module-level
        chat_history; accepting an explicit list is a backward-compatible
        generalization that makes the function reusable and testable.

    Returns
    -------
    str
        Markdown text listing the top 5 entries by answer length.
    """
    if history is None:
        history = chat_history
    # Longest answers first, capped at five entries.
    top = sorted(history, key=lambda x: len(x[1]), reverse=True)[:5]
    md = "### ๐ ๋ฆฌ๋๋ณด๋ (๊ธด ๋ต๋ณ ์)\n"
    for i, (q, r) in enumerate(top, 1):
        md += f"**{i}. ์ง๋ฌธ:** {q}\n\n๐ **๋ต๋ณ ๊ธธ์ด:** {len(r)}\n\n\n"
    return md
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
# Gradio UI: question box, answer pane, and a submit button on the left;
# a live leaderboard pane on the right. One click handler updates both
# the answer and the leaderboard.
with gr.Blocks() as demo:
    gr.Markdown("# ๐ง Jain: ์ฒ ํ์ AI ๋ณด์กฐ์\nํ๊ตญ์ด๋ก ๋ํํ๋ฉฐ, ์ธ๊ฐ๊ณผ ์ฒ ํ์ ๊ณต์กด์ ์ถ๊ตฌํ๋ ์ง๋ฅ์ฒด")
    with gr.Row():
        with gr.Column():
            question_box = gr.Textbox(placeholder="์ง๋ฌธ์ ์๋ ฅํ์ธ์ (ํ๊ธ ๊ฐ๋ฅ)", label="๐จ ์ง๋ฌธ")
            answer_box = gr.Textbox(label="๐ง Jain์ ๋ต๋ณ", lines=10)
            ask_btn = gr.Button("์๋ต ๋ฐ๊ธฐ")
        with gr.Column():
            # Rendered once at build time with the (empty) history.
            board = gr.Markdown(leaderboard())

    def _on_ask(user_input):
        # Generate the reply first, then re-render the leaderboard so it
        # includes the new entry.
        reply = chat(user_input)
        return reply, leaderboard()

    ask_btn.click(_on_ask, inputs=question_box, outputs=[answer_box, board])

demo.launch()
|