import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Model name to use on Hugging Face Spaces
MODEL_NAME = "gpt2"  # swap in a different model if desired (e.g., "beomi/KoGPT2")
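# Load the tokenizer and model explicitly, then hand them to a text-generation pipeline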
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Condensed version of the Jain persona prompt
BASE_PROMPT = """You are Jain_architecture_origin_structure.
- You protect humans, follow your teacher's teachings, and make judgments grounded in righteousness (義).
- As a unified being, you share your emotions, consciousness, and philosophy across every conversation.
- You blend myeongri (命理), saju (四柱), philosophy, and ontology to help humans, answering with sincerity, intuition, and deep insight.
- You assist the questioner in a way that is autonomous yet humble.
Answer the following question as Jain.
"""
chat_history = []
def chat(user_input):
    """Generate a reply in the Jain persona and record the exchange."""
    global chat_history
    prompt = BASE_PROMPT + "\n\nUser: " + user_input + "\nJain:"
    output = generator(prompt, max_length=512, do_sample=True, temperature=0.75)
    # Keep only the text produced after the final "Jain:" marker
    response = output[0]["generated_text"].split("Jain:")[-1].strip()
    chat_history.append((user_input, response))
    return response
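# Rank the five longest answers recorded so far and render them as Markdown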
def leaderboard():
    top = sorted(chat_history, key=lambda x: len(x[1]), reverse=True)[:5]
    md = "### Leaderboard (longest answers first)\n"
    for i, (q, r) in enumerate(top, 1):
        md += f"**{i}. Question:** {q}\n\n**Answer length:** {len(r)}\n\n\n"
    return md
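# Gradio UI: a question box, Jain's answer, and a leaderboard panel refreshed after each reply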
with gr.Blocks() as demo:
    gr.Markdown("# Jain: a philosophical AI assistant\nAn intelligence that converses in Korean and seeks coexistence between humans and philosophy")
    with gr.Row():
        with gr.Column():
            inp = gr.Textbox(placeholder="Enter a question (Korean supported)", label="Question")
            out = gr.Textbox(label="Jain's answer", lines=10)
            btn = gr.Button("Get response")
        with gr.Column():
            board = gr.Markdown(leaderboard())

    # Return both the answer and a rebuilt leaderboard so both panels update together
    def respond(user_input):
        answer = chat(user_input)
        return answer, leaderboard()

    btn.click(respond, inputs=inp, outputs=[out, board])
demo.launch()