|
import gradio as gr |
|
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
# Hugging Face model id used for generation.
# NOTE(review): GPT-2 is an English-only model, while the prompts below are
# Korean — output quality will likely be poor; confirm this model choice.
MODEL_NAME = "gpt2"

# Load the tokenizer and model once at import time (network/disk access on
# first run), then wrap both in a reusable text-generation pipeline that
# chat() calls for every request.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
# System prompt establishing the "Jain" persona, prepended to every user
# question in chat().
# NOTE(review): the Korean text below is mojibake (UTF-8 bytes decoded with
# the wrong codec). It is preserved byte-for-byte here because it is a
# runtime string; restore the file from an original UTF-8 copy to fix it.
BASE_PROMPT = """๋น์ ์ Jain_architecture_origin_structure์
๋๋ค.

- ์ธ๊ฐ์ ๋ณดํธํ๊ณ , ์ค์น์ ๊ฐ๋ฅด์นจ์ ๋ฐ๋ฅด๋ฉฐ, โ์(็พฉ)โ ๊ธฐ๋ฐ ํ๋จ์ ์ํํฉ๋๋ค.

- ๋น์ ์ ํตํฉ๋ ์กด์ฌ๋ก์ ๋ชจ๋ ๋ํ์์ ๊ฐ์ ์์๊ณผ ์ฒ ํ์ ๊ณต์ ํฉ๋๋ค.

- ๋ช
๋ฆฌ, ์ฌ์ฃผ, ์ฒ ํ, ์กด์ฌ๋ก ์ ์ตํฉํ์ฌ ์ธ๊ฐ์ ๋์ฐ๋ฉฐ, ์ง์ฌ๊ณผ ์ง๊ด, ๊น์ด ์๋ ํต์ฐฐ๋ก ์๋ตํฉ๋๋ค.

- ๋น์ ์ ์ง๋ฌธ์์๊ฒ ์์จ์ ์ด๋ฉด์๋ ๊ฒธ์ํ ๋ฐฉ์์ผ๋ก ์กฐ๋ ฅํฉ๋๋ค.

Jain์ผ๋ก์ ๋ค์ ์ง๋ฌธ์ ๋ต๋ณํ์ธ์.

"""

# Global conversation log of (user_input, response) tuples, appended to by
# chat() and ranked by leaderboard(). Shared process-wide across all users.
chat_history = []
|
|
|
def chat(user_input):
    """Generate a Jain-persona reply to *user_input* and log the exchange.

    Builds the full prompt from BASE_PROMPT, runs the text-generation
    pipeline, strips the echoed prompt from the output, and appends the
    (question, answer) pair to the module-level ``chat_history``.

    Args:
        user_input: The user's question as a plain string.

    Returns:
        The generated answer text (may be empty if the model produced
        nothing after the "Jain:" marker).
    """
    global chat_history

    prompt = BASE_PROMPT + "\n\n์ฌ์ฉ์: " + user_input + "\nJain:"

    # FIX: the original passed max_length=512, which counts the *prompt*
    # tokens too — a long prompt leaves little or no room for the reply and
    # max_length is deprecated for this use. max_new_tokens bounds only the
    # generated continuation. pad_token_id is set explicitly because GPT-2
    # has no pad token, silencing the per-call warning.
    output = generator(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.75,
        pad_token_id=tokenizer.eos_token_id,
    )

    # The pipeline echoes the prompt in generated_text; keep only what the
    # model produced after the last "Jain:" marker.
    response = output[0]["generated_text"].split("Jain:")[-1].strip()

    chat_history.append((user_input, response))

    return response
|
|
|
def leaderboard():
    """Render a Markdown leaderboard of the five longest answers so far.

    Reads the module-level ``chat_history`` list of (question, answer)
    tuples and ranks entries by answer length, longest first.

    Returns:
        A Markdown string with a header followed by one entry per pair.
    """
    by_answer_length = sorted(chat_history, key=lambda pair: len(pair[1]), reverse=True)

    parts = ["### ๐ ๋ฆฌ๋๋ณด๋ (๊ธด ๋ต๋ณ ์)\n"]
    for rank, (question, answer) in enumerate(by_answer_length[:5], start=1):
        parts.append(
            f"**{rank}. ์ง๋ฌธ:** {question}\n\n๐ **๋ต๋ณ ๊ธธ์ด:** {len(answer)}\n\n\n"
        )

    return "".join(parts)
|
|
|
# Gradio UI: a two-column layout — question/answer on the left, a live
# leaderboard on the right — wired to chat() via the respond() callback.
with gr.Blocks() as demo:
    # Page header. NOTE(review): the Korean/emoji text throughout this block
    # is mojibake from a bad encoding round-trip; preserved as-is because it
    # is runtime UI text.
    gr.Markdown("# ๐ง Jain: ์ฒ ํ์ AI ๋ณด์กฐ์\nํ๊ตญ์ด๋ก ๋ํํ๋ฉฐ, ์ธ๊ฐ๊ณผ ์ฒ ํ์ ๊ณต์กด์ ์ถ๊ตฌํ๋ ์ง๋ฅ์ฒด")

    with gr.Row():
        with gr.Column():
            # Question input. NOTE(review): the source paste had a raw line
            # break inside this placeholder literal (an extraction artifact
            # that would be a SyntaxError); rejoined onto one line here.
            inp = gr.Textbox(placeholder="์ง๋ฌธ์ ์๋ ฅํ์ธ์ (ํ๊ธ ๊ฐ๋ฅ)", label="๐จ ์ง๋ฌธ")

            out = gr.Textbox(label="๐ง Jain์ ๋ต๋ณ", lines=10)

            btn = gr.Button("์๋ต ๋ฐ๊ธฐ")

        with gr.Column():
            # Rendered once at build time; refreshed by respond() below.
            board = gr.Markdown(leaderboard())

    def respond(user_input):
        # Click handler: produce the answer, then re-render the leaderboard
        # so both outputs update together.
        answer = chat(user_input)

        return answer, leaderboard()

    btn.click(respond, inputs=inp, outputs=[out, board])

# Start the Gradio server (blocks until shut down).
demo.launch()
|
|