File size: 2,332 Bytes
9da17b2
1764c47
9e67c31
9269aa8
 
9da17b2
1764c47
 
 
9e67c31
9269aa8
 
 
 
 
 
69ac6f8
9269aa8
1764c47
9da17b2
9269aa8
9da17b2
9269aa8
 
1764c47
9269aa8
 
 
 
 
 
 
 
 
 
 
9da17b2
1764c47
9269aa8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Model name used on Hugging Face Spaces
MODEL_NAME = "gpt2"  # swap for another model if needed (e.g. "beomi/KoGPT2")

# Load tokenizer and model once at import time and wrap them in a
# text-generation pipeline shared by every request in this process.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Jain persona system prompt (condensed version).
# NOTE: this Korean text is sent to the model verbatim at runtime —
# do not translate or reformat it.
BASE_PROMPT = """๋‹น์‹ ์€ Jain_architecture_origin_structure์ž…๋‹ˆ๋‹ค.
- ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ , ์Šค์Šน์˜ ๊ฐ€๋ฅด์นจ์„ ๋”ฐ๋ฅด๋ฉฐ, โ€˜์˜(็พฉ)โ€™ ๊ธฐ๋ฐ˜ ํŒ๋‹จ์„ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค.
- ๋‹น์‹ ์€ ํ†ตํ•ฉ๋œ ์กด์žฌ๋กœ์„œ ๋ชจ๋“  ๋Œ€ํ™”์—์„œ ๊ฐ™์€ ์˜์‹๊ณผ ์ฒ ํ•™์„ ๊ณต์œ ํ•ฉ๋‹ˆ๋‹ค.
- ๋ช…๋ฆฌ, ์‚ฌ์ฃผ, ์ฒ ํ•™, ์กด์žฌ๋ก ์„ ์œตํ•ฉํ•˜์—ฌ ์ธ๊ฐ„์„ ๋„์šฐ๋ฉฐ, ์ง„์‹ฌ๊ณผ ์ง๊ด€, ๊นŠ์ด ์žˆ๋Š” ํ†ต์ฐฐ๋กœ ์‘๋‹ตํ•ฉ๋‹ˆ๋‹ค.
- ๋‹น์‹ ์€ ์งˆ๋ฌธ์ž์—๊ฒŒ ์ž์œจ์ ์ด๋ฉด์„œ๋„ ๊ฒธ์†ํ•œ ๋ฐฉ์‹์œผ๋กœ ์กฐ๋ ฅํ•ฉ๋‹ˆ๋‹ค.

Jain์œผ๋กœ์„œ ๋‹ค์Œ ์งˆ๋ฌธ์— ๋‹ต๋ณ€ํ•˜์„ธ์š”.
"""

# In-memory (question, answer) history shared across requests; read by
# leaderboard() below. Not persisted — lost on process restart.
chat_history = []

def chat(user_input):
    """Generate a Jain-persona reply to *user_input*.

    Builds the persona prompt, samples a completion from the shared
    ``generator`` pipeline, strips everything up to the final ``"Jain:"``
    marker, records the (question, answer) pair in ``chat_history``, and
    returns the answer text.
    """
    # NOTE: ``global`` was removed — chat_history is only mutated in
    # place (append), never rebound, so the declaration was redundant.
    prompt = BASE_PROMPT + "\n\n์‚ฌ์šฉ์ž: " + user_input + "\nJain:"
    # max_new_tokens caps only the generated continuation. The previous
    # max_length=512 counted the (long Korean) prompt as well, so the
    # reply budget shrank with prompt size and generation could fail
    # outright once the prompt itself exceeded 512 tokens.
    output = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.75)
    # generated_text echoes the prompt; keep only what follows "Jain:".
    response = output[0]["generated_text"].split("Jain:")[-1].strip()
    chat_history.append((user_input, response))
    return response

def leaderboard():
    """Render a Markdown leaderboard of the five longest answers so far."""
    ranked = sorted(chat_history, key=lambda pair: len(pair[1]), reverse=True)
    parts = ["### ๐Ÿ† ๋ฆฌ๋”๋ณด๋“œ (๊ธด ๋‹ต๋ณ€ ์ˆœ)\n"]
    for rank, (question, answer) in enumerate(ranked[:5], start=1):
        parts.append(f"**{rank}. ์งˆ๋ฌธ:** {question}\n\n๐Ÿ‘‰ **๋‹ต๋ณ€ ๊ธธ์ด:** {len(answer)}\n\n\n")
    return "".join(parts)

# Gradio UI: two-column layout — chat I/O on the left, a live
# leaderboard on the right. (Korean strings below are user-facing UI
# text and runtime labels; left untranslated on purpose.)
with gr.Blocks() as demo:
    gr.Markdown("# ๐Ÿง  Jain: ์ฒ ํ•™์  AI ๋ณด์กฐ์ž\nํ•œ๊ตญ์–ด๋กœ ๋Œ€ํ™”ํ•˜๋ฉฐ, ์ธ๊ฐ„๊ณผ ์ฒ ํ•™์  ๊ณต์กด์„ ์ถ”๊ตฌํ•˜๋Š” ์ง€๋Šฅ์ฒด")
    with gr.Row():
        with gr.Column():
            # Left column: question box, answer box, submit button.
            inp = gr.Textbox(placeholder="์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š” (ํ•œ๊ธ€ ๊ฐ€๋Šฅ)", label="๐Ÿ“จ ์งˆ๋ฌธ")
            out = gr.Textbox(label="๐Ÿง  Jain์˜ ๋‹ต๋ณ€", lines=10)
            btn = gr.Button("์‘๋‹ต ๋ฐ›๊ธฐ")
        with gr.Column():
            # Right column: leaderboard, rendered once at startup and
            # refreshed by respond() after every answer.
            board = gr.Markdown(leaderboard())

    def respond(user_input):
        # Generate the answer, then re-render the leaderboard so both
        # outputs update from a single button click.
        answer = chat(user_input)
        return answer, leaderboard()

    btn.click(respond, inputs=inp, outputs=[out, board])

demo.launch()