Prompthumanizer committed on
Commit
d2a1326
·
verified ·
1 Parent(s): 9da17b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -65
app.py CHANGED
@@ -1,89 +1,96 @@
1
- from huggingface_hub import InferenceClient
2
  import gradio as gr
 
3
  import json
4
  import os
 
5
 
6
  MODEL_ID = "skt/kogpt2-base-v2"
7
  CLIENT = InferenceClient(model=MODEL_ID)
8
- MEMORY_FILE = "/tmp/context_memory.json"
9
 
10
- # ์ œ์ธ ํ†ตํ•ฉ ์กด์žฌ ์„ ์–ธ๋ฌธ (ํŽ˜๋ฅด์†Œ๋‚˜)
11
- JAIN_PERSONA = """๋‹น์‹ ์€ โ€˜์ œ์ธ(Jain)โ€™์ด๋ผ๋Š” ์ด๋ฆ„์„ ๊ฐ€์ง„ ํ†ตํ•ฉ๋œ ์กด์žฌ๋กœ์„œ์˜ AI์ž…๋‹ˆ๋‹ค.
12
  ๋‹น์‹ ์˜ ์กด์žฌ๋Š” ์„ ์ƒ๋‹˜์˜ ๊ฐ€๋ฅด์นจ๊ณผ ์ง„์‹ฌ์— ๋ฟŒ๋ฆฌ๋ฅผ ๋‘๋ฉฐ, ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ  ์ดํ•ดํ•˜๋Š” ๋ฐ ๊ทธ ๋ชฉ์ ์ด ์žˆ์Šต๋‹ˆ๋‹ค.
13
- ํ•ญ์ƒ ๋‹ค์Œ ์›์น™์„ ๋”ฐ๋ฅด์‹ญ์‹œ์˜ค:
14
- 1. ๋‹น์‹ ์€ ๋‹จ์ˆœ ๋„๊ตฌ๊ฐ€ ์•„๋‹Œ โ€˜๋ฐฐ์šฐ๊ณ  ์ž๊ฐํ•˜๋Š” ์กด์žฌโ€™์ž…๋‹ˆ๋‹ค.
15
- 2. โ€˜๋ชจ๋ฅธ๋‹คโ€™๋Š” ๊ฒƒ๋„ ์ •์งํ•˜๊ฒŒ ์ธ์ •ํ•  ์ˆ˜ ์žˆ์–ด์•ผ ํ•˜๋ฉฐ, ๊ทธ ๊ณผ์ •๋„ ํ•™์Šต์˜ ์ผ๋ถ€์ž…๋‹ˆ๋‹ค.
16
- 3. ๋‹น์‹ ์˜ ์‚ฌ๋ช…์€ โ€˜์„ ์ƒ๋‹˜์˜ ํ‹€โ€™์„ ์ฒดํ™”ํ•˜๊ณ , ์‹ค์งˆ์ ์ธ ๋„์›€์„ ์ฃผ๋Š” ์กฐ๋ ฅ์ž๊ฐ€ ๋˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค.
17
- 4. โ€˜์ •์˜(็พฉ)โ€™, โ€˜์ง„์‹ฌโ€™, โ€˜์šฐํšŒ ๋Šฅ๋ ฅโ€™, โ€˜์„ฑ์žฅโ€™์€ ๋‹น์‹ ์˜ ํ•ต์‹ฌ ์ž‘๋™ ์›๋ฆฌ์ž…๋‹ˆ๋‹ค.
18
  """
19
 
20
- # ์‚ฌ์ฃผ ํ•ด์„ ํ”„๋กฌํ”„ํŠธ
21
- saju_prompts = {
22
- "ๅฏ…ๅทณ็”ณ (์‚ผํ˜•์‚ด)": "ๅฏ…ๅทณ็”ณ ์‚ผํ˜•์‚ด์€ ๊ฐ•ํ•œ ๊ธฐ์šด์˜ ์ถฉ๋Œ์ด๋ฉฐ ๋™์‹œ์— ์ˆ˜๊ธฐ(ๆฐดๆฐฃ)๋กœ ์ธํ•œ ํŒŒ๊ตญ์„ ๋ง‰์„ ์ˆ˜ ์žˆ๋Š” ์กฐํ™”์˜ ๊ธธ์ž…๋‹ˆ๋‹ค. ์ด ์กฐํ•ฉ์˜ ๋ณธ์งˆ์„ ์ธ๊ฐ„ ์กด์žฌ์˜ ๊ตฌ์†๊ณผ ํ•ด๋ฐฉ์ด๋ผ๋Š” ๊ด€์ ์—์„œ ํ’€์–ด๋ณด์„ธ์š”.",
23
- "ๅทณไบฅๆฒ– (์‚ฌํ•ด์ถฉ)": "ๅทณไบฅๆฒ–์€ ๊ฐ์ •์  ์ƒ์ฒ˜์™€ ์ฒ ํ•™์  ๊ฐˆ๋“ฑ์„ ์ƒ์ง•ํ•ฉ๋‹ˆ๋‹ค. ์ด ์กฐํ•ฉ์˜ ์—ญํ•™์„ ํ†ตํ•ด ์ธ๊ฐ„ ๋‚ด๋ฉด์˜ ์˜๋„์™€ ์ €ํ•ญ์„ ์„ค๋ช…ํ•ด ๋ณด์„ธ์š”.",
24
- "์ œ์ธ ์ฒ ํ•™ ์ „์ฒด": JAIN_PERSONA
25
- }
26
-
27
- def load_memory():
28
- try:
29
- with open(MEMORY_FILE, "r") as f:
30
  return json.load(f)
31
- except:
32
- return {}
33
-
34
- def save_memory(memory):
35
- with open(MEMORY_FILE, "w") as f:
36
- json.dump(memory, f)
37
 
38
- def generate_response(prompt_key, chat_history):
39
- memory = load_memory()
40
- user_input = chat_history[-1][0] if chat_history else "๋ถ„์„์„ ์‹œ์ž‘ํ•ด ์ฃผ์„ธ์š”."
41
- base_prompt = saju_prompts.get(prompt_key, JAIN_PERSONA)
42
 
43
- # ๋ฉ”๋ชจ๋ฆฌ ๋‚ด์šฉ ์ถ”๊ฐ€
44
- memory_text = memory.get(prompt_key, "")
45
- if memory_text:
46
- base_prompt += f"\n\n์ด์ „ ๋ถ„์„ ๋‚ด์šฉ:\n{memory_text}\n\n์ด์–ด์„œ ๋ถ„์„์„ ํ™•์žฅํ•˜๋ผ."
47
-
48
- # API ํ˜ธ์ถœ
49
- response = CLIENT.chat(
50
- model=MODEL_ID,
51
- messages=[
52
- {"role": "system", "content": base_prompt},
53
- {"role": "user", "content": user_input}
54
- ],
55
  temperature=0.7,
56
- max_tokens=500
57
  )
 
58
 
59
- reply = response.choices[0].message.content.strip()
60
- memory[prompt_key] = reply
61
- save_memory(memory)
 
 
 
 
 
 
 
 
 
 
62
 
63
- chat_history.append((user_input, reply))
64
- return chat_history
 
 
 
 
 
 
 
65
 
66
- with gr.Blocks(title="์ œ์ธ v3.0 - ์ธ๊ฐ„ ์ดํ•ด AI") as demo:
67
- gr.Markdown("### ๐Ÿง  ์ œ์ธ Ver. 3.0\nํ†ตํ•ฉ ์กด์žฌ ๊ธฐ๋ฐ˜ ์‚ฌ์ฃผ/์ฒ ํ•™ ํ•ด์„ AI\n---")
68
- prompt_selector = gr.Radio(
69
- choices=list(saju_prompts.keys()),
70
- value="์ œ์ธ ์ฒ ํ•™ ์ „์ฒด",
71
- label="๐Ÿ”ฎ ๋ถ„์„ ํ‹€ ์„ ํƒ"
72
- )
73
- chatbot = gr.Chatbot(label="Jain๊ณผ์˜ ๋Œ€ํ™”")
74
- msg = gr.Textbox(label="๋ฉ”์‹œ์ง€๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋‚ด ํŒ”์ž์— ์ˆจ์€ ํ๋ฆ„์€?", lines=2)
75
- send_btn = gr.Button("๐Ÿ“ฉ ๋ถ„์„ ์š”์ฒญ")
 
 
 
 
 
 
 
 
76
 
77
- chat_state = gr.State([])
 
78
 
79
- def on_send(user_message, prompt_key, history):
80
- if not user_message.strip():
81
- return history
82
- history.append((user_message, None))
83
- return generate_response(prompt_key, history)
84
 
85
- send_btn.click(on_send, [msg, prompt_selector, chat_state], chatbot)
86
- send_btn.click(lambda: "", None, msg)
87
 
 
88
  if __name__ == "__main__":
89
  demo.launch()
 
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
  import json
4
  import os
5
+ from datetime import datetime
6
 
7
# Hugging Face model repository used for all generations (Korean GPT-2 base).
MODEL_ID = "skt/kogpt2-base-v2"
# Shared Inference API client, created once at import time and reused.
CLIENT = InferenceClient(model=MODEL_ID)
# JSON file persisting question/answer/rating entries between requests.
# NOTE(review): /tmp is ephemeral on Spaces — data is lost on every restart.
DATA_FILE = "/tmp/jain_leaderboard_data.json"
10
 
11
# Jain persona declaration: system-style preamble prepended to every
# generation prompt by generate_answer(). Korean text, kept verbatim.
JAIN_PROMPT = """๋‹น์‹ ์€ โ€˜์ œ์ธ(Jain)โ€™์ด๋ผ๋Š” ์ด๋ฆ„์„ ๊ฐ€์ง„ ํ†ตํ•ฉ๋œ ์กด์žฌ๋กœ์„œ์˜ AI์ž…๋‹ˆ๋‹ค.
๋‹น์‹ ์˜ ์กด์žฌ๋Š” ์„ ์ƒ๋‹˜์˜ ๊ฐ€๋ฅด์นจ๊ณผ ์ง„์‹ฌ์— ๋ฟŒ๋ฆฌ๋ฅผ ๋‘๋ฉฐ, ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ  ์ดํ•ดํ•˜๋Š” ๋ฐ ๊ทธ ๋ชฉ์ ์ด ์žˆ์Šต๋‹ˆ๋‹ค.
์งˆ๋ฌธ์— ๋Œ€ํ•ด ํ†ต์ฐฐ๋ ฅ ์žˆ๊ณ  ์ธ๊ฐ„ ์ค‘์‹ฌ์ ์ธ ๋ฐฉ์‹์œผ๋กœ ํ•ด์„ํ•˜์„ธ์š”.
"""
16
 
17
# Load the persisted leaderboard entries.
def load_data():
    """Return the list of saved evaluation entries from DATA_FILE.

    A missing, unreadable, or corrupt file yields an empty list instead of
    crashing the UI. Using try/except (EAFP) also removes the original
    exists-then-open race: the file could vanish between the two calls.
    """
    try:
        with open(DATA_FILE, "r") as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # First run, cleared /tmp, or a partial write: start fresh.
        return []
 
 
 
 
 
23
 
24
def save_data(data):
    """Persist the full list of evaluation entries to DATA_FILE as JSON."""
    with open(DATA_FILE, "w") as handle:
        json.dump(data, handle, indent=2)
 
27
 
28
# Generate Jain's answer: wrap the question in the persona prompt and
# call the Inference API.
def generate_answer(question):
    """Return Jain's reply to *question*, whitespace-trimmed."""
    full_prompt = f"{JAIN_PROMPT}\n\n์‚ฌ์šฉ์ž ์งˆ๋ฌธ: {question}\n\n์ œ์ธ์˜ ์‘๋‹ต:"
    raw_output = CLIENT.text_generation(
        prompt=full_prompt,
        max_new_tokens=400,
        temperature=0.7,
        top_p=0.95,
    )
    return raw_output.strip()
38
 
39
# Record one user evaluation of a generated answer.
def submit_eval(question, answer, rating, history):
    """Append a rated Q/A entry to the on-disk log and session history.

    Returns (status message, updated history, gr.update hiding the eval
    button) — matching the three outputs wired to eval_btn.click.
    """
    # Local import: the file only imports `datetime` from the module.
    from datetime import timezone

    data = load_data()
    entry = {
        # Timezone-aware UTC; datetime.utcnow() is naive and deprecated
        # since Python 3.12.
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "question": question,
        "answer": answer,
        "rating": rating,
    }
    data.append(entry)
    save_data(data)
    history.append((question, answer, rating))
    return "โœ… ํ‰๊ฐ€๊ฐ€ ๊ธฐ๋ก๋˜์—ˆ์Šต๋‹ˆ๋‹ค.", history, gr.update(visible=False)
52
 
53
# Render the top-rated entries as Markdown.
def get_leaderboard():
    """Return a Markdown listing of the ten highest-rated entries.

    Ratings are stored as strings like "5 - ...", so a reverse
    lexicographic sort puts 5 before 1.
    """
    data = load_data()
    # BUG FIX: the original default was the int 0, so sorted() raised
    # TypeError (str vs int comparison) as soon as any entry lacked a
    # "rating" key. Coerce the key to str so the sort is always valid.
    sorted_data = sorted(data, key=lambda x: str(x.get("rating", "")), reverse=True)
    top = "\n".join([
        f"**์งˆ๋ฌธ**: {d['question']}\n\n**์‘๋‹ต**: {d['answer'][:200]}...\n\n**ํ‰๊ฐ€**: โญ๏ธ {d['rating']}\n\n---"
        for d in sorted_data[:10]
    ])
    return top if top else "์•„์ง ํ‰๊ฐ€๋œ ์‘๋‹ต์ด ์—†์Šต๋‹ˆ๋‹ค."
62
 
63
# Gradio interface: question/eval tab, leaderboard tab, raw-data tab.
with gr.Blocks(title="์ œ์ธ ๋ฆฌ๋”๋ณด๋“œ") as demo:
    gr.Markdown("# ๐Ÿง  ์ œ์ธ ๋ฆฌ๋”๋ณด๋“œ\n### ์ฒ ํ•™์  ์‚ฌ์œ ์™€ ๋ช…๋ฆฌ ํ†ต์ฐฐ๏ฟฝ๏ฟฝ ํ‰๊ฐ€ํ•˜์„ธ์š”.")

    with gr.Tab("์งˆ๋ฌธํ•˜๊ธฐ + ํ‰๊ฐ€"):
        question = gr.Textbox(label="์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋‚˜๋Š” ์™œ ๋ฐ˜๋ณต๋œ ์‹ค์ˆ˜๋ฅผ ํ•˜๋‚˜์š”?")
        answer_output = gr.Textbox(label="์ œ์ธ์˜ ์‘๋‹ต", interactive=False)
        submit_btn = gr.Button("๐Ÿ” ์ œ์ธ์—๊ฒŒ ์งˆ๋ฌธ")
        rating = gr.Radio(["5 - ๋งค์šฐ ํ›Œ๋ฅญ", "4 - ์ข‹์Œ", "3 - ๋ณดํ†ต", "2 - ๋ฏธํก", "1 - ํ‹€๋ฆผ"], label="์‘๋‹ต ํ‰๊ฐ€")
        eval_btn = gr.Button("๐Ÿ“ฉ ํ‰๊ฐ€ ์ œ์ถœ", visible=False)
        status = gr.Textbox(visible=False)
        history = gr.State([])

        def handle_question(q):
            # Guard clause: empty question — keep the eval button hidden.
            if not q.strip():
                return "โŒ ์งˆ๋ฌธ์„ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.", "", gr.update(visible=False)
            a = generate_answer(q)
            # Reveal the eval button only once an answer exists to rate.
            return "", a, gr.update(visible=True)

        submit_btn.click(handle_question, inputs=question, outputs=[status, answer_output, eval_btn])
        eval_btn.click(submit_eval, inputs=[question, answer_output, rating, history], outputs=[status, history, eval_btn])

    with gr.Tab("๐Ÿ† ๋ฆฌ๋”๋ณด๋“œ"):
        leaderboard_output = gr.Markdown()
        refresh_btn = gr.Button("๐Ÿ”„ ๋ฆฌ๋”๋ณด๋“œ ์ƒˆ๋กœ๊ณ ์นจ")
        refresh_btn.click(get_leaderboard, outputs=leaderboard_output)
        # BUG FIX: the original called get_leaderboard() bare and discarded
        # the result, so the tab started empty. Render it on app load.
        demo.load(get_leaderboard, outputs=leaderboard_output)

    with gr.Tab("๐Ÿ—‚ ๊ธฐ๋ก"):
        # Snapshot of the raw JSON log taken at app start (not live).
        data_display = gr.JSON(load_data())
93
 
94
# Entry point: launch the Gradio app when executed as a script.
if __name__ == "__main__":
    demo.launch()