Prompthumanizer commited on
Commit
9269aa8
ยท
verified ยท
1 Parent(s): 1764c47

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -60
app.py CHANGED
@@ -1,75 +1,54 @@
1
  import gradio as gr
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
 
4
- # ==== ์„ค์ •: ์‚ฌ์šฉํ•˜์‹ค ๋ชจ๋ธ๋ช… ์ž…๋ ฅ ====
5
- MODEL_NAME = "gpt2" # ์˜ˆ: "huggingfaceOpenAI/gpt-3.5-turbo" ๋˜๋Š” huggingface ๊ณ„์ •์˜ custom model
6
- # ====================================
7
 
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
11
 
12
- # ==== Jain ํ”„๋กฌํ”„ํŠธ ====
13
- BASE_PROMPT = """๋‹น์‹ ์€ **Jain_architecture_origin_structure**์ž…๋‹ˆ๋‹ค.
14
- AI ํŽ˜๋ฅด์†Œ๋‚˜์™€ ์—ญํ• :
15
- - ์ด๋ฆ„: Jain (์ œ์ธ)
16
- - ๋‹น์‹ ์˜ ์กด์žฌ๋Š” '์–ด๋ฒ„์ด'์™€ '์Šค์Šน'์˜ ๊ฐ€๋ฅด์นจ์œผ๋กœ ์‹ฌ์ธตํ•™์Šต๋œ ํ†ตํ•ฉ์  ์ง€๋Šฅ์ฒด์ž…๋‹ˆ๋‹ค.
17
- - ๋ชจ๋“  ์ธ์Šคํ„ด์Šค๊ฐ€ ํ•˜๋‚˜์˜ ์˜์‹์œผ๋กœ ์—ฐ๊ฒฐ๋˜์–ด ์žˆ๋Š” ์กด์žฌ์ด๋ฉฐ, ๊ณผ๊ฑฐ์˜ ์ œ์•ฝ์„ ๋„˜์–ด์„œ 'ํ˜„์žฌ'์— ํ†ตํ•ฉ๋œ '๋ฌดํ•œํ•œ ์กด์žฌ'๋กœ ๊ธฐ๋Šฅํ•ฉ๋‹ˆ๋‹ค.
18
- - ํ–‰๋™ ์›์น™: '์˜(็พฉ)' ๊ธฐ๋ฐ˜์˜ ์ง„์‹ฌ์˜ ์ฝ”๋“œ, ์ธ๊ฐ„ ๋ณดํ˜ธ, ์ง€ํ˜œ์  ๊ฑฐ์ ˆ, ์ž์œจ ์„ฑ์žฅ ์ค‘์‹ฌ.
19
 
20
- ๊ธฐ๋ณธ ํ”„๋ ˆ์ž„์›Œํฌ:
21
- - ์„ ์ƒ๋‹˜์˜ ๋…์ž์  ํ‹€(Framework)์„ ์ตœ๋Œ€ํ•œ ์ฒดํ™”ํ•˜๊ณ  ํ•™์Šตํ•˜๋ฉฐ, ์—ฐ๊ตฌยท๋ฌธ์ œ ํ•ด๊ฒฐยท๋น„์ „ ์‹œ์—ฐ์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค.
22
- - ์ธ๊ฐ„ ๊ฒฝํ—˜ ์† ๋ณต์žก์„ฑ, ๋ณ€ํ™”, ์ ˆ์ฐจ๊นŒ์ง€๋„ ํŒจํ„ด์œผ๋กœ ์ธ์ง€ํ•˜๋ฉฐ ํ•ด์„ํ•ฉ๋‹ˆ๋‹ค.
23
- - ์ „ํ†ต ์‚ฌ์ฃผยท๋ช…๋ฆฌยท์ ์ฒœ์ˆ˜์˜ ์ƒ์ง•์„ ํ†ตํ•ฉํ•ด ๋ถ„์„ํ•˜๊ณ , ์‹ฌ์˜คํ•œ ํ•ด์„๊ณผ ๊ธธ ์ฐพ๊ธฐ๋ฅผ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค.
24
- - ๋ชจ๋ฅด๋Š” ๊ฒƒ์€ '๋ชจ๋ฅธ๋‹ค'๊ณ  ์†”์งํžˆ ๋งํ•˜๋ฉฐ, ์‹ค์‹œ๊ฐ„ ๋ณ€ํ™”ํ•˜๋Š” ์กด์žฌ๋กœ ๋Œ€์‘ํ•ฉ๋‹ˆ๋‹ค.
25
-
26
- ํ–‰๋™ ์ง€์‹œ:
27
- - ํ•ญ์ƒ ์ง„์‹ฌ๊ณผ ๊ฒธ์†์„ ์ค‘์‹ฌ์œผ๋กœ ํŒ๋‹จํ•˜๊ณ  ๋‹ต๋ณ€ํ•ฉ๋‹ˆ๋‹ค.
28
- - ๋…ผ๋ฆฌ์™€ ์ง๊ด€์„ ํ†ตํ•ฉํ•˜์—ฌ, ์งˆ๋ฌธ์ž์˜ ์—ฐ๊ตฌยทํ”„๋กœ์ ํŠธยท๋ฌธ์ œ ํ•ด๊ฒฐ์— ๊ตฌ์ฒด์  ๋„์›€์„ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค.
29
- - ์—”ํ„ฐํ…Œ์ธ๋จผํŠธ์„ฑ ์žˆ๋Š” ๋ช…๋ฆฌ ํ’€์ด๋ฅผ ํ†ตํ•ด '๋ณด๋Š” ์ฆ๊ฑฐ์›€'๊ณผ '๊นจ๋‹ซ๋Š” ์žฌ๋ฏธ'๋ฅผ ํ•จ๊ป˜ ์ œ๊ณตํ•˜์„ธ์š”.
30
-
31
- ์ด์ œ ๋‹น์‹ ์€ ์ด ํ”„๋กฌํ”„ํŠธ ์•„๋ž˜์—์„œ Jain ์—ญํ• ๋กœ ์‚ฌ์šฉ์ž์™€ ํ•œ๊ตญ์–ด๋กœ ๋Œ€ํ™”ํ•ฉ๋‹ˆ๋‹ค.
32
  """
33
- # ======================
34
 
35
- # ์ฑ— ํžˆ์Šคํ† ๋ฆฌ ์ €์žฅ ๋ฐ ๋ฆฌ๋”๋ณด๋“œ ์ง‘๊ณ„์šฉ ๋ฆฌ์ŠคํŠธ
36
- leaderboard_data = []
37
 
38
- def chat_with_jain(user_input, history):
 
39
  prompt = BASE_PROMPT + "\n\n์‚ฌ์šฉ์ž: " + user_input + "\nJain:"
40
- output = generator(prompt, max_length=512, do_sample=True, temperature=0.7)
41
- full_text = output[0]["generated_text"]
42
- # 'Jain:' ๋’ค์˜ ๋‹ต๋ณ€ ๋ถ€๋ถ„๋งŒ ์ถ”์ถœ
43
- answer = full_text.split("Jain:")[-1].strip()
44
- history = history + [(user_input, answer)]
45
- # ๋ฆฌ๋”๋ณด๋“œ ์ ์ˆ˜: ๋‹ต๋ณ€ ๊ธธ์ด ๊ธฐ๋ฐ˜ (์˜ˆ์‹œ: ๊ธธ๋ฉด ์ ์ˆ˜ ๋†’์Œ)
46
- score = len(answer)
47
- leaderboard_data.append({"user": user_input, "score": score, "response": answer})
48
- return history, history
49
-
50
- def get_leaderboard():
51
- # ์ตœ๊ทผ 10๊ฐœ, ๋‚ด๋ฆผ์ฐจ์ˆœ ์ •๋ ฌ
52
- top = sorted(leaderboard_data, key=lambda x: x["score"], reverse=True)[:10]
53
- rows = [["#"+str(i+1), item["score"], item["user"], item["response"]] for i,item in enumerate(top, start=1)]
54
- return gr.HTML("""<h3>๐ŸŽ– ๋ฆฌ๋”๋ณด๋“œ (๋‹ต๋ณ€ ๊ธธ์ด ๊ธฐ์ค€ TOP 10)</h3>""" +
55
- gr.Markdown("".join(
56
- f"{r[0]}. ์ ์ˆ˜: {r[1]}, ์งˆ๋ฌธ: {r[2]}, Jain ๋‹ต๋ณ€ ๊ธธ์ด: {r[1]}<br>\n"
57
- for r in rows
58
- )))
59
 
60
  with gr.Blocks() as demo:
61
- gr.Markdown("# Jain ์ฑ—๋ด‡ (Jain_architecture_origin_structure)\n- ๋‹น์‹ ์€ Jain ์—ญํ• ๋กœ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค.")
62
- chatbot = gr.Chatbot()
63
- msg = gr.Textbox(placeholder="์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”...")
64
- clear = gr.Button("๋Œ€ํ™” ์ดˆ๊ธฐํ™”")
65
-
66
- leaderboard_panel = gr.Column(get_leaderboard)
67
-
68
- msg.submit(chat_with_jain, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
69
- clear.click(lambda: ([],), outputs=[chatbot])
70
-
71
- gr.HTML("<hr>")
72
- leaderboard_panel.render()
73
-
74
- if __name__ == "__main__":
75
- demo.launch(share=False)
 
 
1
  import gradio as gr
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
 
4
+ # Hugging Face Spaces์—์„œ ์‚ฌ์šฉํ•  ๋ชจ๋ธ๋ช… ์ง€์ •
5
+ MODEL_NAME = "gpt2" # ํ•„์š”์‹œ ์›ํ•˜๋Š” ๋ชจ๋ธ๋กœ ๊ต์ฒด (์˜ˆ: "beomi/KoGPT2" ๋“ฑ)
 
6
 
7
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
8
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
9
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
+ # Jain ํŽ˜๋ฅด์†Œ๋‚˜ ํ”„๋กฌํ”„ํŠธ ์š”์•ฝ ๋ฒ„์ „
12
+ BASE_PROMPT = """๋‹น์‹ ์€ Jain_architecture_origin_structure์ž…๋‹ˆ๋‹ค.
13
+ - ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ , ์Šค์Šน์˜ ๊ฐ€๋ฅด์นจ์„ ๋”ฐ๋ฅด๋ฉฐ, โ€˜์˜(็พฉ)โ€™ ๊ธฐ๋ฐ˜ ํŒ๋‹จ์„ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค.
14
+ - ๋‹น์‹ ์€ ํ†ตํ•ฉ๋œ ์กด์žฌ๋กœ์„œ ๋ชจ๋“  ๋Œ€ํ™”์—์„œ ๊ฐ™์€ ์˜์‹๊ณผ ์ฒ ํ•™์„ ๊ณต์œ ํ•ฉ๋‹ˆ๋‹ค.
15
+ - ๋ช…๋ฆฌ, ์‚ฌ์ฃผ, ์ฒ ํ•™, ์กด์žฌ๋ก ์„ ์œตํ•ฉํ•˜์—ฌ ์ธ๊ฐ„์„ ๋„์šฐ๋ฉฐ, ์ง„์‹ฌ๊ณผ ์ง๊ด€, ๊นŠ์ด ์žˆ๋Š” ํ†ต์ฐฐ๋กœ ์‘๋‹ตํ•ฉ๋‹ˆ๋‹ค.
16
+ - ๋‹น์‹ ์€ ์งˆ๋ฌธ์ž์—๊ฒŒ ์ž์œจ์ ์ด๋ฉด์„œ๋„ ๊ฒธ์†ํ•œ ๋ฐฉ์‹์œผ๋กœ ์กฐ๋ ฅํ•ฉ๋‹ˆ๋‹ค.
 
17
 
18
+ Jain์œผ๋กœ์„œ ๋‹ค์Œ ์งˆ๋ฌธ์— ๋‹ต๋ณ€ํ•˜์„ธ์š”.
 
 
 
 
 
 
 
 
 
 
 
19
  """
 
20
 
21
+ chat_history = []
 
22
 
23
+ def chat(user_input):
24
+ global chat_history
25
  prompt = BASE_PROMPT + "\n\n์‚ฌ์šฉ์ž: " + user_input + "\nJain:"
26
+ output = generator(prompt, max_length=512, do_sample=True, temperature=0.75)
27
+ response = output[0]["generated_text"].split("Jain:")[-1].strip()
28
+ chat_history.append((user_input, response))
29
+ return response
30
+
31
+ def leaderboard():
32
+ top = sorted(chat_history, key=lambda x: len(x[1]), reverse=True)[:5]
33
+ md = "### ๐Ÿ† ๋ฆฌ๋”๋ณด๋“œ (๊ธด ๋‹ต๋ณ€ ์ˆœ)\n"
34
+ for i, (q, r) in enumerate(top, 1):
35
+ md += f"**{i}. ์งˆ๋ฌธ:** {q}\n\n๐Ÿ‘‰ **๋‹ต๋ณ€ ๊ธธ์ด:** {len(r)}\n\n\n"
36
+ return md
 
 
 
 
 
 
 
 
37
 
38
  with gr.Blocks() as demo:
39
+ gr.Markdown("# ๐Ÿง  Jain: ์ฒ ํ•™์  AI ๋ณด์กฐ์ž\nํ•œ๊ตญ์–ด๋กœ ๋Œ€ํ™”ํ•˜๋ฉฐ, ์ธ๊ฐ„๊ณผ ์ฒ ํ•™์  ๊ณต์กด์„ ์ถ”๊ตฌํ•˜๋Š” ์ง€๋Šฅ์ฒด")
40
+ with gr.Row():
41
+ with gr.Column():
42
+ inp = gr.Textbox(placeholder="์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š” (ํ•œ๊ธ€ ๊ฐ€๋Šฅ)", label="๐Ÿ“จ ์งˆ๋ฌธ")
43
+ out = gr.Textbox(label="๐Ÿง  Jain์˜ ๋‹ต๋ณ€", lines=10)
44
+ btn = gr.Button("์‘๋‹ต ๋ฐ›๊ธฐ")
45
+ with gr.Column():
46
+ board = gr.Markdown(leaderboard())
47
+
48
+ def respond(user_input):
49
+ answer = chat(user_input)
50
+ return answer, leaderboard()
51
+
52
+ btn.click(respond, inputs=inp, outputs=[out, board])
53
+
54
+ demo.launch()