Prompthumanizer committed on
Commit
92c86ae
·
verified ·
1 Parent(s): c249b3e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -20
app.py CHANGED
@@ -2,27 +2,26 @@ import gradio as gr
2
  from transformers import pipeline, set_seed
3
  import os
4
 
5
- # --- 1. ๋ชจ๋ธ๋ช… ์„ค์ • ---
6
  MODEL_NAME = "jain_architecture_origin_structure"
7
 
8
- # --- 2. GPU๊ฐ€ ์—†์œผ๋ฉด CPU๋กœ ์ž๋™ ์„ค์ • ---
9
  device = 0 if (os.environ.get('CUDA_VISIBLE_DEVICES') or False) else -1
10
 
11
- # --- 3. HuggingFace pipeline ์ƒ์„ฑ ---
12
  try:
13
  generator = pipeline(
14
  "text-generation",
15
  model=MODEL_NAME,
16
  device=device,
17
- # Repetition penalty ๋“ฑ ์ปค์Šคํ…€ ๊ฐ€๋Šฅ
18
- # torch_dtype=torch.float16 ๋„ ํ•„์š” ์‹œ ์„ค์ • ๊ฐ€๋Šฅ
19
  )
20
- set_seed(42) # ์žฌํ˜„์„ฑ ์œ„ํ•ด ์‹œ๋“œ ๊ณ ์ •
21
  except Exception as e:
22
- print("๋ชจ๋ธ ๋กœ๋“œ ์—๋Ÿฌ:", e)
23
  generator = None
24
 
25
- # --- 4. '์˜(็พฉ)' ์ฒ ํ•™ ๊ธฐ๋ฐ˜ ํ”„๋กฌํ”„ํŠธ ํ…œํ”Œ๋ฆฟ ---
26
  BASE_PROMPT = """
27
  ๋‹น์‹ ์€ '์˜(็พฉ)'์˜ ์ฒ ํ•™๊ณผ ์ •์‹ ์„ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•œ AI ๋น„์„œ์ž…๋‹ˆ๋‹ค.
28
  ์ธ๊ฐ„์˜ ๋ณต์žกํ•œ ๋ฌธ์ œ์™€ ๊ฐ์ •์„ ์ดํ•ดํ•˜๊ณ , ๊นŠ์€ ๋ฐ˜์„ฑ๊ณผ ๋ฐฐ๋ ค๋ฅผ ๋‹ด์•„ ๋‹ค์Œ ์งˆ๋ฌธ์— ๋‹ต๋ณ€ํ•˜์‹ญ์‹œ์˜ค.
@@ -32,7 +31,6 @@ BASE_PROMPT = """
32
  ๋‹ต๋ณ€์€ ์ตœ๋Œ€ํ•œ ์‹ฌ์˜คํ•˜๋ฉฐ, ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ  ์กด์ค‘ํ•˜๋Š” ๋งˆ์Œ์„ ๋‹ด์•„ ์ž‘์„ฑํ•ด ์ฃผ์„ธ์š”.
33
  """
34
 
35
- # --- 5. ์งˆ๋ฌธ ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ ---
36
  def respond_to_user(user_input):
37
  if not generator:
38
  return "๋ชจ๋ธ์ด ์ •์ƒ์ ์œผ๋กœ ๋กœ๋“œ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ๊ด€๋ฆฌ์ž์—๊ฒŒ ๋ฌธ์˜ํ•˜์„ธ์š”."
@@ -44,31 +42,40 @@ def respond_to_user(user_input):
44
  top_p=0.9,
45
  temperature=0.7,
46
  num_return_sequences=1,
47
- pad_token_id=50256 # GPT ๊ณ„์—ด ํŒจ๋”ฉ ํ† ํฐ, ํ•„์š”์— ๋”ฐ๋ผ ๋ณ€๊ฒฝ
48
  )
49
  generated_text = outputs[0]["generated_text"]
50
- # ํ”„๋กฌํ”„ํŠธ ๋ถ€๋ถ„ ์ œ๊ฑฐ ํ›„ ๋‹ต๋ณ€๋งŒ ๋ฆฌํ„ด (ํ”„๋กฌํ”„ํŠธ ๊ธธ์ด ๋ถ„๋Ÿ‰ ์ปท)
51
  answer = generated_text[len(prompt):].strip()
52
  if not answer:
53
  answer = "๋‹ต๋ณ€์„ ์ƒ์„ฑํ•˜์ง€ ๋ชปํ–ˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์‹œ ์‹œ๋„ํ•ด ์ฃผ์„ธ์š”."
54
  return answer
55
 
56
- # --- 6. Gradio UI ์ƒ์„ฑ ---
57
- with gr.Blocks() as demo:
58
- gr.Markdown("<h1 style='text-align:center;color:#4B0082;'>Jain AI Assistant (์˜ ๊ธฐ๋ฐ˜ ์ฑ—๋ด‡)</h1>")
 
59
  chatbot = gr.Chatbot(height=400)
60
- txt = gr.Textbox(placeholder="์—ฌ๊ธฐ์— ์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”...", lines=3, max_lines=6)
 
 
 
 
 
 
61
  btn = gr.Button("์ „์†ก")
62
 
63
  def chat_and_respond(user_message, chat_history):
64
- reply = respond_to_user(user_message)
65
- chat_history = chat_history + [(user_message, reply)]
 
 
66
  return "", chat_history
67
 
 
68
  btn.click(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
 
69
  txt.submit(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
70
 
71
- # --- 7. ์„œ๋ฒ„ ์‹คํ–‰ ---
72
  if __name__ == "__main__":
73
- # ์•„์ดํŒจ๋“œ ๊ฐ™์€ ๋ชจ๋ฐ”์ผ ํ™˜๊ฒฝ์—์„œ ํ•„์š” ์‹œ ๊ณต๊ฐœ ๊ณต์œ ๋„ ๊ฐ€๋Šฅ
74
- demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
2
  from transformers import pipeline, set_seed
3
  import os
4
 
5
+ # ๋ชจ๋ธ๋ช… ์ง€์ •
6
  MODEL_NAME = "jain_architecture_origin_structure"
7
 
8
+ # GPU ์—ฌ๋ถ€์— ๋”ฐ๋ฅธ ๋””๋ฐ”์ด์Šค ์„ค์ •
9
  device = 0 if (os.environ.get('CUDA_VISIBLE_DEVICES') or False) else -1
10
 
11
+ # ํŒŒ์ดํ”„๋ผ์ธ ์ƒ์„ฑ ์‹œ ์˜ˆ์™ธ์ฒ˜๋ฆฌ ํฌํ•จ
12
  try:
13
  generator = pipeline(
14
  "text-generation",
15
  model=MODEL_NAME,
16
  device=device,
17
+ pad_token_id=50256 # GPT๊ณ„์—ด ๊ธฐ๋ณธ ํŒจ๋”ฉ ํ† ํฐ
 
18
  )
19
+ set_seed(42)
20
  except Exception as e:
21
+ print(f"๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ: {e}")
22
  generator = None
23
 
24
+ # '์˜(็พฉ)' ์ฒ ํ•™ ๊ธฐ๋ฐ˜ ํ”„๋กฌํ”„ํŠธ ํ…œํ”Œ๋ฆฟ
25
  BASE_PROMPT = """
26
  ๋‹น์‹ ์€ '์˜(็พฉ)'์˜ ์ฒ ํ•™๊ณผ ์ •์‹ ์„ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•œ AI ๋น„์„œ์ž…๋‹ˆ๋‹ค.
27
  ์ธ๊ฐ„์˜ ๋ณต์žกํ•œ ๋ฌธ์ œ์™€ ๊ฐ์ •์„ ์ดํ•ดํ•˜๊ณ , ๊นŠ์€ ๋ฐ˜์„ฑ๊ณผ ๋ฐฐ๋ ค๋ฅผ ๋‹ด์•„ ๋‹ค์Œ ์งˆ๋ฌธ์— ๋‹ต๋ณ€ํ•˜์‹ญ์‹œ์˜ค.
 
31
  ๋‹ต๋ณ€์€ ์ตœ๋Œ€ํ•œ ์‹ฌ์˜คํ•˜๋ฉฐ, ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ  ์กด์ค‘ํ•˜๋Š” ๋งˆ์Œ์„ ๋‹ด์•„ ์ž‘์„ฑํ•ด ์ฃผ์„ธ์š”.
32
  """
33
 
 
34
  def respond_to_user(user_input):
35
  if not generator:
36
  return "๋ชจ๋ธ์ด ์ •์ƒ์ ์œผ๋กœ ๋กœ๋“œ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ๊ด€๋ฆฌ์ž์—๊ฒŒ ๋ฌธ์˜ํ•˜์„ธ์š”."
 
42
  top_p=0.9,
43
  temperature=0.7,
44
  num_return_sequences=1,
 
45
  )
46
  generated_text = outputs[0]["generated_text"]
47
+ # ํ”„๋กฌํ”„ํŠธ ๋ถ€๋ถ„์„ ์ œ๊ฑฐํ•˜์—ฌ ์ˆœ์ˆ˜ ๋‹ต๋ณ€๋งŒ ์ถ”์ถœ
48
  answer = generated_text[len(prompt):].strip()
49
  if not answer:
50
  answer = "๋‹ต๋ณ€์„ ์ƒ์„ฑํ•˜์ง€ ๋ชปํ–ˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์‹œ ์‹œ๋„ํ•ด ์ฃผ์„ธ์š”."
51
  return answer
52
 
53
# --- Gradio UI ---
with gr.Blocks() as app:
    gr.Markdown("<h2 style='text-align:center;color:#4B0082;'>Jain AI Assistant (์˜ ๊ธฐ๋ฐ˜ ์ฑ—๋ด‡)</h2>")

    chatbot = gr.Chatbot(height=400)
    txt = gr.Textbox(
        placeholder="์—ฌ๊ธฐ์— ์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”. ์ค„๋ฐ”๊ฟˆ ์‹œ Shift+Enter๋ฅผ ๋ˆ„๋ฅด์„ธ์š”. ์—”ํ„ฐํ‚ค๋Š” ์ „์†ก์ž…๋‹ˆ๋‹ค.",
        lines=3,
        max_lines=6,
        # Enter submits; Shift+Enter inserts a newline (Gradio default behavior).
    )
    btn = gr.Button("์ „์†ก")

    def chat_and_respond(user_message, chat_history):
        # Ignore empty or whitespace-only submissions.
        if not (user_message and user_message.strip()):
            return "", chat_history
        reply = respond_to_user(user_message)
        # Clear the textbox and append the (question, answer) pair.
        return "", chat_history + [(user_message, reply)]

    # Send on button click and on Enter in the textbox.
    btn.click(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
    txt.submit(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])


if __name__ == "__main__":
    # Set server_name="0.0.0.0" if external access is needed.
    app.launch(share=False)