devvtaco commited on
Commit
fb5e935
·
verified ·
1 Parent(s): 171f69d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -28
app.py CHANGED
@@ -1,45 +1,45 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
- import time
4
 
5
- # ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘ ํ‘œ์‹œ
6
- status_message = "โณ ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘์ž…๋‹ˆ๋‹ค... ์ตœ๋Œ€ 5๋ถ„ ์ •๋„ ๊ฑธ๋ฆด ์ˆ˜ ์žˆ์–ด์š”."
7
-
8
- # ๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ์„ None์œผ๋กœ ์„ค์ •ํ•ด๋‘๊ณ , ์•„๋ž˜์—์„œ ๋กœ๋”ฉ
9
  chat_model = None
 
 
 
 
 
 
 
 
 
10
 
11
- # ๋Œ€๋‹ต ํ•จ์ˆ˜ ์ •์˜
12
  def chat_with_model(message, history):
13
- global chat_model
14
- if chat_model is None:
15
- return history + [[message, "โš ๏ธ ๋ชจ๋ธ์ด ์•„์ง ๋กœ๋”ฉ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ์ž ์‹œ๋งŒ ๊ธฐ๋‹ค๋ ค ์ฃผ์„ธ์š”."]]
16
 
17
  prompt = f"๋‹ค์Œ ๋ฌธ์žฅ์„ ๋ถ„์„ํ•˜๊ณ , ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณต๊ฒฉ์ ์ธ ํ‘œํ˜„์ด ์žˆ์œผ๋ฉด ์•Œ๋ ค์ฃผ๊ณ  ๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊ฟ”์ค˜:\n\n{message}"
18
  response = chat_model(prompt, max_new_tokens=200)[0]['generated_text']
19
  return history + [[message, response]]
20
 
21
- # ๋ชจ๋ธ ๋กœ๋”ฉ ํ•จ์ˆ˜ (๋กœ๋”ฉ ์ค‘ ๋ฉ”์‹œ์ง€๋ฅผ UI์— ๋ณด์—ฌ์ฃผ๊ธฐ ์œ„ํ•ด ๋”ฐ๋กœ ๋ถ„๋ฆฌ)
22
- def load_model():
23
- global chat_model
24
- time.sleep(1) # ์ž ๊น ๊ธฐ๋‹ค๋ฆฌ๊ฒŒ ํ•ด์„œ status ํ‘œ์‹œ๊ฐ€ ์ ์šฉ๋˜๋„๋ก
25
- chat_model = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
26
- return "โœ… ๋ชจ๋ธ์ด ์ค€๋น„๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์ด์ œ ๋ฌธ์žฅ์„ ์ž…๋ ฅํ•ด ๋ณด์„ธ์š”!"
27
 
28
- # Gradio Blocks ๊ธฐ๋ฐ˜ UI
 
 
 
29
  with gr.Blocks() as demo:
30
- chatbot = gr.Chatbot(label="AI ๋ฌธ์žฅ ๋ถ„์„๊ธฐ")
31
- msg = gr.Textbox(label="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋„ˆ๋Š” ์ •๋ง ์ด์ƒํ•˜๊ฒŒ ํ–‰๋™ํ•ด")
32
- status = gr.Markdown(status_message)
33
-
34
- # ๋ฒ„ํŠผ ์ž…๋ ฅ ์‹œ ์—ฐ๊ฒฐ
35
- def respond_and_clear(user_input, history):
36
- new_history = chat_with_model(user_input, history)
37
- return "", new_history
38
 
39
- msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot])
 
 
40
 
41
- # ๋ชจ๋ธ ๋กœ๋”ฉ ๋น„๋™๊ธฐ ์ฒ˜๋ฆฌ
42
- gr.on_page_load(load_model, outputs=status)
43
 
44
- # ์‹คํ–‰
45
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
+ import threading
4
 
 
 
 
 
5
  chat_model = None
6
+ loading_done = False
7
+ status_text = "โณ ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘์ž…๋‹ˆ๋‹ค..."
8
+
9
+ # ๋ฐฑ๊ทธ๋ผ์šด๋“œ์—์„œ ๋ชจ๋ธ ๋กœ๋”ฉ
10
+ def load_model_bg():
11
+ global chat_model, loading_done, status_text
12
+ chat_model = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
13
+ loading_done = True
14
+ status_text = "โœ… ๋ชจ๋ธ ๋กœ๋”ฉ ์™„๋ฃŒ! ๋ฌธ์žฅ์„ ์ž…๋ ฅํ•ด๋ณด์„ธ์š”."
15
 
16
+ # ๋Œ€๋‹ต ํ•จ์ˆ˜
17
  def chat_with_model(message, history):
18
+ global chat_model, loading_done
19
+ if not loading_done:
20
+ return history + [[message, "โš ๏ธ ๋ชจ๋ธ์ด ์•„์ง ๋กœ๋”ฉ ์ค‘์ž…๋‹ˆ๋‹ค. ์ž ์‹œ๋งŒ ๊ธฐ๋‹ค๋ ค ์ฃผ์„ธ์š”."]]
21
 
22
  prompt = f"๋‹ค์Œ ๋ฌธ์žฅ์„ ๋ถ„์„ํ•˜๊ณ , ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณต๊ฒฉ์ ์ธ ํ‘œํ˜„์ด ์žˆ์œผ๋ฉด ์•Œ๋ ค์ฃผ๊ณ  ๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊ฟ”์ค˜:\n\n{message}"
23
  response = chat_model(prompt, max_new_tokens=200)[0]['generated_text']
24
  return history + [[message, response]]
25
 
26
+ # ์ƒํƒœ ํ…์ŠคํŠธ ๋ฐ˜ํ™˜ ํ•จ์ˆ˜ (๋งค๋ฒˆ ์ƒˆ๋กœ ์ฝ์–ด์˜ด)
27
+ def get_status():
28
+ return status_text
 
 
 
29
 
30
+ # ๋ฐฑ๊ทธ๋ผ์šด๋“œ์—์„œ ๋ชจ๋ธ ๋กœ๋”ฉ ์‹œ์ž‘
31
+ threading.Thread(target=load_model_bg).start()
32
+
33
+ # Gradio ์•ฑ
34
  with gr.Blocks() as demo:
35
+ chatbot = gr.Chatbot()
36
+ msg = gr.Textbox(label="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋„ˆ ์ •๋ง ์™œ ๊ทธ๋ ‡๊ฒŒ ๋งํ•ด?")
37
+ status = gr.Markdown(get_status)
 
 
 
 
 
38
 
39
+ def respond_and_clear(user_input, history):
40
+ updated_history = chat_with_model(user_input, history)
41
+ return "", updated_history, get_status()
42
 
43
+ msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot, status])
 
44
 
 
45
  demo.launch()