devvtaco commited on
Commit
171f69d
·
verified ·
1 Parent(s): 6c50f6d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -16
app.py CHANGED
@@ -1,23 +1,45 @@
1
  import gradio as gr
2
  from transformers import pipeline
 
3
 
4
- # ํ•œ๊ตญ์–ด ์ฑ„ํŒ… ๋ชจ๋ธ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ (์˜ˆ: KoAlpaca, LLaMA, Mistral ๋“ฑ)
5
- chat = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
6
 
7
- # ๋ถ„์„ ํ•จ์ˆ˜ ์ •์˜
8
- def analyze_korean_text(text):
9
- prompt = f"๋‹ค์Œ ๋ฌธ์žฅ์„ ๋ถ„์„ํ•˜๊ณ  ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณผ๊ฒฉํ•œ ํ‘œํ˜„์ด ์žˆ์œผ๋ฉด ์ง€์ ํ•˜๊ณ , ๋” ์นœ์ ˆํ•˜๊ฒŒ ๋ฐ”๊ฟ”์ค˜:\n\n{text}"
10
- response = chat(prompt, max_new_tokens=200)[0]['generated_text']
11
- return response
12
 
13
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค
14
- iface = gr.Interface(
15
- fn=analyze_korean_text,
16
- inputs=gr.Textbox(label="๋ถ„์„ํ•  ๋ฌธ์žฅ ์ž…๋ ฅ"),
17
- outputs=gr.Textbox(label="๋ถ„์„ ๊ฒฐ๊ณผ"),
18
- title="๋ฌธ์žฅ ๋ถ„์„๊ธฐ (์นœ์ ˆํ•˜๊ฒŒ ๋ฐ”๊ฟ”์ฃผ๋Š” AI)",
19
- description="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜๋ฉด ๋ฌด๋ก€ํ•œ ํ‘œํ˜„์„ ์ง€์ ํ•˜๊ณ  ๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊ฟ”์ฃผ๋Š” ๋„์šฐ๋ฏธ์ž…๋‹ˆ๋‹ค."
20
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  # ์‹คํ–‰
23
- iface.launch()
 
import gradio as gr
from transformers import pipeline
import time

# Status text shown in the UI while the model is still loading
# (model download/load for a 5.8B checkpoint can take several minutes).
status_message = "โณ ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘์ž…๋‹ˆ๋‹ค... ์ตœ๋Œ€ 5๋ถ„ ์ •๋„ ๊ฑธ๋ฆด ์ˆ˜ ์žˆ์–ด์š”."

# Pipeline is deliberately deferred: kept as None here and filled in
# later by load_model(), so the UI can render before the heavy load.
chat_model = None
 
 
 
10
 
11
# Response function: analyzes the user's message and appends the exchange.
def chat_with_model(message, history):
    """Analyze *message* with the loaded pipeline and extend the chat history.

    Args:
        message: the user's input sentence.
        history: list of [user, bot] message pairs (gr.Chatbot format).

    Returns:
        A new history list with the [message, response] pair appended;
        if the model is not loaded yet, the response is a warning string.
    """
    global chat_model
    # The model loads asynchronously after page load; guard against early use.
    if chat_model is None:
        return history + [[message, "โš ๏ธ ๋ชจ๋ธ์ด ์•„์ง ๋กœ๋”ฉ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ์ž ์‹œ๋งŒ ๊ธฐ๋‹ค๋ ค ์ฃผ์„ธ์š”."]]

    prompt = f"๋‹ค์Œ ๋ฌธ์žฅ์„ ๋ถ„์„ํ•˜๊ณ , ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณต๊ฒฉ์ ์ธ ํ‘œํ˜„์ด ์žˆ์œผ๋ฉด ์•Œ๋ ค์ฃผ๊ณ  ๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊ฟ”์ค˜:\n\n{message}"
    generated = chat_model(prompt, max_new_tokens=200)[0]['generated_text']
    # BUG FIX: HF text-generation pipelines return the prompt echoed at the
    # start of 'generated_text'; strip it so the user only sees the answer.
    response = generated[len(prompt):].lstrip() if generated.startswith(prompt) else generated
    return history + [[message, response]]
20
+
21
+ # ๋ชจ๋ธ ๋กœ๋”ฉ ํ•จ์ˆ˜ (๋กœ๋”ฉ ์ค‘ ๋ฉ”์‹œ์ง€๋ฅผ UI์— ๋ณด์—ฌ์ฃผ๊ธฐ ์œ„ํ•ด ๋”ฐ๋กœ ๋ถ„๋ฆฌ)
22
+ def load_model():
23
+ global chat_model
24
+ time.sleep(1) # ์ž ๊น ๊ธฐ๋‹ค๋ฆฌ๊ฒŒ ํ•ด์„œ status ํ‘œ์‹œ๊ฐ€ ์ ์šฉ๋˜๋„๋ก
25
+ chat_model = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
26
+ return "โœ… ๋ชจ๋ธ์ด ์ค€๋น„๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์ด์ œ ๋ฌธ์žฅ์„ ์ž…๋ ฅํ•ด ๋ณด์„ธ์š”!"
27
+
28
# Gradio Blocks-based UI
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="AI ๋ฌธ์žฅ ๋ถ„์„๊ธฐ")
    msg = gr.Textbox(label="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋„ˆ๋Š” ์ •๋ง ์ด์ƒํ•˜๊ฒŒ ํ–‰๋™ํ•ด")
    status = gr.Markdown(status_message)

    # On submit: produce the answer, then clear the input textbox.
    def respond_and_clear(user_input, history):
        new_history = chat_with_model(user_input, history)
        return "", new_history

    msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot])

    # BUG FIX: `gr.on_page_load` does not exist in the Gradio API and would
    # raise AttributeError at startup. The supported way to run a function
    # when the page loads is the Blocks `load` event.
    demo.load(load_model, outputs=status)

# Run the app
demo.launch()