Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,23 +1,45 @@
import gradio as gr
from transformers import pipeline
+import time

-#
-
-
-#
-
-    prompt = f"Analyze the following sentence; if it contains rude or harsh wording, point it out and rewrite it more kindly:\n\n{text}"
-    response = chat(prompt, max_new_tokens=200)[0]['generated_text']
-    return response
-
-#
-
-
-
-
-
-
-)
+# Status shown while the model loads
+status_message = "⏳ The model is loading... this can take up to about 5 minutes."
+
+# Leave the pipeline as None at first; it is loaded below
+chat_model = None
+
+# Define the reply function
+def chat_with_model(message, history):
+    global chat_model
+    if chat_model is None:
+        return history + [[message, "⚠️ The model has not loaded yet. Please wait a moment."]]
+
+    prompt = f"Analyze the following sentence; if it contains any rude or aggressive wording, point it out and rewrite it more gently:\n\n{message}"
+    response = chat_model(prompt, max_new_tokens=200)[0]['generated_text']
+    return history + [[message, response]]
+
+# Model-loading function (split out so the loading message can be shown in the UI)
+def load_model():
+    global chat_model
+    time.sleep(1)  # brief pause so the status message is rendered first
+    chat_model = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
+    return "✅ The model is ready. Now try entering a sentence!"
+
+# Gradio Blocks UI
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(label="AI Sentence Analyzer")
+    msg = gr.Textbox(label="Enter a sentence", placeholder="e.g., You are acting really strangely")
+    status = gr.Markdown(status_message)
+
+    # Hook up input submission
+    def respond_and_clear(user_input, history):
+        new_history = chat_with_model(user_input, history)
+        return "", new_history
+
+    msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot])
+
+    # Load the model asynchronously when the page opens
+    demo.load(load_model, outputs=status)

# Run
-
+demo.launch()
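
Note on the page-load hook: gr.on_page_load does not exist in Gradio's API, so the line as originally committed would raise an AttributeError while the module is imported, which is the likely source of the Space's "Runtime error" status. The documented equivalent is the load event on a Blocks instance, as used in the corrected line above. A minimal sketch of that pattern, assuming a recent Gradio release and a hypothetical slow_setup function standing in for the real pipeline load:

import gradio as gr

def slow_setup():
    # Hypothetical stand-in for expensive startup work,
    # e.g. building a transformers pipeline.
    return "✅ Ready."

with gr.Blocks() as demo:
    status = gr.Markdown("⏳ Loading...")
    # Blocks' load event fires each time the page is opened in a browser;
    # the function's return value is written into the status component.
    demo.load(slow_setup, inputs=None, outputs=status)

demo.launch()

One design note: because the event fires on every page load, load_model above rebuilds the pipeline for each new visitor; checking if chat_model is None at the top of load_model (mirroring the guard already in chat_with_model) would construct it only once per process.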