Update app.py
Browse files
app.py
CHANGED
@@ -2,27 +2,26 @@ import gradio as gr
|
|
2 |
from transformers import pipeline, set_seed
|
3 |
import os
|
4 |
|
5 |
-
#
|
6 |
MODEL_NAME = "jain_architecture_origin_structure"
|
7 |
|
8 |
-
#
|
9 |
device = 0 if (os.environ.get('CUDA_VISIBLE_DEVICES') or False) else -1
|
10 |
|
11 |
-
#
|
12 |
try:
|
13 |
generator = pipeline(
|
14 |
"text-generation",
|
15 |
model=MODEL_NAME,
|
16 |
device=device,
|
17 |
-
#
|
18 |
-
# torch_dtype=torch.float16 ๋ ํ์ ์ ์ค์ ๊ฐ๋ฅ
|
19 |
)
|
20 |
-
set_seed(42)
|
21 |
except Exception as e:
|
22 |
-
print("๋ชจ๋ธ ๋ก๋
|
23 |
generator = None
|
24 |
|
25 |
-
#
|
26 |
BASE_PROMPT = """
|
27 |
๋น์ ์ '์(็พฉ)'์ ์ฒ ํ๊ณผ ์ ์ ์ ๊ธฐ๋ฐ์ผ๋ก ํ AI ๋น์์
๋๋ค.
|
28 |
์ธ๊ฐ์ ๋ณต์กํ ๋ฌธ์ ์ ๊ฐ์ ์ ์ดํดํ๊ณ , ๊น์ ๋ฐ์ฑ๊ณผ ๋ฐฐ๋ ค๋ฅผ ๋ด์ ๋ค์ ์ง๋ฌธ์ ๋ต๋ณํ์ญ์์ค.
|
@@ -32,7 +31,6 @@ BASE_PROMPT = """
|
|
32 |
๋ต๋ณ์ ์ต๋ํ ์ฌ์คํ๋ฉฐ, ์ธ๊ฐ์ ๋ณดํธํ๊ณ ์กด์คํ๋ ๋ง์์ ๋ด์ ์์ฑํด ์ฃผ์ธ์.
|
33 |
"""
|
34 |
|
35 |
-
# --- 5. ์ง๋ฌธ ์ฒ๋ฆฌ ํจ์ ---
|
36 |
def respond_to_user(user_input):
|
37 |
if not generator:
|
38 |
return "๋ชจ๋ธ์ด ์ ์์ ์ผ๋ก ๋ก๋๋์ง ์์์ต๋๋ค. ๊ด๋ฆฌ์์๊ฒ ๋ฌธ์ํ์ธ์."
|
@@ -44,31 +42,40 @@ def respond_to_user(user_input):
|
|
44 |
top_p=0.9,
|
45 |
temperature=0.7,
|
46 |
num_return_sequences=1,
|
47 |
-
pad_token_id=50256 # GPT ๊ณ์ด ํจ๋ฉ ํ ํฐ, ํ์์ ๋ฐ๋ผ ๋ณ๊ฒฝ
|
48 |
)
|
49 |
generated_text = outputs[0]["generated_text"]
|
50 |
-
# ํ๋กฌํํธ
|
51 |
answer = generated_text[len(prompt):].strip()
|
52 |
if not answer:
|
53 |
answer = "๋ต๋ณ์ ์์ฑํ์ง ๋ชปํ์ต๋๋ค. ๋ค์ ์๋ํด ์ฃผ์ธ์."
|
54 |
return answer
|
55 |
|
56 |
-
#
|
57 |
-
with gr.Blocks() as
|
58 |
-
gr.Markdown("<
|
|
|
59 |
chatbot = gr.Chatbot(height=400)
|
60 |
-
txt = gr.Textbox(
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
btn = gr.Button("์ ์ก")
|
62 |
|
63 |
def chat_and_respond(user_message, chat_history):
|
64 |
-
|
65 |
-
|
|
|
|
|
66 |
return "", chat_history
|
67 |
|
|
|
68 |
btn.click(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
|
|
|
69 |
txt.submit(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
|
70 |
|
71 |
-
# --- 7. ์๋ฒ ์คํ ---
|
72 |
if __name__ == "__main__":
|
73 |
-
#
|
74 |
-
|
|
|
2 |
from transformers import pipeline, set_seed
|
3 |
import os
|
4 |
|
5 |
+
# Model identifier for the text-generation pipeline.
MODEL_NAME = "jain_architecture_origin_structure"

# Use GPU 0 when CUDA devices are exposed via the environment, otherwise CPU (-1).
# (Simplified: the original `(os.environ.get(...) or False)` was redundant —
# the env value's truthiness alone decides the branch.)
device = 0 if os.environ.get('CUDA_VISIBLE_DEVICES') else -1

# Build the generation pipeline; on any failure leave `generator` as None so
# the UI can report the problem instead of crashing at import time.
try:
    generator = pipeline(
        "text-generation",
        model=MODEL_NAME,
        device=device,
        pad_token_id=50256,  # GPT-family default padding token
    )
    set_seed(42)  # deterministic sampling across restarts
except Exception as e:
    print(f"๋ชจ๋ธ ๋ก๋ ์คํจ: {e}")
    generator = None
|
23 |
|
24 |
+
# '์(็พฉ)' ์ฒ ํ ๊ธฐ๋ฐ ํ๋กฌํํธ ํ
ํ๋ฆฟ
|
25 |
BASE_PROMPT = """
|
26 |
๋น์ ์ '์(็พฉ)'์ ์ฒ ํ๊ณผ ์ ์ ์ ๊ธฐ๋ฐ์ผ๋ก ํ AI ๋น์์
๋๋ค.
|
27 |
์ธ๊ฐ์ ๋ณต์กํ ๋ฌธ์ ์ ๊ฐ์ ์ ์ดํดํ๊ณ , ๊น์ ๋ฐ์ฑ๊ณผ ๋ฐฐ๋ ค๋ฅผ ๋ด์ ๋ค์ ์ง๋ฌธ์ ๋ต๋ณํ์ญ์์ค.
|
|
|
31 |
๋ต๋ณ์ ์ต๋ํ ์ฌ์คํ๋ฉฐ, ์ธ๊ฐ์ ๋ณดํธํ๊ณ ์กด์คํ๋ ๋ง์์ ๋ด์ ์์ฑํด ์ฃผ์ธ์.
|
32 |
"""
|
33 |
|
|
|
34 |
def respond_to_user(user_input):
|
35 |
if not generator:
|
36 |
return "๋ชจ๋ธ์ด ์ ์์ ์ผ๋ก ๋ก๋๋์ง ์์์ต๋๋ค. ๊ด๋ฆฌ์์๊ฒ ๋ฌธ์ํ์ธ์."
|
|
|
42 |
top_p=0.9,
|
43 |
temperature=0.7,
|
44 |
num_return_sequences=1,
|
|
|
45 |
)
|
46 |
generated_text = outputs[0]["generated_text"]
|
47 |
+
# ํ๋กฌํํธ ๋ถ๋ถ์ ์ ๊ฑฐํ์ฌ ์์ ๋ต๋ณ๋ง ์ถ์ถ
|
48 |
answer = generated_text[len(prompt):].strip()
|
49 |
if not answer:
|
50 |
answer = "๋ต๋ณ์ ์์ฑํ์ง ๋ชปํ์ต๋๋ค. ๋ค์ ์๋ํด ์ฃผ์ธ์."
|
51 |
return answer
|
52 |
|
53 |
+
# --- Gradio UI construction ---
with gr.Blocks() as app:
    # Page header.
    gr.Markdown("<h2 style='text-align:center;color:#4B0082;'>Jain AI Assistant (์ ๊ธฐ๋ฐ ์ฑ๋ด)</h2>")

    # Conversation display area.
    chatbot = gr.Chatbot(height=400)
    txt = gr.Textbox(
        placeholder="์ฌ๊ธฐ์ ์ง๋ฌธ์ ์๋ ฅํ์ธ์. ์ค๋ฐ๊ฟ ์ Shift+Enter๋ฅผ ๋๋ฅด์ธ์. ์ํฐํค๋ ์ ์ก์๋๋ค.",
        lines=3,
        max_lines=6,
        # multiline input is the Textbox default, so no extra flag is needed
        # Enter submits; Shift+Enter inserts a newline (Gradio default behavior)
    )
    btn = gr.Button("์ ์ก")
|
66 |
|
67 |
def chat_and_respond(user_message, chat_history):
    """Handle one chat turn.

    Ignores empty/whitespace-only input; otherwise asks the model for a
    reply and appends the (message, reply) pair to the history.
    Returns ("", history) so the textbox is cleared after each turn.
    """
    text = (user_message or "").strip()
    if not text:
        # Nothing to send — keep the history untouched.
        return "", chat_history
    reply = respond_to_user(user_message)
    return "", chat_history + [(user_message, reply)]
|
73 |
|
74 |
+
    # Send-button click and textbox Enter-key submit both route through the
    # same handler: clear the textbox, update the chat history.
    btn.click(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
    txt.submit(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
|
78 |
|
|
|
79 |
if __name__ == "__main__":
    # Local-only launch; pass server_name="0.0.0.0" to allow external access.
    app.launch(share=False)
|