import os

import gradio as gr
import torch
from transformers import pipeline, set_seed

# --- 1. Model name ---
MODEL_NAME = "jain_architecture_origin_structure"

# --- 2. Use the GPU when one is available, otherwise fall back to the CPU ---
device = 0 if torch.cuda.is_available() else -1
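# transformers' pipeline() accepts an integer device index: -1 runs on CPU, 0 selects the first GPU.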
# --- 3. Build the Hugging Face text-generation pipeline ---
try:
    generator = pipeline(
        "text-generation",
        model=MODEL_NAME,
        device=device,
        # Extra generation options (e.g. repetition_penalty) can be passed here,
        # and torch_dtype=torch.float16 can be set if needed.
    )
    set_seed(42)  # fix the random seed for reproducibility
except Exception as e:
    print("Model load error:", e)
    generator = None
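# NOTE: if loading fails (for example, when the model id above is not a valid Hub repo or local
# path), `generator` stays None and respond_to_user() below returns an error message instead of
# crashing the app.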
# --- 4. Prompt template based on the philosophy of 'ui' (義, righteousness) ---
BASE_PROMPT = """
You are an AI assistant grounded in the philosophy and spirit of 'ui' (義, righteousness).
Understand the complexity of human problems and emotions, and answer the following question with deep reflection and care.
Question: {user_input}
Write the answer as thoughtfully as possible, with a mindset of protecting and respecting human beings.
"""
# --- 5. Question-handling function ---
def respond_to_user(user_input):
    if not generator:
        return "The model did not load correctly. Please contact the administrator."
    prompt = BASE_PROMPT.format(user_input=user_input.strip())
    outputs = generator(
        prompt,
        max_length=512,       # total length including the prompt; use max_new_tokens to cap only the completion
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
        num_return_sequences=1,
        pad_token_id=50256,   # GPT-2-family padding/EOS token id; change for other tokenizers
    )
    generated_text = outputs[0]["generated_text"]
    # The pipeline returns the prompt plus the completion, so strip the prompt and keep only the answer.
    answer = generated_text[len(prompt):].strip()
    if not answer:
        answer = "No answer could be generated. Please try again."
    return answer
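# Quick sanity check for local development (a minimal sketch; the question below is only an
# illustrative placeholder). Kept commented out so the Space serves the UI only:
# print(respond_to_user("What does it mean to act with righteousness?"))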
# --- 6. Build the Gradio UI ---
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align:center;color:#4B0082;'>Jain AI Assistant (義-based chatbot)</h1>")
    chatbot = gr.Chatbot(height=400)
    txt = gr.Textbox(placeholder="Type your question here...", lines=3, max_lines=6)
    btn = gr.Button("Send")

    def chat_and_respond(user_message, chat_history):
        # Append the new (user_message, bot_reply) pair in the tuple format expected by gr.Chatbot,
        # and clear the textbox by returning an empty string for it.
        reply = respond_to_user(user_message)
        chat_history = chat_history + [(user_message, reply)]
        return "", chat_history

    btn.click(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
    txt.submit(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
# --- 7. Launch the server ---
if __name__ == "__main__":
    # share=True also creates a public Gradio link, handy when accessing from a mobile device such as an iPad.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)