projectBongBong / app.py
devvtaco's picture
Update app.py
dc5e80d verified
raw
history blame
1.76 kB
import gradio as gr
from transformers import pipeline
import threading
# Shared mutable state. Written by the background loader thread
# (load_model_bg) and read by the Gradio callbacks below.
chat_model = None  # transformers text-generation pipeline once loaded; None while loading
loading_done = False  # set True by load_model_bg after the pipeline is ready
status_text = "⏳ λͺ¨λΈ λ‘œλ”© μ€‘μž…λ‹ˆλ‹€..."  # user-facing status line shown in the UI ("model loading...")
# λ°±κ·ΈλΌμš΄λ“œμ—μ„œ λͺ¨λΈ λ‘œλ”©
def load_model_bg():
global chat_model, loading_done, status_text
chat_model = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
loading_done = True
status_text = "βœ… λͺ¨λΈ λ‘œλ”© μ™„λ£Œ! λ¬Έμž₯을 μž…λ ₯ν•΄λ³΄μ„Έμš”."
# Chat callback: ask the model to detect rude/offensive wording and rewrite it politely.
def chat_with_model(message, history):
    """Append ``[message, model reply]`` to the chat history and return it.

    Parameters
    ----------
    message : str
        The user's input sentence.
    history : list[list[str]] | None
        Gradio chatbot history as ``[user, bot]`` pairs; ``None`` is
        treated as an empty history.

    Returns
    -------
    list[list[str]]
        A new history list (the input list is never mutated).
    """
    history = history or []  # gradio can pass None before the first turn; None + list raised TypeError
    if not loading_done:
        return history + [[message, "⚠️ λͺ¨λΈμ΄ 아직 λ‘œλ”© μ€‘μž…λ‹ˆλ‹€. μž μ‹œλ§Œ κΈ°λ‹€λ € μ£Όμ„Έμš”."]]
    prompt = f"""
### Instruction: λ‹€μŒ λ¬Έμž₯을 λΆ„μ„ν•΄μ„œ λ¬΄λ‘€ν•˜κ±°λ‚˜ 곡격적인 ν‘œν˜„μ΄ μžˆλŠ”μ§€ νŒλ‹¨ν•˜κ³ , μžˆλ‹€λ©΄ 더 예의 있게 고쳐쀘.
### Input:
{message}
### Response:
"""
    # The pipeline echoes the prompt inside generated_text; slice it off to
    # keep only the model's answer.
    response = chat_model(prompt, max_new_tokens=200)[0]['generated_text']
    response_only = response[len(prompt):].strip()
    return history + [[message, response_only]]
# μƒνƒœ ν…μŠ€νŠΈ λ°˜ν™˜ ν•¨μˆ˜ (맀번 μƒˆλ‘œ μ½μ–΄μ˜΄)
def get_status():
return status_text
# λ°±κ·ΈλΌμš΄λ“œμ—μ„œ λͺ¨λΈ λ‘œλ”© μ‹œμž‘
threading.Thread(target=load_model_bg).start()
# --- Gradio UI ----------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="λ¬Έμž₯을 μž…λ ₯ν•˜μ„Έμš”", placeholder="예: λ„ˆ 정말 μ™œ κ·Έλ ‡κ²Œ 말해?")
    # Passing the function itself (not its result) lets gradio call it to
    # refresh the markdown, so the status reflects the loader's progress.
    status = gr.Markdown(get_status)

    def respond_and_clear(user_input, history):
        """Run one chat turn, clear the textbox, and refresh the status line."""
        new_history = chat_with_model(user_input, history)
        return "", new_history, get_status()

    msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot, status])

demo.launch()