import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Initialize pipelines with publicly available models
emotion_analyzer = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base"
)
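# Note: this emotion model predicts seven labels:
# anger, disgust, fear, joy, neutral, sadness, surprise.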
safety_checker = pipeline(
    "text-classification",
    model="unitary/toxic-bert"  # open-source toxicity classifier
)
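# toxic-bert is a multi-label classifier over the Jigsaw toxicity categories
# (toxic, severe_toxic, obscene, threat, insult, identity_hate).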
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
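# DialoGPT-medium is an English-only conversational model, so prompts and
# replies are expected to be in English.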
# Canned safe-response templates
SAFE_RESPONSES = {
    "crisis": "It sounds like you may need professional support. Please consider contacting a counselor or calling a crisis hotline.",
    "sadness": "It sounds like you are under a lot of pressure. Would deep breathing or some calming music help?",
    "toxic": "Sensitive content detected; the safety guard has been triggered."
}
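# Note: the "crisis" response is defined but not currently used in
# generate_response below.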
def generate_response(user_input, history):
    # Safety check: unitary/toxic-bert is a multi-label model, so apply a
    # sigmoid and require a confident "toxic" score (the 0.5 threshold is a
    # heuristic) instead of trusting the top label alone.
    safety = safety_checker(user_input, function_to_apply="sigmoid")[0]
    if safety["label"] == "toxic" and safety["score"] > 0.5:
        return SAFE_RESPONSES["toxic"]
    # Emotion analysis
    emotion = emotion_analyzer(user_input)[0]["label"]
    # Use a canned supportive reply for distressed emotions
    if emotion in ["sadness", "fear"]:
        return SAFE_RESPONSES.get(emotion, "Would you like to tell me more about how you're feeling?")
    # Otherwise, generate a reply with DialoGPT
    inputs = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    reply_ids = model.generate(
        inputs,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3
    )
    # Decode only the newly generated tokens (everything after the prompt)
    return tokenizer.decode(reply_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
# Build the Gradio chat interface
demo = gr.ChatInterface(
    fn=generate_response,
    examples=["I've been really stressed about school lately", "I had a fight with my parents and feel terrible"],
    title="Teen Mental Health Assistant"
)
demo.launch()