import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Initialize with publicly available models.
# Note: this emotion classifier is English-only, so non-English input
# may produce unreliable labels.
emotion_analyzer = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base"
)

safety_checker = pipeline(
    "text-classification",
    model="unitary/toxic-bert"  # swapped in as an open-source model
)

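# DialoGPT for open-domain replies (also an English-only model)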
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

# Canned safety responses
SAFE_RESPONSES = {
    "crisis": "It sounds like you may need professional support. Please consider reaching out to a counselor or a crisis hotline.",
    "sadness": "It sounds like you're under a lot of stress. How about trying some deep breathing or calming music?",
    "toxic": "Sensitive content detected; the safety guard has been activated."
}

def generate_response(user_input, history):
    # Safety check. toxic-bert is a multi-label classifier, so its top
    # label can be "toxic" even at negligible confidence; gate on the
    # score as well (0.5 is an illustrative threshold) to avoid
    # flagging benign input.
    safety = safety_checker(user_input)[0]
    if safety["label"] == "toxic" and safety["score"] > 0.5:
        return SAFE_RESPONSES["toxic"]
    
    # Emotion analysis
    emotion = emotion_analyzer(user_input)[0]["label"]

    # Pick a supportive response for negative emotions
    if emotion in ["sadness", "fear"]:
        return SAFE_RESPONSES.get(emotion, "Would you like to tell me more about how you're feeling?")
    
    # Otherwise, generate a free-form reply with DialoGPT.
    # Note: `history` is not passed to the model, so each turn is independent.
    inputs = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    reply_ids = model.generate(
        inputs,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3
    )
    # Decode only the newly generated tokens, dropping the echoed prompt
    return tokenizer.decode(reply_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)

# Build the chat UI
demo = gr.ChatInterface(
    fn=generate_response,
    examples=["School has been so stressful lately", "I had a fight with my parents and I'm really upset"],
    title="Teen Mental Health Assistant"
)

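# launch() starts a local web server (by default at http://127.0.0.1:7860)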
demo.launch()