JenniferHJF committed on
Commit c69f58d · verified · 1 Parent(s): 2bead6c

Update app.py

Files changed (1)
  1. app.py +47 -34
app.py CHANGED
@@ -1,36 +1,49 @@
 import streamlit as st
-from agent import classify_emoji_text
 
-# ✅ Page configuration
-st.set_page_config(page_title="Emoji Offensive Text Detector", page_icon="🚨", layout="wide")
-
-# Page title
-st.title("🧠 Emoji-based Offensive Language Classifier")
-
-st.markdown("""
-This application translates emojis in a sentence and classifies whether the final sentence is offensive or not using two AI models.
-- The **first model** translates emoji or symbolic phrases into standard Chinese text.
-- The **second model** performs offensive language detection.
-""")
-
-# ✅ Input area
-default_text = "你是🐷"
-text = st.text_area("✍️ Input your sentence here:", value=default_text, height=150)
-
-# ✅ Trigger button
-if st.button("🚦 Analyze"):
-    with st.spinner("🔍 Processing..."):
-        try:
-            translated, label, score = classify_emoji_text(text)
-
-            # Display the results (fixed the multi-line string syntax)
-            st.markdown("### 🔄 Translated sentence:")
-            st.code(translated, language="text")
-
-            st.markdown(f"### 🎯 Prediction: `{label}`")
-            st.markdown(f"### 📊 Confidence Score: `{score:.2%}`")
-
-        except Exception as e:
-            st.error(f"❌ An error occurred during processing:\n\n{e}")
-else:
-    st.info("👈 Please input text and click the button to classify.")
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+import torch
 import streamlit as st
 
+# ✅ Step 1: Emoji translation model (your own fine-tuned model)
+emoji_model_id = "JenniferHJF/qwen1.5-emoji-finetuned"
+emoji_tokenizer = AutoTokenizer.from_pretrained(emoji_model_id, trust_remote_code=True)
+emoji_model = AutoModelForCausalLM.from_pretrained(
+    emoji_model_id,
+    trust_remote_code=True,
+    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
+).to("cuda" if torch.cuda.is_available() else "cpu")
+emoji_model.eval()
+
+# ✅ Step 2: Selectable offensive-language classification models
+model_options = {
+    "Toxic-BERT": "unitary/toxic-bert",
+    "Roberta Offensive": "cardiffnlp/twitter-roberta-base-offensive",
+    "BERT Emotion": "bhadresh-savani/bert-base-go-emotion"
+}
+
+# Model selection in the Streamlit sidebar
+selected_model = st.sidebar.selectbox("Choose classification model", list(model_options.keys()))
+selected_model_id = model_options[selected_model]
+
+classifier = pipeline("text-classification", model=selected_model_id, device=0 if torch.cuda.is_available() else -1)
+
+def classify_emoji_text(text: str):
+    """
+    Step 1: Translate the emojis in the input text.
+    Step 2: Run the selected classifier to decide whether the result is offensive.
+    """
+    prompt = f"输入:{text}\n输出:"
+    input_ids = emoji_tokenizer(prompt, return_tensors="pt").to(emoji_model.device)
+    with torch.no_grad():
+        output_ids = emoji_model.generate(**input_ids, max_new_tokens=64, do_sample=False)
+    decoded = emoji_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+    # Keep only the actual model output (strip the prompt)
+    if "输出:" in decoded:
+        translated_text = decoded.split("输出:")[-1].strip()
+    else:
+        translated_text = decoded.strip()
+
+    result = classifier(translated_text)[0]
+    label = result["label"]
+    score = result["score"]
+
+    return translated_text, label, score
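
Note: as committed, app.py now loads both models and defines classify_emoji_text in place, but it no longer contains the Streamlit front end that the previous revision built around that function. A minimal sketch of how the removed UI could call the new in-file function, assuming it is appended to the app.py shown above so that st and classify_emoji_text are already in scope; this is not part of the commit:

# Sketch only: possible front end mirroring the UI removed in this commit.
st.title("🧠 Emoji-based Offensive Language Classifier")
text = st.text_area("✍️ Input your sentence here:", value="你是🐷", height=150)

if st.button("🚦 Analyze"):
    with st.spinner("🔍 Processing..."):
        try:
            translated, label, score = classify_emoji_text(text)
            st.markdown("### 🔄 Translated sentence:")
            st.code(translated, language="text")
            st.markdown(f"### 🎯 Prediction: `{label}`")
            st.markdown(f"### 📊 Confidence Score: `{score:.2%}`")
        except Exception as e:
            st.error(f"❌ An error occurred during processing:\n\n{e}")
else:
    st.info("👈 Please input text and click the button to classify.")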