Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
|
|
2 |
import torch
|
3 |
import random
|
4 |
import os
|
|
|
|
|
5 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
6 |
from huggingface_hub import login
|
7 |
|
@@ -13,7 +15,7 @@ else:
|
|
13 |
raise ValueError("HUGGINGFACE_TOKEN environment variable not set.")
|
14 |
|
15 |
# Load the trained model and tokenizer
|
16 |
-
model_name = "
|
17 |
model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
18 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
19 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
@@ -30,8 +32,7 @@ game_responses = [
|
|
30 |
"We need to capture the objective now!",
|
31 |
"Healing incoming, stay close!",
|
32 |
"I got eliminated, need a revive!",
|
33 |
-
"Nice strategy, let's keep it up!"
|
34 |
-
"You are a Fucking Bastard",
|
35 |
]
|
36 |
|
37 |
# Function for classification
|
@@ -44,31 +45,48 @@ def classify_message(message):
|
|
44 |
return "Hate speech/Offensive" if prediction == 1 else "Not hate speech/Offensive"
|
45 |
|
46 |
# Chat simulation function
|
47 |
-
def chat_interface(history):
|
48 |
if history is None:
|
49 |
history = []
|
50 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
username = random.choice(usernames)
|
52 |
new_message = random.choice(game_responses)
|
53 |
classification = classify_message(new_message)
|
54 |
blurred_message = "****" if classification == "Hate speech/Offensive" else new_message
|
55 |
history.append({"role": "user", "content": f"{username}: {blurred_message}"})
|
56 |
|
57 |
-
# Generate
|
58 |
bot_username = "GameMaster"
|
59 |
bot_response = random.choice(game_responses)
|
60 |
history.append({"role": "assistant", "content": f"{bot_username}: {bot_response}"})
|
61 |
|
62 |
return history
|
63 |
|
|
|
|
|
|
|
|
|
|
|
|
|
64 |
# Create Gradio interface
|
65 |
def main():
|
66 |
with gr.Blocks() as app:
|
67 |
gr.Markdown("# Game Chat Hate Speech Detection Simulator")
|
68 |
chatbot = gr.Chatbot(type="messages")
|
69 |
-
|
|
|
|
|
|
|
70 |
|
71 |
-
|
|
|
72 |
|
73 |
app.launch()
|
74 |
|
|
|
2 |
import torch
|
3 |
import random
|
4 |
import os
|
5 |
+
import time
|
6 |
+
import threading
|
7 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
8 |
from huggingface_hub import login
|
9 |
|
|
|
15 |
raise ValueError("HUGGINGFACE_TOKEN environment variable not set.")
|
16 |
|
17 |
# Load the trained model and tokenizer
|
18 |
+
model_name = "your-username/hate-speech-classifier" # Replace with your actual Hugging Face model repo
|
19 |
model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
20 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
21 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
32 |
"We need to capture the objective now!",
|
33 |
"Healing incoming, stay close!",
|
34 |
"I got eliminated, need a revive!",
|
35 |
+
"Nice strategy, let's keep it up!"
|
|
|
36 |
]
|
37 |
|
38 |
# Function for classification
|
|
|
45 |
return "Hate speech/Offensive" if prediction == 1 else "Not hate speech/Offensive"
|
46 |
|
47 |
# Chat simulation function
|
48 |
+
def chat_interface(history, user_message=""):
    """Advance the simulated game chat by one round.

    Appends up to three messages to ``history`` (Gradio "messages" format,
    i.e. dicts with ``role``/``content``): the human player's message if one
    was submitted, a random canned line from a random other player, and a
    canned reply from the "GameMaster" bot. Player messages flagged by the
    classifier are masked as "****"; the bot reply is not filtered.

    Args:
        history: Existing chat transcript, or None to start a fresh one.
        user_message: Optional text typed by the human player.

    Returns:
        The updated transcript list (mutated in place when provided).
    """
    if history is None:
        history = []

    def masked(text):
        # Hide the whole message when the classifier flags it as offensive.
        return "****" if classify_message(text) == "Hate speech/Offensive" else text

    # Echo the human player's message, masked when offensive.
    if user_message:
        history.append({"role": "user", "content": f"You: {masked(user_message)}"})

    # A random other player posts a random canned line, also filtered.
    player = random.choice(usernames)
    line = random.choice(game_responses)
    history.append({"role": "user", "content": f"{player}: {masked(line)}"})

    # The bot replies with an unfiltered canned line.
    bot_line = random.choice(game_responses)
    history.append({"role": "assistant", "content": f"GameMaster: {bot_line}"})

    return history
|
71 |
|
72 |
+
# Background chat simulation
|
73 |
+
def simulate_chat(chatbot):
    # Endless background loop: append one round of simulated chat every 2 s.
    #
    # NOTE(review): `chatbot.update(...)` is called on a Gradio component from
    # a plain daemon thread. In recent Gradio versions components do not have
    # an instance-level `update` that pushes new state to connected browsers,
    # so this loop is likely a no-op or raises inside the thread — TODO confirm
    # against the installed Gradio version. The supported way to poll is a
    # gr.Timer / `every=` event listener wired inside the Blocks context.
    while True:
        chatbot.update(chat_interface(chatbot.value))
        time.sleep(2)
|
77 |
+
|
78 |
# Create Gradio interface
|
79 |
def main():
    """Build and launch the Gradio UI for the game-chat simulator.

    Lays out the chat transcript, a textbox for the human player, and a
    classification log box, wires the submit handler, starts the background
    message simulator, then blocks in ``app.launch()``.
    """
    with gr.Blocks() as app:
        gr.Markdown("# Game Chat Hate Speech Detection Simulator")
        chatbot = gr.Chatbot(type="messages")
        user_input = gr.Textbox(label="Enter your message")
        # NOTE(review): no event handler ever writes to log_box — it renders
        # but stays empty; either wire it up or drop it.
        log_box = gr.Textbox(label="Classification Log", interactive=False)

        def _on_submit(history, message):
            # Delegate to chat_interface, then return "" for the textbox so
            # the field is cleared after sending (the original left the typed
            # message sitting in the box).
            return chat_interface(history, message), ""

        user_input.submit(
            _on_submit,
            inputs=[chatbot, user_input],
            outputs=[chatbot, user_input],
        )

        # Start chat simulation in a separate thread.
        # NOTE(review): mutating a component from a raw thread does not push
        # updates to connected clients in Gradio — prefer gr.Timer or an
        # `every=` listener; kept here for parity with the original wiring.
        threading.Thread(target=simulate_chat, args=(chatbot,), daemon=True).start()

    app.launch()
|
92 |
|