import gradio as gr
import torch
import random
import os
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from huggingface_hub import login

# Login to Hugging Face using token from environment variable
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    raise ValueError("HUGGINGFACE_TOKEN environment variable not set.")

# Load the trained model and tokenizer
model_name = "chaitravi/hate-speech-classifier"  # Replace with your actual Hugging Face model repo
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()

# Predefined usernames and chat messages for a game scenario
usernames = ["ShadowSlayer", "DragonKnight", "PixelMage", "CyberRogue", "PhantomArcher"]
game_responses = [
    "I need backup at the base!",
    "Watch out for enemies on the left!",
    "Let's team up and attack together.",
    "Great shot! That was amazing!",
    "We need to capture the objective now!",
    "Healing incoming, stay close!",
    "I got eliminated, need a revive!",
    "Nice strategy, let's keep it up!"
]

# Function for classification
def classify_message(message):
    inputs = tokenizer(message, padding="max_length", truncation=True, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    prediction = torch.argmax(logits, dim=1).item()
    return "Hate speech/Offensive" if prediction == 1 else "Not hate speech/Offensive"

# Chat simulation function
def chat_interface(history):
    if history is None:
        history = []
    username = random.choice(usernames)
    new_message = random.choice(game_responses)
    classification = classify_message(new_message)
    blurred_message = "****" if classification == "Hate speech/Offensive" else new_message
    history.append({"role": "user", "content": f"{username}: {blurred_message}"})

    # Generate automated game response
    bot_username = "GameMaster"
    bot_response = random.choice(game_responses)
    history.append({"role": "assistant", "content": f"{bot_username}: {bot_response}"})
    return history

# Create Gradio interface
def main():
    with gr.Blocks() as app:
        gr.Markdown("# Game Chat Hate Speech Detection Simulator")
        chatbot = gr.Chatbot(type="messages")
        submit = gr.Button("Generate Message")
        submit.click(chat_interface, inputs=[chatbot], outputs=[chatbot])
    app.launch()

if __name__ == "__main__":
    main()