# app.py — tag each Enron email with its dominant GoEmotions emotion
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from datasets import load_dataset
# Select a GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the parsed Enron email dataset from the Hugging Face Hub
dataset = load_dataset("Hellisotherpeople/enron_emails_parsed")
enron_data = dataset['train'].to_pandas()
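# Optional quick-run switch (an addition, not part of the original script):
# the full Enron corpus is large, so a subsample is handy for a first pass.
# Set SAMPLE_SIZE to None to process every email.
SAMPLE_SIZE = None
if SAMPLE_SIZE is not None:
    enron_data = enron_data.sample(n=SAMPLE_SIZE, random_state=0).reset_index(drop=True)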
# Load the GoEmotions classifier and its tokenizer
model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
model.eval()  # inference only: disable dropout
# Emotion labels, in the id order used by the GoEmotions dataset
emotion_labels = ["admiration", "amusement", "anger", "annoyance", "approval",
                  "caring", "confusion", "curiosity", "desire", "disappointment",
                  "disapproval", "disgust", "embarrassment", "excitement", "fear",
                  "gratitude", "grief", "joy", "love", "nervousness", "optimism",
                  "pride", "realization", "relief", "remorse", "sadness", "surprise",
                  "neutral"]
# Classify the dominant emotion for a list of texts, in batches
def classify_emotions_in_batches(texts, batch_size=32):
    results = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        inputs = tokenizer(batch, return_tensors="pt", truncation=True, padding=True).to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # argmax over the 28 classes picks the single most likely emotion per text
        predicted_class_ids = torch.argmax(logits, dim=-1).tolist()
        results.extend(emotion_labels[class_id] for class_id in predicted_class_ids)
    return results
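# Quick smoke test on two made-up strings (an illustrative addition): this
# should print two labels drawn from emotion_labels before we commit to the
# full corpus.
print(classify_emotions_in_batches(["Thanks so much for your help!",
                                    "This is completely unacceptable."]))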
# Apply emotion classification to the email bodies in batches; fill missing
# bodies with empty strings, since the tokenizer rejects non-string input
email_texts = enron_data['body'].fillna("").astype(str).tolist()
enron_data['emotion'] = classify_emotions_in_batches(email_texts, batch_size=32)
# Save the results to a CSV file
enron_data.to_csv("enron_emails_with_emotions.csv", index=False)
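# Optional follow-up (an addition): summarize how the predicted emotions are
# distributed across the corpus.
print(enron_data['emotion'].value_counts())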