import pandas as pd
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from datasets import load_dataset
import time
# Check if GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the model and tokenizer
model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
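# Note: this checkpoint is a 28-label GoEmotions classifier; the batching helper
# below assigns each email its single highest-scoring label via argmax.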
# Define the emotion labels (based on the GoEmotions dataset)
emotion_labels = ["admiration", "amusement", "anger", "annoyance", "approval",
"caring", "confusion", "curiosity", "desire", "disappointment",
"disapproval", "disgust", "embarrassment", "excitement", "fear",
"gratitude", "grief", "joy", "love", "nervousness", "optimism",
"pride", "realization", "relief", "remorse", "sadness", "surprise",
"neutral"]
# Function to classify emotions in batches
def classify_emotions_in_batches(texts, batch_size=32):
    results = []
    num_batches = (len(texts) + batch_size - 1) // batch_size
    start_time = time.time()
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        inputs = tokenizer(batch, return_tensors="pt", truncation=True, padding=True).to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # Take the highest-scoring class per email and map the id to its label string,
        # so the 'emotion' column can be compared against the dropdown selection later
        predicted_class_ids = torch.argmax(logits, dim=-1).tolist()
        results.extend(model.config.id2label[class_id] for class_id in predicted_class_ids)
        # Log progress
        batch_time = time.time() - start_time
        st.write(f"Processed batch {i // batch_size + 1} of {num_batches} in {batch_time:.2f} seconds")
        start_time = time.time()
    return results
# Streamlit interface
st.title("Enron Emails Emotion Analysis")
# Button to run the inference script
if st.button("Run Inference"):
    # Load the Enron dataset
    with st.spinner('Loading dataset...'):
        dataset = load_dataset("Hellisotherpeople/enron_emails_parsed")
        enron_data = pd.DataFrame(dataset['train'])
    # Apply emotion classification to the email content
    with st.spinner('Running inference...'):
        email_texts = enron_data['body'].tolist()
        enron_data['emotion'] = classify_emotions_in_batches(email_texts, batch_size=32)
    # Save the results to a CSV file
    enron_data.to_csv("enron_emails_with_emotions.csv", index=False)
    st.success("Inference completed and results saved!")
# Check if the results file exists and load it
try:
    enron_data = pd.read_csv("enron_emails_with_emotions.csv")
    # Dropdown for selecting an emotion
    selected_emotion = st.selectbox("Select Emotion", emotion_labels)
    # Filter emails based on the selected emotion
    filtered_emails = enron_data[enron_data['emotion'] == selected_emotion].head(10)
    # Display the filtered emails in a table
    if not filtered_emails.empty:
        st.write("Top 10 emails with emotion:", selected_emotion)
        st.table(filtered_emails[['From', 'To', 'body', 'emotion']])
    else:
        st.write("No emails found with the selected emotion.")
except FileNotFoundError:
    st.warning("Run inference first by clicking the 'Run Inference' button.")