import numpy as np
import pandas as pd
import torch
from torch.nn import functional as F
from datasets import Dataset
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# Load data
train_df = pd.read_csv("./data/DISAPERE-main/SELFExtractedData/disapere_polarity_train.csv")
dev_df = pd.read_csv("./data/DISAPERE-main/SELFExtractedData/disapere_polarity_dev.csv")
test_df = pd.read_csv("./data/DISAPERE-main/SELFExtractedData/disapere_polarity_test.csv")

# Convert to HuggingFace Datasets
train_ds = Dataset.from_pandas(train_df)
dev_ds = Dataset.from_pandas(dev_df)
test_ds = Dataset.from_pandas(test_df)

print(train_df["label"].value_counts().sort_index())

# Compute inverse-frequency class weights, total / (n_classes * count),
# equivalent to sklearn's "balanced" scheme, to counter label imbalance.
label_counts = train_df["label"].value_counts().sort_index()
total_samples = len(train_df)
class_weights = torch.tensor(
    [total_samples / (len(label_counts) * count) for count in label_counts.values],
    dtype=torch.float32,
)
print("Class weights:", class_weights)


class WeightedTrainer(Trainer):
    """Trainer that applies the class weights to the cross-entropy loss."""

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        logits = outputs.logits
        weights = class_weights.to(logits.device)
        loss = F.cross_entropy(logits, labels, weight=weights)
        return (loss, outputs) if return_outputs else loss


# Tokenize
model_name = "allenai/scibert_scivocab_uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)


def tokenize(batch):
    return tokenizer(batch["text"], padding="max_length", truncation=True, max_length=256)


train_ds = train_ds.map(tokenize, batched=True)
dev_ds = dev_ds.map(tokenize, batched=True)
test_ds = test_ds.map(tokenize, batched=True)

# Set format for PyTorch (the data collator renames "label" to "labels" for the model)
train_ds.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])
dev_ds.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])
test_ds.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])

# Load model
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)


# Metrics
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="macro")
    acc = accuracy_score(labels, preds)
    return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}


# Training arguments
args = TrainingArguments(
    output_dir="./scibert/scibert_polarity/checkpoints",
    eval_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=16,
    num_train_epochs=4,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model="f1",  # selects the checkpoint with the best eval_f1
)

# Trainer
trainer = WeightedTrainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=dev_ds,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)

# Train
trainer.train()

# Evaluate on test
results = trainer.evaluate(test_ds)
print("Test results:", results)

# Save the model and tokenizer
model.save_pretrained("./scibert/scibert_polarity/final_model")
tokenizer.save_pretrained("./scibert/scibert_polarity/final_model")
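
# Reloading the saved model for inference: a minimal sketch to sanity-check the
# save/load round trip. The example sentence is hypothetical, and the mapping of
# label ids 0-2 to polarity classes is assumed to follow the training CSVs.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="./scibert/scibert_polarity/final_model",
    device=0 if torch.cuda.is_available() else -1,
)
print(classifier("The experiments are thorough and the results are convincing."))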