import torch
import numpy as np
import pandas as pd
import random
from transformers import T5Tokenizer, T5ForConditionalGeneration
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import f1_score

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def set_seed(seed_value=30):
    """Set seed for reproducibility."""
    random.seed(seed_value)  # Python random module
    np.random.seed(seed_value)  # Numpy module
    torch.manual_seed(seed_value)  # Torch
    torch.cuda.manual_seed_all(seed_value)  # if you are using multi-GPU.
    torch.backends.cudnn.deterministic = True  # CUDNN determinism
    torch.backends.cudnn.benchmark = False

# Fix all random seeds up front for reproducible runs
set_seed(30)


# Load your dataset
data_path = 'final_dataset.csv'  # Update this path to where your data is stored in Colab
data = pd.read_csv(data_path)

# Load the model and tokenizer
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small')
model.to(device)
model.eval()

# Function to generate summaries
def generate_summaries(texts, model, tokenizer, device, max_length=150):
    summaries = []
    for text in texts:
        encoded_text = tokenizer.encode("summarize: " + text, return_tensors='pt', max_length=512, truncation=True).to(device)
        summary_ids = model.generate(encoded_text, max_length=max_length, num_beams=4, early_stopping=True)
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        summaries.append(summary)
    return summaries
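
# Optional sanity check (hypothetical snippet, assuming the 'Content' column used
# below): summarize a single document first to confirm the pipeline works before
# looping over the whole dataset.
sample_summary = generate_summaries([data['Content'].iloc[0]], model, tokenizer, device)
print("Sample summary:", sample_summary[0])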

# Process the data in small chunks to keep GPU memory usage manageable
chunk_size = 10  # Adjust chunk size based on your dataset size and memory constraints
num_chunks = len(data) // chunk_size + (1 if len(data) % chunk_size != 0 else 0)

all_summaries = []
for i in range(num_chunks):
    batch = data['Content'][i * chunk_size:(i + 1) * chunk_size]
    batch_summaries = generate_summaries(batch, model, tokenizer, device)
    all_summaries.extend(batch_summaries)

# Add summaries to the DataFrame
data['Summary'] = all_summaries

# Save the DataFrame with summaries to a new CSV file
output_path = '/content/summarized_data.csv'
data.to_csv(output_path, index=False)
print(f"Data with summaries saved to {output_path}")

class PolicyDataset(Dataset):
    def __init__(self, data, tokenizer, max_input_length=512, max_target_length=128):
        self.data = data
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length
        self.max_target_length = max_target_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        policy_text = self.data.iloc[idx]['Content']
        summary_text = self.data.iloc[idx]['Summary']

        input_encoding = self.tokenizer.encode_plus(
            policy_text,
            max_length=self.max_input_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        target_encoding = self.tokenizer.encode_plus(
            summary_text,
            max_length=self.max_target_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        labels = target_encoding['input_ids'].squeeze()
        # Replace padding token ids with -100 so they are ignored by
        # CrossEntropyLoss (its default ignore_index) and by the masking in evaluate()
        labels[labels == self.tokenizer.pad_token_id] = -100

        return {
            'input_ids': input_encoding['input_ids'].squeeze(),
            'attention_mask': input_encoding['attention_mask'].squeeze(),
            'labels': labels,
            'labels_mask': target_encoding['attention_mask'].squeeze()
        }

data = pd.read_csv(output_path)  # Re-load the summarized data written above
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small').to(device)

# Prepare data splits and loaders
train_data, eval_data = train_test_split(data, test_size=0.1, random_state=42)
train_dataset = PolicyDataset(train_data, tokenizer)
eval_dataset = PolicyDataset(eval_data, tokenizer)
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
eval_loader = DataLoader(eval_dataset, batch_size=16, shuffle=False)
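
# Optional sanity check (assumption: shapes follow max_input_length=512 and
# max_target_length=128 above): peek at one batch to confirm tensor shapes
# before training.
sample_batch = next(iter(train_loader))
print({name: tensor.shape for name, tensor in sample_batch.items()})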


def train(model, train_loader, optimizer, criterion, device):
    model.train()
    total_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()

        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)  # Labels should be of the shape [batch_size, seq_length]

        outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        logits = outputs.logits  # Output logits are typically [batch_size, seq_length, vocab_size]

        # Flatten logits to [batch * seq_len, vocab_size] and labels to
        # [batch * seq_len]; CrossEntropyLoss ignores the -100 padding positions
        loss = criterion(logits.view(-1, logits.size(-1)), labels.view(-1))
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    return total_loss / len(train_loader)

def evaluate(model, eval_loader, criterion, device):
    model.eval()
    total_loss = 0
    all_predictions = []
    all_labels = []
    with torch.no_grad():
        for batch in eval_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
            logits = outputs.logits

            # Calculate loss
            loss = criterion(logits.view(-1, logits.size(-1)), labels.view(-1))
            total_loss += loss.item()

            # Collect token-level predictions and labels, skipping padding (-100)
            predictions = torch.argmax(logits, dim=-1).flatten().cpu().numpy()
            labels_flat = labels.flatten().cpu().numpy()
            valid_indices = labels_flat != -100
            valid_predictions = predictions[valid_indices]
            valid_labels = labels_flat[valid_indices]
            all_predictions.extend(valid_predictions)
            all_labels.extend(valid_labels)

    f1 = f1_score(all_labels, all_predictions, average='macro')
    return total_loss / len(eval_loader), f1
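
# Note: the F1 reported here is a token-level macro F1 over vocabulary ids,
# computed only on non-padding positions. It is a rough training signal, not a
# summary-quality metric such as ROUGE.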

optimizer = optim.AdamW(model.parameters(), lr=5e-5)
criterion = nn.CrossEntropyLoss()

# Training loop
for epoch in range(5):  # Adjust the number of epochs as needed
    train_loss = train(model, train_loader, optimizer, criterion, device)
    eval_loss, eval_f1 = evaluate(model, eval_loader, criterion, device)
    print(f"Epoch {epoch + 1}: Train Loss = {train_loss:.4f}, Eval Loss = {eval_loss:.4f}, Eval F1 = {eval_f1:.4f}")


# Function to run training
def run_training(lr, batch_size, number_of_epochs=5):
    model = T5ForConditionalGeneration.from_pretrained('t5-small').to(device)
    model.train()
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    optimizer = optim.AdamW(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    # Training loop
    for epoch in range(number_of_epochs):
        train_loss = train(model, train_loader, optimizer, criterion, device)
        eval_loss, eval_f1 = evaluate(model, eval_loader, criterion, device)
        print(f"LR: {lr}, Batch size: {batch_size}, Epoch: {epoch+1}, Train Loss: {train_loss:.4f}, Eval Loss: {eval_loss:.4f}, Eval F1: {eval_f1:.4f}")

# Define hyperparameters to test
learning_rates = [1e-5, 3e-5, 5e-5]
batch_sizes = [16, 32, 64]

# Run grid search
for lr in learning_rates:
    for batch_size in batch_sizes:
        run_training(lr, batch_size, number_of_epochs=5)  # Specify the number of epochs here
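
# Note: run_training only prints metrics. To pick a winning configuration, one
# option (not implemented here) is to have it return the final eval F1 and keep
# the (lr, batch_size) pair with the highest score.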