import streamlit as st
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.legacy import data, datasets
import matplotlib.pyplot as plt
import pandas as pd
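# NOTE: this script targets the legacy torchtext API (torchtext >= 0.9, < 0.12;
# torchtext.legacy was removed in 0.12). The spaCy tokenizer below also assumes
# the en_core_web_sm model is installed:
#   python -m spacy download en_core_web_sm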

# Define the RNN model
class RNN(nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, output_size, n_layers, dropout):
        super(RNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # inter-layer dropout in nn.RNN only applies when n_layers > 1
        self.rnn = nn.RNN(embed_size, hidden_size, n_layers,
                          dropout=dropout if n_layers > 1 else 0.0,
                          batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [batch, seq_len] token indices (the Field below uses batch_first=True)
        x = self.dropout(self.embedding(x))
        # build h0 from the stored sizes rather than reaching for globals
        h0 = torch.zeros(self.n_layers, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.rnn(x, h0)
        # classify from the hidden state at the final time step
        out = self.fc(out[:, -1, :])
        return out
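# NOTE: a plain nn.RNN is prone to vanishing gradients on longer sequences.
# nn.LSTM or nn.GRU are near drop-in replacements here, e.g. (a sketch):
#   self.rnn = nn.LSTM(embed_size, hidden_size, n_layers, dropout=dropout, batch_first=True)
# An LSTM additionally carries a cell state, so forward() would pass an
# (h0, c0) tuple instead of h0 alone; a GRU keeps the same interface as nn.RNN.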

# Load the data
@st.cache(allow_output_mutation=True)  # deprecated in newer Streamlit; use st.cache_resource there
def load_data():
    # batch_first=True so batches arrive as [batch, seq_len], matching the RNN above
    TEXT = data.Field(tokenize='spacy', tokenizer_language='en_core_web_sm',
                      include_lengths=True, batch_first=True)
    LABEL = data.LabelField(dtype=torch.long)
    # assumes the legacy Field-based splits interface is available for AG_NEWS in this torchtext release
    train_data, test_data = datasets.AG_NEWS.splits(TEXT, LABEL)
    train_data, valid_data = train_data.split(split_ratio=0.8)

    TEXT.build_vocab(train_data, max_size=25000, vectors="glove.6B.100d",
                     unk_init=torch.Tensor.normal_)
    LABEL.build_vocab(train_data)

    BATCH_SIZE = 64

    # sort_within_batch needs a sort_key; bucket batches by text length
    train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=BATCH_SIZE,
        sort_key=lambda x: len(x.text),
        sort_within_batch=True,
        device=device)

    return TEXT, LABEL, train_iterator, valid_iterator, test_iterator
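# NOTE: build_vocab(..., vectors="glove.6B.100d") only attaches the pretrained
# vectors to the vocab; they are not used unless copied into the model's
# embedding layer, e.g. (a sketch, valid only when embed_size == 100):
#   net.embedding.weight.data.copy_(TEXT.vocab.vectors)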

# Train the network
def train_network(net, iterator, optimizer, criterion, epochs):
    loss_values = []
    for epoch in range(epochs):
        epoch_loss = 0
        net.train()
        for batch in iterator:
            optimizer.zero_grad()
            text, text_lengths = batch.text  # include_lengths=True yields (tokens, lengths)
            predictions = net(text)  # [batch, num_classes]; squeeze(1) was a no-op here
            loss = criterion(predictions, batch.label)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= len(iterator)
        loss_values.append(epoch_loss)
        st.write(f'Epoch {epoch + 1}: loss {epoch_loss:.3f}')
    st.write('Finished Training')
    return loss_values
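# NOTE: vanilla RNNs can also hit exploding gradients; if training diverges,
# clipping before optimizer.step() is a common remedy, e.g.:
#   torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)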

# Evaluate the network
def evaluate_network(net, iterator, criterion):
    epoch_loss = 0
    correct = 0
    total = 0
    all_labels = []
    all_predictions = []
    net.eval()
    with torch.no_grad():
        for batch in iterator:
            text, text_lengths = batch.text
            predictions = net(text)  # [batch, num_classes]
            loss = criterion(predictions, batch.label)
            epoch_loss += loss.item()
            _, predicted = torch.max(predictions, 1)
            correct += (predicted == batch.label).sum().item()
            total += len(batch.label)
            all_labels.extend(batch.label.cpu().numpy())
            all_predictions.extend(predicted.cpu().numpy())
    accuracy = 100 * correct / total
    st.write(f'Loss: {epoch_loss / len(iterator):.4f}, Accuracy: {accuracy:.2f}%')
    return accuracy, all_labels, all_predictions
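# The returned labels/predictions make a quick per-class breakdown easy,
# e.g. a confusion matrix via pandas (a sketch):
#   cm = pd.crosstab(pd.Series(all_labels, name='true'),
#                    pd.Series(all_predictions, name='pred'))
#   st.dataframe(cm)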

# Load data
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
TEXT, LABEL, train_iterator, valid_iterator, test_iterator = load_data()

# Streamlit interface
st.title("RNN for Text Classification on AG News Dataset")

st.write("""
This application demonstrates how to build and train a Recurrent Neural Network (RNN) for text classification using the AG News dataset. You can adjust hyperparameters, visualize sample data, and see the model's performance.
""")

# Sidebar for input parameters
st.sidebar.header('Model Hyperparameters')
embed_size = st.sidebar.slider('Embedding Size', 50, 300, 100)
hidden_size = st.sidebar.slider('Hidden Size', 50, 300, 256)
n_layers = st.sidebar.slider('Number of RNN Layers', 1, 3, 2)
dropout = st.sidebar.slider('Dropout', 0.0, 0.5, 0.2, step=0.1)
learning_rate = st.sidebar.slider('Learning Rate', 0.001, 0.1, 0.01, step=0.001)
epochs = st.sidebar.slider('Epochs', 1, 20, 5)

# Create the network
vocab_size = len(TEXT.vocab)
output_size = len(LABEL.vocab)
net = RNN(vocab_size, embed_size, hidden_size, output_size, n_layers, dropout).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate)
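# NOTE: Streamlit reruns this script top to bottom on every interaction, so the
# model and optimizer above are re-initialized whenever a slider changes; the
# trained weights survive only because they are stashed in st.session_state below.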

# Add vertical space (bare newlines are collapsed by the markdown renderer)
st.markdown('<br>' * 10, unsafe_allow_html=True)

# Train the network
if st.sidebar.button('Train Network'):
    loss_values = train_network(net, train_iterator, optimizer, criterion, epochs)
    
    # Plot the loss values on an explicit figure
    # (st.pyplot(plt) relies on matplotlib's deprecated global-figure API)
    fig = plt.figure(figsize=(10, 5))
    plt.plot(range(1, epochs + 1), loss_values, marker='o')
    plt.title('Training Loss Over Epochs')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(True)
    st.pyplot(fig)
    
    # Store the trained model in the session state
    st.session_state['trained_model'] = net

# Test the network
if 'trained_model' in st.session_state and st.sidebar.button('Test Network'):
    accuracy, all_labels, all_predictions = evaluate_network(st.session_state['trained_model'], test_iterator, criterion)
    st.write(f'Test Accuracy: {accuracy:.2f}%')
    
    # Display results in a table
    st.write('Ground Truth vs Predicted')
    results = pd.DataFrame({
        'Ground Truth': [LABEL.vocab.itos[label] for label in all_labels],
        'Predicted': [LABEL.vocab.itos[label] for label in all_predictions]
    })
    st.table(results.head(50))  # Display first 50 results for brevity

# Visualize some test results
def visualize_text_predictions(iterator, net):
    net.eval()
    samples = []
    with torch.no_grad():
        for batch in iterator:
            text, text_lengths = batch.text
            predictions = torch.max(net(text), 1)[1]
            samples.extend(zip(text.cpu(), batch.label.cpu(), predictions.cpu()))
            if len(samples) >= 10:
                break
    return samples[:10]

if 'trained_model' in st.session_state and st.sidebar.button('Show Test Results'):
    samples = visualize_text_predictions(test_iterator, st.session_state['trained_model'])
    st.write('Ground Truth vs Predicted for Sample Texts')
    for i, (text, true_label, predicted) in enumerate(samples):
        st.write(f'Sample {i+1}')
        # drop padding tokens so the displayed sample reads cleanly
        st.text(' '.join(TEXT.vocab.itos[token] for token in text if TEXT.vocab.itos[token] != '<pad>'))
        st.write(f'Ground Truth: {LABEL.vocab.itos[true_label.item()]}, Predicted: {LABEL.vocab.itos[predicted.item()]}')
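
# Run the app with:
#   streamlit run <this_file>.py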