"""EvoTransformerV22: a compact Transformer encoder with an optional learnable
memory token, followed by mean pooling and a 2-way classification head."""

import torch
import torch.nn as nn

class EvoEncoder(nn.Module):
    """Transformer encoder that optionally prepends a learnable memory token to each sequence."""

    def __init__(self, d_model=384, num_heads=6, ffn_dim=1024, num_layers=6,
                 vocab_size=30522, memory_enabled=True):
        super().__init__()
        # Default vocab_size of 30522 matches the BERT WordPiece vocabulary (bert-base-uncased).
        self.embedding = nn.Embedding(vocab_size, d_model)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=num_heads,
            dim_feedforward=ffn_dim,
            batch_first=True,  # inputs are [batch, seq, dim]
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.memory_enabled = memory_enabled
        if memory_enabled:
            # Learnable memory token, broadcast across the batch in forward().
            self.memory_token = nn.Parameter(torch.zeros(1, 1, d_model))
            # Registered but not used in forward(); kept so checkpoints keyed on it still load.
            self.memory_proj = nn.Linear(d_model, d_model)

    def forward(self, input_ids):
        x = self.embedding(input_ids)  # [B, T, D]
        if self.memory_enabled:
            # Prepend the shared memory token to every sequence in the batch.
            mem = self.memory_token.expand(x.size(0), -1, -1)  # [B, 1, D]
            x = torch.cat([mem, x], dim=1)  # [B, T+1, D]
        return self.transformer(x)

class EvoTransformerV22(nn.Module):
    """Sequence classifier: EvoEncoder -> mean pooling over tokens -> linear head (2 classes)."""

    def __init__(self):
        super().__init__()
        self.encoder = EvoEncoder(
            d_model=384,
            num_heads=6,
            ffn_dim=1024,
            num_layers=6,
            memory_enabled=True,
        )
        # Average-pool over the sequence (time) dimension.
        self.pooling = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Linear(384, 2)

    def forward(self, input_ids):
        x = self.encoder(input_ids)      # [B, T(+1), D]
        x = x.permute(0, 2, 1)           # [B, D, T] for 1-D pooling
        x = self.pooling(x).squeeze(-1)  # [B, D]
        return self.classifier(x)        # [B, 2] logits
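
# --- Usage sketch (not part of the original file) ---
# A minimal smoke test, assuming token IDs in the BERT WordPiece range
# (< 30522); the batch size and sequence length below are arbitrary.
if __name__ == "__main__":
    model = EvoTransformerV22()
    dummy_ids = torch.randint(0, 30522, (2, 16))  # [B=2, T=16] random token IDs
    logits = model(dummy_ids)
    print(logits.shape)  # expected: torch.Size([2, 2])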