# ✅ model.py
import torch
import torch.nn as nn


class EvoTransformerBlock(nn.Module):
    """A single Transformer encoder block (thin wrapper around nn.TransformerEncoderLayer)."""

    def __init__(self, d_model, nhead, dim_feedforward):
        super().__init__()
        self.layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            batch_first=True,
        )

    def forward(self, x):
        return self.layer(x)


class EvoTransformer(nn.Module):
    """Token embedding -> stacked encoder blocks -> mean pooling -> 2-way classifier head."""

    def __init__(self, vocab_size, d_model=256, nhead=4, dim_feedforward=512, num_layers=4):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.encoder = nn.Sequential(*[
            EvoTransformerBlock(d_model, nhead, dim_feedforward) for _ in range(num_layers)
        ])
        self.pooler = nn.AdaptiveAvgPool1d(1)  # averages over the sequence dimension
        self.classifier = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, 2),
        )

    def forward(self, x):
        # x: (batch, seq_len) token ids
        x = self.embedding(x)                           # (batch, seq_len, d_model)
        x = self.encoder(x)                             # (batch, seq_len, d_model)
        x = self.pooler(x.transpose(1, 2)).squeeze(-1)  # (batch, d_model)
        return self.classifier(x)                       # (batch, 2) class logits
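

# A minimal usage sketch: vocab_size=1000, batch size 8, and sequence length 32
# are illustrative assumptions (not specified by the file). It just confirms that
# a forward pass on random token ids yields (batch, 2) logits.
if __name__ == "__main__":
    model = EvoTransformer(vocab_size=1000)
    dummy_ids = torch.randint(0, 1000, (8, 32))  # (batch, seq_len) random token ids
    logits = model(dummy_ids)
    print(logits.shape)  # expected: torch.Size([8, 2])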