# ✅ model.py
import torch
import torch.nn as nn


class EvoTransformerBlock(nn.Module):
    """A single Transformer encoder layer wrapped as a reusable block."""

    def __init__(self, d_model, nhead, dim_feedforward):
        super().__init__()
        self.layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            batch_first=True,  # inputs are (batch, seq_len, d_model)
        )

    def forward(self, x):
        return self.layer(x)


class EvoTransformer(nn.Module):
    """Token embedding -> stacked encoder blocks -> mean pooling -> 2-way classifier."""

    def __init__(self, vocab_size, d_model=256, nhead=4, dim_feedforward=512, num_layers=4):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        # Stack of identical encoder blocks; nn.Sequential works here because each
        # block takes and returns a single (batch, seq_len, d_model) tensor.
        self.encoder = nn.Sequential(*[
            EvoTransformerBlock(d_model, nhead, dim_feedforward)
            for _ in range(num_layers)
        ])
        # Average over the sequence dimension to get one vector per example.
        self.pooler = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, 2),  # two output logits (binary classification)
        )

    def forward(self, x):
        x = self.embedding(x)                           # (batch, seq_len) -> (batch, seq_len, d_model)
        x = self.encoder(x)                             # (batch, seq_len, d_model)
        x = self.pooler(x.transpose(1, 2)).squeeze(-1)  # pool over seq_len -> (batch, d_model)
        return self.classifier(x)                       # (batch, 2)
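

# A minimal smoke test for the model above (a sketch, not part of the original
# file). The vocab size, batch size, and sequence length are illustrative
# placeholders; any integer token ids in [0, vocab_size) behave the same way.
if __name__ == "__main__":
    model = EvoTransformer(vocab_size=30522)      # assumed vocab size, for illustration only
    tokens = torch.randint(0, 30522, (8, 128))    # dummy batch: (batch=8, seq_len=128) token ids
    logits = model(tokens)                        # embed -> encoder stack -> pool -> classifier
    print(logits.shape)                           # torch.Size([8, 2]): one logit pair per example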