# EvoPlatform / evo_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

class EvoEncoder(nn.Module):
    """Transformer encoder with an optional learnable memory token prepended to each sequence."""

    def __init__(self, d_model=512, num_heads=8, ffn_dim=1024, num_layers=6, memory_enabled=True):
        super().__init__()
        self.embedding = nn.Embedding(30522, d_model)  # 30522 = BERT WordPiece vocabulary size
        self.memory_enabled = memory_enabled
        if memory_enabled:
            self.memory_proj = nn.Linear(d_model, d_model)  # part of the memory path; not applied in forward()
            self.memory_token = nn.Parameter(torch.zeros(1, 1, d_model))  # learnable token prepended to every sequence
        else:
            self.memory_token = None
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=num_heads,
            dim_feedforward=ffn_dim,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

    def forward(self, input_ids):
        x = self.embedding(input_ids)  # [batch, seq_len, d_model]
        if self.memory_enabled and self.memory_token is not None:
            mem = self.memory_token.expand(x.size(0), 1, x.size(2))
            x = torch.cat([mem, x], dim=1)  # prepend memory token -> [batch, seq_len + 1, d_model]
        x = self.transformer(x)
        return x
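
# Illustrative shape check (not part of the original file): confirms that the
# memory token adds one position to the encoder output. The random IDs stand in
# for a BERT-style tokenizer's output.
def _encoder_shape_demo():
    enc = EvoEncoder()
    dummy_ids = torch.randint(0, 30522, (2, 16))  # [batch=2, seq_len=16]
    out = enc(dummy_ids)
    assert out.shape == (2, 17, 512)  # seq_len + 1 memory token, d_model = 512
    return out.shape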

class EvoTransformerV22(nn.Module):
    """EvoEncoder backbone + mean pooling over the sequence + single-logit linear head."""

    def __init__(self):
        super().__init__()
        self.encoder = EvoEncoder(d_model=512, num_heads=8, ffn_dim=1024, num_layers=6, memory_enabled=True)
        self.pool = nn.AdaptiveAvgPool1d(1)  # mean-pools over the sequence dimension
        self.classifier = nn.Linear(512, 1)  # single-logit head; matches the checkpoint

    def forward(self, input_ids):
        x = self.encoder(input_ids)  # [batch, seq_len + 1, 512]
        x = self.pool(x.transpose(1, 2)).squeeze(-1)  # [batch, 512]
        return self.classifier(x)  # [batch, 1]
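
# Minimal usage sketch (assumption: input_ids come from a BERT-style tokenizer
# with vocab size 30522). Runs a forward pass and turns the single logit per
# example into a binary probability.
if __name__ == "__main__":
    model = EvoTransformerV22()
    dummy_ids = torch.randint(0, 30522, (4, 32))  # [batch=4, seq_len=32]
    logits = model(dummy_ids)  # [4, 1]
    probs = torch.sigmoid(logits).squeeze(-1)  # [4] binary probabilities
    print(logits.shape, probs)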