HemanM committed ccff75d (verified) · 1 parent: ca3a588

Update evo_model.py

Files changed (1)
  1. evo_model.py +41 -0
evo_model.py CHANGED
@@ -0,0 +1,41 @@
+ # evo_model.py — Defines EvoDecoderModel used in inference and training
+ import torch
+ import torch.nn as nn
+ import math
+
+ class PositionalEncoding(nn.Module):
+     def __init__(self, d_model, max_len=128):
+         super().__init__()
+         # Precompute the sinusoidal encoding table once, up to max_len positions
+         pe = torch.zeros(max_len, d_model)
+         position = torch.arange(0, max_len).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
+         pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
+         pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions
+         pe = pe.unsqueeze(0)  # [1, max_len, d_model]
+         # Buffer: moves with the module across devices but is not a learnable parameter
+         self.register_buffer('pe', pe)
+
+     def forward(self, x):
+         # Add the encodings for the first x.size(1) positions
+         return x + self.pe[:, :x.size(1)]
+
+ class EvoDecoderModel(nn.Module):
+     def __init__(self, vocab_size, d_model=512, nhead=8, num_layers=6, dim_ff=2048, max_len=128):
+         super().__init__()
+         self.token_embed = nn.Embedding(vocab_size, d_model)
+         self.pos_encoder = PositionalEncoding(d_model, max_len)
+
+         decoder_layer = nn.TransformerDecoderLayer(d_model, nhead, dim_ff, batch_first=True)
+         self.decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)
+
+         self.lm_head = nn.Linear(d_model, vocab_size)
+
+     def generate_square_subsequent_mask(self, sz):
+         # Upper-triangular -inf mask: position i may only attend to positions <= i
+         return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
+
+     def forward(self, input_ids):
+         x = self.token_embed(input_ids)
+         x = self.pos_encoder(x)
+
+         tgt_mask = self.generate_square_subsequent_mask(x.size(1)).to(x.device)
+         # Decoder-only setup: the sequence is passed as both tgt and memory.
+         # memory_mask=tgt_mask keeps the memory (cross-)attention causal as well;
+         # with tgt_mask alone, future tokens would leak through the memory path.
+         x = self.decoder(x, x, tgt_mask=tgt_mask, memory_mask=tgt_mask)
+
+         return self.lm_head(x)
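
For context, a minimal usage sketch showing a forward pass and a next-token training step with this model. The vocab_size of 1000, the dummy batch, and the shifted-target cross-entropy objective are illustrative assumptions, not part of this commit:

# Hypothetical usage sketch (not part of the commit): forward pass + next-token loss
import torch
import torch.nn as nn
from evo_model import EvoDecoderModel

model = EvoDecoderModel(vocab_size=1000)       # vocab_size=1000 is an assumed value
input_ids = torch.randint(0, 1000, (2, 16))    # dummy batch: [batch=2, seq_len=16]

logits = model(input_ids)                      # [2, 16, 1000]

# Standard next-token objective: targets are the inputs shifted left by one
loss = nn.CrossEntropyLoss()(
    logits[:, :-1].reshape(-1, logits.size(-1)),  # predictions for positions 0..14
    input_ids[:, 1:].reshape(-1),                 # targets for positions 1..15
)
loss.backward()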