HemanM committed on
Commit
7eed6c3
·
verified ·
1 Parent(s): 1e1224e

Update evo_model.py

Browse files
Files changed (1) hide show
  1. evo_model.py +29 -4
evo_model.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import torch
2
  import torch.nn as nn
3
  import torch.nn.functional as F
@@ -7,6 +9,7 @@ class EvoEncoder(nn.Module):
7
  super().__init__()
8
  self.embedding = nn.Embedding(30522, d_model)
9
  self.memory_enabled = memory_enabled
 
10
  if memory_enabled:
11
  self.memory_proj = nn.Linear(d_model, d_model)
12
  self.memory_token = nn.Parameter(torch.zeros(1, 1, d_model))
@@ -31,14 +34,36 @@ class EvoEncoder(nn.Module):
31
  x = self.transformer(x)
32
  return x
33
 
 
34
  class EvoTransformerV22(nn.Module):
35
- def __init__(self):
36
  super().__init__()
37
- self.encoder = EvoEncoder(d_model=512, num_heads=8, ffn_dim=1024, num_layers=6, memory_enabled=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  self.pool = nn.AdaptiveAvgPool1d(1)
39
- self.classifier = nn.Linear(512, 1) # ✅ Matches checkpoint
40
 
41
  def forward(self, input_ids):
42
  x = self.encoder(input_ids)
43
  x = self.pool(x.transpose(1, 2)).squeeze(-1)
44
- return self.classifier(x) # Output: [batch_size, 1]
 
 
 
 
 
 
1
+ # evo_model.py
2
+
3
  import torch
4
  import torch.nn as nn
5
  import torch.nn.functional as F
 
9
  super().__init__()
10
  self.embedding = nn.Embedding(30522, d_model)
11
  self.memory_enabled = memory_enabled
12
+
13
  if memory_enabled:
14
  self.memory_proj = nn.Linear(d_model, d_model)
15
  self.memory_token = nn.Parameter(torch.zeros(1, 1, d_model))
 
34
  x = self.transformer(x)
35
  return x
36
 
37
+
38
class EvoTransformerV22(nn.Module):
    """Evo transformer scorer: encodes token ids, mean-pools over the
    sequence, and projects to a single logit per example.

    Wraps an ``EvoEncoder`` (defined earlier in this file) whose
    architecture is controlled by a small config dict, so the model can be
    rebuilt with mutated hyperparameters during evolution / feedback
    retraining.
    """

    # Architecture used when no config is supplied — chosen to match the
    # released checkpoint (see the 512->1 classifier head below).
    _DEFAULT_CONFIG = {
        "num_layers": 6,
        "ffn_dim": 1024,
        "num_heads": 8,
        "memory_enabled": True,
    }

    def __init__(self, config=None):
        """Build the model.

        Args:
            config: Optional dict with any of the keys ``"num_layers"``,
                ``"ffn_dim"``, ``"num_heads"``, ``"memory_enabled"``.
                Missing keys fall back to the checkpoint defaults, so a
                partial config (e.g. ``{"num_heads": 4}``) is accepted
                instead of raising ``KeyError``. ``None`` means "use all
                defaults".
        """
        super().__init__()

        # Merge caller overrides onto the defaults rather than requiring a
        # complete dict; this generalizes the original behavior (None or a
        # full dict behave exactly as before).
        cfg = {**self._DEFAULT_CONFIG, **(config or {})}

        self.encoder = EvoEncoder(
            d_model=512,
            num_heads=cfg["num_heads"],
            ffn_dim=cfg["ffn_dim"],
            num_layers=cfg["num_layers"],
            memory_enabled=cfg["memory_enabled"],
        )
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Linear(512, 1)  # matches checkpoint head

    def forward(self, input_ids):
        """Score a batch of token-id sequences.

        Args:
            input_ids: Integer tensor of token ids; presumably shaped
                (batch, seq) — the transpose below implies the encoder
                returns a 3-D (batch, seq, 512) tensor. TODO confirm
                against EvoEncoder.

        Returns:
            Tensor of shape (batch, 1): one unnormalized logit per example.
        """
        x = self.encoder(input_ids)
        # AdaptiveAvgPool1d expects (N, C, L), so move features to dim 1,
        # mean-pool the sequence axis, then drop the singleton length dim.
        x = self.pool(x.transpose(1, 2)).squeeze(-1)
        return self.classifier(x)
65
+
66
+
67
def build_model_from_config(config):
    """Factory for the mutation / feedback-retrain pipeline.

    Instantiates an ``EvoTransformerV22`` from ``config`` (a dict of
    architecture hyperparameters, or ``None`` for the defaults) so models
    can be loaded dynamically without touching the class directly.
    """
    model = EvoTransformerV22(config)
    return model