Create run.py
run.py
ADDED
@@ -0,0 +1,192 @@
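"""Training entry point for the deep-RL vehicle routing model.

Reads hyperparameters from params.json, trains the Actor with a
policy-gradient (REINFORCE-style) loss against a greedy-rollout baseline,
and periodically compares its tour costs against a nearest-neighbor
heuristic and the Google solver (google_solver.google_model).

Run from the repository root so the local imports and params.json resolve:
    python run.py
"""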
import os
import sys
import json
from datetime import datetime

import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader

from nets.model import Model
from Actor.actor import Actor
from dataloader import VRP_Dataset
from google_solver.google_model import evaluate_google_model

# Load params
with open('params.json', 'r') as f:
    params = json.load(f)

# Settings
device = params['device']
run_tests = params['run_tests']
save_results = params['save_results']
dataset_path = params['dataset_path']

# Create a timestamped results directory
now = datetime.now()
dt_string = now.strftime("%d-%m-%y %H-%M-%S")
print('Current time: ' + dt_string + '\n')

if save_results:
    results_dir = 'results'
    os.makedirs(results_dir, exist_ok=True)
    experiment_path = os.path.join(results_dir, dt_string)
    os.makedirs(experiment_path, exist_ok=True)

    # Truncate the result logs and snapshot the config used for this run.
    open(os.path.join(experiment_path, 'train_results.txt'), 'w').close()
    open(os.path.join(experiment_path, 'test_results.txt'), 'w').close()
    with open(os.path.join(experiment_path, 'params.json'), 'w') as f:
        json.dump(params, f)
    os.mkdir(os.path.join(experiment_path, 'problem_instances'))

# Dataset sizes
train_dataset_size = params['train_dataset_size']
validation_dataset_size = params['validation_dataset_size']
baseline_dataset_size = params['baseline_dataset_size']

# Problem config
num_nodes = params['num_nodes']
num_depots = params['num_depots']
embedding_size = params['embedding_size']
sample_size = params['sample_size']
gradient_clipping = params['gradient_clipping']
num_neighbors_encoder = params['num_neighbors_encoder']
num_neighbors_action = params['num_neighbors_action']
num_movers = params['num_movers']
learning_rate = params['learning_rate']
batch_size = params['batch_size']
test_batch_size = params['test_batch_size']
baseline_update_period = params['baseline_update_period']
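
# The loop below additionally reads params['overfit_test'] and
# params['num_epochs']. A minimal params.json covering every key this script
# uses might look like the following sketch (values are illustrative
# placeholders, not tuned settings):
#
# {
#     "device": "cpu",
#     "run_tests": true,
#     "save_results": true,
#     "dataset_path": "data/",
#     "train_dataset_size": 1000,
#     "validation_dataset_size": 100,
#     "baseline_dataset_size": 1000,
#     "num_nodes": 20,
#     "num_depots": 1,
#     "embedding_size": 128,
#     "sample_size": 4,
#     "gradient_clipping": true,
#     "num_neighbors_encoder": 5,
#     "num_neighbors_action": 5,
#     "num_movers": 2,
#     "learning_rate": 0.0001,
#     "batch_size": 32,
#     "test_batch_size": 32,
#     "baseline_update_period": 5,
#     "overfit_test": false,
#     "num_epochs": 100
# }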

# Datasets
validation_dataset = VRP_Dataset(validation_dataset_size, num_nodes, num_depots, dataset_path, device)
baseline_dataset = VRP_Dataset(train_dataset_size, num_nodes, num_depots, dataset_path, device)

# For an overfitting sanity check, train, baseline, and validation share one dataset.
if params['overfit_test']:
    train_dataset = VRP_Dataset(train_dataset_size, num_nodes, num_depots, dataset_path, device)
    baseline_dataset = train_dataset
    validation_dataset = train_dataset

# Evaluate the Google solver on the validation set for reference scores
google_scores = evaluate_google_model(validation_dataset)
tot_google_scores = google_scores.sum().item()
input_size = validation_dataset.model_input_length()

# Models
model = Model(input_size=input_size, embedding_size=embedding_size)
actor = Actor(model=model, num_movers=num_movers,
              num_neighbors_encoder=num_neighbors_encoder,
              num_neighbors_action=num_neighbors_action,
              device=device, normalize=False)
actor.train_mode()

# The baseline actor is a copy of the actor that decodes greedily.
baseline_model = Model(input_size=input_size, embedding_size=embedding_size)
baseline_actor = Actor(model=baseline_model, num_movers=num_movers,
                       num_neighbors_encoder=num_neighbors_encoder,
                       num_neighbors_action=num_neighbors_action,
                       device=device, normalize=False)
baseline_actor.greedy_search()
baseline_actor.load_state_dict(actor.state_dict())
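
# Model-free nearest-neighbor heuristic; its costs are only used as a
# normalizer for the reported ratios.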
nn_actor = Actor(model=None, num_movers=1, num_neighbors_action=1, device=device)
nn_actor.nearest_neighbors()

optimizer = optim.Adam(params=actor.parameters(), lr=learning_rate)

# Best ratios observed so far (100 is just a large sentinel value).
train_batch_record = 100
validation_record = 100
baseline_record = None

# Training loop
for epoch in range(params['num_epochs']):
    # Resample a fresh training dataset each epoch, unless overfitting on purpose.
    if not params['overfit_test']:
        train_dataset = VRP_Dataset(train_dataset_size, num_nodes, num_depots, dataset_path, device)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, collate_fn=train_dataset.collate)
    for i, batch in enumerate(train_dataloader):
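        # Reference costs for this batch, computed without gradients:
        # a nearest-neighbor rollout and a greedy rollout of the baseline.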
        with torch.no_grad():
            nn_actor.nearest_neighbors()
            nn_output = nn_actor(batch)
            tot_nn_cost = nn_output['total_time'].sum().item()

            baseline_actor.greedy_search()
            baseline_cost = baseline_actor(batch)['total_time']

        # Sample rollouts from the actor with gradients enabled.
        actor.train_mode()
        actor_output = actor(batch)
        actor_cost, log_probs = actor_output['total_time'], actor_output['log_probs']
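
        # REINFORCE with a greedy-rollout baseline: the advantage
        # (actor_cost - baseline_cost) is detached, so gradients flow only
        # through the log-probabilities of the sampled actions.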
        loss = ((actor_cost - baseline_cost).detach() * log_probs).mean()

        optimizer.zero_grad()
        loss.backward()
        if gradient_clipping:
            for group in optimizer.param_groups:
                clip_grad_norm_(group['params'], 1, norm_type=2)
        optimizer.step()

        tot_actor_cost = actor_cost.sum().item()
        tot_baseline_cost = baseline_cost.sum().item()

        # Track actor cost relative to the nearest-neighbor and baseline rollouts.
        actor_nn_ratio = tot_actor_cost / tot_nn_cost
        actor_baseline_ratio = tot_actor_cost / tot_baseline_cost
        train_batch_record = min(train_batch_record, actor_nn_ratio)

        result = f"{epoch}, {i}, {actor_nn_ratio:.4f}, {actor_baseline_ratio:.4f}, {train_batch_record:.4f}"
        print(result, flush=True)
        if save_results:
            with open(os.path.join(experiment_path, 'train_results.txt'), 'a') as f:
                f.write(result + '\n')
        del batch

    # Validation: every 5 epochs, score the current policy greedily on the
    # baseline dataset.
    if epoch % 5 == 0:
        baseline_dataloader = DataLoader(baseline_dataset, batch_size=batch_size, collate_fn=baseline_dataset.collate)
        tot_cost = []
        for batch in baseline_dataloader:
            with torch.no_grad():
                actor.greedy_search()
                actor_output = actor(batch)
                cost = actor_output['total_time']
                tot_cost.append(cost)
            del batch

        tot_cost = torch.cat(tot_cost, dim=0)
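        # Adopt the current policy as the new baseline when it beats the
        # previous record on more than 90% of the instances.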
        if baseline_record is None or (tot_cost < baseline_record).float().mean().item() > 0.9:
            baseline_record = tot_cost
            baseline_actor.load_state_dict(actor.state_dict())
            print('\nNew baseline record\n')

    # Test every 10 epochs: beam-search evaluation on the validation set
    if (epoch % 10 == 0) and run_tests:
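        # Beam search expands each instance into roughly sample_size**2
        # candidates, so the batch is shrunk to keep memory use comparable.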
        b = max(int(batch_size // sample_size**2), 1)
        validation_dataloader = DataLoader(validation_dataset, batch_size=b, collate_fn=validation_dataset.collate)

        tot_cost = 0
        tot_nn_cost = 0
        for batch in validation_dataloader:
            with torch.no_grad():
                actor.beam_search(sample_size)
                actor_output = actor(batch)
                cost = actor_output['total_time']

                nn_actor.nearest_neighbors()
                nn_output = nn_actor(batch)
                nn_cost = nn_output['total_time']

            tot_cost += cost.sum().item()
            tot_nn_cost += nn_cost.sum().item()

        ratio = tot_cost / tot_nn_cost
        validation_record = min(validation_record, ratio)

        # Checkpoint the model and optimizer.
        if save_results:
            torch.save(actor.state_dict(), os.path.join(experiment_path, 'model_state_dict.pt'))
            torch.save(optimizer.state_dict(), os.path.join(experiment_path, 'optimizer_state_dict.pt'))

        actor_google_ratio = tot_cost / tot_google_scores
        print(f"\nTest results:\nActor/Google: {actor_google_ratio:.4f}, Actor/NN: {ratio:.4f}, Best NN Ratio: {validation_record:.4f}\n")
        if save_results:
            with open(os.path.join(experiment_path, 'test_results.txt'), 'a') as f:
                f.write(f"{epoch}, {actor_google_ratio:.4f}, {ratio:.4f}, {validation_record:.4f}\n")