diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fcd10ccd4707d90cfa7f98c553e556b3fcf693ca --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu120_bone_motion_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_bone_motion.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_motion_xset +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_motion_xset diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/decouple_gcn.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn 
= nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * 
D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph 
= import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..3b6e23616458855ce14f9121f33c86515bc7c17d --- /dev/null +++ 
b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a40bb22e41beb6e992d7435ddd1c8bc4c1dcb696d63f95763226fc85d45adec +size 34946665 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..a2da8b2488481e73dd87a9f3c9a0265b9ab2b531 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_motion_xset/log.txt @@ -0,0 +1,665 @@ +[ Tue Sep 13 18:24:44 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_motion_xset', 'model_saved_name': './save_models/ntu120_bone_motion_xset', 'Experiment_name': 'ntu120_bone_motion_xset', 'config': './config/ntu120_xset/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 
'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 18:24:44 2022 ] Training epoch: 1 +[ Tue Sep 13 18:25:39 2022 ] Batch(99/162) done. Loss: 3.8043 lr:0.100000 +[ Tue Sep 13 18:26:06 2022 ] Eval epoch: 1 +[ Tue Sep 13 18:29:00 2022 ] Mean test loss of 930 batches: 5.199268817901611. +[ Tue Sep 13 18:29:01 2022 ] Top1: 4.51% +[ Tue Sep 13 18:29:01 2022 ] Top5: 15.33% +[ Tue Sep 13 18:29:01 2022 ] Training epoch: 2 +[ Tue Sep 13 18:29:25 2022 ] Batch(37/162) done. Loss: 3.4914 lr:0.100000 +[ Tue Sep 13 18:30:17 2022 ] Batch(137/162) done. Loss: 3.1869 lr:0.100000 +[ Tue Sep 13 18:30:30 2022 ] Eval epoch: 2 +[ Tue Sep 13 18:33:25 2022 ] Mean test loss of 930 batches: 4.734589576721191. +[ Tue Sep 13 18:33:26 2022 ] Top1: 8.77% +[ Tue Sep 13 18:33:26 2022 ] Top5: 27.13% +[ Tue Sep 13 18:33:26 2022 ] Training epoch: 3 +[ Tue Sep 13 18:34:10 2022 ] Batch(75/162) done. Loss: 2.8329 lr:0.100000 +[ Tue Sep 13 18:34:55 2022 ] Eval epoch: 3 +[ Tue Sep 13 18:37:50 2022 ] Mean test loss of 930 batches: 4.638702392578125. +[ Tue Sep 13 18:37:51 2022 ] Top1: 11.38% +[ Tue Sep 13 18:37:51 2022 ] Top5: 32.37% +[ Tue Sep 13 18:37:51 2022 ] Training epoch: 4 +[ Tue Sep 13 18:38:02 2022 ] Batch(13/162) done. Loss: 2.4933 lr:0.100000 +[ Tue Sep 13 18:38:55 2022 ] Batch(113/162) done. Loss: 2.5471 lr:0.100000 +[ Tue Sep 13 18:39:20 2022 ] Eval epoch: 4 +[ Tue Sep 13 18:42:15 2022 ] Mean test loss of 930 batches: 4.334815979003906. +[ Tue Sep 13 18:42:15 2022 ] Top1: 16.17% +[ Tue Sep 13 18:42:16 2022 ] Top5: 38.91% +[ Tue Sep 13 18:42:16 2022 ] Training epoch: 5 +[ Tue Sep 13 18:42:47 2022 ] Batch(51/162) done. Loss: 1.9323 lr:0.100000 +[ Tue Sep 13 18:43:40 2022 ] Batch(151/162) done. Loss: 1.8449 lr:0.100000 +[ Tue Sep 13 18:43:45 2022 ] Eval epoch: 5 +[ Tue Sep 13 18:46:39 2022 ] Mean test loss of 930 batches: 4.109243869781494. 
+[ Tue Sep 13 18:46:40 2022 ] Top1: 15.85% +[ Tue Sep 13 18:46:41 2022 ] Top5: 40.64% +[ Tue Sep 13 18:46:41 2022 ] Training epoch: 6 +[ Tue Sep 13 18:47:32 2022 ] Batch(89/162) done. Loss: 1.7456 lr:0.100000 +[ Tue Sep 13 18:48:10 2022 ] Eval epoch: 6 +[ Tue Sep 13 18:51:04 2022 ] Mean test loss of 930 batches: 4.857425212860107. +[ Tue Sep 13 18:51:05 2022 ] Top1: 12.64% +[ Tue Sep 13 18:51:05 2022 ] Top5: 35.87% +[ Tue Sep 13 18:51:06 2022 ] Training epoch: 7 +[ Tue Sep 13 18:51:23 2022 ] Batch(27/162) done. Loss: 1.4699 lr:0.100000 +[ Tue Sep 13 18:52:16 2022 ] Batch(127/162) done. Loss: 1.3623 lr:0.100000 +[ Tue Sep 13 18:52:35 2022 ] Eval epoch: 7 +[ Tue Sep 13 18:55:28 2022 ] Mean test loss of 930 batches: 3.478107213973999. +[ Tue Sep 13 18:55:29 2022 ] Top1: 25.40% +[ Tue Sep 13 18:55:29 2022 ] Top5: 52.54% +[ Tue Sep 13 18:55:30 2022 ] Training epoch: 8 +[ Tue Sep 13 18:56:08 2022 ] Batch(65/162) done. Loss: 1.5486 lr:0.100000 +[ Tue Sep 13 18:56:59 2022 ] Eval epoch: 8 +[ Tue Sep 13 18:59:52 2022 ] Mean test loss of 930 batches: 4.49910306930542. +[ Tue Sep 13 18:59:53 2022 ] Top1: 15.79% +[ Tue Sep 13 18:59:53 2022 ] Top5: 42.98% +[ Tue Sep 13 18:59:53 2022 ] Training epoch: 9 +[ Tue Sep 13 18:59:58 2022 ] Batch(3/162) done. Loss: 1.4525 lr:0.100000 +[ Tue Sep 13 19:00:51 2022 ] Batch(103/162) done. Loss: 1.3946 lr:0.100000 +[ Tue Sep 13 19:01:22 2022 ] Eval epoch: 9 +[ Tue Sep 13 19:04:16 2022 ] Mean test loss of 930 batches: 4.081279277801514. +[ Tue Sep 13 19:04:17 2022 ] Top1: 18.95% +[ Tue Sep 13 19:04:17 2022 ] Top5: 51.57% +[ Tue Sep 13 19:04:17 2022 ] Training epoch: 10 +[ Tue Sep 13 19:04:43 2022 ] Batch(41/162) done. Loss: 1.3731 lr:0.100000 +[ Tue Sep 13 19:05:36 2022 ] Batch(141/162) done. Loss: 1.2596 lr:0.100000 +[ Tue Sep 13 19:05:47 2022 ] Eval epoch: 10 +[ Tue Sep 13 19:08:42 2022 ] Mean test loss of 930 batches: 4.165366172790527. 
+[ Tue Sep 13 19:08:43 2022 ] Top1: 23.41% +[ Tue Sep 13 19:08:43 2022 ] Top5: 54.06% +[ Tue Sep 13 19:08:43 2022 ] Training epoch: 11 +[ Tue Sep 13 19:09:29 2022 ] Batch(79/162) done. Loss: 1.6100 lr:0.100000 +[ Tue Sep 13 19:10:12 2022 ] Eval epoch: 11 +[ Tue Sep 13 19:13:06 2022 ] Mean test loss of 930 batches: 3.0280818939208984. +[ Tue Sep 13 19:13:06 2022 ] Top1: 33.52% +[ Tue Sep 13 19:13:07 2022 ] Top5: 66.71% +[ Tue Sep 13 19:13:07 2022 ] Training epoch: 12 +[ Tue Sep 13 19:13:20 2022 ] Batch(17/162) done. Loss: 1.0361 lr:0.100000 +[ Tue Sep 13 19:14:13 2022 ] Batch(117/162) done. Loss: 1.5490 lr:0.100000 +[ Tue Sep 13 19:14:36 2022 ] Eval epoch: 12 +[ Tue Sep 13 19:17:30 2022 ] Mean test loss of 930 batches: 3.2200257778167725. +[ Tue Sep 13 19:17:30 2022 ] Top1: 31.72% +[ Tue Sep 13 19:17:31 2022 ] Top5: 62.12% +[ Tue Sep 13 19:17:31 2022 ] Training epoch: 13 +[ Tue Sep 13 19:18:03 2022 ] Batch(55/162) done. Loss: 1.3301 lr:0.100000 +[ Tue Sep 13 19:18:56 2022 ] Batch(155/162) done. Loss: 0.8611 lr:0.100000 +[ Tue Sep 13 19:19:00 2022 ] Eval epoch: 13 +[ Tue Sep 13 19:21:53 2022 ] Mean test loss of 930 batches: 3.0923314094543457. +[ Tue Sep 13 19:21:54 2022 ] Top1: 30.39% +[ Tue Sep 13 19:21:54 2022 ] Top5: 62.35% +[ Tue Sep 13 19:21:55 2022 ] Training epoch: 14 +[ Tue Sep 13 19:22:48 2022 ] Batch(93/162) done. Loss: 1.0040 lr:0.100000 +[ Tue Sep 13 19:23:24 2022 ] Eval epoch: 14 +[ Tue Sep 13 19:26:17 2022 ] Mean test loss of 930 batches: 2.770219087600708. +[ Tue Sep 13 19:26:18 2022 ] Top1: 38.51% +[ Tue Sep 13 19:26:19 2022 ] Top5: 70.18% +[ Tue Sep 13 19:26:19 2022 ] Training epoch: 15 +[ Tue Sep 13 19:26:39 2022 ] Batch(31/162) done. Loss: 1.2515 lr:0.100000 +[ Tue Sep 13 19:27:32 2022 ] Batch(131/162) done. Loss: 1.3614 lr:0.100000 +[ Tue Sep 13 19:27:48 2022 ] Eval epoch: 15 +[ Tue Sep 13 19:30:42 2022 ] Mean test loss of 930 batches: 2.787174940109253. 
+[ Tue Sep 13 19:30:42 2022 ] Top1: 35.36% +[ Tue Sep 13 19:30:43 2022 ] Top5: 68.80% +[ Tue Sep 13 19:30:43 2022 ] Training epoch: 16 +[ Tue Sep 13 19:31:23 2022 ] Batch(69/162) done. Loss: 0.7219 lr:0.100000 +[ Tue Sep 13 19:32:12 2022 ] Eval epoch: 16 +[ Tue Sep 13 19:35:06 2022 ] Mean test loss of 930 batches: 4.6869659423828125. +[ Tue Sep 13 19:35:07 2022 ] Top1: 26.56% +[ Tue Sep 13 19:35:07 2022 ] Top5: 52.24% +[ Tue Sep 13 19:35:07 2022 ] Training epoch: 17 +[ Tue Sep 13 19:35:15 2022 ] Batch(7/162) done. Loss: 0.4922 lr:0.100000 +[ Tue Sep 13 19:36:08 2022 ] Batch(107/162) done. Loss: 0.9194 lr:0.100000 +[ Tue Sep 13 19:36:37 2022 ] Eval epoch: 17 +[ Tue Sep 13 19:39:31 2022 ] Mean test loss of 930 batches: 3.368222236633301. +[ Tue Sep 13 19:39:31 2022 ] Top1: 30.47% +[ Tue Sep 13 19:39:32 2022 ] Top5: 61.70% +[ Tue Sep 13 19:39:32 2022 ] Training epoch: 18 +[ Tue Sep 13 19:39:59 2022 ] Batch(45/162) done. Loss: 0.9355 lr:0.100000 +[ Tue Sep 13 19:40:52 2022 ] Batch(145/162) done. Loss: 1.0187 lr:0.100000 +[ Tue Sep 13 19:41:01 2022 ] Eval epoch: 18 +[ Tue Sep 13 19:43:55 2022 ] Mean test loss of 930 batches: 3.0660781860351562. +[ Tue Sep 13 19:43:56 2022 ] Top1: 36.15% +[ Tue Sep 13 19:43:56 2022 ] Top5: 70.34% +[ Tue Sep 13 19:43:57 2022 ] Training epoch: 19 +[ Tue Sep 13 19:44:44 2022 ] Batch(83/162) done. Loss: 1.0415 lr:0.100000 +[ Tue Sep 13 19:45:26 2022 ] Eval epoch: 19 +[ Tue Sep 13 19:48:19 2022 ] Mean test loss of 930 batches: 3.684891939163208. +[ Tue Sep 13 19:48:20 2022 ] Top1: 32.52% +[ Tue Sep 13 19:48:20 2022 ] Top5: 63.57% +[ Tue Sep 13 19:48:21 2022 ] Training epoch: 20 +[ Tue Sep 13 19:48:35 2022 ] Batch(21/162) done. Loss: 0.7119 lr:0.100000 +[ Tue Sep 13 19:49:28 2022 ] Batch(121/162) done. Loss: 0.8407 lr:0.100000 +[ Tue Sep 13 19:49:49 2022 ] Eval epoch: 20 +[ Tue Sep 13 19:52:43 2022 ] Mean test loss of 930 batches: 3.269587993621826. 
+[ Tue Sep 13 19:52:44 2022 ] Top1: 36.59% +[ Tue Sep 13 19:52:45 2022 ] Top5: 68.75% +[ Tue Sep 13 19:52:45 2022 ] Training epoch: 21 +[ Tue Sep 13 19:53:19 2022 ] Batch(59/162) done. Loss: 0.8355 lr:0.100000 +[ Tue Sep 13 19:54:12 2022 ] Batch(159/162) done. Loss: 0.9426 lr:0.100000 +[ Tue Sep 13 19:54:14 2022 ] Eval epoch: 21 +[ Tue Sep 13 19:57:08 2022 ] Mean test loss of 930 batches: 3.8170042037963867. +[ Tue Sep 13 19:57:08 2022 ] Top1: 35.14% +[ Tue Sep 13 19:57:09 2022 ] Top5: 65.00% +[ Tue Sep 13 19:57:09 2022 ] Training epoch: 22 +[ Tue Sep 13 19:58:04 2022 ] Batch(97/162) done. Loss: 0.9230 lr:0.100000 +[ Tue Sep 13 19:58:38 2022 ] Eval epoch: 22 +[ Tue Sep 13 20:01:32 2022 ] Mean test loss of 930 batches: 2.589581251144409. +[ Tue Sep 13 20:01:33 2022 ] Top1: 45.12% +[ Tue Sep 13 20:01:33 2022 ] Top5: 75.72% +[ Tue Sep 13 20:01:34 2022 ] Training epoch: 23 +[ Tue Sep 13 20:01:56 2022 ] Batch(35/162) done. Loss: 0.6825 lr:0.100000 +[ Tue Sep 13 20:02:48 2022 ] Batch(135/162) done. Loss: 0.6183 lr:0.100000 +[ Tue Sep 13 20:03:02 2022 ] Eval epoch: 23 +[ Tue Sep 13 20:05:55 2022 ] Mean test loss of 930 batches: 2.98416805267334. +[ Tue Sep 13 20:05:56 2022 ] Top1: 39.40% +[ Tue Sep 13 20:05:56 2022 ] Top5: 69.77% +[ Tue Sep 13 20:05:56 2022 ] Training epoch: 24 +[ Tue Sep 13 20:06:39 2022 ] Batch(73/162) done. Loss: 0.5535 lr:0.100000 +[ Tue Sep 13 20:07:25 2022 ] Eval epoch: 24 +[ Tue Sep 13 20:10:19 2022 ] Mean test loss of 930 batches: 4.608972072601318. +[ Tue Sep 13 20:10:20 2022 ] Top1: 32.01% +[ Tue Sep 13 20:10:20 2022 ] Top5: 65.21% +[ Tue Sep 13 20:10:20 2022 ] Training epoch: 25 +[ Tue Sep 13 20:10:30 2022 ] Batch(11/162) done. Loss: 0.5998 lr:0.100000 +[ Tue Sep 13 20:11:23 2022 ] Batch(111/162) done. Loss: 0.6096 lr:0.100000 +[ Tue Sep 13 20:11:49 2022 ] Eval epoch: 25 +[ Tue Sep 13 20:14:43 2022 ] Mean test loss of 930 batches: 4.218852519989014. 
+[ Tue Sep 13 20:14:44 2022 ] Top1: 31.82% +[ Tue Sep 13 20:14:44 2022 ] Top5: 62.13% +[ Tue Sep 13 20:14:45 2022 ] Training epoch: 26 +[ Tue Sep 13 20:15:14 2022 ] Batch(49/162) done. Loss: 0.4509 lr:0.100000 +[ Tue Sep 13 20:16:07 2022 ] Batch(149/162) done. Loss: 0.5795 lr:0.100000 +[ Tue Sep 13 20:16:14 2022 ] Eval epoch: 26 +[ Tue Sep 13 20:19:07 2022 ] Mean test loss of 930 batches: 4.979351997375488. +[ Tue Sep 13 20:19:08 2022 ] Top1: 31.84% +[ Tue Sep 13 20:19:08 2022 ] Top5: 64.70% +[ Tue Sep 13 20:19:09 2022 ] Training epoch: 27 +[ Tue Sep 13 20:19:58 2022 ] Batch(87/162) done. Loss: 1.0953 lr:0.100000 +[ Tue Sep 13 20:20:37 2022 ] Eval epoch: 27 +[ Tue Sep 13 20:23:32 2022 ] Mean test loss of 930 batches: 3.376075267791748. +[ Tue Sep 13 20:23:33 2022 ] Top1: 36.73% +[ Tue Sep 13 20:23:33 2022 ] Top5: 68.99% +[ Tue Sep 13 20:23:34 2022 ] Training epoch: 28 +[ Tue Sep 13 20:23:50 2022 ] Batch(25/162) done. Loss: 0.5626 lr:0.100000 +[ Tue Sep 13 20:24:43 2022 ] Batch(125/162) done. Loss: 0.4676 lr:0.100000 +[ Tue Sep 13 20:25:02 2022 ] Eval epoch: 28 +[ Tue Sep 13 20:27:56 2022 ] Mean test loss of 930 batches: 2.888674020767212. +[ Tue Sep 13 20:27:57 2022 ] Top1: 39.02% +[ Tue Sep 13 20:27:57 2022 ] Top5: 72.62% +[ Tue Sep 13 20:27:58 2022 ] Training epoch: 29 +[ Tue Sep 13 20:28:34 2022 ] Batch(63/162) done. Loss: 0.6923 lr:0.100000 +[ Tue Sep 13 20:29:26 2022 ] Eval epoch: 29 +[ Tue Sep 13 20:32:20 2022 ] Mean test loss of 930 batches: 8.909499168395996. +[ Tue Sep 13 20:32:20 2022 ] Top1: 16.27% +[ Tue Sep 13 20:32:21 2022 ] Top5: 36.47% +[ Tue Sep 13 20:32:21 2022 ] Training epoch: 30 +[ Tue Sep 13 20:32:25 2022 ] Batch(1/162) done. Loss: 0.4918 lr:0.100000 +[ Tue Sep 13 20:33:18 2022 ] Batch(101/162) done. Loss: 0.4076 lr:0.100000 +[ Tue Sep 13 20:33:50 2022 ] Eval epoch: 30 +[ Tue Sep 13 20:36:44 2022 ] Mean test loss of 930 batches: 3.943101167678833. 
+[ Tue Sep 13 20:36:44 2022 ] Top1: 38.68% +[ Tue Sep 13 20:36:44 2022 ] Top5: 70.67% +[ Tue Sep 13 20:36:45 2022 ] Training epoch: 31 +[ Tue Sep 13 20:37:09 2022 ] Batch(39/162) done. Loss: 0.4131 lr:0.100000 +[ Tue Sep 13 20:38:02 2022 ] Batch(139/162) done. Loss: 0.5514 lr:0.100000 +[ Tue Sep 13 20:38:13 2022 ] Eval epoch: 31 +[ Tue Sep 13 20:41:07 2022 ] Mean test loss of 930 batches: 6.147310256958008. +[ Tue Sep 13 20:41:08 2022 ] Top1: 29.80% +[ Tue Sep 13 20:41:08 2022 ] Top5: 62.57% +[ Tue Sep 13 20:41:09 2022 ] Training epoch: 32 +[ Tue Sep 13 20:41:53 2022 ] Batch(77/162) done. Loss: 0.4296 lr:0.100000 +[ Tue Sep 13 20:42:38 2022 ] Eval epoch: 32 +[ Tue Sep 13 20:45:32 2022 ] Mean test loss of 930 batches: 2.972085475921631. +[ Tue Sep 13 20:45:32 2022 ] Top1: 45.22% +[ Tue Sep 13 20:45:33 2022 ] Top5: 75.38% +[ Tue Sep 13 20:45:33 2022 ] Training epoch: 33 +[ Tue Sep 13 20:45:44 2022 ] Batch(15/162) done. Loss: 0.1958 lr:0.100000 +[ Tue Sep 13 20:46:37 2022 ] Batch(115/162) done. Loss: 0.6321 lr:0.100000 +[ Tue Sep 13 20:47:02 2022 ] Eval epoch: 33 +[ Tue Sep 13 20:49:55 2022 ] Mean test loss of 930 batches: 3.6302051544189453. +[ Tue Sep 13 20:49:56 2022 ] Top1: 35.10% +[ Tue Sep 13 20:49:56 2022 ] Top5: 68.23% +[ Tue Sep 13 20:49:56 2022 ] Training epoch: 34 +[ Tue Sep 13 20:50:28 2022 ] Batch(53/162) done. Loss: 0.3220 lr:0.100000 +[ Tue Sep 13 20:51:21 2022 ] Batch(153/162) done. Loss: 0.4241 lr:0.100000 +[ Tue Sep 13 20:51:25 2022 ] Eval epoch: 34 +[ Tue Sep 13 20:54:19 2022 ] Mean test loss of 930 batches: 5.966952323913574. +[ Tue Sep 13 20:54:20 2022 ] Top1: 23.96% +[ Tue Sep 13 20:54:20 2022 ] Top5: 54.38% +[ Tue Sep 13 20:54:21 2022 ] Training epoch: 35 +[ Tue Sep 13 20:55:12 2022 ] Batch(91/162) done. Loss: 0.2236 lr:0.100000 +[ Tue Sep 13 20:55:50 2022 ] Eval epoch: 35 +[ Tue Sep 13 20:58:44 2022 ] Mean test loss of 930 batches: 3.8734185695648193. 
+[ Tue Sep 13 20:58:44 2022 ] Top1: 36.80% +[ Tue Sep 13 20:58:45 2022 ] Top5: 68.49% +[ Tue Sep 13 20:58:45 2022 ] Training epoch: 36 +[ Tue Sep 13 20:59:04 2022 ] Batch(29/162) done. Loss: 0.3525 lr:0.100000 +[ Tue Sep 13 20:59:57 2022 ] Batch(129/162) done. Loss: 0.6794 lr:0.100000 +[ Tue Sep 13 21:00:14 2022 ] Eval epoch: 36 +[ Tue Sep 13 21:03:08 2022 ] Mean test loss of 930 batches: 3.38310170173645. +[ Tue Sep 13 21:03:08 2022 ] Top1: 36.34% +[ Tue Sep 13 21:03:09 2022 ] Top5: 66.81% +[ Tue Sep 13 21:03:09 2022 ] Training epoch: 37 +[ Tue Sep 13 21:03:48 2022 ] Batch(67/162) done. Loss: 0.3958 lr:0.100000 +[ Tue Sep 13 21:04:38 2022 ] Eval epoch: 37 +[ Tue Sep 13 21:07:31 2022 ] Mean test loss of 930 batches: 3.379849672317505. +[ Tue Sep 13 21:07:32 2022 ] Top1: 40.11% +[ Tue Sep 13 21:07:32 2022 ] Top5: 72.22% +[ Tue Sep 13 21:07:33 2022 ] Training epoch: 38 +[ Tue Sep 13 21:07:39 2022 ] Batch(5/162) done. Loss: 0.1820 lr:0.100000 +[ Tue Sep 13 21:08:31 2022 ] Batch(105/162) done. Loss: 0.3579 lr:0.100000 +[ Tue Sep 13 21:09:01 2022 ] Eval epoch: 38 +[ Tue Sep 13 21:11:55 2022 ] Mean test loss of 930 batches: 5.3892083168029785. +[ Tue Sep 13 21:11:55 2022 ] Top1: 29.35% +[ Tue Sep 13 21:11:56 2022 ] Top5: 54.10% +[ Tue Sep 13 21:11:56 2022 ] Training epoch: 39 +[ Tue Sep 13 21:12:22 2022 ] Batch(43/162) done. Loss: 0.3955 lr:0.100000 +[ Tue Sep 13 21:13:15 2022 ] Batch(143/162) done. Loss: 0.2325 lr:0.100000 +[ Tue Sep 13 21:13:25 2022 ] Eval epoch: 39 +[ Tue Sep 13 21:16:18 2022 ] Mean test loss of 930 batches: 9.454630851745605. +[ Tue Sep 13 21:16:19 2022 ] Top1: 19.79% +[ Tue Sep 13 21:16:19 2022 ] Top5: 45.30% +[ Tue Sep 13 21:16:20 2022 ] Training epoch: 40 +[ Tue Sep 13 21:17:06 2022 ] Batch(81/162) done. Loss: 0.4193 lr:0.100000 +[ Tue Sep 13 21:17:48 2022 ] Eval epoch: 40 +[ Tue Sep 13 21:20:41 2022 ] Mean test loss of 930 batches: 10.120402336120605. 
+[ Tue Sep 13 21:20:42 2022 ] Top1: 16.91% +[ Tue Sep 13 21:20:42 2022 ] Top5: 38.84% +[ Tue Sep 13 21:20:42 2022 ] Training epoch: 41 +[ Tue Sep 13 21:20:56 2022 ] Batch(19/162) done. Loss: 0.3093 lr:0.100000 +[ Tue Sep 13 21:21:49 2022 ] Batch(119/162) done. Loss: 0.3140 lr:0.100000 +[ Tue Sep 13 21:22:11 2022 ] Eval epoch: 41 +[ Tue Sep 13 21:25:05 2022 ] Mean test loss of 930 batches: 4.387808322906494. +[ Tue Sep 13 21:25:06 2022 ] Top1: 32.27% +[ Tue Sep 13 21:25:06 2022 ] Top5: 63.54% +[ Tue Sep 13 21:25:07 2022 ] Training epoch: 42 +[ Tue Sep 13 21:25:40 2022 ] Batch(57/162) done. Loss: 0.1977 lr:0.100000 +[ Tue Sep 13 21:26:33 2022 ] Batch(157/162) done. Loss: 0.7468 lr:0.100000 +[ Tue Sep 13 21:26:35 2022 ] Eval epoch: 42 +[ Tue Sep 13 21:29:29 2022 ] Mean test loss of 930 batches: 4.2767720222473145. +[ Tue Sep 13 21:29:29 2022 ] Top1: 36.36% +[ Tue Sep 13 21:29:30 2022 ] Top5: 66.10% +[ Tue Sep 13 21:29:30 2022 ] Training epoch: 43 +[ Tue Sep 13 21:30:23 2022 ] Batch(95/162) done. Loss: 0.4878 lr:0.100000 +[ Tue Sep 13 21:30:58 2022 ] Eval epoch: 43 +[ Tue Sep 13 21:33:52 2022 ] Mean test loss of 930 batches: 5.679744243621826. +[ Tue Sep 13 21:33:52 2022 ] Top1: 27.87% +[ Tue Sep 13 21:33:53 2022 ] Top5: 60.73% +[ Tue Sep 13 21:33:53 2022 ] Training epoch: 44 +[ Tue Sep 13 21:34:14 2022 ] Batch(33/162) done. Loss: 0.2142 lr:0.100000 +[ Tue Sep 13 21:35:06 2022 ] Batch(133/162) done. Loss: 0.4588 lr:0.100000 +[ Tue Sep 13 21:35:21 2022 ] Eval epoch: 44 +[ Tue Sep 13 21:38:15 2022 ] Mean test loss of 930 batches: 4.639147758483887. +[ Tue Sep 13 21:38:16 2022 ] Top1: 36.21% +[ Tue Sep 13 21:38:16 2022 ] Top5: 67.00% +[ Tue Sep 13 21:38:16 2022 ] Training epoch: 45 +[ Tue Sep 13 21:38:57 2022 ] Batch(71/162) done. Loss: 0.4394 lr:0.100000 +[ Tue Sep 13 21:39:45 2022 ] Eval epoch: 45 +[ Tue Sep 13 21:42:39 2022 ] Mean test loss of 930 batches: 3.90327787399292. 
+[ Tue Sep 13 21:42:39 2022 ] Top1: 38.62% +[ Tue Sep 13 21:42:40 2022 ] Top5: 69.91% +[ Tue Sep 13 21:42:40 2022 ] Training epoch: 46 +[ Tue Sep 13 21:42:48 2022 ] Batch(9/162) done. Loss: 0.3888 lr:0.100000 +[ Tue Sep 13 21:43:41 2022 ] Batch(109/162) done. Loss: 0.2235 lr:0.100000 +[ Tue Sep 13 21:44:08 2022 ] Eval epoch: 46 +[ Tue Sep 13 21:47:02 2022 ] Mean test loss of 930 batches: 6.589352130889893. +[ Tue Sep 13 21:47:02 2022 ] Top1: 25.58% +[ Tue Sep 13 21:47:03 2022 ] Top5: 54.44% +[ Tue Sep 13 21:47:03 2022 ] Training epoch: 47 +[ Tue Sep 13 21:47:31 2022 ] Batch(47/162) done. Loss: 0.3766 lr:0.100000 +[ Tue Sep 13 21:48:24 2022 ] Batch(147/162) done. Loss: 0.5963 lr:0.100000 +[ Tue Sep 13 21:48:32 2022 ] Eval epoch: 47 +[ Tue Sep 13 21:51:26 2022 ] Mean test loss of 930 batches: 6.4680495262146. +[ Tue Sep 13 21:51:26 2022 ] Top1: 29.32% +[ Tue Sep 13 21:51:27 2022 ] Top5: 57.34% +[ Tue Sep 13 21:51:27 2022 ] Training epoch: 48 +[ Tue Sep 13 21:52:15 2022 ] Batch(85/162) done. Loss: 0.2175 lr:0.100000 +[ Tue Sep 13 21:52:56 2022 ] Eval epoch: 48 +[ Tue Sep 13 21:55:50 2022 ] Mean test loss of 930 batches: 5.157181739807129. +[ Tue Sep 13 21:55:50 2022 ] Top1: 34.15% +[ Tue Sep 13 21:55:51 2022 ] Top5: 67.10% +[ Tue Sep 13 21:55:51 2022 ] Training epoch: 49 +[ Tue Sep 13 21:56:06 2022 ] Batch(23/162) done. Loss: 0.3487 lr:0.100000 +[ Tue Sep 13 21:56:59 2022 ] Batch(123/162) done. Loss: 0.2986 lr:0.100000 +[ Tue Sep 13 21:57:19 2022 ] Eval epoch: 49 +[ Tue Sep 13 22:00:13 2022 ] Mean test loss of 930 batches: 3.3797988891601562. +[ Tue Sep 13 22:00:14 2022 ] Top1: 43.03% +[ Tue Sep 13 22:00:14 2022 ] Top5: 71.34% +[ Tue Sep 13 22:00:15 2022 ] Training epoch: 50 +[ Tue Sep 13 22:00:50 2022 ] Batch(61/162) done. Loss: 0.2856 lr:0.100000 +[ Tue Sep 13 22:01:43 2022 ] Batch(161/162) done. Loss: 0.2811 lr:0.100000 +[ Tue Sep 13 22:01:43 2022 ] Eval epoch: 50 +[ Tue Sep 13 22:04:37 2022 ] Mean test loss of 930 batches: 6.036594390869141. 
+[ Tue Sep 13 22:04:38 2022 ] Top1: 26.76% +[ Tue Sep 13 22:04:38 2022 ] Top5: 54.91% +[ Tue Sep 13 22:04:38 2022 ] Training epoch: 51 +[ Tue Sep 13 22:05:34 2022 ] Batch(99/162) done. Loss: 0.3003 lr:0.100000 +[ Tue Sep 13 22:06:07 2022 ] Eval epoch: 51 +[ Tue Sep 13 22:09:00 2022 ] Mean test loss of 930 batches: 3.1611266136169434. +[ Tue Sep 13 22:09:01 2022 ] Top1: 45.07% +[ Tue Sep 13 22:09:01 2022 ] Top5: 73.64% +[ Tue Sep 13 22:09:01 2022 ] Training epoch: 52 +[ Tue Sep 13 22:09:25 2022 ] Batch(37/162) done. Loss: 0.3267 lr:0.100000 +[ Tue Sep 13 22:10:17 2022 ] Batch(137/162) done. Loss: 0.3280 lr:0.100000 +[ Tue Sep 13 22:10:30 2022 ] Eval epoch: 52 +[ Tue Sep 13 22:13:24 2022 ] Mean test loss of 930 batches: 3.928891658782959. +[ Tue Sep 13 22:13:25 2022 ] Top1: 42.29% +[ Tue Sep 13 22:13:25 2022 ] Top5: 73.62% +[ Tue Sep 13 22:13:25 2022 ] Training epoch: 53 +[ Tue Sep 13 22:14:08 2022 ] Batch(75/162) done. Loss: 0.2201 lr:0.100000 +[ Tue Sep 13 22:14:54 2022 ] Eval epoch: 53 +[ Tue Sep 13 22:17:48 2022 ] Mean test loss of 930 batches: 5.713135242462158. +[ Tue Sep 13 22:17:48 2022 ] Top1: 26.39% +[ Tue Sep 13 22:17:48 2022 ] Top5: 50.29% +[ Tue Sep 13 22:17:49 2022 ] Training epoch: 54 +[ Tue Sep 13 22:17:59 2022 ] Batch(13/162) done. Loss: 0.1089 lr:0.100000 +[ Tue Sep 13 22:18:52 2022 ] Batch(113/162) done. Loss: 0.3991 lr:0.100000 +[ Tue Sep 13 22:19:18 2022 ] Eval epoch: 54 +[ Tue Sep 13 22:22:11 2022 ] Mean test loss of 930 batches: 4.794914722442627. +[ Tue Sep 13 22:22:12 2022 ] Top1: 34.96% +[ Tue Sep 13 22:22:12 2022 ] Top5: 64.15% +[ Tue Sep 13 22:22:13 2022 ] Training epoch: 55 +[ Tue Sep 13 22:22:43 2022 ] Batch(51/162) done. Loss: 0.2343 lr:0.100000 +[ Tue Sep 13 22:23:36 2022 ] Batch(151/162) done. Loss: 0.2005 lr:0.100000 +[ Tue Sep 13 22:23:41 2022 ] Eval epoch: 55 +[ Tue Sep 13 22:26:35 2022 ] Mean test loss of 930 batches: 11.087225914001465. 
+[ Tue Sep 13 22:26:36 2022 ] Top1: 21.75% +[ Tue Sep 13 22:26:36 2022 ] Top5: 43.36% +[ Tue Sep 13 22:26:36 2022 ] Training epoch: 56 +[ Tue Sep 13 22:27:27 2022 ] Batch(89/162) done. Loss: 0.3925 lr:0.100000 +[ Tue Sep 13 22:28:05 2022 ] Eval epoch: 56 +[ Tue Sep 13 22:30:59 2022 ] Mean test loss of 930 batches: 4.006224632263184. +[ Tue Sep 13 22:30:59 2022 ] Top1: 40.97% +[ Tue Sep 13 22:31:00 2022 ] Top5: 70.19% +[ Tue Sep 13 22:31:00 2022 ] Training epoch: 57 +[ Tue Sep 13 22:31:18 2022 ] Batch(27/162) done. Loss: 0.1392 lr:0.100000 +[ Tue Sep 13 22:32:11 2022 ] Batch(127/162) done. Loss: 0.3604 lr:0.100000 +[ Tue Sep 13 22:32:29 2022 ] Eval epoch: 57 +[ Tue Sep 13 22:35:23 2022 ] Mean test loss of 930 batches: 5.908833026885986. +[ Tue Sep 13 22:35:23 2022 ] Top1: 32.79% +[ Tue Sep 13 22:35:24 2022 ] Top5: 61.89% +[ Tue Sep 13 22:35:24 2022 ] Training epoch: 58 +[ Tue Sep 13 22:36:01 2022 ] Batch(65/162) done. Loss: 0.3371 lr:0.100000 +[ Tue Sep 13 22:36:52 2022 ] Eval epoch: 58 +[ Tue Sep 13 22:39:46 2022 ] Mean test loss of 930 batches: 5.860087871551514. +[ Tue Sep 13 22:39:47 2022 ] Top1: 36.03% +[ Tue Sep 13 22:39:47 2022 ] Top5: 67.77% +[ Tue Sep 13 22:39:47 2022 ] Training epoch: 59 +[ Tue Sep 13 22:39:53 2022 ] Batch(3/162) done. Loss: 0.4174 lr:0.100000 +[ Tue Sep 13 22:40:45 2022 ] Batch(103/162) done. Loss: 0.0923 lr:0.100000 +[ Tue Sep 13 22:41:16 2022 ] Eval epoch: 59 +[ Tue Sep 13 22:44:09 2022 ] Mean test loss of 930 batches: 9.257292747497559. +[ Tue Sep 13 22:44:10 2022 ] Top1: 20.87% +[ Tue Sep 13 22:44:10 2022 ] Top5: 44.52% +[ Tue Sep 13 22:44:11 2022 ] Training epoch: 60 +[ Tue Sep 13 22:44:36 2022 ] Batch(41/162) done. Loss: 0.1475 lr:0.100000 +[ Tue Sep 13 22:45:29 2022 ] Batch(141/162) done. Loss: 0.2011 lr:0.100000 +[ Tue Sep 13 22:45:39 2022 ] Eval epoch: 60 +[ Tue Sep 13 22:48:33 2022 ] Mean test loss of 930 batches: 6.781947135925293. 
+[ Tue Sep 13 22:48:33 2022 ] Top1: 29.67% +[ Tue Sep 13 22:48:34 2022 ] Top5: 57.45% +[ Tue Sep 13 22:48:34 2022 ] Training epoch: 61 +[ Tue Sep 13 22:49:20 2022 ] Batch(79/162) done. Loss: 0.1082 lr:0.010000 +[ Tue Sep 13 22:50:03 2022 ] Eval epoch: 61 +[ Tue Sep 13 22:52:57 2022 ] Mean test loss of 930 batches: 2.992205858230591. +[ Tue Sep 13 22:52:58 2022 ] Top1: 53.45% +[ Tue Sep 13 22:52:58 2022 ] Top5: 80.64% +[ Tue Sep 13 22:52:59 2022 ] Training epoch: 62 +[ Tue Sep 13 22:53:11 2022 ] Batch(17/162) done. Loss: 0.0523 lr:0.010000 +[ Tue Sep 13 22:54:04 2022 ] Batch(117/162) done. Loss: 0.0956 lr:0.010000 +[ Tue Sep 13 22:54:28 2022 ] Eval epoch: 62 +[ Tue Sep 13 22:57:21 2022 ] Mean test loss of 930 batches: 3.067354917526245. +[ Tue Sep 13 22:57:21 2022 ] Top1: 53.46% +[ Tue Sep 13 22:57:22 2022 ] Top5: 80.63% +[ Tue Sep 13 22:57:22 2022 ] Training epoch: 63 +[ Tue Sep 13 22:57:55 2022 ] Batch(55/162) done. Loss: 0.0558 lr:0.010000 +[ Tue Sep 13 22:58:48 2022 ] Batch(155/162) done. Loss: 0.0274 lr:0.010000 +[ Tue Sep 13 22:58:51 2022 ] Eval epoch: 63 +[ Tue Sep 13 23:01:45 2022 ] Mean test loss of 930 batches: 3.045286178588867. +[ Tue Sep 13 23:01:45 2022 ] Top1: 54.33% +[ Tue Sep 13 23:01:46 2022 ] Top5: 81.25% +[ Tue Sep 13 23:01:46 2022 ] Training epoch: 64 +[ Tue Sep 13 23:02:38 2022 ] Batch(93/162) done. Loss: 0.0190 lr:0.010000 +[ Tue Sep 13 23:03:15 2022 ] Eval epoch: 64 +[ Tue Sep 13 23:06:09 2022 ] Mean test loss of 930 batches: 3.025174140930176. +[ Tue Sep 13 23:06:09 2022 ] Top1: 54.49% +[ Tue Sep 13 23:06:09 2022 ] Top5: 81.48% +[ Tue Sep 13 23:06:10 2022 ] Training epoch: 65 +[ Tue Sep 13 23:06:29 2022 ] Batch(31/162) done. Loss: 0.0290 lr:0.010000 +[ Tue Sep 13 23:07:22 2022 ] Batch(131/162) done. Loss: 0.0235 lr:0.010000 +[ Tue Sep 13 23:07:38 2022 ] Eval epoch: 65 +[ Tue Sep 13 23:10:33 2022 ] Mean test loss of 930 batches: 3.1589932441711426. 
+[ Tue Sep 13 23:10:33 2022 ] Top1: 52.91% +[ Tue Sep 13 23:10:34 2022 ] Top5: 80.61% +[ Tue Sep 13 23:10:34 2022 ] Training epoch: 66 +[ Tue Sep 13 23:11:14 2022 ] Batch(69/162) done. Loss: 0.0282 lr:0.010000 +[ Tue Sep 13 23:12:03 2022 ] Eval epoch: 66 +[ Tue Sep 13 23:14:56 2022 ] Mean test loss of 930 batches: 3.023002862930298. +[ Tue Sep 13 23:14:57 2022 ] Top1: 54.05% +[ Tue Sep 13 23:14:57 2022 ] Top5: 81.27% +[ Tue Sep 13 23:14:57 2022 ] Training epoch: 67 +[ Tue Sep 13 23:15:05 2022 ] Batch(7/162) done. Loss: 0.0235 lr:0.010000 +[ Tue Sep 13 23:15:57 2022 ] Batch(107/162) done. Loss: 0.1305 lr:0.010000 +[ Tue Sep 13 23:16:26 2022 ] Eval epoch: 67 +[ Tue Sep 13 23:19:20 2022 ] Mean test loss of 930 batches: 3.1956241130828857. +[ Tue Sep 13 23:19:20 2022 ] Top1: 52.66% +[ Tue Sep 13 23:19:21 2022 ] Top5: 80.47% +[ Tue Sep 13 23:19:21 2022 ] Training epoch: 68 +[ Tue Sep 13 23:19:48 2022 ] Batch(45/162) done. Loss: 0.0245 lr:0.010000 +[ Tue Sep 13 23:20:41 2022 ] Batch(145/162) done. Loss: 0.0290 lr:0.010000 +[ Tue Sep 13 23:20:50 2022 ] Eval epoch: 68 +[ Tue Sep 13 23:23:44 2022 ] Mean test loss of 930 batches: 3.221503734588623. +[ Tue Sep 13 23:23:44 2022 ] Top1: 52.79% +[ Tue Sep 13 23:23:45 2022 ] Top5: 80.55% +[ Tue Sep 13 23:23:45 2022 ] Training epoch: 69 +[ Tue Sep 13 23:24:33 2022 ] Batch(83/162) done. Loss: 0.0249 lr:0.010000 +[ Tue Sep 13 23:25:15 2022 ] Eval epoch: 69 +[ Tue Sep 13 23:28:08 2022 ] Mean test loss of 930 batches: 3.1967437267303467. +[ Tue Sep 13 23:28:09 2022 ] Top1: 53.74% +[ Tue Sep 13 23:28:09 2022 ] Top5: 80.80% +[ Tue Sep 13 23:28:10 2022 ] Training epoch: 70 +[ Tue Sep 13 23:28:25 2022 ] Batch(21/162) done. Loss: 0.0995 lr:0.010000 +[ Tue Sep 13 23:29:17 2022 ] Batch(121/162) done. Loss: 0.0304 lr:0.010000 +[ Tue Sep 13 23:29:38 2022 ] Eval epoch: 70 +[ Tue Sep 13 23:32:31 2022 ] Mean test loss of 930 batches: 3.089479923248291. 
+[ Tue Sep 13 23:32:32 2022 ] Top1: 54.28% +[ Tue Sep 13 23:32:32 2022 ] Top5: 81.39% +[ Tue Sep 13 23:32:32 2022 ] Training epoch: 71 +[ Tue Sep 13 23:33:07 2022 ] Batch(59/162) done. Loss: 0.0558 lr:0.010000 +[ Tue Sep 13 23:34:00 2022 ] Batch(159/162) done. Loss: 0.0395 lr:0.010000 +[ Tue Sep 13 23:34:02 2022 ] Eval epoch: 71 +[ Tue Sep 13 23:36:55 2022 ] Mean test loss of 930 batches: 3.0850040912628174. +[ Tue Sep 13 23:36:55 2022 ] Top1: 54.34% +[ Tue Sep 13 23:36:56 2022 ] Top5: 81.43% +[ Tue Sep 13 23:36:56 2022 ] Training epoch: 72 +[ Tue Sep 13 23:37:51 2022 ] Batch(97/162) done. Loss: 0.0396 lr:0.010000 +[ Tue Sep 13 23:38:25 2022 ] Eval epoch: 72 +[ Tue Sep 13 23:41:18 2022 ] Mean test loss of 930 batches: 3.267887592315674. +[ Tue Sep 13 23:41:19 2022 ] Top1: 53.87% +[ Tue Sep 13 23:41:19 2022 ] Top5: 80.92% +[ Tue Sep 13 23:41:19 2022 ] Training epoch: 73 +[ Tue Sep 13 23:41:42 2022 ] Batch(35/162) done. Loss: 0.0307 lr:0.010000 +[ Tue Sep 13 23:42:34 2022 ] Batch(135/162) done. Loss: 0.0440 lr:0.010000 +[ Tue Sep 13 23:42:48 2022 ] Eval epoch: 73 +[ Tue Sep 13 23:45:42 2022 ] Mean test loss of 930 batches: 3.163264036178589. +[ Tue Sep 13 23:45:42 2022 ] Top1: 54.46% +[ Tue Sep 13 23:45:43 2022 ] Top5: 81.37% +[ Tue Sep 13 23:45:43 2022 ] Training epoch: 74 +[ Tue Sep 13 23:46:25 2022 ] Batch(73/162) done. Loss: 0.0482 lr:0.010000 +[ Tue Sep 13 23:47:12 2022 ] Eval epoch: 74 +[ Tue Sep 13 23:50:05 2022 ] Mean test loss of 930 batches: 3.155078172683716. +[ Tue Sep 13 23:50:05 2022 ] Top1: 54.46% +[ Tue Sep 13 23:50:06 2022 ] Top5: 81.15% +[ Tue Sep 13 23:50:06 2022 ] Training epoch: 75 +[ Tue Sep 13 23:50:16 2022 ] Batch(11/162) done. Loss: 0.0118 lr:0.010000 +[ Tue Sep 13 23:51:09 2022 ] Batch(111/162) done. Loss: 0.0536 lr:0.010000 +[ Tue Sep 13 23:51:35 2022 ] Eval epoch: 75 +[ Tue Sep 13 23:54:28 2022 ] Mean test loss of 930 batches: 3.2689106464385986. 
+[ Tue Sep 13 23:54:29 2022 ] Top1: 53.18% +[ Tue Sep 13 23:54:29 2022 ] Top5: 80.74% +[ Tue Sep 13 23:54:29 2022 ] Training epoch: 76 +[ Tue Sep 13 23:54:59 2022 ] Batch(49/162) done. Loss: 0.0446 lr:0.010000 +[ Tue Sep 13 23:55:52 2022 ] Batch(149/162) done. Loss: 0.0947 lr:0.010000 +[ Tue Sep 13 23:55:58 2022 ] Eval epoch: 76 +[ Tue Sep 13 23:58:52 2022 ] Mean test loss of 930 batches: 3.3122920989990234. +[ Tue Sep 13 23:58:52 2022 ] Top1: 53.77% +[ Tue Sep 13 23:58:53 2022 ] Top5: 80.86% +[ Tue Sep 13 23:58:53 2022 ] Training epoch: 77 +[ Tue Sep 13 23:59:43 2022 ] Batch(87/162) done. Loss: 0.0506 lr:0.010000 +[ Wed Sep 14 00:00:22 2022 ] Eval epoch: 77 +[ Wed Sep 14 00:03:15 2022 ] Mean test loss of 930 batches: 3.2514548301696777. +[ Wed Sep 14 00:03:16 2022 ] Top1: 54.14% +[ Wed Sep 14 00:03:16 2022 ] Top5: 81.17% +[ Wed Sep 14 00:03:16 2022 ] Training epoch: 78 +[ Wed Sep 14 00:03:33 2022 ] Batch(25/162) done. Loss: 0.0222 lr:0.010000 +[ Wed Sep 14 00:04:26 2022 ] Batch(125/162) done. Loss: 0.0258 lr:0.010000 +[ Wed Sep 14 00:04:45 2022 ] Eval epoch: 78 +[ Wed Sep 14 00:07:39 2022 ] Mean test loss of 930 batches: 3.293084144592285. +[ Wed Sep 14 00:07:39 2022 ] Top1: 53.73% +[ Wed Sep 14 00:07:40 2022 ] Top5: 80.95% +[ Wed Sep 14 00:07:40 2022 ] Training epoch: 79 +[ Wed Sep 14 00:08:17 2022 ] Batch(63/162) done. Loss: 0.0131 lr:0.010000 +[ Wed Sep 14 00:09:09 2022 ] Eval epoch: 79 +[ Wed Sep 14 00:12:02 2022 ] Mean test loss of 930 batches: 3.262202739715576. +[ Wed Sep 14 00:12:03 2022 ] Top1: 53.55% +[ Wed Sep 14 00:12:03 2022 ] Top5: 80.66% +[ Wed Sep 14 00:12:03 2022 ] Training epoch: 80 +[ Wed Sep 14 00:12:07 2022 ] Batch(1/162) done. Loss: 0.0178 lr:0.010000 +[ Wed Sep 14 00:13:00 2022 ] Batch(101/162) done. Loss: 0.0411 lr:0.010000 +[ Wed Sep 14 00:13:32 2022 ] Eval epoch: 80 +[ Wed Sep 14 00:16:26 2022 ] Mean test loss of 930 batches: 3.2297379970550537. 
+[ Wed Sep 14 00:16:27 2022 ] Top1: 54.15% +[ Wed Sep 14 00:16:27 2022 ] Top5: 81.03% +[ Wed Sep 14 00:16:27 2022 ] Training epoch: 81 +[ Wed Sep 14 00:16:52 2022 ] Batch(39/162) done. Loss: 0.0240 lr:0.001000 +[ Wed Sep 14 00:17:45 2022 ] Batch(139/162) done. Loss: 0.0447 lr:0.001000 +[ Wed Sep 14 00:17:56 2022 ] Eval epoch: 81 +[ Wed Sep 14 00:20:50 2022 ] Mean test loss of 930 batches: 3.254833698272705. +[ Wed Sep 14 00:20:50 2022 ] Top1: 54.14% +[ Wed Sep 14 00:20:51 2022 ] Top5: 81.11% +[ Wed Sep 14 00:20:51 2022 ] Training epoch: 82 +[ Wed Sep 14 00:21:35 2022 ] Batch(77/162) done. Loss: 0.0049 lr:0.001000 +[ Wed Sep 14 00:22:20 2022 ] Eval epoch: 82 +[ Wed Sep 14 00:25:13 2022 ] Mean test loss of 930 batches: 3.3861968517303467. +[ Wed Sep 14 00:25:14 2022 ] Top1: 53.41% +[ Wed Sep 14 00:25:14 2022 ] Top5: 80.56% +[ Wed Sep 14 00:25:14 2022 ] Training epoch: 83 +[ Wed Sep 14 00:25:26 2022 ] Batch(15/162) done. Loss: 0.0557 lr:0.001000 +[ Wed Sep 14 00:26:19 2022 ] Batch(115/162) done. Loss: 0.0230 lr:0.001000 +[ Wed Sep 14 00:26:43 2022 ] Eval epoch: 83 +[ Wed Sep 14 00:29:36 2022 ] Mean test loss of 930 batches: 3.257885456085205. +[ Wed Sep 14 00:29:37 2022 ] Top1: 54.50% +[ Wed Sep 14 00:29:37 2022 ] Top5: 81.17% +[ Wed Sep 14 00:29:37 2022 ] Training epoch: 84 +[ Wed Sep 14 00:30:09 2022 ] Batch(53/162) done. Loss: 0.0179 lr:0.001000 +[ Wed Sep 14 00:31:02 2022 ] Batch(153/162) done. Loss: 0.0429 lr:0.001000 +[ Wed Sep 14 00:31:07 2022 ] Eval epoch: 84 +[ Wed Sep 14 00:34:00 2022 ] Mean test loss of 930 batches: 3.3241426944732666. +[ Wed Sep 14 00:34:00 2022 ] Top1: 54.11% +[ Wed Sep 14 00:34:01 2022 ] Top5: 81.14% +[ Wed Sep 14 00:34:01 2022 ] Training epoch: 85 +[ Wed Sep 14 00:34:53 2022 ] Batch(91/162) done. Loss: 0.0074 lr:0.001000 +[ Wed Sep 14 00:35:30 2022 ] Eval epoch: 85 +[ Wed Sep 14 00:38:23 2022 ] Mean test loss of 930 batches: 3.2798843383789062. 
+[ Wed Sep 14 00:38:23 2022 ] Top1: 54.26% +[ Wed Sep 14 00:38:24 2022 ] Top5: 81.27% +[ Wed Sep 14 00:38:24 2022 ] Training epoch: 86 +[ Wed Sep 14 00:38:43 2022 ] Batch(29/162) done. Loss: 0.0892 lr:0.001000 +[ Wed Sep 14 00:39:35 2022 ] Batch(129/162) done. Loss: 0.0756 lr:0.001000 +[ Wed Sep 14 00:39:53 2022 ] Eval epoch: 86 +[ Wed Sep 14 00:42:46 2022 ] Mean test loss of 930 batches: 3.2872209548950195. +[ Wed Sep 14 00:42:46 2022 ] Top1: 54.41% +[ Wed Sep 14 00:42:47 2022 ] Top5: 81.32% +[ Wed Sep 14 00:42:47 2022 ] Training epoch: 87 +[ Wed Sep 14 00:43:26 2022 ] Batch(67/162) done. Loss: 0.0296 lr:0.001000 +[ Wed Sep 14 00:44:16 2022 ] Eval epoch: 87 +[ Wed Sep 14 00:47:09 2022 ] Mean test loss of 930 batches: 3.325481653213501. +[ Wed Sep 14 00:47:09 2022 ] Top1: 53.61% +[ Wed Sep 14 00:47:10 2022 ] Top5: 80.88% +[ Wed Sep 14 00:47:10 2022 ] Training epoch: 88 +[ Wed Sep 14 00:47:16 2022 ] Batch(5/162) done. Loss: 0.0617 lr:0.001000 +[ Wed Sep 14 00:48:09 2022 ] Batch(105/162) done. Loss: 0.0109 lr:0.001000 +[ Wed Sep 14 00:48:39 2022 ] Eval epoch: 88 +[ Wed Sep 14 00:51:32 2022 ] Mean test loss of 930 batches: 3.276432752609253. +[ Wed Sep 14 00:51:33 2022 ] Top1: 54.77% +[ Wed Sep 14 00:51:33 2022 ] Top5: 81.54% +[ Wed Sep 14 00:51:33 2022 ] Training epoch: 89 +[ Wed Sep 14 00:51:59 2022 ] Batch(43/162) done. Loss: 0.0557 lr:0.001000 +[ Wed Sep 14 00:52:52 2022 ] Batch(143/162) done. Loss: 0.0207 lr:0.001000 +[ Wed Sep 14 00:53:02 2022 ] Eval epoch: 89 +[ Wed Sep 14 00:55:56 2022 ] Mean test loss of 930 batches: 3.3057165145874023. +[ Wed Sep 14 00:55:57 2022 ] Top1: 53.95% +[ Wed Sep 14 00:55:57 2022 ] Top5: 81.01% +[ Wed Sep 14 00:55:57 2022 ] Training epoch: 90 +[ Wed Sep 14 00:56:44 2022 ] Batch(81/162) done. Loss: 0.0615 lr:0.001000 +[ Wed Sep 14 00:57:26 2022 ] Eval epoch: 90 +[ Wed Sep 14 01:00:20 2022 ] Mean test loss of 930 batches: 3.2415735721588135. 
+[ Wed Sep 14 01:00:21 2022 ] Top1: 54.78% +[ Wed Sep 14 01:00:21 2022 ] Top5: 81.43% +[ Wed Sep 14 01:00:22 2022 ] Training epoch: 91 +[ Wed Sep 14 01:00:35 2022 ] Batch(19/162) done. Loss: 0.0479 lr:0.001000 +[ Wed Sep 14 01:01:28 2022 ] Batch(119/162) done. Loss: 0.0326 lr:0.001000 +[ Wed Sep 14 01:01:51 2022 ] Eval epoch: 91 +[ Wed Sep 14 01:04:44 2022 ] Mean test loss of 930 batches: 3.2997372150421143. +[ Wed Sep 14 01:04:44 2022 ] Top1: 54.55% +[ Wed Sep 14 01:04:45 2022 ] Top5: 81.29% +[ Wed Sep 14 01:04:45 2022 ] Training epoch: 92 +[ Wed Sep 14 01:05:19 2022 ] Batch(57/162) done. Loss: 0.0322 lr:0.001000 +[ Wed Sep 14 01:06:12 2022 ] Batch(157/162) done. Loss: 0.0710 lr:0.001000 +[ Wed Sep 14 01:06:14 2022 ] Eval epoch: 92 +[ Wed Sep 14 01:09:07 2022 ] Mean test loss of 930 batches: 3.306330919265747. +[ Wed Sep 14 01:09:08 2022 ] Top1: 53.68% +[ Wed Sep 14 01:09:08 2022 ] Top5: 80.86% +[ Wed Sep 14 01:09:08 2022 ] Training epoch: 93 +[ Wed Sep 14 01:10:02 2022 ] Batch(95/162) done. Loss: 0.0152 lr:0.001000 +[ Wed Sep 14 01:10:38 2022 ] Eval epoch: 93 +[ Wed Sep 14 01:13:32 2022 ] Mean test loss of 930 batches: 3.4323089122772217. +[ Wed Sep 14 01:13:32 2022 ] Top1: 52.69% +[ Wed Sep 14 01:13:32 2022 ] Top5: 80.27% +[ Wed Sep 14 01:13:33 2022 ] Training epoch: 94 +[ Wed Sep 14 01:13:54 2022 ] Batch(33/162) done. Loss: 0.0922 lr:0.001000 +[ Wed Sep 14 01:14:47 2022 ] Batch(133/162) done. Loss: 0.0339 lr:0.001000 +[ Wed Sep 14 01:15:02 2022 ] Eval epoch: 94 +[ Wed Sep 14 01:17:55 2022 ] Mean test loss of 930 batches: 3.324550151824951. +[ Wed Sep 14 01:17:55 2022 ] Top1: 54.07% +[ Wed Sep 14 01:17:56 2022 ] Top5: 80.87% +[ Wed Sep 14 01:17:56 2022 ] Training epoch: 95 +[ Wed Sep 14 01:18:37 2022 ] Batch(71/162) done. Loss: 0.0160 lr:0.001000 +[ Wed Sep 14 01:19:25 2022 ] Eval epoch: 95 +[ Wed Sep 14 01:22:18 2022 ] Mean test loss of 930 batches: 3.3670194149017334. 
+[ Wed Sep 14 01:22:19 2022 ] Top1: 52.92% +[ Wed Sep 14 01:22:19 2022 ] Top5: 80.41% +[ Wed Sep 14 01:22:20 2022 ] Training epoch: 96 +[ Wed Sep 14 01:22:28 2022 ] Batch(9/162) done. Loss: 0.0091 lr:0.001000 +[ Wed Sep 14 01:23:21 2022 ] Batch(109/162) done. Loss: 0.0671 lr:0.001000 +[ Wed Sep 14 01:23:48 2022 ] Eval epoch: 96 +[ Wed Sep 14 01:26:42 2022 ] Mean test loss of 930 batches: 3.3024790287017822. +[ Wed Sep 14 01:26:43 2022 ] Top1: 53.83% +[ Wed Sep 14 01:26:43 2022 ] Top5: 81.13% +[ Wed Sep 14 01:26:44 2022 ] Training epoch: 97 +[ Wed Sep 14 01:27:12 2022 ] Batch(47/162) done. Loss: 0.0389 lr:0.001000 +[ Wed Sep 14 01:28:05 2022 ] Batch(147/162) done. Loss: 0.0605 lr:0.001000 +[ Wed Sep 14 01:28:13 2022 ] Eval epoch: 97 +[ Wed Sep 14 01:31:06 2022 ] Mean test loss of 930 batches: 3.321824789047241. +[ Wed Sep 14 01:31:06 2022 ] Top1: 53.47% +[ Wed Sep 14 01:31:07 2022 ] Top5: 80.94% +[ Wed Sep 14 01:31:07 2022 ] Training epoch: 98 +[ Wed Sep 14 01:31:56 2022 ] Batch(85/162) done. Loss: 0.0401 lr:0.001000 +[ Wed Sep 14 01:32:36 2022 ] Eval epoch: 98 +[ Wed Sep 14 01:35:29 2022 ] Mean test loss of 930 batches: 3.3567888736724854. +[ Wed Sep 14 01:35:29 2022 ] Top1: 54.22% +[ Wed Sep 14 01:35:30 2022 ] Top5: 81.11% +[ Wed Sep 14 01:35:30 2022 ] Training epoch: 99 +[ Wed Sep 14 01:35:46 2022 ] Batch(23/162) done. Loss: 0.0865 lr:0.001000 +[ Wed Sep 14 01:36:39 2022 ] Batch(123/162) done. Loss: 0.0793 lr:0.001000 +[ Wed Sep 14 01:36:59 2022 ] Eval epoch: 99 +[ Wed Sep 14 01:39:53 2022 ] Mean test loss of 930 batches: 3.289196491241455. +[ Wed Sep 14 01:39:54 2022 ] Top1: 53.88% +[ Wed Sep 14 01:39:54 2022 ] Top5: 80.97% +[ Wed Sep 14 01:39:54 2022 ] Training epoch: 100 +[ Wed Sep 14 01:40:31 2022 ] Batch(61/162) done. Loss: 0.0328 lr:0.001000 +[ Wed Sep 14 01:41:24 2022 ] Batch(161/162) done. Loss: 0.0169 lr:0.001000 +[ Wed Sep 14 01:41:24 2022 ] Eval epoch: 100 +[ Wed Sep 14 01:44:18 2022 ] Mean test loss of 930 batches: 3.32149076461792. 
+[ Wed Sep 14 01:44:18 2022 ] Top1: 53.94% +[ Wed Sep 14 01:44:19 2022 ] Top5: 80.92% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb06a0c76ccaa085dba049c3c045e252920159ff --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/config.yaml @@ -0,0 +1,61 @@ +Experiment_name: ntu120_bone_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_bone.yaml +device: +- 0 +- 1 +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_xset +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_xset diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/decouple_gcn.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = 
nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * 
D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph 
= import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6bdfb98489f90d5c0e3c87a640e52a48fce7a7a9 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/eval_results/best_acc.pkl @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cc39dedd1c7b93b150d9bb906c078558113b002b4b6788a3dd9298b21a6549f +size 34946665 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..70e98d24e1b72be5e244cfab5719e34fe9aab0a1 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_bone_xset/log.txt @@ -0,0 +1,665 @@ +[ Wed Sep 14 13:01:27 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_xset', 'model_saved_name': './save_models/ntu120_bone_xset', 'Experiment_name': 'ntu120_bone_xset', 'config': './config/ntu120_xset/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [0, 1, 2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 
'warm_up_epoch': 0} + +[ Wed Sep 14 13:01:27 2022 ] Training epoch: 1 +[ Wed Sep 14 13:02:04 2022 ] Batch(99/162) done. Loss: 3.9355 lr:0.100000 +[ Wed Sep 14 13:02:19 2022 ] Eval epoch: 1 +[ Wed Sep 14 13:04:04 2022 ] Mean test loss of 930 batches: 5.392993927001953. +[ Wed Sep 14 13:04:04 2022 ] Top1: 4.91% +[ Wed Sep 14 13:04:05 2022 ] Top5: 17.70% +[ Wed Sep 14 13:04:05 2022 ] Training epoch: 2 +[ Wed Sep 14 13:04:20 2022 ] Batch(37/162) done. Loss: 3.5251 lr:0.100000 +[ Wed Sep 14 13:04:50 2022 ] Batch(137/162) done. Loss: 3.2359 lr:0.100000 +[ Wed Sep 14 13:04:58 2022 ] Eval epoch: 2 +[ Wed Sep 14 13:06:44 2022 ] Mean test loss of 930 batches: 4.477888584136963. +[ Wed Sep 14 13:06:45 2022 ] Top1: 9.46% +[ Wed Sep 14 13:06:45 2022 ] Top5: 28.49% +[ Wed Sep 14 13:06:45 2022 ] Training epoch: 3 +[ Wed Sep 14 13:07:12 2022 ] Batch(75/162) done. Loss: 2.8039 lr:0.100000 +[ Wed Sep 14 13:07:38 2022 ] Eval epoch: 3 +[ Wed Sep 14 13:09:23 2022 ] Mean test loss of 930 batches: 4.6684160232543945. +[ Wed Sep 14 13:09:24 2022 ] Top1: 8.49% +[ Wed Sep 14 13:09:24 2022 ] Top5: 29.99% +[ Wed Sep 14 13:09:24 2022 ] Training epoch: 4 +[ Wed Sep 14 13:09:33 2022 ] Batch(13/162) done. Loss: 2.8484 lr:0.100000 +[ Wed Sep 14 13:10:03 2022 ] Batch(113/162) done. Loss: 3.0293 lr:0.100000 +[ Wed Sep 14 13:10:18 2022 ] Eval epoch: 4 +[ Wed Sep 14 13:12:04 2022 ] Mean test loss of 930 batches: 4.1727519035339355. +[ Wed Sep 14 13:12:05 2022 ] Top1: 12.35% +[ Wed Sep 14 13:12:05 2022 ] Top5: 37.33% +[ Wed Sep 14 13:12:06 2022 ] Training epoch: 5 +[ Wed Sep 14 13:12:25 2022 ] Batch(51/162) done. Loss: 2.1966 lr:0.100000 +[ Wed Sep 14 13:12:55 2022 ] Batch(151/162) done. Loss: 2.2492 lr:0.100000 +[ Wed Sep 14 13:12:58 2022 ] Eval epoch: 5 +[ Wed Sep 14 13:14:44 2022 ] Mean test loss of 930 batches: 4.021293640136719. 
+[ Wed Sep 14 13:14:45 2022 ] Top1: 16.43% +[ Wed Sep 14 13:14:45 2022 ] Top5: 41.27% +[ Wed Sep 14 13:14:45 2022 ] Training epoch: 6 +[ Wed Sep 14 13:15:16 2022 ] Batch(89/162) done. Loss: 1.6511 lr:0.100000 +[ Wed Sep 14 13:15:38 2022 ] Eval epoch: 6 +[ Wed Sep 14 13:17:24 2022 ] Mean test loss of 930 batches: 3.7663354873657227. +[ Wed Sep 14 13:17:24 2022 ] Top1: 19.50% +[ Wed Sep 14 13:17:25 2022 ] Top5: 45.88% +[ Wed Sep 14 13:17:25 2022 ] Training epoch: 7 +[ Wed Sep 14 13:17:38 2022 ] Batch(27/162) done. Loss: 1.9444 lr:0.100000 +[ Wed Sep 14 13:18:08 2022 ] Batch(127/162) done. Loss: 1.4796 lr:0.100000 +[ Wed Sep 14 13:18:19 2022 ] Eval epoch: 7 +[ Wed Sep 14 13:20:05 2022 ] Mean test loss of 930 batches: 3.9329254627227783. +[ Wed Sep 14 13:20:06 2022 ] Top1: 17.07% +[ Wed Sep 14 13:20:06 2022 ] Top5: 44.80% +[ Wed Sep 14 13:20:06 2022 ] Training epoch: 8 +[ Wed Sep 14 13:20:30 2022 ] Batch(65/162) done. Loss: 1.8294 lr:0.100000 +[ Wed Sep 14 13:21:00 2022 ] Eval epoch: 8 +[ Wed Sep 14 13:22:45 2022 ] Mean test loss of 930 batches: 3.2654027938842773. +[ Wed Sep 14 13:22:46 2022 ] Top1: 26.08% +[ Wed Sep 14 13:22:46 2022 ] Top5: 53.97% +[ Wed Sep 14 13:22:47 2022 ] Training epoch: 9 +[ Wed Sep 14 13:22:52 2022 ] Batch(3/162) done. Loss: 1.6582 lr:0.100000 +[ Wed Sep 14 13:23:22 2022 ] Batch(103/162) done. Loss: 1.8690 lr:0.100000 +[ Wed Sep 14 13:23:40 2022 ] Eval epoch: 9 +[ Wed Sep 14 13:25:26 2022 ] Mean test loss of 930 batches: 3.132371187210083. +[ Wed Sep 14 13:25:27 2022 ] Top1: 28.38% +[ Wed Sep 14 13:25:27 2022 ] Top5: 57.74% +[ Wed Sep 14 13:25:27 2022 ] Training epoch: 10 +[ Wed Sep 14 13:25:44 2022 ] Batch(41/162) done. Loss: 1.3916 lr:0.100000 +[ Wed Sep 14 13:26:14 2022 ] Batch(141/162) done. Loss: 1.4474 lr:0.100000 +[ Wed Sep 14 13:26:20 2022 ] Eval epoch: 10 +[ Wed Sep 14 13:28:06 2022 ] Mean test loss of 930 batches: 3.1850171089172363. 
+[ Wed Sep 14 13:28:06 2022 ] Top1: 29.50% +[ Wed Sep 14 13:28:07 2022 ] Top5: 59.01% +[ Wed Sep 14 13:28:07 2022 ] Training epoch: 11 +[ Wed Sep 14 13:28:35 2022 ] Batch(79/162) done. Loss: 1.5578 lr:0.100000 +[ Wed Sep 14 13:29:00 2022 ] Eval epoch: 11 +[ Wed Sep 14 13:30:45 2022 ] Mean test loss of 930 batches: 3.093348264694214. +[ Wed Sep 14 13:30:46 2022 ] Top1: 32.56% +[ Wed Sep 14 13:30:46 2022 ] Top5: 66.21% +[ Wed Sep 14 13:30:46 2022 ] Training epoch: 12 +[ Wed Sep 14 13:30:56 2022 ] Batch(17/162) done. Loss: 1.2190 lr:0.100000 +[ Wed Sep 14 13:31:26 2022 ] Batch(117/162) done. Loss: 1.4434 lr:0.100000 +[ Wed Sep 14 13:31:39 2022 ] Eval epoch: 12 +[ Wed Sep 14 13:33:25 2022 ] Mean test loss of 930 batches: 2.7024426460266113. +[ Wed Sep 14 13:33:25 2022 ] Top1: 34.69% +[ Wed Sep 14 13:33:26 2022 ] Top5: 65.19% +[ Wed Sep 14 13:33:26 2022 ] Training epoch: 13 +[ Wed Sep 14 13:33:47 2022 ] Batch(55/162) done. Loss: 1.7219 lr:0.100000 +[ Wed Sep 14 13:34:17 2022 ] Batch(155/162) done. Loss: 1.1206 lr:0.100000 +[ Wed Sep 14 13:34:19 2022 ] Eval epoch: 13 +[ Wed Sep 14 13:36:05 2022 ] Mean test loss of 930 batches: 2.4375510215759277. +[ Wed Sep 14 13:36:06 2022 ] Top1: 38.73% +[ Wed Sep 14 13:36:06 2022 ] Top5: 70.53% +[ Wed Sep 14 13:36:06 2022 ] Training epoch: 14 +[ Wed Sep 14 13:36:39 2022 ] Batch(93/162) done. Loss: 1.0880 lr:0.100000 +[ Wed Sep 14 13:37:00 2022 ] Eval epoch: 14 +[ Wed Sep 14 13:38:45 2022 ] Mean test loss of 930 batches: 3.0172321796417236. +[ Wed Sep 14 13:38:45 2022 ] Top1: 34.33% +[ Wed Sep 14 13:38:46 2022 ] Top5: 68.36% +[ Wed Sep 14 13:38:46 2022 ] Training epoch: 15 +[ Wed Sep 14 13:39:00 2022 ] Batch(31/162) done. Loss: 1.3558 lr:0.100000 +[ Wed Sep 14 13:39:29 2022 ] Batch(131/162) done. Loss: 1.3403 lr:0.100000 +[ Wed Sep 14 13:39:39 2022 ] Eval epoch: 15 +[ Wed Sep 14 13:41:25 2022 ] Mean test loss of 930 batches: 2.841313362121582. 
+[ Wed Sep 14 13:41:25 2022 ] Top1: 35.99% +[ Wed Sep 14 13:41:25 2022 ] Top5: 68.93% +[ Wed Sep 14 13:41:26 2022 ] Training epoch: 16 +[ Wed Sep 14 13:41:51 2022 ] Batch(69/162) done. Loss: 0.9169 lr:0.100000 +[ Wed Sep 14 13:42:19 2022 ] Eval epoch: 16 +[ Wed Sep 14 13:44:04 2022 ] Mean test loss of 930 batches: 2.3747642040252686. +[ Wed Sep 14 13:44:05 2022 ] Top1: 41.51% +[ Wed Sep 14 13:44:05 2022 ] Top5: 73.76% +[ Wed Sep 14 13:44:05 2022 ] Training epoch: 17 +[ Wed Sep 14 13:44:12 2022 ] Batch(7/162) done. Loss: 0.6578 lr:0.100000 +[ Wed Sep 14 13:44:42 2022 ] Batch(107/162) done. Loss: 1.0512 lr:0.100000 +[ Wed Sep 14 13:44:59 2022 ] Eval epoch: 17 +[ Wed Sep 14 13:46:44 2022 ] Mean test loss of 930 batches: 2.545225143432617. +[ Wed Sep 14 13:46:45 2022 ] Top1: 40.27% +[ Wed Sep 14 13:46:45 2022 ] Top5: 73.80% +[ Wed Sep 14 13:46:45 2022 ] Training epoch: 18 +[ Wed Sep 14 13:47:03 2022 ] Batch(45/162) done. Loss: 1.1284 lr:0.100000 +[ Wed Sep 14 13:47:33 2022 ] Batch(145/162) done. Loss: 1.1653 lr:0.100000 +[ Wed Sep 14 13:47:39 2022 ] Eval epoch: 18 +[ Wed Sep 14 13:49:24 2022 ] Mean test loss of 930 batches: 3.1756742000579834. +[ Wed Sep 14 13:49:25 2022 ] Top1: 37.12% +[ Wed Sep 14 13:49:25 2022 ] Top5: 69.36% +[ Wed Sep 14 13:49:25 2022 ] Training epoch: 19 +[ Wed Sep 14 13:49:54 2022 ] Batch(83/162) done. Loss: 1.1490 lr:0.100000 +[ Wed Sep 14 13:50:18 2022 ] Eval epoch: 19 +[ Wed Sep 14 13:52:03 2022 ] Mean test loss of 930 batches: 2.689011573791504. +[ Wed Sep 14 13:52:04 2022 ] Top1: 37.80% +[ Wed Sep 14 13:52:04 2022 ] Top5: 70.91% +[ Wed Sep 14 13:52:04 2022 ] Training epoch: 20 +[ Wed Sep 14 13:52:15 2022 ] Batch(21/162) done. Loss: 0.6293 lr:0.100000 +[ Wed Sep 14 13:52:45 2022 ] Batch(121/162) done. Loss: 1.2528 lr:0.100000 +[ Wed Sep 14 13:52:57 2022 ] Eval epoch: 20 +[ Wed Sep 14 13:54:43 2022 ] Mean test loss of 930 batches: 2.447504758834839. 
+[ Wed Sep 14 13:54:43 2022 ] Top1: 43.61% +[ Wed Sep 14 13:54:44 2022 ] Top5: 75.39% +[ Wed Sep 14 13:54:44 2022 ] Training epoch: 21 +[ Wed Sep 14 13:55:06 2022 ] Batch(59/162) done. Loss: 1.0095 lr:0.100000 +[ Wed Sep 14 13:55:37 2022 ] Batch(159/162) done. Loss: 1.2219 lr:0.100000 +[ Wed Sep 14 13:55:38 2022 ] Eval epoch: 21 +[ Wed Sep 14 13:57:24 2022 ] Mean test loss of 930 batches: 2.4144906997680664. +[ Wed Sep 14 13:57:24 2022 ] Top1: 42.64% +[ Wed Sep 14 13:57:25 2022 ] Top5: 73.40% +[ Wed Sep 14 13:57:25 2022 ] Training epoch: 22 +[ Wed Sep 14 13:57:58 2022 ] Batch(97/162) done. Loss: 0.9796 lr:0.100000 +[ Wed Sep 14 13:58:18 2022 ] Eval epoch: 22 +[ Wed Sep 14 14:00:04 2022 ] Mean test loss of 930 batches: 2.3580615520477295. +[ Wed Sep 14 14:00:05 2022 ] Top1: 44.59% +[ Wed Sep 14 14:00:05 2022 ] Top5: 76.79% +[ Wed Sep 14 14:00:06 2022 ] Training epoch: 23 +[ Wed Sep 14 14:00:20 2022 ] Batch(35/162) done. Loss: 0.7063 lr:0.100000 +[ Wed Sep 14 14:00:50 2022 ] Batch(135/162) done. Loss: 0.8184 lr:0.100000 +[ Wed Sep 14 14:00:58 2022 ] Eval epoch: 23 +[ Wed Sep 14 14:02:44 2022 ] Mean test loss of 930 batches: 7.181952476501465. +[ Wed Sep 14 14:02:45 2022 ] Top1: 23.00% +[ Wed Sep 14 14:02:45 2022 ] Top5: 48.61% +[ Wed Sep 14 14:02:46 2022 ] Training epoch: 24 +[ Wed Sep 14 14:03:12 2022 ] Batch(73/162) done. Loss: 0.6685 lr:0.100000 +[ Wed Sep 14 14:03:39 2022 ] Eval epoch: 24 +[ Wed Sep 14 14:05:24 2022 ] Mean test loss of 930 batches: 2.6629889011383057. +[ Wed Sep 14 14:05:25 2022 ] Top1: 41.00% +[ Wed Sep 14 14:05:25 2022 ] Top5: 73.77% +[ Wed Sep 14 14:05:25 2022 ] Training epoch: 25 +[ Wed Sep 14 14:05:33 2022 ] Batch(11/162) done. Loss: 0.7258 lr:0.100000 +[ Wed Sep 14 14:06:03 2022 ] Batch(111/162) done. Loss: 0.5101 lr:0.100000 +[ Wed Sep 14 14:06:19 2022 ] Eval epoch: 25 +[ Wed Sep 14 14:08:04 2022 ] Mean test loss of 930 batches: 2.775555372238159. 
+[ Wed Sep 14 14:08:04 2022 ] Top1: 43.26% +[ Wed Sep 14 14:08:04 2022 ] Top5: 73.96% +[ Wed Sep 14 14:08:05 2022 ] Training epoch: 26 +[ Wed Sep 14 14:08:24 2022 ] Batch(49/162) done. Loss: 0.6546 lr:0.100000 +[ Wed Sep 14 14:08:54 2022 ] Batch(149/162) done. Loss: 0.5799 lr:0.100000 +[ Wed Sep 14 14:08:58 2022 ] Eval epoch: 26 +[ Wed Sep 14 14:10:44 2022 ] Mean test loss of 930 batches: 2.257340431213379. +[ Wed Sep 14 14:10:44 2022 ] Top1: 47.48% +[ Wed Sep 14 14:10:45 2022 ] Top5: 78.46% +[ Wed Sep 14 14:10:45 2022 ] Training epoch: 27 +[ Wed Sep 14 14:11:15 2022 ] Batch(87/162) done. Loss: 1.2185 lr:0.100000 +[ Wed Sep 14 14:11:38 2022 ] Eval epoch: 27 +[ Wed Sep 14 14:13:23 2022 ] Mean test loss of 930 batches: 2.4034464359283447. +[ Wed Sep 14 14:13:24 2022 ] Top1: 44.35% +[ Wed Sep 14 14:13:24 2022 ] Top5: 77.35% +[ Wed Sep 14 14:13:24 2022 ] Training epoch: 28 +[ Wed Sep 14 14:13:36 2022 ] Batch(25/162) done. Loss: 0.7420 lr:0.100000 +[ Wed Sep 14 14:14:07 2022 ] Batch(125/162) done. Loss: 0.7080 lr:0.100000 +[ Wed Sep 14 14:14:18 2022 ] Eval epoch: 28 +[ Wed Sep 14 14:16:05 2022 ] Mean test loss of 930 batches: 2.270505428314209. +[ Wed Sep 14 14:16:05 2022 ] Top1: 47.75% +[ Wed Sep 14 14:16:06 2022 ] Top5: 79.27% +[ Wed Sep 14 14:16:06 2022 ] Training epoch: 29 +[ Wed Sep 14 14:16:28 2022 ] Batch(63/162) done. Loss: 0.6288 lr:0.100000 +[ Wed Sep 14 14:16:58 2022 ] Eval epoch: 29 +[ Wed Sep 14 14:18:44 2022 ] Mean test loss of 930 batches: 2.4446792602539062. +[ Wed Sep 14 14:18:45 2022 ] Top1: 46.85% +[ Wed Sep 14 14:18:45 2022 ] Top5: 77.90% +[ Wed Sep 14 14:18:45 2022 ] Training epoch: 30 +[ Wed Sep 14 14:18:50 2022 ] Batch(1/162) done. Loss: 0.4565 lr:0.100000 +[ Wed Sep 14 14:19:20 2022 ] Batch(101/162) done. Loss: 0.3626 lr:0.100000 +[ Wed Sep 14 14:19:38 2022 ] Eval epoch: 30 +[ Wed Sep 14 14:21:24 2022 ] Mean test loss of 930 batches: 3.0422732830047607. 
+[ Wed Sep 14 14:21:25 2022 ] Top1: 43.09% +[ Wed Sep 14 14:21:25 2022 ] Top5: 75.57% +[ Wed Sep 14 14:21:25 2022 ] Training epoch: 31 +[ Wed Sep 14 14:21:41 2022 ] Batch(39/162) done. Loss: 0.4943 lr:0.100000 +[ Wed Sep 14 14:22:11 2022 ] Batch(139/162) done. Loss: 0.6466 lr:0.100000 +[ Wed Sep 14 14:22:19 2022 ] Eval epoch: 31 +[ Wed Sep 14 14:24:03 2022 ] Mean test loss of 930 batches: 2.4501664638519287. +[ Wed Sep 14 14:24:04 2022 ] Top1: 47.30% +[ Wed Sep 14 14:24:04 2022 ] Top5: 77.81% +[ Wed Sep 14 14:24:04 2022 ] Training epoch: 32 +[ Wed Sep 14 14:24:32 2022 ] Batch(77/162) done. Loss: 0.5112 lr:0.100000 +[ Wed Sep 14 14:24:58 2022 ] Eval epoch: 32 +[ Wed Sep 14 14:26:43 2022 ] Mean test loss of 930 batches: 2.6631052494049072. +[ Wed Sep 14 14:26:44 2022 ] Top1: 46.35% +[ Wed Sep 14 14:26:44 2022 ] Top5: 76.90% +[ Wed Sep 14 14:26:45 2022 ] Training epoch: 33 +[ Wed Sep 14 14:26:53 2022 ] Batch(15/162) done. Loss: 0.6779 lr:0.100000 +[ Wed Sep 14 14:27:23 2022 ] Batch(115/162) done. Loss: 0.7528 lr:0.100000 +[ Wed Sep 14 14:27:37 2022 ] Eval epoch: 33 +[ Wed Sep 14 14:29:22 2022 ] Mean test loss of 930 batches: 2.6296942234039307. +[ Wed Sep 14 14:29:23 2022 ] Top1: 45.78% +[ Wed Sep 14 14:29:23 2022 ] Top5: 76.56% +[ Wed Sep 14 14:29:24 2022 ] Training epoch: 34 +[ Wed Sep 14 14:29:43 2022 ] Batch(53/162) done. Loss: 0.3927 lr:0.100000 +[ Wed Sep 14 14:30:13 2022 ] Batch(153/162) done. Loss: 0.5658 lr:0.100000 +[ Wed Sep 14 14:30:16 2022 ] Eval epoch: 34 +[ Wed Sep 14 14:32:01 2022 ] Mean test loss of 930 batches: 2.6114041805267334. +[ Wed Sep 14 14:32:02 2022 ] Top1: 47.60% +[ Wed Sep 14 14:32:02 2022 ] Top5: 77.87% +[ Wed Sep 14 14:32:02 2022 ] Training epoch: 35 +[ Wed Sep 14 14:32:34 2022 ] Batch(91/162) done. Loss: 0.4018 lr:0.100000 +[ Wed Sep 14 14:32:55 2022 ] Eval epoch: 35 +[ Wed Sep 14 14:34:40 2022 ] Mean test loss of 930 batches: 2.69102144241333. 
+[ Wed Sep 14 14:34:41 2022 ] Top1: 47.25% +[ Wed Sep 14 14:34:41 2022 ] Top5: 78.07% +[ Wed Sep 14 14:34:42 2022 ] Training epoch: 36 +[ Wed Sep 14 14:34:55 2022 ] Batch(29/162) done. Loss: 0.3885 lr:0.100000 +[ Wed Sep 14 14:35:25 2022 ] Batch(129/162) done. Loss: 0.6378 lr:0.100000 +[ Wed Sep 14 14:35:35 2022 ] Eval epoch: 36 +[ Wed Sep 14 14:37:21 2022 ] Mean test loss of 930 batches: 2.399557590484619. +[ Wed Sep 14 14:37:21 2022 ] Top1: 49.29% +[ Wed Sep 14 14:37:22 2022 ] Top5: 80.08% +[ Wed Sep 14 14:37:22 2022 ] Training epoch: 37 +[ Wed Sep 14 14:37:46 2022 ] Batch(67/162) done. Loss: 0.6209 lr:0.100000 +[ Wed Sep 14 14:38:15 2022 ] Eval epoch: 37 +[ Wed Sep 14 14:39:59 2022 ] Mean test loss of 930 batches: 2.537379741668701. +[ Wed Sep 14 14:40:00 2022 ] Top1: 47.78% +[ Wed Sep 14 14:40:00 2022 ] Top5: 78.34% +[ Wed Sep 14 14:40:00 2022 ] Training epoch: 38 +[ Wed Sep 14 14:40:06 2022 ] Batch(5/162) done. Loss: 0.2133 lr:0.100000 +[ Wed Sep 14 14:40:36 2022 ] Batch(105/162) done. Loss: 0.6252 lr:0.100000 +[ Wed Sep 14 14:40:53 2022 ] Eval epoch: 38 +[ Wed Sep 14 14:42:39 2022 ] Mean test loss of 930 batches: 2.464905261993408. +[ Wed Sep 14 14:42:39 2022 ] Top1: 50.67% +[ Wed Sep 14 14:42:40 2022 ] Top5: 80.14% +[ Wed Sep 14 14:42:40 2022 ] Training epoch: 39 +[ Wed Sep 14 14:42:57 2022 ] Batch(43/162) done. Loss: 0.6274 lr:0.100000 +[ Wed Sep 14 14:43:28 2022 ] Batch(143/162) done. Loss: 0.3125 lr:0.100000 +[ Wed Sep 14 14:43:34 2022 ] Eval epoch: 39 +[ Wed Sep 14 14:45:18 2022 ] Mean test loss of 930 batches: 2.442579507827759. +[ Wed Sep 14 14:45:19 2022 ] Top1: 48.40% +[ Wed Sep 14 14:45:19 2022 ] Top5: 79.57% +[ Wed Sep 14 14:45:20 2022 ] Training epoch: 40 +[ Wed Sep 14 14:45:48 2022 ] Batch(81/162) done. Loss: 0.3952 lr:0.100000 +[ Wed Sep 14 14:46:13 2022 ] Eval epoch: 40 +[ Wed Sep 14 14:47:58 2022 ] Mean test loss of 930 batches: 2.7350897789001465. 
+[ Wed Sep 14 14:47:59 2022 ] Top1: 49.77% +[ Wed Sep 14 14:47:59 2022 ] Top5: 79.67% +[ Wed Sep 14 14:47:59 2022 ] Training epoch: 41 +[ Wed Sep 14 14:48:10 2022 ] Batch(19/162) done. Loss: 0.4260 lr:0.100000 +[ Wed Sep 14 14:48:40 2022 ] Batch(119/162) done. Loss: 0.5597 lr:0.100000 +[ Wed Sep 14 14:48:53 2022 ] Eval epoch: 41 +[ Wed Sep 14 14:50:38 2022 ] Mean test loss of 930 batches: 2.485272169113159. +[ Wed Sep 14 14:50:39 2022 ] Top1: 48.08% +[ Wed Sep 14 14:50:39 2022 ] Top5: 79.36% +[ Wed Sep 14 14:50:39 2022 ] Training epoch: 42 +[ Wed Sep 14 14:51:01 2022 ] Batch(57/162) done. Loss: 0.3172 lr:0.100000 +[ Wed Sep 14 14:51:31 2022 ] Batch(157/162) done. Loss: 0.5246 lr:0.100000 +[ Wed Sep 14 14:51:33 2022 ] Eval epoch: 42 +[ Wed Sep 14 14:53:18 2022 ] Mean test loss of 930 batches: 3.099501132965088. +[ Wed Sep 14 14:53:18 2022 ] Top1: 45.55% +[ Wed Sep 14 14:53:19 2022 ] Top5: 76.71% +[ Wed Sep 14 14:53:19 2022 ] Training epoch: 43 +[ Wed Sep 14 14:53:52 2022 ] Batch(95/162) done. Loss: 0.6298 lr:0.100000 +[ Wed Sep 14 14:54:13 2022 ] Eval epoch: 43 +[ Wed Sep 14 14:55:58 2022 ] Mean test loss of 930 batches: 2.642946720123291. +[ Wed Sep 14 14:55:58 2022 ] Top1: 47.10% +[ Wed Sep 14 14:55:59 2022 ] Top5: 77.90% +[ Wed Sep 14 14:55:59 2022 ] Training epoch: 44 +[ Wed Sep 14 14:56:13 2022 ] Batch(33/162) done. Loss: 0.3098 lr:0.100000 +[ Wed Sep 14 14:56:43 2022 ] Batch(133/162) done. Loss: 0.4637 lr:0.100000 +[ Wed Sep 14 14:56:52 2022 ] Eval epoch: 44 +[ Wed Sep 14 14:58:37 2022 ] Mean test loss of 930 batches: 2.9123451709747314. +[ Wed Sep 14 14:58:38 2022 ] Top1: 48.74% +[ Wed Sep 14 14:58:38 2022 ] Top5: 78.19% +[ Wed Sep 14 14:58:39 2022 ] Training epoch: 45 +[ Wed Sep 14 14:59:04 2022 ] Batch(71/162) done. Loss: 0.3870 lr:0.100000 +[ Wed Sep 14 14:59:32 2022 ] Eval epoch: 45 +[ Wed Sep 14 15:01:18 2022 ] Mean test loss of 930 batches: 2.6414036750793457. 
+[ Wed Sep 14 15:01:18 2022 ] Top1: 48.72% +[ Wed Sep 14 15:01:18 2022 ] Top5: 79.11% +[ Wed Sep 14 15:01:19 2022 ] Training epoch: 46 +[ Wed Sep 14 15:01:26 2022 ] Batch(9/162) done. Loss: 0.4615 lr:0.100000 +[ Wed Sep 14 15:01:56 2022 ] Batch(109/162) done. Loss: 0.4647 lr:0.100000 +[ Wed Sep 14 15:02:12 2022 ] Eval epoch: 46 +[ Wed Sep 14 15:03:57 2022 ] Mean test loss of 930 batches: 2.5460541248321533. +[ Wed Sep 14 15:03:58 2022 ] Top1: 48.67% +[ Wed Sep 14 15:03:58 2022 ] Top5: 78.74% +[ Wed Sep 14 15:03:58 2022 ] Training epoch: 47 +[ Wed Sep 14 15:04:17 2022 ] Batch(47/162) done. Loss: 0.3628 lr:0.100000 +[ Wed Sep 14 15:04:47 2022 ] Batch(147/162) done. Loss: 0.4634 lr:0.100000 +[ Wed Sep 14 15:04:51 2022 ] Eval epoch: 47 +[ Wed Sep 14 15:06:37 2022 ] Mean test loss of 930 batches: 2.6031250953674316. +[ Wed Sep 14 15:06:38 2022 ] Top1: 51.11% +[ Wed Sep 14 15:06:38 2022 ] Top5: 80.63% +[ Wed Sep 14 15:06:38 2022 ] Training epoch: 48 +[ Wed Sep 14 15:07:08 2022 ] Batch(85/162) done. Loss: 0.4638 lr:0.100000 +[ Wed Sep 14 15:07:32 2022 ] Eval epoch: 48 +[ Wed Sep 14 15:09:17 2022 ] Mean test loss of 930 batches: 2.731889247894287. +[ Wed Sep 14 15:09:17 2022 ] Top1: 48.96% +[ Wed Sep 14 15:09:18 2022 ] Top5: 78.49% +[ Wed Sep 14 15:09:18 2022 ] Training epoch: 49 +[ Wed Sep 14 15:09:29 2022 ] Batch(23/162) done. Loss: 0.3121 lr:0.100000 +[ Wed Sep 14 15:09:59 2022 ] Batch(123/162) done. Loss: 0.5220 lr:0.100000 +[ Wed Sep 14 15:10:11 2022 ] Eval epoch: 49 +[ Wed Sep 14 15:11:56 2022 ] Mean test loss of 930 batches: 2.6333515644073486. +[ Wed Sep 14 15:11:57 2022 ] Top1: 50.57% +[ Wed Sep 14 15:11:57 2022 ] Top5: 79.49% +[ Wed Sep 14 15:11:57 2022 ] Training epoch: 50 +[ Wed Sep 14 15:12:20 2022 ] Batch(61/162) done. Loss: 0.4016 lr:0.100000 +[ Wed Sep 14 15:12:50 2022 ] Batch(161/162) done. Loss: 0.3247 lr:0.100000 +[ Wed Sep 14 15:12:51 2022 ] Eval epoch: 50 +[ Wed Sep 14 15:14:35 2022 ] Mean test loss of 930 batches: 2.8169643878936768. 
+[ Wed Sep 14 15:14:36 2022 ] Top1: 47.59% +[ Wed Sep 14 15:14:36 2022 ] Top5: 79.00% +[ Wed Sep 14 15:14:37 2022 ] Training epoch: 51 +[ Wed Sep 14 15:15:11 2022 ] Batch(99/162) done. Loss: 0.2663 lr:0.100000 +[ Wed Sep 14 15:15:30 2022 ] Eval epoch: 51 +[ Wed Sep 14 15:17:15 2022 ] Mean test loss of 930 batches: 2.810101270675659. +[ Wed Sep 14 15:17:16 2022 ] Top1: 47.55% +[ Wed Sep 14 15:17:16 2022 ] Top5: 76.58% +[ Wed Sep 14 15:17:16 2022 ] Training epoch: 52 +[ Wed Sep 14 15:17:32 2022 ] Batch(37/162) done. Loss: 0.3549 lr:0.100000 +[ Wed Sep 14 15:18:02 2022 ] Batch(137/162) done. Loss: 0.1950 lr:0.100000 +[ Wed Sep 14 15:18:09 2022 ] Eval epoch: 52 +[ Wed Sep 14 15:19:55 2022 ] Mean test loss of 930 batches: 2.8436119556427. +[ Wed Sep 14 15:19:55 2022 ] Top1: 47.13% +[ Wed Sep 14 15:19:56 2022 ] Top5: 77.29% +[ Wed Sep 14 15:19:56 2022 ] Training epoch: 53 +[ Wed Sep 14 15:20:23 2022 ] Batch(75/162) done. Loss: 0.2384 lr:0.100000 +[ Wed Sep 14 15:20:49 2022 ] Eval epoch: 53 +[ Wed Sep 14 15:22:34 2022 ] Mean test loss of 930 batches: 2.886247158050537. +[ Wed Sep 14 15:22:34 2022 ] Top1: 50.27% +[ Wed Sep 14 15:22:35 2022 ] Top5: 78.67% +[ Wed Sep 14 15:22:35 2022 ] Training epoch: 54 +[ Wed Sep 14 15:22:43 2022 ] Batch(13/162) done. Loss: 0.2992 lr:0.100000 +[ Wed Sep 14 15:23:13 2022 ] Batch(113/162) done. Loss: 0.5068 lr:0.100000 +[ Wed Sep 14 15:23:28 2022 ] Eval epoch: 54 +[ Wed Sep 14 15:25:14 2022 ] Mean test loss of 930 batches: 3.0723490715026855. +[ Wed Sep 14 15:25:14 2022 ] Top1: 44.91% +[ Wed Sep 14 15:25:15 2022 ] Top5: 75.83% +[ Wed Sep 14 15:25:15 2022 ] Training epoch: 55 +[ Wed Sep 14 15:25:35 2022 ] Batch(51/162) done. Loss: 0.3286 lr:0.100000 +[ Wed Sep 14 15:26:05 2022 ] Batch(151/162) done. Loss: 0.3133 lr:0.100000 +[ Wed Sep 14 15:26:08 2022 ] Eval epoch: 55 +[ Wed Sep 14 15:27:54 2022 ] Mean test loss of 930 batches: 3.1500563621520996. 
+[ Wed Sep 14 15:27:54 2022 ] Top1: 48.24% +[ Wed Sep 14 15:27:54 2022 ] Top5: 78.41% +[ Wed Sep 14 15:27:55 2022 ] Training epoch: 56 +[ Wed Sep 14 15:28:26 2022 ] Batch(89/162) done. Loss: 0.6380 lr:0.100000 +[ Wed Sep 14 15:28:48 2022 ] Eval epoch: 56 +[ Wed Sep 14 15:30:33 2022 ] Mean test loss of 930 batches: 2.7924721240997314. +[ Wed Sep 14 15:30:33 2022 ] Top1: 49.68% +[ Wed Sep 14 15:30:34 2022 ] Top5: 78.68% +[ Wed Sep 14 15:30:34 2022 ] Training epoch: 57 +[ Wed Sep 14 15:30:47 2022 ] Batch(27/162) done. Loss: 0.2815 lr:0.100000 +[ Wed Sep 14 15:31:17 2022 ] Batch(127/162) done. Loss: 0.5505 lr:0.100000 +[ Wed Sep 14 15:31:27 2022 ] Eval epoch: 57 +[ Wed Sep 14 15:33:13 2022 ] Mean test loss of 930 batches: 3.1129703521728516. +[ Wed Sep 14 15:33:14 2022 ] Top1: 46.56% +[ Wed Sep 14 15:33:15 2022 ] Top5: 77.12% +[ Wed Sep 14 15:33:15 2022 ] Training epoch: 58 +[ Wed Sep 14 15:33:39 2022 ] Batch(65/162) done. Loss: 0.4868 lr:0.100000 +[ Wed Sep 14 15:34:08 2022 ] Eval epoch: 58 +[ Wed Sep 14 15:35:53 2022 ] Mean test loss of 930 batches: 3231.03759765625. +[ Wed Sep 14 15:35:53 2022 ] Top1: 0.88% +[ Wed Sep 14 15:35:54 2022 ] Top5: 4.78% +[ Wed Sep 14 15:35:54 2022 ] Training epoch: 59 +[ Wed Sep 14 15:35:59 2022 ] Batch(3/162) done. Loss: 0.4287 lr:0.100000 +[ Wed Sep 14 15:36:29 2022 ] Batch(103/162) done. Loss: 0.3732 lr:0.100000 +[ Wed Sep 14 15:36:47 2022 ] Eval epoch: 59 +[ Wed Sep 14 15:38:32 2022 ] Mean test loss of 930 batches: 2.689409017562866. +[ Wed Sep 14 15:38:33 2022 ] Top1: 49.12% +[ Wed Sep 14 15:38:33 2022 ] Top5: 78.96% +[ Wed Sep 14 15:38:33 2022 ] Training epoch: 60 +[ Wed Sep 14 15:38:50 2022 ] Batch(41/162) done. Loss: 0.5933 lr:0.100000 +[ Wed Sep 14 15:39:20 2022 ] Batch(141/162) done. Loss: 0.2460 lr:0.100000 +[ Wed Sep 14 15:39:26 2022 ] Eval epoch: 60 +[ Wed Sep 14 15:41:11 2022 ] Mean test loss of 930 batches: 2.883899211883545. 
+[ Wed Sep 14 15:41:12 2022 ] Top1: 48.02% +[ Wed Sep 14 15:41:12 2022 ] Top5: 78.20% +[ Wed Sep 14 15:41:13 2022 ] Training epoch: 61 +[ Wed Sep 14 15:41:41 2022 ] Batch(79/162) done. Loss: 0.1999 lr:0.010000 +[ Wed Sep 14 15:42:06 2022 ] Eval epoch: 61 +[ Wed Sep 14 15:43:51 2022 ] Mean test loss of 930 batches: 2.3885531425476074. +[ Wed Sep 14 15:43:52 2022 ] Top1: 56.03% +[ Wed Sep 14 15:43:52 2022 ] Top5: 84.00% +[ Wed Sep 14 15:43:53 2022 ] Training epoch: 62 +[ Wed Sep 14 15:44:02 2022 ] Batch(17/162) done. Loss: 0.0761 lr:0.010000 +[ Wed Sep 14 15:44:32 2022 ] Batch(117/162) done. Loss: 0.2031 lr:0.010000 +[ Wed Sep 14 15:44:46 2022 ] Eval epoch: 62 +[ Wed Sep 14 15:46:32 2022 ] Mean test loss of 930 batches: 2.4345755577087402. +[ Wed Sep 14 15:46:32 2022 ] Top1: 56.35% +[ Wed Sep 14 15:46:33 2022 ] Top5: 83.88% +[ Wed Sep 14 15:46:33 2022 ] Training epoch: 63 +[ Wed Sep 14 15:46:54 2022 ] Batch(55/162) done. Loss: 0.0445 lr:0.010000 +[ Wed Sep 14 15:47:24 2022 ] Batch(155/162) done. Loss: 0.0461 lr:0.010000 +[ Wed Sep 14 15:47:26 2022 ] Eval epoch: 63 +[ Wed Sep 14 15:49:12 2022 ] Mean test loss of 930 batches: 2.4465322494506836. +[ Wed Sep 14 15:49:13 2022 ] Top1: 56.64% +[ Wed Sep 14 15:49:13 2022 ] Top5: 83.96% +[ Wed Sep 14 15:49:14 2022 ] Training epoch: 64 +[ Wed Sep 14 15:49:45 2022 ] Batch(93/162) done. Loss: 0.0613 lr:0.010000 +[ Wed Sep 14 15:50:06 2022 ] Eval epoch: 64 +[ Wed Sep 14 15:51:52 2022 ] Mean test loss of 930 batches: 2.425952434539795. +[ Wed Sep 14 15:51:52 2022 ] Top1: 56.74% +[ Wed Sep 14 15:51:53 2022 ] Top5: 84.13% +[ Wed Sep 14 15:51:53 2022 ] Training epoch: 65 +[ Wed Sep 14 15:52:06 2022 ] Batch(31/162) done. Loss: 0.0433 lr:0.010000 +[ Wed Sep 14 15:52:37 2022 ] Batch(131/162) done. Loss: 0.0838 lr:0.010000 +[ Wed Sep 14 15:52:46 2022 ] Eval epoch: 65 +[ Wed Sep 14 15:54:31 2022 ] Mean test loss of 930 batches: 2.4919207096099854. 
+[ Wed Sep 14 15:54:32 2022 ] Top1: 56.39% +[ Wed Sep 14 15:54:32 2022 ] Top5: 83.99% +[ Wed Sep 14 15:54:33 2022 ] Training epoch: 66 +[ Wed Sep 14 15:54:57 2022 ] Batch(69/162) done. Loss: 0.1099 lr:0.010000 +[ Wed Sep 14 15:55:25 2022 ] Eval epoch: 66 +[ Wed Sep 14 15:57:11 2022 ] Mean test loss of 930 batches: 2.545520305633545. +[ Wed Sep 14 15:57:11 2022 ] Top1: 56.37% +[ Wed Sep 14 15:57:12 2022 ] Top5: 84.07% +[ Wed Sep 14 15:57:12 2022 ] Training epoch: 67 +[ Wed Sep 14 15:57:19 2022 ] Batch(7/162) done. Loss: 0.0940 lr:0.010000 +[ Wed Sep 14 15:57:48 2022 ] Batch(107/162) done. Loss: 0.0599 lr:0.010000 +[ Wed Sep 14 15:58:05 2022 ] Eval epoch: 67 +[ Wed Sep 14 15:59:50 2022 ] Mean test loss of 930 batches: 2.5489892959594727. +[ Wed Sep 14 15:59:51 2022 ] Top1: 56.97% +[ Wed Sep 14 15:59:51 2022 ] Top5: 84.13% +[ Wed Sep 14 15:59:52 2022 ] Training epoch: 68 +[ Wed Sep 14 16:00:09 2022 ] Batch(45/162) done. Loss: 0.0327 lr:0.010000 +[ Wed Sep 14 16:00:39 2022 ] Batch(145/162) done. Loss: 0.0760 lr:0.010000 +[ Wed Sep 14 16:00:44 2022 ] Eval epoch: 68 +[ Wed Sep 14 16:02:30 2022 ] Mean test loss of 930 batches: 2.5511016845703125. +[ Wed Sep 14 16:02:30 2022 ] Top1: 56.73% +[ Wed Sep 14 16:02:31 2022 ] Top5: 84.09% +[ Wed Sep 14 16:02:31 2022 ] Training epoch: 69 +[ Wed Sep 14 16:03:00 2022 ] Batch(83/162) done. Loss: 0.0340 lr:0.010000 +[ Wed Sep 14 16:03:24 2022 ] Eval epoch: 69 +[ Wed Sep 14 16:05:09 2022 ] Mean test loss of 930 batches: 2.63191819190979. +[ Wed Sep 14 16:05:10 2022 ] Top1: 56.40% +[ Wed Sep 14 16:05:10 2022 ] Top5: 84.01% +[ Wed Sep 14 16:05:10 2022 ] Training epoch: 70 +[ Wed Sep 14 16:05:21 2022 ] Batch(21/162) done. Loss: 0.0284 lr:0.010000 +[ Wed Sep 14 16:05:51 2022 ] Batch(121/162) done. Loss: 0.0410 lr:0.010000 +[ Wed Sep 14 16:06:03 2022 ] Eval epoch: 70 +[ Wed Sep 14 16:07:50 2022 ] Mean test loss of 930 batches: 2.535250425338745. 
+[ Wed Sep 14 16:07:50 2022 ] Top1: 57.11% +[ Wed Sep 14 16:07:50 2022 ] Top5: 84.39% +[ Wed Sep 14 16:07:51 2022 ] Training epoch: 71 +[ Wed Sep 14 16:08:13 2022 ] Batch(59/162) done. Loss: 0.0551 lr:0.010000 +[ Wed Sep 14 16:08:43 2022 ] Batch(159/162) done. Loss: 0.1404 lr:0.010000 +[ Wed Sep 14 16:08:44 2022 ] Eval epoch: 71 +[ Wed Sep 14 16:10:29 2022 ] Mean test loss of 930 batches: 2.6160361766815186. +[ Wed Sep 14 16:10:29 2022 ] Top1: 56.59% +[ Wed Sep 14 16:10:30 2022 ] Top5: 83.92% +[ Wed Sep 14 16:10:30 2022 ] Training epoch: 72 +[ Wed Sep 14 16:11:03 2022 ] Batch(97/162) done. Loss: 0.0723 lr:0.010000 +[ Wed Sep 14 16:11:23 2022 ] Eval epoch: 72 +[ Wed Sep 14 16:13:08 2022 ] Mean test loss of 930 batches: 2.5643980503082275. +[ Wed Sep 14 16:13:09 2022 ] Top1: 57.26% +[ Wed Sep 14 16:13:09 2022 ] Top5: 84.50% +[ Wed Sep 14 16:13:09 2022 ] Training epoch: 73 +[ Wed Sep 14 16:13:24 2022 ] Batch(35/162) done. Loss: 0.1179 lr:0.010000 +[ Wed Sep 14 16:13:55 2022 ] Batch(135/162) done. Loss: 0.0451 lr:0.010000 +[ Wed Sep 14 16:14:03 2022 ] Eval epoch: 73 +[ Wed Sep 14 16:15:49 2022 ] Mean test loss of 930 batches: 2.588409662246704. +[ Wed Sep 14 16:15:49 2022 ] Top1: 57.01% +[ Wed Sep 14 16:15:49 2022 ] Top5: 84.40% +[ Wed Sep 14 16:15:50 2022 ] Training epoch: 74 +[ Wed Sep 14 16:16:16 2022 ] Batch(73/162) done. Loss: 0.0769 lr:0.010000 +[ Wed Sep 14 16:16:43 2022 ] Eval epoch: 74 +[ Wed Sep 14 16:18:28 2022 ] Mean test loss of 930 batches: 2.6296372413635254. +[ Wed Sep 14 16:18:29 2022 ] Top1: 57.02% +[ Wed Sep 14 16:18:29 2022 ] Top5: 84.30% +[ Wed Sep 14 16:18:29 2022 ] Training epoch: 75 +[ Wed Sep 14 16:18:37 2022 ] Batch(11/162) done. Loss: 0.0481 lr:0.010000 +[ Wed Sep 14 16:19:07 2022 ] Batch(111/162) done. Loss: 0.0518 lr:0.010000 +[ Wed Sep 14 16:19:22 2022 ] Eval epoch: 75 +[ Wed Sep 14 16:21:07 2022 ] Mean test loss of 930 batches: 2.7006022930145264. 
+[ Wed Sep 14 16:21:07 2022 ] Top1: 56.80% +[ Wed Sep 14 16:21:08 2022 ] Top5: 83.95% +[ Wed Sep 14 16:21:08 2022 ] Training epoch: 76 +[ Wed Sep 14 16:21:27 2022 ] Batch(49/162) done. Loss: 0.0650 lr:0.010000 +[ Wed Sep 14 16:21:58 2022 ] Batch(149/162) done. Loss: 0.1161 lr:0.010000 +[ Wed Sep 14 16:22:02 2022 ] Eval epoch: 76 +[ Wed Sep 14 16:23:47 2022 ] Mean test loss of 930 batches: 2.6388227939605713. +[ Wed Sep 14 16:23:47 2022 ] Top1: 56.99% +[ Wed Sep 14 16:23:48 2022 ] Top5: 84.26% +[ Wed Sep 14 16:23:48 2022 ] Training epoch: 77 +[ Wed Sep 14 16:24:19 2022 ] Batch(87/162) done. Loss: 0.0455 lr:0.010000 +[ Wed Sep 14 16:24:41 2022 ] Eval epoch: 77 +[ Wed Sep 14 16:26:27 2022 ] Mean test loss of 930 batches: 2.7077810764312744. +[ Wed Sep 14 16:26:27 2022 ] Top1: 56.39% +[ Wed Sep 14 16:26:27 2022 ] Top5: 83.60% +[ Wed Sep 14 16:26:28 2022 ] Training epoch: 78 +[ Wed Sep 14 16:26:39 2022 ] Batch(25/162) done. Loss: 0.0488 lr:0.010000 +[ Wed Sep 14 16:27:09 2022 ] Batch(125/162) done. Loss: 0.0851 lr:0.010000 +[ Wed Sep 14 16:27:21 2022 ] Eval epoch: 78 +[ Wed Sep 14 16:29:06 2022 ] Mean test loss of 930 batches: 2.682739734649658. +[ Wed Sep 14 16:29:06 2022 ] Top1: 56.73% +[ Wed Sep 14 16:29:06 2022 ] Top5: 84.07% +[ Wed Sep 14 16:29:07 2022 ] Training epoch: 79 +[ Wed Sep 14 16:29:30 2022 ] Batch(63/162) done. Loss: 0.0371 lr:0.010000 +[ Wed Sep 14 16:30:00 2022 ] Eval epoch: 79 +[ Wed Sep 14 16:31:45 2022 ] Mean test loss of 930 batches: 2.8168888092041016. +[ Wed Sep 14 16:31:46 2022 ] Top1: 56.25% +[ Wed Sep 14 16:31:46 2022 ] Top5: 83.55% +[ Wed Sep 14 16:31:46 2022 ] Training epoch: 80 +[ Wed Sep 14 16:31:51 2022 ] Batch(1/162) done. Loss: 0.0522 lr:0.010000 +[ Wed Sep 14 16:32:21 2022 ] Batch(101/162) done. Loss: 0.0506 lr:0.010000 +[ Wed Sep 14 16:32:40 2022 ] Eval epoch: 80 +[ Wed Sep 14 16:34:25 2022 ] Mean test loss of 930 batches: 2.7519114017486572. 
+[ Wed Sep 14 16:34:26 2022 ] Top1: 56.65% +[ Wed Sep 14 16:34:26 2022 ] Top5: 84.02% +[ Wed Sep 14 16:34:26 2022 ] Training epoch: 81 +[ Wed Sep 14 16:34:42 2022 ] Batch(39/162) done. Loss: 0.0524 lr:0.001000 +[ Wed Sep 14 16:35:12 2022 ] Batch(139/162) done. Loss: 0.1031 lr:0.001000 +[ Wed Sep 14 16:35:19 2022 ] Eval epoch: 81 +[ Wed Sep 14 16:37:04 2022 ] Mean test loss of 930 batches: 2.7525532245635986. +[ Wed Sep 14 16:37:05 2022 ] Top1: 56.75% +[ Wed Sep 14 16:37:05 2022 ] Top5: 84.01% +[ Wed Sep 14 16:37:06 2022 ] Training epoch: 82 +[ Wed Sep 14 16:37:33 2022 ] Batch(77/162) done. Loss: 0.0096 lr:0.001000 +[ Wed Sep 14 16:37:59 2022 ] Eval epoch: 82 +[ Wed Sep 14 16:39:44 2022 ] Mean test loss of 930 batches: 2.7929162979125977. +[ Wed Sep 14 16:39:44 2022 ] Top1: 56.84% +[ Wed Sep 14 16:39:45 2022 ] Top5: 83.93% +[ Wed Sep 14 16:39:45 2022 ] Training epoch: 83 +[ Wed Sep 14 16:39:54 2022 ] Batch(15/162) done. Loss: 0.0441 lr:0.001000 +[ Wed Sep 14 16:40:24 2022 ] Batch(115/162) done. Loss: 0.0524 lr:0.001000 +[ Wed Sep 14 16:40:39 2022 ] Eval epoch: 83 +[ Wed Sep 14 16:42:23 2022 ] Mean test loss of 930 batches: 2.7270166873931885. +[ Wed Sep 14 16:42:24 2022 ] Top1: 56.75% +[ Wed Sep 14 16:42:24 2022 ] Top5: 83.99% +[ Wed Sep 14 16:42:24 2022 ] Training epoch: 84 +[ Wed Sep 14 16:42:45 2022 ] Batch(53/162) done. Loss: 0.0641 lr:0.001000 +[ Wed Sep 14 16:43:15 2022 ] Batch(153/162) done. Loss: 0.0311 lr:0.001000 +[ Wed Sep 14 16:43:18 2022 ] Eval epoch: 84 +[ Wed Sep 14 16:45:04 2022 ] Mean test loss of 930 batches: 2.7539007663726807. +[ Wed Sep 14 16:45:05 2022 ] Top1: 56.78% +[ Wed Sep 14 16:45:05 2022 ] Top5: 83.91% +[ Wed Sep 14 16:45:05 2022 ] Training epoch: 85 +[ Wed Sep 14 16:45:37 2022 ] Batch(91/162) done. Loss: 0.0335 lr:0.001000 +[ Wed Sep 14 16:45:58 2022 ] Eval epoch: 85 +[ Wed Sep 14 16:47:44 2022 ] Mean test loss of 930 batches: 2.699486017227173. 
+[ Wed Sep 14 16:47:44 2022 ] Top1: 56.91% +[ Wed Sep 14 16:47:45 2022 ] Top5: 84.25% +[ Wed Sep 14 16:47:45 2022 ] Training epoch: 86 +[ Wed Sep 14 16:47:58 2022 ] Batch(29/162) done. Loss: 0.1003 lr:0.001000 +[ Wed Sep 14 16:48:28 2022 ] Batch(129/162) done. Loss: 0.1065 lr:0.001000 +[ Wed Sep 14 16:48:38 2022 ] Eval epoch: 86 +[ Wed Sep 14 16:50:23 2022 ] Mean test loss of 930 batches: 2.7011775970458984. +[ Wed Sep 14 16:50:23 2022 ] Top1: 57.23% +[ Wed Sep 14 16:50:24 2022 ] Top5: 84.35% +[ Wed Sep 14 16:50:24 2022 ] Training epoch: 87 +[ Wed Sep 14 16:50:49 2022 ] Batch(67/162) done. Loss: 0.0922 lr:0.001000 +[ Wed Sep 14 16:51:17 2022 ] Eval epoch: 87 +[ Wed Sep 14 16:53:02 2022 ] Mean test loss of 930 batches: 2.7254478931427. +[ Wed Sep 14 16:53:03 2022 ] Top1: 56.81% +[ Wed Sep 14 16:53:03 2022 ] Top5: 84.22% +[ Wed Sep 14 16:53:04 2022 ] Training epoch: 88 +[ Wed Sep 14 16:53:10 2022 ] Batch(5/162) done. Loss: 0.0968 lr:0.001000 +[ Wed Sep 14 16:53:40 2022 ] Batch(105/162) done. Loss: 0.1023 lr:0.001000 +[ Wed Sep 14 16:53:57 2022 ] Eval epoch: 88 +[ Wed Sep 14 16:55:42 2022 ] Mean test loss of 930 batches: 2.6768813133239746. +[ Wed Sep 14 16:55:43 2022 ] Top1: 57.08% +[ Wed Sep 14 16:55:43 2022 ] Top5: 84.17% +[ Wed Sep 14 16:55:43 2022 ] Training epoch: 89 +[ Wed Sep 14 16:56:00 2022 ] Batch(43/162) done. Loss: 0.0105 lr:0.001000 +[ Wed Sep 14 16:56:31 2022 ] Batch(143/162) done. Loss: 0.0479 lr:0.001000 +[ Wed Sep 14 16:56:37 2022 ] Eval epoch: 89 +[ Wed Sep 14 16:58:23 2022 ] Mean test loss of 930 batches: 2.690788984298706. +[ Wed Sep 14 16:58:23 2022 ] Top1: 56.82% +[ Wed Sep 14 16:58:24 2022 ] Top5: 84.20% +[ Wed Sep 14 16:58:24 2022 ] Training epoch: 90 +[ Wed Sep 14 16:58:53 2022 ] Batch(81/162) done. Loss: 0.0452 lr:0.001000 +[ Wed Sep 14 16:59:18 2022 ] Eval epoch: 90 +[ Wed Sep 14 17:01:03 2022 ] Mean test loss of 930 batches: 2.699519157409668. 
+[ Wed Sep 14 17:01:03 2022 ] Top1: 56.97% +[ Wed Sep 14 17:01:04 2022 ] Top5: 84.16% +[ Wed Sep 14 17:01:04 2022 ] Training epoch: 91 +[ Wed Sep 14 17:01:14 2022 ] Batch(19/162) done. Loss: 0.0271 lr:0.001000 +[ Wed Sep 14 17:01:44 2022 ] Batch(119/162) done. Loss: 0.0189 lr:0.001000 +[ Wed Sep 14 17:01:58 2022 ] Eval epoch: 91 +[ Wed Sep 14 17:03:43 2022 ] Mean test loss of 930 batches: 2.772402048110962. +[ Wed Sep 14 17:03:43 2022 ] Top1: 56.94% +[ Wed Sep 14 17:03:44 2022 ] Top5: 84.07% +[ Wed Sep 14 17:03:44 2022 ] Training epoch: 92 +[ Wed Sep 14 17:04:05 2022 ] Batch(57/162) done. Loss: 0.0531 lr:0.001000 +[ Wed Sep 14 17:04:36 2022 ] Batch(157/162) done. Loss: 0.1023 lr:0.001000 +[ Wed Sep 14 17:04:37 2022 ] Eval epoch: 92 +[ Wed Sep 14 17:06:23 2022 ] Mean test loss of 930 batches: 2.7590200901031494. +[ Wed Sep 14 17:06:23 2022 ] Top1: 56.91% +[ Wed Sep 14 17:06:24 2022 ] Top5: 83.99% +[ Wed Sep 14 17:06:24 2022 ] Training epoch: 93 +[ Wed Sep 14 17:06:57 2022 ] Batch(95/162) done. Loss: 0.0669 lr:0.001000 +[ Wed Sep 14 17:07:17 2022 ] Eval epoch: 93 +[ Wed Sep 14 17:09:02 2022 ] Mean test loss of 930 batches: 2.83962082862854. +[ Wed Sep 14 17:09:03 2022 ] Top1: 56.15% +[ Wed Sep 14 17:09:03 2022 ] Top5: 83.64% +[ Wed Sep 14 17:09:03 2022 ] Training epoch: 94 +[ Wed Sep 14 17:09:18 2022 ] Batch(33/162) done. Loss: 0.0561 lr:0.001000 +[ Wed Sep 14 17:09:48 2022 ] Batch(133/162) done. Loss: 0.0357 lr:0.001000 +[ Wed Sep 14 17:09:57 2022 ] Eval epoch: 94 +[ Wed Sep 14 17:11:42 2022 ] Mean test loss of 930 batches: 2.825902223587036. +[ Wed Sep 14 17:11:42 2022 ] Top1: 56.73% +[ Wed Sep 14 17:11:42 2022 ] Top5: 83.91% +[ Wed Sep 14 17:11:43 2022 ] Training epoch: 95 +[ Wed Sep 14 17:12:09 2022 ] Batch(71/162) done. Loss: 0.0574 lr:0.001000 +[ Wed Sep 14 17:12:36 2022 ] Eval epoch: 95 +[ Wed Sep 14 17:14:21 2022 ] Mean test loss of 930 batches: 2.796349287033081. 
+[ Wed Sep 14 17:14:22 2022 ] Top1: 56.57% +[ Wed Sep 14 17:14:22 2022 ] Top5: 83.89% +[ Wed Sep 14 17:14:23 2022 ] Training epoch: 96 +[ Wed Sep 14 17:14:29 2022 ] Batch(9/162) done. Loss: 0.0458 lr:0.001000 +[ Wed Sep 14 17:14:59 2022 ] Batch(109/162) done. Loss: 0.0299 lr:0.001000 +[ Wed Sep 14 17:15:15 2022 ] Eval epoch: 96 +[ Wed Sep 14 17:17:01 2022 ] Mean test loss of 930 batches: 2.685493230819702. +[ Wed Sep 14 17:17:02 2022 ] Top1: 56.98% +[ Wed Sep 14 17:17:02 2022 ] Top5: 84.24% +[ Wed Sep 14 17:17:03 2022 ] Training epoch: 97 +[ Wed Sep 14 17:17:21 2022 ] Batch(47/162) done. Loss: 0.0455 lr:0.001000 +[ Wed Sep 14 17:17:51 2022 ] Batch(147/162) done. Loss: 0.0995 lr:0.001000 +[ Wed Sep 14 17:17:56 2022 ] Eval epoch: 97 +[ Wed Sep 14 17:19:41 2022 ] Mean test loss of 930 batches: 2.769493818283081. +[ Wed Sep 14 17:19:42 2022 ] Top1: 56.75% +[ Wed Sep 14 17:19:42 2022 ] Top5: 83.97% +[ Wed Sep 14 17:19:43 2022 ] Training epoch: 98 +[ Wed Sep 14 17:20:12 2022 ] Batch(85/162) done. Loss: 0.0896 lr:0.001000 +[ Wed Sep 14 17:20:36 2022 ] Eval epoch: 98 +[ Wed Sep 14 17:22:22 2022 ] Mean test loss of 930 batches: 2.7722766399383545. +[ Wed Sep 14 17:22:22 2022 ] Top1: 56.70% +[ Wed Sep 14 17:22:23 2022 ] Top5: 83.93% +[ Wed Sep 14 17:22:23 2022 ] Training epoch: 99 +[ Wed Sep 14 17:22:34 2022 ] Batch(23/162) done. Loss: 0.0967 lr:0.001000 +[ Wed Sep 14 17:23:04 2022 ] Batch(123/162) done. Loss: 0.0458 lr:0.001000 +[ Wed Sep 14 17:23:16 2022 ] Eval epoch: 99 +[ Wed Sep 14 17:25:01 2022 ] Mean test loss of 930 batches: 2.800001621246338. +[ Wed Sep 14 17:25:02 2022 ] Top1: 56.59% +[ Wed Sep 14 17:25:02 2022 ] Top5: 83.88% +[ Wed Sep 14 17:25:02 2022 ] Training epoch: 100 +[ Wed Sep 14 17:25:25 2022 ] Batch(61/162) done. Loss: 0.0443 lr:0.001000 +[ Wed Sep 14 17:25:55 2022 ] Batch(161/162) done. Loss: 0.0364 lr:0.001000 +[ Wed Sep 14 17:25:55 2022 ] Eval epoch: 100 +[ Wed Sep 14 17:27:41 2022 ] Mean test loss of 930 batches: 2.7916245460510254. 
+[ Wed Sep 14 17:27:41 2022 ] Top1: 56.77% +[ Wed Sep 14 17:27:41 2022 ] Top5: 83.95% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d77b4ff8965eaaa3e17b672908556ce7ce99415 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu120_joint_motion_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_joint_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_motion_xset +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_motion_xset diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/decouple_gcn.py 
b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = 
nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, 
self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, 
graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..0815ceac29177a319f40be1d4ee03a66c89f817d --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfa1cf0029564599e84c2ba25684a77dd065f91c6669170c0e43d0a99c77e693 +size 34946665 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..0517efc8558830597ed5e92b006dd6a0012ebf7d --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_motion_xset/log.txt @@ -0,0 +1,665 @@ +[ Tue Sep 13 18:24:57 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_motion_xset', 'model_saved_name': './save_models/ntu120_joint_motion_xset', 'Experiment_name': 'ntu120_joint_motion_xset', 'config': './config/ntu120_xset/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 
'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 18:24:57 2022 ] Training epoch: 1 +[ Tue Sep 13 18:25:50 2022 ] Batch(99/162) done. Loss: 3.6515 lr:0.100000 +[ Tue Sep 13 18:26:18 2022 ] Eval epoch: 1 +[ Tue Sep 13 18:29:16 2022 ] Mean test loss of 930 batches: 5.235726833343506. +[ Tue Sep 13 18:29:16 2022 ] Top1: 2.53% +[ Tue Sep 13 18:29:17 2022 ] Top5: 12.36% +[ Tue Sep 13 18:29:17 2022 ] Training epoch: 2 +[ Tue Sep 13 18:29:40 2022 ] Batch(37/162) done. Loss: 3.5670 lr:0.100000 +[ Tue Sep 13 18:30:33 2022 ] Batch(137/162) done. Loss: 3.3648 lr:0.100000 +[ Tue Sep 13 18:30:46 2022 ] Eval epoch: 2 +[ Tue Sep 13 18:33:42 2022 ] Mean test loss of 930 batches: 4.729725360870361. +[ Tue Sep 13 18:33:43 2022 ] Top1: 8.87% +[ Tue Sep 13 18:33:43 2022 ] Top5: 24.11% +[ Tue Sep 13 18:33:43 2022 ] Training epoch: 3 +[ Tue Sep 13 18:34:26 2022 ] Batch(75/162) done. 
Loss: 2.6142 lr:0.100000 +[ Tue Sep 13 18:35:13 2022 ] Eval epoch: 3 +[ Tue Sep 13 18:38:09 2022 ] Mean test loss of 930 batches: 5.563536643981934. +[ Tue Sep 13 18:38:09 2022 ] Top1: 11.57% +[ Tue Sep 13 18:38:09 2022 ] Top5: 34.96% +[ Tue Sep 13 18:38:10 2022 ] Training epoch: 4 +[ Tue Sep 13 18:38:20 2022 ] Batch(13/162) done. Loss: 2.3234 lr:0.100000 +[ Tue Sep 13 18:39:13 2022 ] Batch(113/162) done. Loss: 2.2909 lr:0.100000 +[ Tue Sep 13 18:39:39 2022 ] Eval epoch: 4 +[ Tue Sep 13 18:42:34 2022 ] Mean test loss of 930 batches: 5.212625980377197. +[ Tue Sep 13 18:42:35 2022 ] Top1: 14.93% +[ Tue Sep 13 18:42:35 2022 ] Top5: 39.18% +[ Tue Sep 13 18:42:35 2022 ] Training epoch: 5 +[ Tue Sep 13 18:43:06 2022 ] Batch(51/162) done. Loss: 1.8288 lr:0.100000 +[ Tue Sep 13 18:43:59 2022 ] Batch(151/162) done. Loss: 1.8895 lr:0.100000 +[ Tue Sep 13 18:44:05 2022 ] Eval epoch: 5 +[ Tue Sep 13 18:47:00 2022 ] Mean test loss of 930 batches: 4.0911173820495605. +[ Tue Sep 13 18:47:01 2022 ] Top1: 18.45% +[ Tue Sep 13 18:47:01 2022 ] Top5: 46.17% +[ Tue Sep 13 18:47:02 2022 ] Training epoch: 6 +[ Tue Sep 13 18:47:52 2022 ] Batch(89/162) done. Loss: 1.3659 lr:0.100000 +[ Tue Sep 13 18:48:31 2022 ] Eval epoch: 6 +[ Tue Sep 13 18:51:27 2022 ] Mean test loss of 930 batches: 5.5289459228515625. +[ Tue Sep 13 18:51:28 2022 ] Top1: 18.95% +[ Tue Sep 13 18:51:28 2022 ] Top5: 45.14% +[ Tue Sep 13 18:51:28 2022 ] Training epoch: 7 +[ Tue Sep 13 18:51:46 2022 ] Batch(27/162) done. Loss: 1.2843 lr:0.100000 +[ Tue Sep 13 18:52:39 2022 ] Batch(127/162) done. Loss: 1.2709 lr:0.100000 +[ Tue Sep 13 18:52:58 2022 ] Eval epoch: 7 +[ Tue Sep 13 18:55:54 2022 ] Mean test loss of 930 batches: 3.4792916774749756. +[ Tue Sep 13 18:55:54 2022 ] Top1: 24.92% +[ Tue Sep 13 18:55:55 2022 ] Top5: 52.68% +[ Tue Sep 13 18:55:55 2022 ] Training epoch: 8 +[ Tue Sep 13 18:56:33 2022 ] Batch(65/162) done. 
Loss: 1.8981 lr:0.100000 +[ Tue Sep 13 18:57:25 2022 ] Eval epoch: 8 +[ Tue Sep 13 19:00:21 2022 ] Mean test loss of 930 batches: 3.5104548931121826. +[ Tue Sep 13 19:00:21 2022 ] Top1: 27.18% +[ Tue Sep 13 19:00:21 2022 ] Top5: 55.61% +[ Tue Sep 13 19:00:22 2022 ] Training epoch: 9 +[ Tue Sep 13 19:00:27 2022 ] Batch(3/162) done. Loss: 1.4355 lr:0.100000 +[ Tue Sep 13 19:01:20 2022 ] Batch(103/162) done. Loss: 1.3561 lr:0.100000 +[ Tue Sep 13 19:01:51 2022 ] Eval epoch: 9 +[ Tue Sep 13 19:04:48 2022 ] Mean test loss of 930 batches: 3.0909206867218018. +[ Tue Sep 13 19:04:48 2022 ] Top1: 30.46% +[ Tue Sep 13 19:04:49 2022 ] Top5: 60.58% +[ Tue Sep 13 19:04:49 2022 ] Training epoch: 10 +[ Tue Sep 13 19:05:14 2022 ] Batch(41/162) done. Loss: 1.1506 lr:0.100000 +[ Tue Sep 13 19:06:07 2022 ] Batch(141/162) done. Loss: 1.2948 lr:0.100000 +[ Tue Sep 13 19:06:18 2022 ] Eval epoch: 10 +[ Tue Sep 13 19:09:14 2022 ] Mean test loss of 930 batches: 3.8743605613708496. +[ Tue Sep 13 19:09:15 2022 ] Top1: 25.93% +[ Tue Sep 13 19:09:15 2022 ] Top5: 53.59% +[ Tue Sep 13 19:09:16 2022 ] Training epoch: 11 +[ Tue Sep 13 19:10:01 2022 ] Batch(79/162) done. Loss: 1.3390 lr:0.100000 +[ Tue Sep 13 19:10:45 2022 ] Eval epoch: 11 +[ Tue Sep 13 19:13:41 2022 ] Mean test loss of 930 batches: 3.0287907123565674. +[ Tue Sep 13 19:13:41 2022 ] Top1: 30.51% +[ Tue Sep 13 19:13:42 2022 ] Top5: 63.40% +[ Tue Sep 13 19:13:42 2022 ] Training epoch: 12 +[ Tue Sep 13 19:13:54 2022 ] Batch(17/162) done. Loss: 1.0993 lr:0.100000 +[ Tue Sep 13 19:14:48 2022 ] Batch(117/162) done. Loss: 1.5553 lr:0.100000 +[ Tue Sep 13 19:15:11 2022 ] Eval epoch: 12 +[ Tue Sep 13 19:18:08 2022 ] Mean test loss of 930 batches: 2.681297540664673. +[ Tue Sep 13 19:18:08 2022 ] Top1: 36.16% +[ Tue Sep 13 19:18:09 2022 ] Top5: 65.88% +[ Tue Sep 13 19:18:09 2022 ] Training epoch: 13 +[ Tue Sep 13 19:18:41 2022 ] Batch(55/162) done. Loss: 1.2107 lr:0.100000 +[ Tue Sep 13 19:19:35 2022 ] Batch(155/162) done. 
Loss: 1.1203 lr:0.100000 +[ Tue Sep 13 19:19:38 2022 ] Eval epoch: 13 +[ Tue Sep 13 19:22:34 2022 ] Mean test loss of 930 batches: 3.446503162384033. +[ Tue Sep 13 19:22:34 2022 ] Top1: 32.32% +[ Tue Sep 13 19:22:35 2022 ] Top5: 63.73% +[ Tue Sep 13 19:22:35 2022 ] Training epoch: 14 +[ Tue Sep 13 19:23:28 2022 ] Batch(93/162) done. Loss: 1.0726 lr:0.100000 +[ Tue Sep 13 19:24:05 2022 ] Eval epoch: 14 +[ Tue Sep 13 19:27:01 2022 ] Mean test loss of 930 batches: 2.7903835773468018. +[ Tue Sep 13 19:27:01 2022 ] Top1: 39.31% +[ Tue Sep 13 19:27:02 2022 ] Top5: 69.23% +[ Tue Sep 13 19:27:02 2022 ] Training epoch: 15 +[ Tue Sep 13 19:27:22 2022 ] Batch(31/162) done. Loss: 1.3534 lr:0.100000 +[ Tue Sep 13 19:28:15 2022 ] Batch(131/162) done. Loss: 1.4148 lr:0.100000 +[ Tue Sep 13 19:28:32 2022 ] Eval epoch: 15 +[ Tue Sep 13 19:31:27 2022 ] Mean test loss of 930 batches: 3.082577705383301. +[ Tue Sep 13 19:31:28 2022 ] Top1: 37.26% +[ Tue Sep 13 19:31:28 2022 ] Top5: 68.58% +[ Tue Sep 13 19:31:28 2022 ] Training epoch: 16 +[ Tue Sep 13 19:32:09 2022 ] Batch(69/162) done. Loss: 0.9850 lr:0.100000 +[ Tue Sep 13 19:32:58 2022 ] Eval epoch: 16 +[ Tue Sep 13 19:35:53 2022 ] Mean test loss of 930 batches: 2.5744996070861816. +[ Tue Sep 13 19:35:54 2022 ] Top1: 40.64% +[ Tue Sep 13 19:35:54 2022 ] Top5: 72.34% +[ Tue Sep 13 19:35:55 2022 ] Training epoch: 17 +[ Tue Sep 13 19:36:01 2022 ] Batch(7/162) done. Loss: 0.6388 lr:0.100000 +[ Tue Sep 13 19:36:55 2022 ] Batch(107/162) done. Loss: 0.7803 lr:0.100000 +[ Tue Sep 13 19:37:24 2022 ] Eval epoch: 17 +[ Tue Sep 13 19:40:20 2022 ] Mean test loss of 930 batches: 2.6255459785461426. +[ Tue Sep 13 19:40:20 2022 ] Top1: 41.27% +[ Tue Sep 13 19:40:21 2022 ] Top5: 73.48% +[ Tue Sep 13 19:40:21 2022 ] Training epoch: 18 +[ Tue Sep 13 19:40:48 2022 ] Batch(45/162) done. Loss: 0.6380 lr:0.100000 +[ Tue Sep 13 19:41:42 2022 ] Batch(145/162) done. 
Loss: 0.9929 lr:0.100000 +[ Tue Sep 13 19:41:51 2022 ] Eval epoch: 18 +[ Tue Sep 13 19:44:47 2022 ] Mean test loss of 930 batches: 2.5052144527435303. +[ Tue Sep 13 19:44:47 2022 ] Top1: 43.61% +[ Tue Sep 13 19:44:47 2022 ] Top5: 74.95% +[ Tue Sep 13 19:44:48 2022 ] Training epoch: 19 +[ Tue Sep 13 19:45:35 2022 ] Batch(83/162) done. Loss: 0.9538 lr:0.100000 +[ Tue Sep 13 19:46:17 2022 ] Eval epoch: 19 +[ Tue Sep 13 19:49:12 2022 ] Mean test loss of 930 batches: 3.247473955154419. +[ Tue Sep 13 19:49:13 2022 ] Top1: 34.91% +[ Tue Sep 13 19:49:13 2022 ] Top5: 65.97% +[ Tue Sep 13 19:49:13 2022 ] Training epoch: 20 +[ Tue Sep 13 19:49:28 2022 ] Batch(21/162) done. Loss: 0.8027 lr:0.100000 +[ Tue Sep 13 19:50:21 2022 ] Batch(121/162) done. Loss: 0.9911 lr:0.100000 +[ Tue Sep 13 19:50:43 2022 ] Eval epoch: 20 +[ Tue Sep 13 19:53:39 2022 ] Mean test loss of 930 batches: 5.476712226867676. +[ Tue Sep 13 19:53:39 2022 ] Top1: 24.59% +[ Tue Sep 13 19:53:40 2022 ] Top5: 53.93% +[ Tue Sep 13 19:53:40 2022 ] Training epoch: 21 +[ Tue Sep 13 19:54:15 2022 ] Batch(59/162) done. Loss: 0.8640 lr:0.100000 +[ Tue Sep 13 19:55:08 2022 ] Batch(159/162) done. Loss: 0.8501 lr:0.100000 +[ Tue Sep 13 19:55:09 2022 ] Eval epoch: 21 +[ Tue Sep 13 19:58:05 2022 ] Mean test loss of 930 batches: 2.893510580062866. +[ Tue Sep 13 19:58:06 2022 ] Top1: 41.86% +[ Tue Sep 13 19:58:06 2022 ] Top5: 73.83% +[ Tue Sep 13 19:58:07 2022 ] Training epoch: 22 +[ Tue Sep 13 19:59:02 2022 ] Batch(97/162) done. Loss: 0.8610 lr:0.100000 +[ Tue Sep 13 19:59:36 2022 ] Eval epoch: 22 +[ Tue Sep 13 20:02:33 2022 ] Mean test loss of 930 batches: 2.469637870788574. +[ Tue Sep 13 20:02:34 2022 ] Top1: 43.61% +[ Tue Sep 13 20:02:34 2022 ] Top5: 77.11% +[ Tue Sep 13 20:02:35 2022 ] Training epoch: 23 +[ Tue Sep 13 20:02:57 2022 ] Batch(35/162) done. Loss: 0.8158 lr:0.100000 +[ Tue Sep 13 20:03:50 2022 ] Batch(135/162) done. 
Loss: 0.5242 lr:0.100000 +[ Tue Sep 13 20:04:05 2022 ] Eval epoch: 23 +[ Tue Sep 13 20:07:01 2022 ] Mean test loss of 930 batches: 2.4923813343048096. +[ Tue Sep 13 20:07:01 2022 ] Top1: 44.61% +[ Tue Sep 13 20:07:02 2022 ] Top5: 76.41% +[ Tue Sep 13 20:07:02 2022 ] Training epoch: 24 +[ Tue Sep 13 20:07:45 2022 ] Batch(73/162) done. Loss: 0.4336 lr:0.100000 +[ Tue Sep 13 20:08:32 2022 ] Eval epoch: 24 +[ Tue Sep 13 20:11:28 2022 ] Mean test loss of 930 batches: 2.9429335594177246. +[ Tue Sep 13 20:11:29 2022 ] Top1: 42.74% +[ Tue Sep 13 20:11:29 2022 ] Top5: 74.01% +[ Tue Sep 13 20:11:29 2022 ] Training epoch: 25 +[ Tue Sep 13 20:11:39 2022 ] Batch(11/162) done. Loss: 0.6002 lr:0.100000 +[ Tue Sep 13 20:12:32 2022 ] Batch(111/162) done. Loss: 0.4435 lr:0.100000 +[ Tue Sep 13 20:12:59 2022 ] Eval epoch: 25 +[ Tue Sep 13 20:15:55 2022 ] Mean test loss of 930 batches: 3.080543279647827. +[ Tue Sep 13 20:15:55 2022 ] Top1: 40.29% +[ Tue Sep 13 20:15:55 2022 ] Top5: 70.90% +[ Tue Sep 13 20:15:56 2022 ] Training epoch: 26 +[ Tue Sep 13 20:16:25 2022 ] Batch(49/162) done. Loss: 0.3770 lr:0.100000 +[ Tue Sep 13 20:17:18 2022 ] Batch(149/162) done. Loss: 0.3701 lr:0.100000 +[ Tue Sep 13 20:17:25 2022 ] Eval epoch: 26 +[ Tue Sep 13 20:20:21 2022 ] Mean test loss of 930 batches: 3.1447954177856445. +[ Tue Sep 13 20:20:21 2022 ] Top1: 40.04% +[ Tue Sep 13 20:20:22 2022 ] Top5: 71.77% +[ Tue Sep 13 20:20:22 2022 ] Training epoch: 27 +[ Tue Sep 13 20:21:12 2022 ] Batch(87/162) done. Loss: 1.1478 lr:0.100000 +[ Tue Sep 13 20:21:51 2022 ] Eval epoch: 27 +[ Tue Sep 13 20:24:48 2022 ] Mean test loss of 930 batches: 3.4919514656066895. +[ Tue Sep 13 20:24:48 2022 ] Top1: 40.34% +[ Tue Sep 13 20:24:49 2022 ] Top5: 72.57% +[ Tue Sep 13 20:24:49 2022 ] Training epoch: 28 +[ Tue Sep 13 20:25:06 2022 ] Batch(25/162) done. Loss: 0.5289 lr:0.100000 +[ Tue Sep 13 20:26:00 2022 ] Batch(125/162) done. 
Loss: 0.3959 lr:0.100000 +[ Tue Sep 13 20:26:19 2022 ] Eval epoch: 28 +[ Tue Sep 13 20:29:16 2022 ] Mean test loss of 930 batches: 3.084304094314575. +[ Tue Sep 13 20:29:16 2022 ] Top1: 41.98% +[ Tue Sep 13 20:29:17 2022 ] Top5: 71.62% +[ Tue Sep 13 20:29:17 2022 ] Training epoch: 29 +[ Tue Sep 13 20:29:54 2022 ] Batch(63/162) done. Loss: 0.5615 lr:0.100000 +[ Tue Sep 13 20:30:47 2022 ] Eval epoch: 29 +[ Tue Sep 13 20:33:42 2022 ] Mean test loss of 930 batches: 4.129952430725098. +[ Tue Sep 13 20:33:43 2022 ] Top1: 34.69% +[ Tue Sep 13 20:33:43 2022 ] Top5: 63.24% +[ Tue Sep 13 20:33:43 2022 ] Training epoch: 30 +[ Tue Sep 13 20:33:47 2022 ] Batch(1/162) done. Loss: 0.3964 lr:0.100000 +[ Tue Sep 13 20:34:41 2022 ] Batch(101/162) done. Loss: 0.6367 lr:0.100000 +[ Tue Sep 13 20:35:13 2022 ] Eval epoch: 30 +[ Tue Sep 13 20:38:08 2022 ] Mean test loss of 930 batches: 3.9512550830841064. +[ Tue Sep 13 20:38:09 2022 ] Top1: 38.94% +[ Tue Sep 13 20:38:09 2022 ] Top5: 70.13% +[ Tue Sep 13 20:38:09 2022 ] Training epoch: 31 +[ Tue Sep 13 20:38:34 2022 ] Batch(39/162) done. Loss: 0.3871 lr:0.100000 +[ Tue Sep 13 20:39:27 2022 ] Batch(139/162) done. Loss: 0.5418 lr:0.100000 +[ Tue Sep 13 20:39:39 2022 ] Eval epoch: 31 +[ Tue Sep 13 20:42:34 2022 ] Mean test loss of 930 batches: 3.003483533859253. +[ Tue Sep 13 20:42:35 2022 ] Top1: 44.00% +[ Tue Sep 13 20:42:35 2022 ] Top5: 75.59% +[ Tue Sep 13 20:42:35 2022 ] Training epoch: 32 +[ Tue Sep 13 20:43:20 2022 ] Batch(77/162) done. Loss: 0.4624 lr:0.100000 +[ Tue Sep 13 20:44:05 2022 ] Eval epoch: 32 +[ Tue Sep 13 20:47:01 2022 ] Mean test loss of 930 batches: 2.6536295413970947. +[ Tue Sep 13 20:47:02 2022 ] Top1: 47.46% +[ Tue Sep 13 20:47:02 2022 ] Top5: 77.98% +[ Tue Sep 13 20:47:02 2022 ] Training epoch: 33 +[ Tue Sep 13 20:47:14 2022 ] Batch(15/162) done. Loss: 0.2946 lr:0.100000 +[ Tue Sep 13 20:48:07 2022 ] Batch(115/162) done. 
Loss: 0.8799 lr:0.100000 +[ Tue Sep 13 20:48:32 2022 ] Eval epoch: 33 +[ Tue Sep 13 20:51:28 2022 ] Mean test loss of 930 batches: 4.496440410614014. +[ Tue Sep 13 20:51:28 2022 ] Top1: 34.02% +[ Tue Sep 13 20:51:29 2022 ] Top5: 64.93% +[ Tue Sep 13 20:51:29 2022 ] Training epoch: 34 +[ Tue Sep 13 20:52:01 2022 ] Batch(53/162) done. Loss: 0.4099 lr:0.100000 +[ Tue Sep 13 20:52:54 2022 ] Batch(153/162) done. Loss: 0.2651 lr:0.100000 +[ Tue Sep 13 20:52:58 2022 ] Eval epoch: 34 +[ Tue Sep 13 20:55:54 2022 ] Mean test loss of 930 batches: 3.864461898803711. +[ Tue Sep 13 20:55:54 2022 ] Top1: 41.27% +[ Tue Sep 13 20:55:55 2022 ] Top5: 70.98% +[ Tue Sep 13 20:55:55 2022 ] Training epoch: 35 +[ Tue Sep 13 20:56:47 2022 ] Batch(91/162) done. Loss: 0.1810 lr:0.100000 +[ Tue Sep 13 20:57:25 2022 ] Eval epoch: 35 +[ Tue Sep 13 21:00:20 2022 ] Mean test loss of 930 batches: 3.152431011199951. +[ Tue Sep 13 21:00:21 2022 ] Top1: 40.42% +[ Tue Sep 13 21:00:21 2022 ] Top5: 70.64% +[ Tue Sep 13 21:00:21 2022 ] Training epoch: 36 +[ Tue Sep 13 21:00:40 2022 ] Batch(29/162) done. Loss: 0.3657 lr:0.100000 +[ Tue Sep 13 21:01:34 2022 ] Batch(129/162) done. Loss: 0.4337 lr:0.100000 +[ Tue Sep 13 21:01:51 2022 ] Eval epoch: 36 +[ Tue Sep 13 21:04:47 2022 ] Mean test loss of 930 batches: 3.287761926651001. +[ Tue Sep 13 21:04:47 2022 ] Top1: 45.88% +[ Tue Sep 13 21:04:48 2022 ] Top5: 76.60% +[ Tue Sep 13 21:04:48 2022 ] Training epoch: 37 +[ Tue Sep 13 21:05:27 2022 ] Batch(67/162) done. Loss: 0.2672 lr:0.100000 +[ Tue Sep 13 21:06:17 2022 ] Eval epoch: 37 +[ Tue Sep 13 21:09:13 2022 ] Mean test loss of 930 batches: 3.207329273223877. +[ Tue Sep 13 21:09:13 2022 ] Top1: 47.81% +[ Tue Sep 13 21:09:14 2022 ] Top5: 78.13% +[ Tue Sep 13 21:09:14 2022 ] Training epoch: 38 +[ Tue Sep 13 21:09:20 2022 ] Batch(5/162) done. Loss: 0.1246 lr:0.100000 +[ Tue Sep 13 21:10:13 2022 ] Batch(105/162) done. 
Loss: 0.3872 lr:0.100000 +[ Tue Sep 13 21:10:43 2022 ] Eval epoch: 38 +[ Tue Sep 13 21:13:39 2022 ] Mean test loss of 930 batches: 3.925532341003418. +[ Tue Sep 13 21:13:39 2022 ] Top1: 41.53% +[ Tue Sep 13 21:13:40 2022 ] Top5: 72.47% +[ Tue Sep 13 21:13:40 2022 ] Training epoch: 39 +[ Tue Sep 13 21:14:06 2022 ] Batch(43/162) done. Loss: 0.4519 lr:0.100000 +[ Tue Sep 13 21:14:59 2022 ] Batch(143/162) done. Loss: 0.3730 lr:0.100000 +[ Tue Sep 13 21:15:09 2022 ] Eval epoch: 39 +[ Tue Sep 13 21:18:05 2022 ] Mean test loss of 930 batches: 3.4361915588378906. +[ Tue Sep 13 21:18:05 2022 ] Top1: 41.13% +[ Tue Sep 13 21:18:06 2022 ] Top5: 72.29% +[ Tue Sep 13 21:18:06 2022 ] Training epoch: 40 +[ Tue Sep 13 21:18:52 2022 ] Batch(81/162) done. Loss: 0.3425 lr:0.100000 +[ Tue Sep 13 21:19:35 2022 ] Eval epoch: 40 +[ Tue Sep 13 21:22:30 2022 ] Mean test loss of 930 batches: 3.311009407043457. +[ Tue Sep 13 21:22:31 2022 ] Top1: 44.99% +[ Tue Sep 13 21:22:31 2022 ] Top5: 72.37% +[ Tue Sep 13 21:22:31 2022 ] Training epoch: 41 +[ Tue Sep 13 21:22:45 2022 ] Batch(19/162) done. Loss: 0.2725 lr:0.100000 +[ Tue Sep 13 21:23:38 2022 ] Batch(119/162) done. Loss: 0.2891 lr:0.100000 +[ Tue Sep 13 21:24:01 2022 ] Eval epoch: 41 +[ Tue Sep 13 21:26:56 2022 ] Mean test loss of 930 batches: 3.1727330684661865. +[ Tue Sep 13 21:26:57 2022 ] Top1: 46.30% +[ Tue Sep 13 21:26:57 2022 ] Top5: 74.79% +[ Tue Sep 13 21:26:58 2022 ] Training epoch: 42 +[ Tue Sep 13 21:27:31 2022 ] Batch(57/162) done. Loss: 0.2355 lr:0.100000 +[ Tue Sep 13 21:28:24 2022 ] Batch(157/162) done. Loss: 0.3807 lr:0.100000 +[ Tue Sep 13 21:28:27 2022 ] Eval epoch: 42 +[ Tue Sep 13 21:31:22 2022 ] Mean test loss of 930 batches: 5.087271690368652. +[ Tue Sep 13 21:31:22 2022 ] Top1: 36.97% +[ Tue Sep 13 21:31:23 2022 ] Top5: 65.04% +[ Tue Sep 13 21:31:23 2022 ] Training epoch: 43 +[ Tue Sep 13 21:32:17 2022 ] Batch(95/162) done. 
Loss: 0.5819 lr:0.100000 +[ Tue Sep 13 21:32:52 2022 ] Eval epoch: 43 +[ Tue Sep 13 21:35:48 2022 ] Mean test loss of 930 batches: 4.4214887619018555. +[ Tue Sep 13 21:35:48 2022 ] Top1: 32.79% +[ Tue Sep 13 21:35:49 2022 ] Top5: 61.90% +[ Tue Sep 13 21:35:49 2022 ] Training epoch: 44 +[ Tue Sep 13 21:36:10 2022 ] Batch(33/162) done. Loss: 0.4286 lr:0.100000 +[ Tue Sep 13 21:37:03 2022 ] Batch(133/162) done. Loss: 0.4537 lr:0.100000 +[ Tue Sep 13 21:37:18 2022 ] Eval epoch: 44 +[ Tue Sep 13 21:40:14 2022 ] Mean test loss of 930 batches: 3.6953487396240234. +[ Tue Sep 13 21:40:14 2022 ] Top1: 44.60% +[ Tue Sep 13 21:40:14 2022 ] Top5: 75.06% +[ Tue Sep 13 21:40:15 2022 ] Training epoch: 45 +[ Tue Sep 13 21:40:55 2022 ] Batch(71/162) done. Loss: 0.3127 lr:0.100000 +[ Tue Sep 13 21:41:44 2022 ] Eval epoch: 45 +[ Tue Sep 13 21:44:39 2022 ] Mean test loss of 930 batches: 4.165959358215332. +[ Tue Sep 13 21:44:40 2022 ] Top1: 41.86% +[ Tue Sep 13 21:44:40 2022 ] Top5: 70.45% +[ Tue Sep 13 21:44:40 2022 ] Training epoch: 46 +[ Tue Sep 13 21:44:48 2022 ] Batch(9/162) done. Loss: 0.3008 lr:0.100000 +[ Tue Sep 13 21:45:42 2022 ] Batch(109/162) done. Loss: 0.3582 lr:0.100000 +[ Tue Sep 13 21:46:10 2022 ] Eval epoch: 46 +[ Tue Sep 13 21:49:06 2022 ] Mean test loss of 930 batches: 4.4879326820373535. +[ Tue Sep 13 21:49:06 2022 ] Top1: 39.41% +[ Tue Sep 13 21:49:07 2022 ] Top5: 70.74% +[ Tue Sep 13 21:49:07 2022 ] Training epoch: 47 +[ Tue Sep 13 21:49:35 2022 ] Batch(47/162) done. Loss: 0.3825 lr:0.100000 +[ Tue Sep 13 21:50:28 2022 ] Batch(147/162) done. Loss: 0.2699 lr:0.100000 +[ Tue Sep 13 21:50:36 2022 ] Eval epoch: 47 +[ Tue Sep 13 21:53:32 2022 ] Mean test loss of 930 batches: 3.048259735107422. +[ Tue Sep 13 21:53:32 2022 ] Top1: 48.36% +[ Tue Sep 13 21:53:33 2022 ] Top5: 77.78% +[ Tue Sep 13 21:53:33 2022 ] Training epoch: 48 +[ Tue Sep 13 21:54:21 2022 ] Batch(85/162) done. 
Loss: 0.1663 lr:0.100000 +[ Tue Sep 13 21:55:02 2022 ] Eval epoch: 48 +[ Tue Sep 13 21:57:58 2022 ] Mean test loss of 930 batches: 3.4193732738494873. +[ Tue Sep 13 21:57:58 2022 ] Top1: 47.52% +[ Tue Sep 13 21:57:59 2022 ] Top5: 76.02% +[ Tue Sep 13 21:57:59 2022 ] Training epoch: 49 +[ Tue Sep 13 21:58:14 2022 ] Batch(23/162) done. Loss: 0.2840 lr:0.100000 +[ Tue Sep 13 21:59:07 2022 ] Batch(123/162) done. Loss: 0.2047 lr:0.100000 +[ Tue Sep 13 21:59:28 2022 ] Eval epoch: 49 +[ Tue Sep 13 22:02:24 2022 ] Mean test loss of 930 batches: 3.1765494346618652. +[ Tue Sep 13 22:02:25 2022 ] Top1: 49.31% +[ Tue Sep 13 22:02:25 2022 ] Top5: 77.90% +[ Tue Sep 13 22:02:26 2022 ] Training epoch: 50 +[ Tue Sep 13 22:03:02 2022 ] Batch(61/162) done. Loss: 0.2112 lr:0.100000 +[ Tue Sep 13 22:03:55 2022 ] Batch(161/162) done. Loss: 0.2298 lr:0.100000 +[ Tue Sep 13 22:03:56 2022 ] Eval epoch: 50 +[ Tue Sep 13 22:06:51 2022 ] Mean test loss of 930 batches: 4.108170986175537. +[ Tue Sep 13 22:06:52 2022 ] Top1: 41.71% +[ Tue Sep 13 22:06:52 2022 ] Top5: 70.49% +[ Tue Sep 13 22:06:52 2022 ] Training epoch: 51 +[ Tue Sep 13 22:07:48 2022 ] Batch(99/162) done. Loss: 0.2841 lr:0.100000 +[ Tue Sep 13 22:08:22 2022 ] Eval epoch: 51 +[ Tue Sep 13 22:11:17 2022 ] Mean test loss of 930 batches: 3.8305201530456543. +[ Tue Sep 13 22:11:18 2022 ] Top1: 43.83% +[ Tue Sep 13 22:11:18 2022 ] Top5: 73.38% +[ Tue Sep 13 22:11:19 2022 ] Training epoch: 52 +[ Tue Sep 13 22:11:41 2022 ] Batch(37/162) done. Loss: 0.2311 lr:0.100000 +[ Tue Sep 13 22:12:35 2022 ] Batch(137/162) done. Loss: 0.3498 lr:0.100000 +[ Tue Sep 13 22:12:48 2022 ] Eval epoch: 52 +[ Tue Sep 13 22:15:43 2022 ] Mean test loss of 930 batches: 3.149076461791992. +[ Tue Sep 13 22:15:44 2022 ] Top1: 46.20% +[ Tue Sep 13 22:15:44 2022 ] Top5: 76.61% +[ Tue Sep 13 22:15:45 2022 ] Training epoch: 53 +[ Tue Sep 13 22:16:28 2022 ] Batch(75/162) done. 
Loss: 0.2594 lr:0.100000 +[ Tue Sep 13 22:17:14 2022 ] Eval epoch: 53 +[ Tue Sep 13 22:20:10 2022 ] Mean test loss of 930 batches: 3.767601728439331. +[ Tue Sep 13 22:20:10 2022 ] Top1: 46.00% +[ Tue Sep 13 22:20:11 2022 ] Top5: 75.40% +[ Tue Sep 13 22:20:11 2022 ] Training epoch: 54 +[ Tue Sep 13 22:20:21 2022 ] Batch(13/162) done. Loss: 0.2445 lr:0.100000 +[ Tue Sep 13 22:21:15 2022 ] Batch(113/162) done. Loss: 0.1727 lr:0.100000 +[ Tue Sep 13 22:21:40 2022 ] Eval epoch: 54 +[ Tue Sep 13 22:24:36 2022 ] Mean test loss of 930 batches: 3.2365827560424805. +[ Tue Sep 13 22:24:36 2022 ] Top1: 47.60% +[ Tue Sep 13 22:24:37 2022 ] Top5: 77.30% +[ Tue Sep 13 22:24:37 2022 ] Training epoch: 55 +[ Tue Sep 13 22:25:07 2022 ] Batch(51/162) done. Loss: 0.2887 lr:0.100000 +[ Tue Sep 13 22:26:01 2022 ] Batch(151/162) done. Loss: 0.3823 lr:0.100000 +[ Tue Sep 13 22:26:06 2022 ] Eval epoch: 55 +[ Tue Sep 13 22:29:01 2022 ] Mean test loss of 930 batches: 3.87300968170166. +[ Tue Sep 13 22:29:02 2022 ] Top1: 43.79% +[ Tue Sep 13 22:29:02 2022 ] Top5: 70.42% +[ Tue Sep 13 22:29:03 2022 ] Training epoch: 56 +[ Tue Sep 13 22:29:53 2022 ] Batch(89/162) done. Loss: 0.4036 lr:0.100000 +[ Tue Sep 13 22:30:32 2022 ] Eval epoch: 56 +[ Tue Sep 13 22:33:27 2022 ] Mean test loss of 930 batches: 3.2829749584198. +[ Tue Sep 13 22:33:28 2022 ] Top1: 47.72% +[ Tue Sep 13 22:33:28 2022 ] Top5: 78.42% +[ Tue Sep 13 22:33:29 2022 ] Training epoch: 57 +[ Tue Sep 13 22:33:46 2022 ] Batch(27/162) done. Loss: 0.1076 lr:0.100000 +[ Tue Sep 13 22:34:39 2022 ] Batch(127/162) done. Loss: 0.2584 lr:0.100000 +[ Tue Sep 13 22:34:58 2022 ] Eval epoch: 57 +[ Tue Sep 13 22:37:53 2022 ] Mean test loss of 930 batches: 3.3539950847625732. +[ Tue Sep 13 22:37:54 2022 ] Top1: 47.14% +[ Tue Sep 13 22:37:54 2022 ] Top5: 76.89% +[ Tue Sep 13 22:37:54 2022 ] Training epoch: 58 +[ Tue Sep 13 22:38:32 2022 ] Batch(65/162) done. 
Loss: 0.6299 lr:0.100000 +[ Tue Sep 13 22:39:24 2022 ] Eval epoch: 58 +[ Tue Sep 13 22:42:19 2022 ] Mean test loss of 930 batches: 93.60208129882812. +[ Tue Sep 13 22:42:19 2022 ] Top1: 1.13% +[ Tue Sep 13 22:42:20 2022 ] Top5: 6.35% +[ Tue Sep 13 22:42:20 2022 ] Training epoch: 59 +[ Tue Sep 13 22:42:25 2022 ] Batch(3/162) done. Loss: 0.4038 lr:0.100000 +[ Tue Sep 13 22:43:18 2022 ] Batch(103/162) done. Loss: 0.1457 lr:0.100000 +[ Tue Sep 13 22:43:49 2022 ] Eval epoch: 59 +[ Tue Sep 13 22:46:44 2022 ] Mean test loss of 930 batches: 4.4294657707214355. +[ Tue Sep 13 22:46:45 2022 ] Top1: 39.32% +[ Tue Sep 13 22:46:45 2022 ] Top5: 68.19% +[ Tue Sep 13 22:46:46 2022 ] Training epoch: 60 +[ Tue Sep 13 22:47:11 2022 ] Batch(41/162) done. Loss: 0.2347 lr:0.100000 +[ Tue Sep 13 22:48:04 2022 ] Batch(141/162) done. Loss: 0.2860 lr:0.100000 +[ Tue Sep 13 22:48:15 2022 ] Eval epoch: 60 +[ Tue Sep 13 22:51:10 2022 ] Mean test loss of 930 batches: 3.385327100753784. +[ Tue Sep 13 22:51:11 2022 ] Top1: 46.98% +[ Tue Sep 13 22:51:11 2022 ] Top5: 74.47% +[ Tue Sep 13 22:51:11 2022 ] Training epoch: 61 +[ Tue Sep 13 22:51:56 2022 ] Batch(79/162) done. Loss: 0.1330 lr:0.010000 +[ Tue Sep 13 22:52:40 2022 ] Eval epoch: 61 +[ Tue Sep 13 22:55:37 2022 ] Mean test loss of 930 batches: 3.0223019123077393. +[ Tue Sep 13 22:55:37 2022 ] Top1: 54.31% +[ Tue Sep 13 22:55:37 2022 ] Top5: 81.27% +[ Tue Sep 13 22:55:38 2022 ] Training epoch: 62 +[ Tue Sep 13 22:55:50 2022 ] Batch(17/162) done. Loss: 0.0683 lr:0.010000 +[ Tue Sep 13 22:56:43 2022 ] Batch(117/162) done. Loss: 0.1300 lr:0.010000 +[ Tue Sep 13 22:57:07 2022 ] Eval epoch: 62 +[ Tue Sep 13 23:00:03 2022 ] Mean test loss of 930 batches: 2.9251832962036133. +[ Tue Sep 13 23:00:03 2022 ] Top1: 54.89% +[ Tue Sep 13 23:00:04 2022 ] Top5: 81.63% +[ Tue Sep 13 23:00:04 2022 ] Training epoch: 63 +[ Tue Sep 13 23:00:36 2022 ] Batch(55/162) done. Loss: 0.0774 lr:0.010000 +[ Tue Sep 13 23:01:30 2022 ] Batch(155/162) done. 
Loss: 0.0291 lr:0.010000 +[ Tue Sep 13 23:01:33 2022 ] Eval epoch: 63 +[ Tue Sep 13 23:04:29 2022 ] Mean test loss of 930 batches: 2.900144100189209. +[ Tue Sep 13 23:04:30 2022 ] Top1: 55.64% +[ Tue Sep 13 23:04:30 2022 ] Top5: 82.05% +[ Tue Sep 13 23:04:30 2022 ] Training epoch: 64 +[ Tue Sep 13 23:05:23 2022 ] Batch(93/162) done. Loss: 0.0377 lr:0.010000 +[ Tue Sep 13 23:06:00 2022 ] Eval epoch: 64 +[ Tue Sep 13 23:08:55 2022 ] Mean test loss of 930 batches: 3.0037639141082764. +[ Tue Sep 13 23:08:56 2022 ] Top1: 55.98% +[ Tue Sep 13 23:08:56 2022 ] Top5: 82.32% +[ Tue Sep 13 23:08:56 2022 ] Training epoch: 65 +[ Tue Sep 13 23:09:16 2022 ] Batch(31/162) done. Loss: 0.0785 lr:0.010000 +[ Tue Sep 13 23:10:09 2022 ] Batch(131/162) done. Loss: 0.0441 lr:0.010000 +[ Tue Sep 13 23:10:25 2022 ] Eval epoch: 65 +[ Tue Sep 13 23:13:21 2022 ] Mean test loss of 930 batches: 3.0986738204956055. +[ Tue Sep 13 23:13:21 2022 ] Top1: 54.00% +[ Tue Sep 13 23:13:22 2022 ] Top5: 81.36% +[ Tue Sep 13 23:13:22 2022 ] Training epoch: 66 +[ Tue Sep 13 23:14:02 2022 ] Batch(69/162) done. Loss: 0.0315 lr:0.010000 +[ Tue Sep 13 23:14:51 2022 ] Eval epoch: 66 +[ Tue Sep 13 23:17:47 2022 ] Mean test loss of 930 batches: 2.9420394897460938. +[ Tue Sep 13 23:17:47 2022 ] Top1: 55.64% +[ Tue Sep 13 23:17:47 2022 ] Top5: 82.06% +[ Tue Sep 13 23:17:48 2022 ] Training epoch: 67 +[ Tue Sep 13 23:17:55 2022 ] Batch(7/162) done. Loss: 0.0441 lr:0.010000 +[ Tue Sep 13 23:18:48 2022 ] Batch(107/162) done. Loss: 0.1258 lr:0.010000 +[ Tue Sep 13 23:19:17 2022 ] Eval epoch: 67 +[ Tue Sep 13 23:22:12 2022 ] Mean test loss of 930 batches: 3.000858783721924. +[ Tue Sep 13 23:22:12 2022 ] Top1: 53.96% +[ Tue Sep 13 23:22:13 2022 ] Top5: 81.23% +[ Tue Sep 13 23:22:13 2022 ] Training epoch: 68 +[ Tue Sep 13 23:22:40 2022 ] Batch(45/162) done. Loss: 0.0435 lr:0.010000 +[ Tue Sep 13 23:23:34 2022 ] Batch(145/162) done. 
Loss: 0.0925 lr:0.010000 +[ Tue Sep 13 23:23:42 2022 ] Eval epoch: 68 +[ Tue Sep 13 23:26:38 2022 ] Mean test loss of 930 batches: 3.1119837760925293. +[ Tue Sep 13 23:26:38 2022 ] Top1: 54.25% +[ Tue Sep 13 23:26:39 2022 ] Top5: 81.33% +[ Tue Sep 13 23:26:39 2022 ] Training epoch: 69 +[ Tue Sep 13 23:27:26 2022 ] Batch(83/162) done. Loss: 0.0659 lr:0.010000 +[ Tue Sep 13 23:28:08 2022 ] Eval epoch: 69 +[ Tue Sep 13 23:31:04 2022 ] Mean test loss of 930 batches: 3.1158344745635986. +[ Tue Sep 13 23:31:04 2022 ] Top1: 55.40% +[ Tue Sep 13 23:31:05 2022 ] Top5: 81.91% +[ Tue Sep 13 23:31:05 2022 ] Training epoch: 70 +[ Tue Sep 13 23:31:19 2022 ] Batch(21/162) done. Loss: 0.1093 lr:0.010000 +[ Tue Sep 13 23:32:13 2022 ] Batch(121/162) done. Loss: 0.0139 lr:0.010000 +[ Tue Sep 13 23:32:34 2022 ] Eval epoch: 70 +[ Tue Sep 13 23:35:30 2022 ] Mean test loss of 930 batches: 3.0413055419921875. +[ Tue Sep 13 23:35:30 2022 ] Top1: 55.38% +[ Tue Sep 13 23:35:31 2022 ] Top5: 82.09% +[ Tue Sep 13 23:35:31 2022 ] Training epoch: 71 +[ Tue Sep 13 23:36:05 2022 ] Batch(59/162) done. Loss: 0.0630 lr:0.010000 +[ Tue Sep 13 23:36:59 2022 ] Batch(159/162) done. Loss: 0.0268 lr:0.010000 +[ Tue Sep 13 23:37:00 2022 ] Eval epoch: 71 +[ Tue Sep 13 23:39:56 2022 ] Mean test loss of 930 batches: 3.042304515838623. +[ Tue Sep 13 23:39:56 2022 ] Top1: 55.49% +[ Tue Sep 13 23:39:56 2022 ] Top5: 81.92% +[ Tue Sep 13 23:39:57 2022 ] Training epoch: 72 +[ Tue Sep 13 23:40:52 2022 ] Batch(97/162) done. Loss: 0.0265 lr:0.010000 +[ Tue Sep 13 23:41:26 2022 ] Eval epoch: 72 +[ Tue Sep 13 23:44:22 2022 ] Mean test loss of 930 batches: 2.9594500064849854. +[ Tue Sep 13 23:44:22 2022 ] Top1: 56.20% +[ Tue Sep 13 23:44:23 2022 ] Top5: 82.35% +[ Tue Sep 13 23:44:23 2022 ] Training epoch: 73 +[ Tue Sep 13 23:44:45 2022 ] Batch(35/162) done. Loss: 0.0143 lr:0.010000 +[ Tue Sep 13 23:45:38 2022 ] Batch(135/162) done. 
Loss: 0.0321 lr:0.010000 +[ Tue Sep 13 23:45:53 2022 ] Eval epoch: 73 +[ Tue Sep 13 23:48:48 2022 ] Mean test loss of 930 batches: 3.0188536643981934. +[ Tue Sep 13 23:48:49 2022 ] Top1: 55.75% +[ Tue Sep 13 23:48:49 2022 ] Top5: 82.11% +[ Tue Sep 13 23:48:49 2022 ] Training epoch: 74 +[ Tue Sep 13 23:49:31 2022 ] Batch(73/162) done. Loss: 0.0452 lr:0.010000 +[ Tue Sep 13 23:50:19 2022 ] Eval epoch: 74 +[ Tue Sep 13 23:53:14 2022 ] Mean test loss of 930 batches: 3.0516719818115234. +[ Tue Sep 13 23:53:15 2022 ] Top1: 55.80% +[ Tue Sep 13 23:53:15 2022 ] Top5: 82.16% +[ Tue Sep 13 23:53:15 2022 ] Training epoch: 75 +[ Tue Sep 13 23:53:24 2022 ] Batch(11/162) done. Loss: 0.0553 lr:0.010000 +[ Tue Sep 13 23:54:18 2022 ] Batch(111/162) done. Loss: 0.0420 lr:0.010000 +[ Tue Sep 13 23:54:45 2022 ] Eval epoch: 75 +[ Tue Sep 13 23:57:41 2022 ] Mean test loss of 930 batches: 3.168367624282837. +[ Tue Sep 13 23:57:42 2022 ] Top1: 53.77% +[ Tue Sep 13 23:57:42 2022 ] Top5: 80.80% +[ Tue Sep 13 23:57:42 2022 ] Training epoch: 76 +[ Tue Sep 13 23:58:12 2022 ] Batch(49/162) done. Loss: 0.0311 lr:0.010000 +[ Tue Sep 13 23:59:05 2022 ] Batch(149/162) done. Loss: 0.0690 lr:0.010000 +[ Tue Sep 13 23:59:12 2022 ] Eval epoch: 76 +[ Wed Sep 14 00:02:07 2022 ] Mean test loss of 930 batches: 3.2538864612579346. +[ Wed Sep 14 00:02:08 2022 ] Top1: 55.50% +[ Wed Sep 14 00:02:08 2022 ] Top5: 81.82% +[ Wed Sep 14 00:02:08 2022 ] Training epoch: 77 +[ Wed Sep 14 00:02:58 2022 ] Batch(87/162) done. Loss: 0.0741 lr:0.010000 +[ Wed Sep 14 00:03:38 2022 ] Eval epoch: 77 +[ Wed Sep 14 00:06:33 2022 ] Mean test loss of 930 batches: 3.240590810775757. +[ Wed Sep 14 00:06:33 2022 ] Top1: 55.54% +[ Wed Sep 14 00:06:34 2022 ] Top5: 82.00% +[ Wed Sep 14 00:06:34 2022 ] Training epoch: 78 +[ Wed Sep 14 00:06:51 2022 ] Batch(25/162) done. Loss: 0.0154 lr:0.010000 +[ Wed Sep 14 00:07:44 2022 ] Batch(125/162) done. 
Loss: 0.0165 lr:0.010000 +[ Wed Sep 14 00:08:04 2022 ] Eval epoch: 78 +[ Wed Sep 14 00:10:59 2022 ] Mean test loss of 930 batches: 3.1219122409820557. +[ Wed Sep 14 00:10:59 2022 ] Top1: 55.73% +[ Wed Sep 14 00:11:00 2022 ] Top5: 82.06% +[ Wed Sep 14 00:11:00 2022 ] Training epoch: 79 +[ Wed Sep 14 00:11:37 2022 ] Batch(63/162) done. Loss: 0.0136 lr:0.010000 +[ Wed Sep 14 00:12:29 2022 ] Eval epoch: 79 +[ Wed Sep 14 00:15:25 2022 ] Mean test loss of 930 batches: 3.2684264183044434. +[ Wed Sep 14 00:15:25 2022 ] Top1: 54.38% +[ Wed Sep 14 00:15:25 2022 ] Top5: 81.33% +[ Wed Sep 14 00:15:26 2022 ] Training epoch: 80 +[ Wed Sep 14 00:15:30 2022 ] Batch(1/162) done. Loss: 0.0127 lr:0.010000 +[ Wed Sep 14 00:16:23 2022 ] Batch(101/162) done. Loss: 0.0567 lr:0.010000 +[ Wed Sep 14 00:16:55 2022 ] Eval epoch: 80 +[ Wed Sep 14 00:19:50 2022 ] Mean test loss of 930 batches: 3.172088623046875. +[ Wed Sep 14 00:19:50 2022 ] Top1: 55.38% +[ Wed Sep 14 00:19:51 2022 ] Top5: 81.78% +[ Wed Sep 14 00:19:51 2022 ] Training epoch: 81 +[ Wed Sep 14 00:20:15 2022 ] Batch(39/162) done. Loss: 0.0603 lr:0.001000 +[ Wed Sep 14 00:21:08 2022 ] Batch(139/162) done. Loss: 0.0486 lr:0.001000 +[ Wed Sep 14 00:21:20 2022 ] Eval epoch: 81 +[ Wed Sep 14 00:24:16 2022 ] Mean test loss of 930 batches: 3.256845474243164. +[ Wed Sep 14 00:24:16 2022 ] Top1: 55.29% +[ Wed Sep 14 00:24:17 2022 ] Top5: 81.75% +[ Wed Sep 14 00:24:17 2022 ] Training epoch: 82 +[ Wed Sep 14 00:25:01 2022 ] Batch(77/162) done. Loss: 0.0205 lr:0.001000 +[ Wed Sep 14 00:25:47 2022 ] Eval epoch: 82 +[ Wed Sep 14 00:28:41 2022 ] Mean test loss of 930 batches: 3.354170322418213. +[ Wed Sep 14 00:28:42 2022 ] Top1: 54.68% +[ Wed Sep 14 00:28:42 2022 ] Top5: 81.45% +[ Wed Sep 14 00:28:42 2022 ] Training epoch: 83 +[ Wed Sep 14 00:28:54 2022 ] Batch(15/162) done. Loss: 0.0281 lr:0.001000 +[ Wed Sep 14 00:29:47 2022 ] Batch(115/162) done. 
Loss: 0.0510 lr:0.001000 +[ Wed Sep 14 00:30:12 2022 ] Eval epoch: 83 +[ Wed Sep 14 00:33:07 2022 ] Mean test loss of 930 batches: 3.1280291080474854. +[ Wed Sep 14 00:33:08 2022 ] Top1: 55.75% +[ Wed Sep 14 00:33:08 2022 ] Top5: 82.10% +[ Wed Sep 14 00:33:08 2022 ] Training epoch: 84 +[ Wed Sep 14 00:33:40 2022 ] Batch(53/162) done. Loss: 0.0216 lr:0.001000 +[ Wed Sep 14 00:34:33 2022 ] Batch(153/162) done. Loss: 0.0449 lr:0.001000 +[ Wed Sep 14 00:34:38 2022 ] Eval epoch: 84 +[ Wed Sep 14 00:37:33 2022 ] Mean test loss of 930 batches: 3.2356135845184326. +[ Wed Sep 14 00:37:33 2022 ] Top1: 55.73% +[ Wed Sep 14 00:37:34 2022 ] Top5: 82.04% +[ Wed Sep 14 00:37:34 2022 ] Training epoch: 85 +[ Wed Sep 14 00:38:26 2022 ] Batch(91/162) done. Loss: 0.0204 lr:0.001000 +[ Wed Sep 14 00:39:03 2022 ] Eval epoch: 85 +[ Wed Sep 14 00:41:59 2022 ] Mean test loss of 930 batches: 3.166569471359253. +[ Wed Sep 14 00:42:00 2022 ] Top1: 55.83% +[ Wed Sep 14 00:42:00 2022 ] Top5: 82.28% +[ Wed Sep 14 00:42:01 2022 ] Training epoch: 86 +[ Wed Sep 14 00:42:19 2022 ] Batch(29/162) done. Loss: 0.1386 lr:0.001000 +[ Wed Sep 14 00:43:13 2022 ] Batch(129/162) done. Loss: 0.0539 lr:0.001000 +[ Wed Sep 14 00:43:30 2022 ] Eval epoch: 86 +[ Wed Sep 14 00:46:25 2022 ] Mean test loss of 930 batches: 3.0878560543060303. +[ Wed Sep 14 00:46:26 2022 ] Top1: 56.25% +[ Wed Sep 14 00:46:26 2022 ] Top5: 82.38% +[ Wed Sep 14 00:46:26 2022 ] Training epoch: 87 +[ Wed Sep 14 00:47:05 2022 ] Batch(67/162) done. Loss: 0.0155 lr:0.001000 +[ Wed Sep 14 00:47:56 2022 ] Eval epoch: 87 +[ Wed Sep 14 00:50:51 2022 ] Mean test loss of 930 batches: 3.159611940383911. +[ Wed Sep 14 00:50:51 2022 ] Top1: 55.64% +[ Wed Sep 14 00:50:52 2022 ] Top5: 82.13% +[ Wed Sep 14 00:50:52 2022 ] Training epoch: 88 +[ Wed Sep 14 00:50:58 2022 ] Batch(5/162) done. Loss: 0.0725 lr:0.001000 +[ Wed Sep 14 00:51:51 2022 ] Batch(105/162) done. 
Loss: 0.0445 lr:0.001000 +[ Wed Sep 14 00:52:21 2022 ] Eval epoch: 88 +[ Wed Sep 14 00:55:17 2022 ] Mean test loss of 930 batches: 3.1161746978759766. +[ Wed Sep 14 00:55:18 2022 ] Top1: 56.49% +[ Wed Sep 14 00:55:18 2022 ] Top5: 82.62% +[ Wed Sep 14 00:55:18 2022 ] Training epoch: 89 +[ Wed Sep 14 00:55:44 2022 ] Batch(43/162) done. Loss: 0.0231 lr:0.001000 +[ Wed Sep 14 00:56:38 2022 ] Batch(143/162) done. Loss: 0.0668 lr:0.001000 +[ Wed Sep 14 00:56:48 2022 ] Eval epoch: 89 +[ Wed Sep 14 00:59:43 2022 ] Mean test loss of 930 batches: 3.1481363773345947. +[ Wed Sep 14 00:59:43 2022 ] Top1: 55.74% +[ Wed Sep 14 00:59:44 2022 ] Top5: 82.12% +[ Wed Sep 14 00:59:44 2022 ] Training epoch: 90 +[ Wed Sep 14 01:00:31 2022 ] Batch(81/162) done. Loss: 0.0568 lr:0.001000 +[ Wed Sep 14 01:01:14 2022 ] Eval epoch: 90 +[ Wed Sep 14 01:04:09 2022 ] Mean test loss of 930 batches: 3.179015874862671. +[ Wed Sep 14 01:04:09 2022 ] Top1: 55.97% +[ Wed Sep 14 01:04:10 2022 ] Top5: 82.25% +[ Wed Sep 14 01:04:10 2022 ] Training epoch: 91 +[ Wed Sep 14 01:04:23 2022 ] Batch(19/162) done. Loss: 0.0575 lr:0.001000 +[ Wed Sep 14 01:05:17 2022 ] Batch(119/162) done. Loss: 0.0452 lr:0.001000 +[ Wed Sep 14 01:05:40 2022 ] Eval epoch: 91 +[ Wed Sep 14 01:08:35 2022 ] Mean test loss of 930 batches: 3.1520650386810303. +[ Wed Sep 14 01:08:36 2022 ] Top1: 55.99% +[ Wed Sep 14 01:08:36 2022 ] Top5: 82.21% +[ Wed Sep 14 01:08:36 2022 ] Training epoch: 92 +[ Wed Sep 14 01:09:10 2022 ] Batch(57/162) done. Loss: 0.0443 lr:0.001000 +[ Wed Sep 14 01:10:03 2022 ] Batch(157/162) done. Loss: 0.0655 lr:0.001000 +[ Wed Sep 14 01:10:06 2022 ] Eval epoch: 92 +[ Wed Sep 14 01:13:01 2022 ] Mean test loss of 930 batches: 3.122722864151001. +[ Wed Sep 14 01:13:01 2022 ] Top1: 55.44% +[ Wed Sep 14 01:13:02 2022 ] Top5: 81.86% +[ Wed Sep 14 01:13:02 2022 ] Training epoch: 93 +[ Wed Sep 14 01:13:56 2022 ] Batch(95/162) done. 
Loss: 0.0390 lr:0.001000 +[ Wed Sep 14 01:14:31 2022 ] Eval epoch: 93 +[ Wed Sep 14 01:17:27 2022 ] Mean test loss of 930 batches: 3.3127288818359375. +[ Wed Sep 14 01:17:27 2022 ] Top1: 53.86% +[ Wed Sep 14 01:17:28 2022 ] Top5: 81.14% +[ Wed Sep 14 01:17:28 2022 ] Training epoch: 94 +[ Wed Sep 14 01:17:49 2022 ] Batch(33/162) done. Loss: 0.0923 lr:0.001000 +[ Wed Sep 14 01:18:42 2022 ] Batch(133/162) done. Loss: 0.0343 lr:0.001000 +[ Wed Sep 14 01:18:58 2022 ] Eval epoch: 94 +[ Wed Sep 14 01:21:53 2022 ] Mean test loss of 930 batches: 3.1064746379852295. +[ Wed Sep 14 01:21:53 2022 ] Top1: 56.02% +[ Wed Sep 14 01:21:54 2022 ] Top5: 82.27% +[ Wed Sep 14 01:21:54 2022 ] Training epoch: 95 +[ Wed Sep 14 01:22:35 2022 ] Batch(71/162) done. Loss: 0.0166 lr:0.001000 +[ Wed Sep 14 01:23:24 2022 ] Eval epoch: 95 +[ Wed Sep 14 01:26:19 2022 ] Mean test loss of 930 batches: 3.1581296920776367. +[ Wed Sep 14 01:26:19 2022 ] Top1: 53.96% +[ Wed Sep 14 01:26:20 2022 ] Top5: 81.01% +[ Wed Sep 14 01:26:20 2022 ] Training epoch: 96 +[ Wed Sep 14 01:26:28 2022 ] Batch(9/162) done. Loss: 0.0111 lr:0.001000 +[ Wed Sep 14 01:27:22 2022 ] Batch(109/162) done. Loss: 0.0800 lr:0.001000 +[ Wed Sep 14 01:27:50 2022 ] Eval epoch: 96 +[ Wed Sep 14 01:30:45 2022 ] Mean test loss of 930 batches: 3.182723045349121. +[ Wed Sep 14 01:30:45 2022 ] Top1: 55.66% +[ Wed Sep 14 01:30:46 2022 ] Top5: 82.06% +[ Wed Sep 14 01:30:46 2022 ] Training epoch: 97 +[ Wed Sep 14 01:31:14 2022 ] Batch(47/162) done. Loss: 0.0364 lr:0.001000 +[ Wed Sep 14 01:32:08 2022 ] Batch(147/162) done. Loss: 0.0555 lr:0.001000 +[ Wed Sep 14 01:32:16 2022 ] Eval epoch: 97 +[ Wed Sep 14 01:35:11 2022 ] Mean test loss of 930 batches: 3.2519757747650146. +[ Wed Sep 14 01:35:12 2022 ] Top1: 55.02% +[ Wed Sep 14 01:35:12 2022 ] Top5: 81.75% +[ Wed Sep 14 01:35:12 2022 ] Training epoch: 98 +[ Wed Sep 14 01:36:01 2022 ] Batch(85/162) done. 
Loss: 0.0209 lr:0.001000 +[ Wed Sep 14 01:36:42 2022 ] Eval epoch: 98 +[ Wed Sep 14 01:39:37 2022 ] Mean test loss of 930 batches: 3.2562928199768066. +[ Wed Sep 14 01:39:37 2022 ] Top1: 55.94% +[ Wed Sep 14 01:39:38 2022 ] Top5: 82.22% +[ Wed Sep 14 01:39:38 2022 ] Training epoch: 99 +[ Wed Sep 14 01:39:54 2022 ] Batch(23/162) done. Loss: 0.1453 lr:0.001000 +[ Wed Sep 14 01:40:47 2022 ] Batch(123/162) done. Loss: 0.0869 lr:0.001000 +[ Wed Sep 14 01:41:08 2022 ] Eval epoch: 99 +[ Wed Sep 14 01:44:03 2022 ] Mean test loss of 930 batches: 3.168196201324463. +[ Wed Sep 14 01:44:03 2022 ] Top1: 55.50% +[ Wed Sep 14 01:44:04 2022 ] Top5: 81.94% +[ Wed Sep 14 01:44:04 2022 ] Training epoch: 100 +[ Wed Sep 14 01:44:40 2022 ] Batch(61/162) done. Loss: 0.0889 lr:0.001000 +[ Wed Sep 14 01:45:33 2022 ] Batch(161/162) done. Loss: 0.0700 lr:0.001000 +[ Wed Sep 14 01:45:34 2022 ] Eval epoch: 100 +[ Wed Sep 14 01:48:28 2022 ] Mean test loss of 930 batches: 3.1338253021240234. +[ Wed Sep 14 01:48:29 2022 ] Top1: 55.56% +[ Wed Sep 14 01:48:29 2022 ] Top5: 82.05% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95eabebe3c8c9d15c3043908dc57294e74d9e9b4 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu120_joint_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_xset +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: 
true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_xset diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = 
nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, 
num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, 
stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..57342906731da287f0165c0d489d95e086e1c4fd --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00b889d584186751762a5588f6ce7eb9e76bf9cee0dde92ea70cd0316a4f3d37 +size 34946665 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..72a517a4da463c29dc8fd429fb4a64347d021d92 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xset/ntu120_joint_xset/log.txt @@ -0,0 +1,665 @@ +[ Tue Sep 13 18:24:44 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_xset', 'model_saved_name': './save_models/ntu120_joint_xset', 'Experiment_name': 'ntu120_joint_xset', 'config': 
'./config/ntu120_xset/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 18:24:44 2022 ] Training epoch: 1 +[ Tue Sep 13 18:25:39 2022 ] Batch(99/162) done. Loss: 3.8180 lr:0.100000 +[ Tue Sep 13 18:26:07 2022 ] Eval epoch: 1 +[ Tue Sep 13 18:29:00 2022 ] Mean test loss of 930 batches: 5.057918071746826. +[ Tue Sep 13 18:29:00 2022 ] Top1: 5.31% +[ Tue Sep 13 18:29:01 2022 ] Top5: 19.98% +[ Tue Sep 13 18:29:01 2022 ] Training epoch: 2 +[ Tue Sep 13 18:29:24 2022 ] Batch(37/162) done. Loss: 3.4480 lr:0.100000 +[ Tue Sep 13 18:30:17 2022 ] Batch(137/162) done. Loss: 3.0256 lr:0.100000 +[ Tue Sep 13 18:30:29 2022 ] Eval epoch: 2 +[ Tue Sep 13 18:33:23 2022 ] Mean test loss of 930 batches: 4.520117282867432. 
+[ Tue Sep 13 18:33:23 2022 ] Top1: 12.39% +[ Tue Sep 13 18:33:24 2022 ] Top5: 32.08% +[ Tue Sep 13 18:33:24 2022 ] Training epoch: 3 +[ Tue Sep 13 18:34:07 2022 ] Batch(75/162) done. Loss: 2.6454 lr:0.100000 +[ Tue Sep 13 18:34:53 2022 ] Eval epoch: 3 +[ Tue Sep 13 18:37:45 2022 ] Mean test loss of 930 batches: 4.802359580993652. +[ Tue Sep 13 18:37:46 2022 ] Top1: 12.20% +[ Tue Sep 13 18:37:46 2022 ] Top5: 35.43% +[ Tue Sep 13 18:37:46 2022 ] Training epoch: 4 +[ Tue Sep 13 18:37:57 2022 ] Batch(13/162) done. Loss: 2.7732 lr:0.100000 +[ Tue Sep 13 18:38:49 2022 ] Batch(113/162) done. Loss: 2.5822 lr:0.100000 +[ Tue Sep 13 18:39:15 2022 ] Eval epoch: 4 +[ Tue Sep 13 18:42:07 2022 ] Mean test loss of 930 batches: 4.510881423950195. +[ Tue Sep 13 18:42:08 2022 ] Top1: 15.24% +[ Tue Sep 13 18:42:08 2022 ] Top5: 40.40% +[ Tue Sep 13 18:42:08 2022 ] Training epoch: 5 +[ Tue Sep 13 18:42:39 2022 ] Batch(51/162) done. Loss: 1.9599 lr:0.100000 +[ Tue Sep 13 18:43:31 2022 ] Batch(151/162) done. Loss: 2.2079 lr:0.100000 +[ Tue Sep 13 18:43:37 2022 ] Eval epoch: 5 +[ Tue Sep 13 18:46:30 2022 ] Mean test loss of 930 batches: 3.514641523361206. +[ Tue Sep 13 18:46:30 2022 ] Top1: 21.95% +[ Tue Sep 13 18:46:30 2022 ] Top5: 45.98% +[ Tue Sep 13 18:46:31 2022 ] Training epoch: 6 +[ Tue Sep 13 18:47:21 2022 ] Batch(89/162) done. Loss: 1.8766 lr:0.100000 +[ Tue Sep 13 18:47:59 2022 ] Eval epoch: 6 +[ Tue Sep 13 18:50:53 2022 ] Mean test loss of 930 batches: 3.3838963508605957. +[ Tue Sep 13 18:50:54 2022 ] Top1: 24.29% +[ Tue Sep 13 18:50:54 2022 ] Top5: 51.41% +[ Tue Sep 13 18:50:54 2022 ] Training epoch: 7 +[ Tue Sep 13 18:51:12 2022 ] Batch(27/162) done. Loss: 1.7406 lr:0.100000 +[ Tue Sep 13 18:52:05 2022 ] Batch(127/162) done. Loss: 1.5248 lr:0.100000 +[ Tue Sep 13 18:52:23 2022 ] Eval epoch: 7 +[ Tue Sep 13 18:55:16 2022 ] Mean test loss of 930 batches: 3.594515562057495. 
+[ Tue Sep 13 18:55:16 2022 ] Top1: 22.54% +[ Tue Sep 13 18:55:17 2022 ] Top5: 49.68% +[ Tue Sep 13 18:55:17 2022 ] Training epoch: 8 +[ Tue Sep 13 18:55:55 2022 ] Batch(65/162) done. Loss: 1.9312 lr:0.100000 +[ Tue Sep 13 18:56:46 2022 ] Eval epoch: 8 +[ Tue Sep 13 18:59:39 2022 ] Mean test loss of 930 batches: 3.6537744998931885. +[ Tue Sep 13 18:59:40 2022 ] Top1: 23.86% +[ Tue Sep 13 18:59:40 2022 ] Top5: 51.98% +[ Tue Sep 13 18:59:41 2022 ] Training epoch: 9 +[ Tue Sep 13 18:59:46 2022 ] Batch(3/162) done. Loss: 1.4093 lr:0.100000 +[ Tue Sep 13 19:00:38 2022 ] Batch(103/162) done. Loss: 2.0028 lr:0.100000 +[ Tue Sep 13 19:01:09 2022 ] Eval epoch: 9 +[ Tue Sep 13 19:04:03 2022 ] Mean test loss of 930 batches: 3.2135202884674072. +[ Tue Sep 13 19:04:03 2022 ] Top1: 27.69% +[ Tue Sep 13 19:04:04 2022 ] Top5: 57.09% +[ Tue Sep 13 19:04:04 2022 ] Training epoch: 10 +[ Tue Sep 13 19:04:29 2022 ] Batch(41/162) done. Loss: 1.3819 lr:0.100000 +[ Tue Sep 13 19:05:22 2022 ] Batch(141/162) done. Loss: 1.3964 lr:0.100000 +[ Tue Sep 13 19:05:32 2022 ] Eval epoch: 10 +[ Tue Sep 13 19:08:26 2022 ] Mean test loss of 930 batches: 3.041006088256836. +[ Tue Sep 13 19:08:26 2022 ] Top1: 32.01% +[ Tue Sep 13 19:08:27 2022 ] Top5: 60.68% +[ Tue Sep 13 19:08:27 2022 ] Training epoch: 11 +[ Tue Sep 13 19:09:12 2022 ] Batch(79/162) done. Loss: 1.6535 lr:0.100000 +[ Tue Sep 13 19:09:55 2022 ] Eval epoch: 11 +[ Tue Sep 13 19:12:48 2022 ] Mean test loss of 930 batches: 2.9078786373138428. +[ Tue Sep 13 19:12:48 2022 ] Top1: 31.72% +[ Tue Sep 13 19:12:49 2022 ] Top5: 63.92% +[ Tue Sep 13 19:12:49 2022 ] Training epoch: 12 +[ Tue Sep 13 19:13:01 2022 ] Batch(17/162) done. Loss: 1.0866 lr:0.100000 +[ Tue Sep 13 19:13:54 2022 ] Batch(117/162) done. Loss: 1.6844 lr:0.100000 +[ Tue Sep 13 19:14:17 2022 ] Eval epoch: 12 +[ Tue Sep 13 19:17:10 2022 ] Mean test loss of 930 batches: 2.8232834339141846. 
+[ Tue Sep 13 19:17:11 2022 ] Top1: 34.05% +[ Tue Sep 13 19:17:11 2022 ] Top5: 65.62% +[ Tue Sep 13 19:17:12 2022 ] Training epoch: 13 +[ Tue Sep 13 19:17:44 2022 ] Batch(55/162) done. Loss: 1.5754 lr:0.100000 +[ Tue Sep 13 19:18:36 2022 ] Batch(155/162) done. Loss: 1.3233 lr:0.100000 +[ Tue Sep 13 19:18:40 2022 ] Eval epoch: 13 +[ Tue Sep 13 19:21:33 2022 ] Mean test loss of 930 batches: 2.576730966567993. +[ Tue Sep 13 19:21:34 2022 ] Top1: 37.25% +[ Tue Sep 13 19:21:34 2022 ] Top5: 67.63% +[ Tue Sep 13 19:21:34 2022 ] Training epoch: 14 +[ Tue Sep 13 19:22:27 2022 ] Batch(93/162) done. Loss: 0.9363 lr:0.100000 +[ Tue Sep 13 19:23:03 2022 ] Eval epoch: 14 +[ Tue Sep 13 19:25:56 2022 ] Mean test loss of 930 batches: 2.4898667335510254. +[ Tue Sep 13 19:25:57 2022 ] Top1: 39.04% +[ Tue Sep 13 19:25:57 2022 ] Top5: 71.56% +[ Tue Sep 13 19:25:57 2022 ] Training epoch: 15 +[ Tue Sep 13 19:26:17 2022 ] Batch(31/162) done. Loss: 1.2431 lr:0.100000 +[ Tue Sep 13 19:27:10 2022 ] Batch(131/162) done. Loss: 1.2638 lr:0.100000 +[ Tue Sep 13 19:27:26 2022 ] Eval epoch: 15 +[ Tue Sep 13 19:30:19 2022 ] Mean test loss of 930 batches: 8.205449104309082. +[ Tue Sep 13 19:30:19 2022 ] Top1: 15.85% +[ Tue Sep 13 19:30:20 2022 ] Top5: 45.73% +[ Tue Sep 13 19:30:20 2022 ] Training epoch: 16 +[ Tue Sep 13 19:31:00 2022 ] Batch(69/162) done. Loss: 0.6821 lr:0.100000 +[ Tue Sep 13 19:31:48 2022 ] Eval epoch: 16 +[ Tue Sep 13 19:34:42 2022 ] Mean test loss of 930 batches: 2.558546781539917. +[ Tue Sep 13 19:34:43 2022 ] Top1: 37.41% +[ Tue Sep 13 19:34:43 2022 ] Top5: 69.58% +[ Tue Sep 13 19:34:44 2022 ] Training epoch: 17 +[ Tue Sep 13 19:34:51 2022 ] Batch(7/162) done. Loss: 0.5426 lr:0.100000 +[ Tue Sep 13 19:35:43 2022 ] Batch(107/162) done. Loss: 0.9380 lr:0.100000 +[ Tue Sep 13 19:36:12 2022 ] Eval epoch: 17 +[ Tue Sep 13 19:39:05 2022 ] Mean test loss of 930 batches: 2.8448023796081543. 
+[ Tue Sep 13 19:39:05 2022 ] Top1: 38.49% +[ Tue Sep 13 19:39:06 2022 ] Top5: 71.85% +[ Tue Sep 13 19:39:06 2022 ] Training epoch: 18 +[ Tue Sep 13 19:39:33 2022 ] Batch(45/162) done. Loss: 0.8881 lr:0.100000 +[ Tue Sep 13 19:40:26 2022 ] Batch(145/162) done. Loss: 1.2085 lr:0.100000 +[ Tue Sep 13 19:40:35 2022 ] Eval epoch: 18 +[ Tue Sep 13 19:43:28 2022 ] Mean test loss of 930 batches: 2.3035261631011963. +[ Tue Sep 13 19:43:29 2022 ] Top1: 44.94% +[ Tue Sep 13 19:43:30 2022 ] Top5: 76.12% +[ Tue Sep 13 19:43:30 2022 ] Training epoch: 19 +[ Tue Sep 13 19:44:17 2022 ] Batch(83/162) done. Loss: 1.0136 lr:0.100000 +[ Tue Sep 13 19:44:58 2022 ] Eval epoch: 19 +[ Tue Sep 13 19:47:51 2022 ] Mean test loss of 930 batches: 2.334582805633545. +[ Tue Sep 13 19:47:51 2022 ] Top1: 43.35% +[ Tue Sep 13 19:47:52 2022 ] Top5: 75.37% +[ Tue Sep 13 19:47:52 2022 ] Training epoch: 20 +[ Tue Sep 13 19:48:07 2022 ] Batch(21/162) done. Loss: 0.6658 lr:0.100000 +[ Tue Sep 13 19:48:59 2022 ] Batch(121/162) done. Loss: 1.1169 lr:0.100000 +[ Tue Sep 13 19:49:21 2022 ] Eval epoch: 20 +[ Tue Sep 13 19:52:14 2022 ] Mean test loss of 930 batches: 2.371380567550659. +[ Tue Sep 13 19:52:15 2022 ] Top1: 43.80% +[ Tue Sep 13 19:52:16 2022 ] Top5: 74.84% +[ Tue Sep 13 19:52:16 2022 ] Training epoch: 21 +[ Tue Sep 13 19:52:51 2022 ] Batch(59/162) done. Loss: 0.8850 lr:0.100000 +[ Tue Sep 13 19:53:44 2022 ] Batch(159/162) done. Loss: 1.1289 lr:0.100000 +[ Tue Sep 13 19:53:45 2022 ] Eval epoch: 21 +[ Tue Sep 13 19:56:39 2022 ] Mean test loss of 930 batches: 4.497220039367676. +[ Tue Sep 13 19:56:39 2022 ] Top1: 32.04% +[ Tue Sep 13 19:56:40 2022 ] Top5: 64.57% +[ Tue Sep 13 19:56:40 2022 ] Training epoch: 22 +[ Tue Sep 13 19:57:35 2022 ] Batch(97/162) done. Loss: 1.0195 lr:0.100000 +[ Tue Sep 13 19:58:09 2022 ] Eval epoch: 22 +[ Tue Sep 13 20:01:02 2022 ] Mean test loss of 930 batches: 2.406083345413208. 
+[ Tue Sep 13 20:01:03 2022 ] Top1: 45.50% +[ Tue Sep 13 20:01:04 2022 ] Top5: 76.21% +[ Tue Sep 13 20:01:04 2022 ] Training epoch: 23 +[ Tue Sep 13 20:01:26 2022 ] Batch(35/162) done. Loss: 0.7738 lr:0.100000 +[ Tue Sep 13 20:02:19 2022 ] Batch(135/162) done. Loss: 0.8067 lr:0.100000 +[ Tue Sep 13 20:02:33 2022 ] Eval epoch: 23 +[ Tue Sep 13 20:05:27 2022 ] Mean test loss of 930 batches: 3.3997855186462402. +[ Tue Sep 13 20:05:28 2022 ] Top1: 40.54% +[ Tue Sep 13 20:05:28 2022 ] Top5: 71.48% +[ Tue Sep 13 20:05:29 2022 ] Training epoch: 24 +[ Tue Sep 13 20:06:10 2022 ] Batch(73/162) done. Loss: 0.7124 lr:0.100000 +[ Tue Sep 13 20:06:57 2022 ] Eval epoch: 24 +[ Tue Sep 13 20:09:51 2022 ] Mean test loss of 930 batches: 2.1338369846343994. +[ Tue Sep 13 20:09:51 2022 ] Top1: 48.53% +[ Tue Sep 13 20:09:52 2022 ] Top5: 78.32% +[ Tue Sep 13 20:09:52 2022 ] Training epoch: 25 +[ Tue Sep 13 20:10:02 2022 ] Batch(11/162) done. Loss: 0.7499 lr:0.100000 +[ Tue Sep 13 20:10:54 2022 ] Batch(111/162) done. Loss: 0.6216 lr:0.100000 +[ Tue Sep 13 20:11:21 2022 ] Eval epoch: 25 +[ Tue Sep 13 20:14:14 2022 ] Mean test loss of 930 batches: 3.167142152786255. +[ Tue Sep 13 20:14:15 2022 ] Top1: 41.00% +[ Tue Sep 13 20:14:16 2022 ] Top5: 73.00% +[ Tue Sep 13 20:14:16 2022 ] Training epoch: 26 +[ Tue Sep 13 20:14:45 2022 ] Batch(49/162) done. Loss: 0.5860 lr:0.100000 +[ Tue Sep 13 20:15:38 2022 ] Batch(149/162) done. Loss: 0.5114 lr:0.100000 +[ Tue Sep 13 20:15:44 2022 ] Eval epoch: 26 +[ Tue Sep 13 20:18:38 2022 ] Mean test loss of 930 batches: 2.810288667678833. +[ Tue Sep 13 20:18:39 2022 ] Top1: 44.65% +[ Tue Sep 13 20:18:39 2022 ] Top5: 75.26% +[ Tue Sep 13 20:18:40 2022 ] Training epoch: 27 +[ Tue Sep 13 20:19:29 2022 ] Batch(87/162) done. Loss: 1.0034 lr:0.100000 +[ Tue Sep 13 20:20:08 2022 ] Eval epoch: 27 +[ Tue Sep 13 20:23:01 2022 ] Mean test loss of 930 batches: 2.8310000896453857. 
+[ Tue Sep 13 20:23:02 2022 ] Top1: 44.29% +[ Tue Sep 13 20:23:03 2022 ] Top5: 75.71% +[ Tue Sep 13 20:23:03 2022 ] Training epoch: 28 +[ Tue Sep 13 20:23:19 2022 ] Batch(25/162) done. Loss: 0.5400 lr:0.100000 +[ Tue Sep 13 20:24:12 2022 ] Batch(125/162) done. Loss: 0.6086 lr:0.100000 +[ Tue Sep 13 20:24:31 2022 ] Eval epoch: 28 +[ Tue Sep 13 20:27:25 2022 ] Mean test loss of 930 batches: 2.7002975940704346. +[ Tue Sep 13 20:27:26 2022 ] Top1: 43.56% +[ Tue Sep 13 20:27:26 2022 ] Top5: 75.30% +[ Tue Sep 13 20:27:26 2022 ] Training epoch: 29 +[ Tue Sep 13 20:28:03 2022 ] Batch(63/162) done. Loss: 0.5889 lr:0.100000 +[ Tue Sep 13 20:28:55 2022 ] Eval epoch: 29 +[ Tue Sep 13 20:31:49 2022 ] Mean test loss of 930 batches: 2.3055806159973145. +[ Tue Sep 13 20:31:50 2022 ] Top1: 49.78% +[ Tue Sep 13 20:31:50 2022 ] Top5: 79.30% +[ Tue Sep 13 20:31:50 2022 ] Training epoch: 30 +[ Tue Sep 13 20:31:54 2022 ] Batch(1/162) done. Loss: 0.5570 lr:0.100000 +[ Tue Sep 13 20:32:47 2022 ] Batch(101/162) done. Loss: 0.4686 lr:0.100000 +[ Tue Sep 13 20:33:19 2022 ] Eval epoch: 30 +[ Tue Sep 13 20:36:11 2022 ] Mean test loss of 930 batches: 2.272409200668335. +[ Tue Sep 13 20:36:12 2022 ] Top1: 49.19% +[ Tue Sep 13 20:36:12 2022 ] Top5: 79.82% +[ Tue Sep 13 20:36:12 2022 ] Training epoch: 31 +[ Tue Sep 13 20:36:36 2022 ] Batch(39/162) done. Loss: 0.4463 lr:0.100000 +[ Tue Sep 13 20:37:29 2022 ] Batch(139/162) done. Loss: 0.4721 lr:0.100000 +[ Tue Sep 13 20:37:41 2022 ] Eval epoch: 31 +[ Tue Sep 13 20:40:34 2022 ] Mean test loss of 930 batches: 2.351616144180298. +[ Tue Sep 13 20:40:35 2022 ] Top1: 48.35% +[ Tue Sep 13 20:40:35 2022 ] Top5: 79.29% +[ Tue Sep 13 20:40:35 2022 ] Training epoch: 32 +[ Tue Sep 13 20:41:20 2022 ] Batch(77/162) done. Loss: 0.4002 lr:0.100000 +[ Tue Sep 13 20:42:04 2022 ] Eval epoch: 32 +[ Tue Sep 13 20:44:57 2022 ] Mean test loss of 930 batches: 2.726222276687622. 
+[ Tue Sep 13 20:44:58 2022 ] Top1: 46.53% +[ Tue Sep 13 20:44:58 2022 ] Top5: 78.03% +[ Tue Sep 13 20:44:59 2022 ] Training epoch: 33 +[ Tue Sep 13 20:45:10 2022 ] Batch(15/162) done. Loss: 0.4803 lr:0.100000 +[ Tue Sep 13 20:46:03 2022 ] Batch(115/162) done. Loss: 0.8060 lr:0.100000 +[ Tue Sep 13 20:46:27 2022 ] Eval epoch: 33 +[ Tue Sep 13 20:49:21 2022 ] Mean test loss of 930 batches: 2.3054656982421875. +[ Tue Sep 13 20:49:21 2022 ] Top1: 48.85% +[ Tue Sep 13 20:49:22 2022 ] Top5: 79.23% +[ Tue Sep 13 20:49:22 2022 ] Training epoch: 34 +[ Tue Sep 13 20:49:54 2022 ] Batch(53/162) done. Loss: 0.3195 lr:0.100000 +[ Tue Sep 13 20:50:46 2022 ] Batch(153/162) done. Loss: 0.5020 lr:0.100000 +[ Tue Sep 13 20:50:51 2022 ] Eval epoch: 34 +[ Tue Sep 13 20:53:45 2022 ] Mean test loss of 930 batches: 2.202313184738159. +[ Tue Sep 13 20:53:45 2022 ] Top1: 51.52% +[ Tue Sep 13 20:53:46 2022 ] Top5: 81.74% +[ Tue Sep 13 20:53:46 2022 ] Training epoch: 35 +[ Tue Sep 13 20:54:38 2022 ] Batch(91/162) done. Loss: 0.3751 lr:0.100000 +[ Tue Sep 13 20:55:15 2022 ] Eval epoch: 35 +[ Tue Sep 13 20:58:08 2022 ] Mean test loss of 930 batches: 2.8869104385375977. +[ Tue Sep 13 20:58:08 2022 ] Top1: 41.64% +[ Tue Sep 13 20:58:09 2022 ] Top5: 73.56% +[ Tue Sep 13 20:58:09 2022 ] Training epoch: 36 +[ Tue Sep 13 20:58:28 2022 ] Batch(29/162) done. Loss: 0.4301 lr:0.100000 +[ Tue Sep 13 20:59:20 2022 ] Batch(129/162) done. Loss: 0.5671 lr:0.100000 +[ Tue Sep 13 20:59:37 2022 ] Eval epoch: 36 +[ Tue Sep 13 21:02:31 2022 ] Mean test loss of 930 batches: 2.3880438804626465. +[ Tue Sep 13 21:02:31 2022 ] Top1: 49.24% +[ Tue Sep 13 21:02:32 2022 ] Top5: 79.65% +[ Tue Sep 13 21:02:32 2022 ] Training epoch: 37 +[ Tue Sep 13 21:03:11 2022 ] Batch(67/162) done. Loss: 0.5165 lr:0.100000 +[ Tue Sep 13 21:04:00 2022 ] Eval epoch: 37 +[ Tue Sep 13 21:06:54 2022 ] Mean test loss of 930 batches: 3.3565597534179688. 
+[ Tue Sep 13 21:06:54 2022 ] Top1: 41.14% +[ Tue Sep 13 21:06:55 2022 ] Top5: 73.15% +[ Tue Sep 13 21:06:55 2022 ] Training epoch: 38 +[ Tue Sep 13 21:07:01 2022 ] Batch(5/162) done. Loss: 0.1957 lr:0.100000 +[ Tue Sep 13 21:07:54 2022 ] Batch(105/162) done. Loss: 0.4742 lr:0.100000 +[ Tue Sep 13 21:08:24 2022 ] Eval epoch: 38 +[ Tue Sep 13 21:11:17 2022 ] Mean test loss of 930 batches: 2.8878660202026367. +[ Tue Sep 13 21:11:18 2022 ] Top1: 46.58% +[ Tue Sep 13 21:11:19 2022 ] Top5: 77.30% +[ Tue Sep 13 21:11:19 2022 ] Training epoch: 39 +[ Tue Sep 13 21:11:45 2022 ] Batch(43/162) done. Loss: 0.4849 lr:0.100000 +[ Tue Sep 13 21:12:37 2022 ] Batch(143/162) done. Loss: 0.3489 lr:0.100000 +[ Tue Sep 13 21:12:47 2022 ] Eval epoch: 39 +[ Tue Sep 13 21:15:41 2022 ] Mean test loss of 930 batches: 2.192498207092285. +[ Tue Sep 13 21:15:41 2022 ] Top1: 52.32% +[ Tue Sep 13 21:15:42 2022 ] Top5: 81.30% +[ Tue Sep 13 21:15:42 2022 ] Training epoch: 40 +[ Tue Sep 13 21:16:28 2022 ] Batch(81/162) done. Loss: 0.3842 lr:0.100000 +[ Tue Sep 13 21:17:11 2022 ] Eval epoch: 40 +[ Tue Sep 13 21:20:04 2022 ] Mean test loss of 930 batches: 2.615729331970215. +[ Tue Sep 13 21:20:04 2022 ] Top1: 46.91% +[ Tue Sep 13 21:20:05 2022 ] Top5: 78.14% +[ Tue Sep 13 21:20:05 2022 ] Training epoch: 41 +[ Tue Sep 13 21:20:19 2022 ] Batch(19/162) done. Loss: 0.1957 lr:0.100000 +[ Tue Sep 13 21:21:12 2022 ] Batch(119/162) done. Loss: 0.4244 lr:0.100000 +[ Tue Sep 13 21:21:34 2022 ] Eval epoch: 41 +[ Tue Sep 13 21:24:28 2022 ] Mean test loss of 930 batches: 2.267261505126953. +[ Tue Sep 13 21:24:28 2022 ] Top1: 52.96% +[ Tue Sep 13 21:24:29 2022 ] Top5: 81.85% +[ Tue Sep 13 21:24:29 2022 ] Training epoch: 42 +[ Tue Sep 13 21:25:03 2022 ] Batch(57/162) done. Loss: 0.2303 lr:0.100000 +[ Tue Sep 13 21:25:56 2022 ] Batch(157/162) done. Loss: 0.4598 lr:0.100000 +[ Tue Sep 13 21:25:58 2022 ] Eval epoch: 42 +[ Tue Sep 13 21:28:51 2022 ] Mean test loss of 930 batches: 2.647237777709961. 
+[ Tue Sep 13 21:28:51 2022 ] Top1: 47.21% +[ Tue Sep 13 21:28:52 2022 ] Top5: 77.54% +[ Tue Sep 13 21:28:53 2022 ] Training epoch: 43 +[ Tue Sep 13 21:29:46 2022 ] Batch(95/162) done. Loss: 0.7358 lr:0.100000 +[ Tue Sep 13 21:30:21 2022 ] Eval epoch: 43 +[ Tue Sep 13 21:33:14 2022 ] Mean test loss of 930 batches: 2.5410149097442627. +[ Tue Sep 13 21:33:14 2022 ] Top1: 51.14% +[ Tue Sep 13 21:33:15 2022 ] Top5: 81.06% +[ Tue Sep 13 21:33:15 2022 ] Training epoch: 44 +[ Tue Sep 13 21:33:36 2022 ] Batch(33/162) done. Loss: 0.3927 lr:0.100000 +[ Tue Sep 13 21:34:28 2022 ] Batch(133/162) done. Loss: 0.4016 lr:0.100000 +[ Tue Sep 13 21:34:43 2022 ] Eval epoch: 44 +[ Tue Sep 13 21:37:36 2022 ] Mean test loss of 930 batches: 2.4503626823425293. +[ Tue Sep 13 21:37:37 2022 ] Top1: 51.08% +[ Tue Sep 13 21:37:37 2022 ] Top5: 80.13% +[ Tue Sep 13 21:37:37 2022 ] Training epoch: 45 +[ Tue Sep 13 21:38:18 2022 ] Batch(71/162) done. Loss: 0.5829 lr:0.100000 +[ Tue Sep 13 21:39:06 2022 ] Eval epoch: 45 +[ Tue Sep 13 21:41:59 2022 ] Mean test loss of 930 batches: 2.5172011852264404. +[ Tue Sep 13 21:41:59 2022 ] Top1: 51.35% +[ Tue Sep 13 21:42:00 2022 ] Top5: 81.10% +[ Tue Sep 13 21:42:00 2022 ] Training epoch: 46 +[ Tue Sep 13 21:42:08 2022 ] Batch(9/162) done. Loss: 0.3420 lr:0.100000 +[ Tue Sep 13 21:43:01 2022 ] Batch(109/162) done. Loss: 0.4147 lr:0.100000 +[ Tue Sep 13 21:43:29 2022 ] Eval epoch: 46 +[ Tue Sep 13 21:46:21 2022 ] Mean test loss of 930 batches: 2.650909900665283. +[ Tue Sep 13 21:46:22 2022 ] Top1: 50.69% +[ Tue Sep 13 21:46:23 2022 ] Top5: 79.20% +[ Tue Sep 13 21:46:23 2022 ] Training epoch: 47 +[ Tue Sep 13 21:46:51 2022 ] Batch(47/162) done. Loss: 0.5927 lr:0.100000 +[ Tue Sep 13 21:47:44 2022 ] Batch(147/162) done. Loss: 0.5132 lr:0.100000 +[ Tue Sep 13 21:47:51 2022 ] Eval epoch: 47 +[ Tue Sep 13 21:50:45 2022 ] Mean test loss of 930 batches: 2.458667039871216. 
+[ Tue Sep 13 21:50:46 2022 ] Top1: 53.88% +[ Tue Sep 13 21:50:47 2022 ] Top5: 81.86% +[ Tue Sep 13 21:50:47 2022 ] Training epoch: 48 +[ Tue Sep 13 21:51:35 2022 ] Batch(85/162) done. Loss: 0.3732 lr:0.100000 +[ Tue Sep 13 21:52:15 2022 ] Eval epoch: 48 +[ Tue Sep 13 21:55:09 2022 ] Mean test loss of 930 batches: 2.575839042663574. +[ Tue Sep 13 21:55:10 2022 ] Top1: 52.06% +[ Tue Sep 13 21:55:10 2022 ] Top5: 79.94% +[ Tue Sep 13 21:55:10 2022 ] Training epoch: 49 +[ Tue Sep 13 21:55:26 2022 ] Batch(23/162) done. Loss: 0.4156 lr:0.100000 +[ Tue Sep 13 21:56:19 2022 ] Batch(123/162) done. Loss: 0.4479 lr:0.100000 +[ Tue Sep 13 21:56:39 2022 ] Eval epoch: 49 +[ Tue Sep 13 21:59:32 2022 ] Mean test loss of 930 batches: 2.6112258434295654. +[ Tue Sep 13 21:59:32 2022 ] Top1: 50.85% +[ Tue Sep 13 21:59:33 2022 ] Top5: 78.85% +[ Tue Sep 13 21:59:33 2022 ] Training epoch: 50 +[ Tue Sep 13 22:00:09 2022 ] Batch(61/162) done. Loss: 0.3971 lr:0.100000 +[ Tue Sep 13 22:01:01 2022 ] Batch(161/162) done. Loss: 0.3934 lr:0.100000 +[ Tue Sep 13 22:01:02 2022 ] Eval epoch: 50 +[ Tue Sep 13 22:03:55 2022 ] Mean test loss of 930 batches: 2.7453091144561768. +[ Tue Sep 13 22:03:56 2022 ] Top1: 48.41% +[ Tue Sep 13 22:03:56 2022 ] Top5: 77.79% +[ Tue Sep 13 22:03:57 2022 ] Training epoch: 51 +[ Tue Sep 13 22:04:52 2022 ] Batch(99/162) done. Loss: 0.4055 lr:0.100000 +[ Tue Sep 13 22:05:25 2022 ] Eval epoch: 51 +[ Tue Sep 13 22:08:18 2022 ] Mean test loss of 930 batches: 2.782780408859253. +[ Tue Sep 13 22:08:18 2022 ] Top1: 47.93% +[ Tue Sep 13 22:08:19 2022 ] Top5: 78.11% +[ Tue Sep 13 22:08:19 2022 ] Training epoch: 52 +[ Tue Sep 13 22:08:42 2022 ] Batch(37/162) done. Loss: 0.2562 lr:0.100000 +[ Tue Sep 13 22:09:35 2022 ] Batch(137/162) done. Loss: 0.2431 lr:0.100000 +[ Tue Sep 13 22:09:48 2022 ] Eval epoch: 52 +[ Tue Sep 13 22:12:41 2022 ] Mean test loss of 930 batches: 4.7305097579956055. 
+[ Tue Sep 13 22:12:41 2022 ] Top1: 37.37% +[ Tue Sep 13 22:12:42 2022 ] Top5: 64.51% +[ Tue Sep 13 22:12:42 2022 ] Training epoch: 53 +[ Tue Sep 13 22:13:25 2022 ] Batch(75/162) done. Loss: 0.2918 lr:0.100000 +[ Tue Sep 13 22:14:10 2022 ] Eval epoch: 53 +[ Tue Sep 13 22:17:03 2022 ] Mean test loss of 930 batches: 2.7816007137298584. +[ Tue Sep 13 22:17:04 2022 ] Top1: 51.75% +[ Tue Sep 13 22:17:05 2022 ] Top5: 79.28% +[ Tue Sep 13 22:17:05 2022 ] Training epoch: 54 +[ Tue Sep 13 22:17:15 2022 ] Batch(13/162) done. Loss: 0.1914 lr:0.100000 +[ Tue Sep 13 22:18:08 2022 ] Batch(113/162) done. Loss: 0.4339 lr:0.100000 +[ Tue Sep 13 22:18:33 2022 ] Eval epoch: 54 +[ Tue Sep 13 22:21:26 2022 ] Mean test loss of 930 batches: 2.4547810554504395. +[ Tue Sep 13 22:21:26 2022 ] Top1: 53.56% +[ Tue Sep 13 22:21:27 2022 ] Top5: 80.69% +[ Tue Sep 13 22:21:27 2022 ] Training epoch: 55 +[ Tue Sep 13 22:21:57 2022 ] Batch(51/162) done. Loss: 0.4438 lr:0.100000 +[ Tue Sep 13 22:22:49 2022 ] Batch(151/162) done. Loss: 0.1667 lr:0.100000 +[ Tue Sep 13 22:22:55 2022 ] Eval epoch: 55 +[ Tue Sep 13 22:25:48 2022 ] Mean test loss of 930 batches: 2.4871363639831543. +[ Tue Sep 13 22:25:48 2022 ] Top1: 52.28% +[ Tue Sep 13 22:25:49 2022 ] Top5: 80.72% +[ Tue Sep 13 22:25:49 2022 ] Training epoch: 56 +[ Tue Sep 13 22:26:39 2022 ] Batch(89/162) done. Loss: 0.4151 lr:0.100000 +[ Tue Sep 13 22:27:18 2022 ] Eval epoch: 56 +[ Tue Sep 13 22:30:11 2022 ] Mean test loss of 930 batches: 2.8810834884643555. +[ Tue Sep 13 22:30:11 2022 ] Top1: 51.17% +[ Tue Sep 13 22:30:12 2022 ] Top5: 79.63% +[ Tue Sep 13 22:30:12 2022 ] Training epoch: 57 +[ Tue Sep 13 22:30:30 2022 ] Batch(27/162) done. Loss: 0.1794 lr:0.100000 +[ Tue Sep 13 22:31:22 2022 ] Batch(127/162) done. Loss: 0.7188 lr:0.100000 +[ Tue Sep 13 22:31:40 2022 ] Eval epoch: 57 +[ Tue Sep 13 22:34:33 2022 ] Mean test loss of 930 batches: 3.043966293334961. 
+[ Tue Sep 13 22:34:34 2022 ] Top1: 46.67% +[ Tue Sep 13 22:34:34 2022 ] Top5: 76.08% +[ Tue Sep 13 22:34:34 2022 ] Training epoch: 58 +[ Tue Sep 13 22:35:12 2022 ] Batch(65/162) done. Loss: 0.2419 lr:0.100000 +[ Tue Sep 13 22:36:03 2022 ] Eval epoch: 58 +[ Tue Sep 13 22:38:56 2022 ] Mean test loss of 930 batches: 2.534623861312866. +[ Tue Sep 13 22:38:56 2022 ] Top1: 51.77% +[ Tue Sep 13 22:38:57 2022 ] Top5: 80.38% +[ Tue Sep 13 22:38:57 2022 ] Training epoch: 59 +[ Tue Sep 13 22:39:02 2022 ] Batch(3/162) done. Loss: 0.2717 lr:0.100000 +[ Tue Sep 13 22:39:55 2022 ] Batch(103/162) done. Loss: 0.4790 lr:0.100000 +[ Tue Sep 13 22:40:25 2022 ] Eval epoch: 59 +[ Tue Sep 13 22:43:18 2022 ] Mean test loss of 930 batches: 2.920837163925171. +[ Tue Sep 13 22:43:18 2022 ] Top1: 53.35% +[ Tue Sep 13 22:43:19 2022 ] Top5: 81.02% +[ Tue Sep 13 22:43:19 2022 ] Training epoch: 60 +[ Tue Sep 13 22:43:44 2022 ] Batch(41/162) done. Loss: 0.1751 lr:0.100000 +[ Tue Sep 13 22:44:37 2022 ] Batch(141/162) done. Loss: 0.4344 lr:0.100000 +[ Tue Sep 13 22:44:48 2022 ] Eval epoch: 60 +[ Tue Sep 13 22:47:40 2022 ] Mean test loss of 930 batches: 2.9989264011383057. +[ Tue Sep 13 22:47:41 2022 ] Top1: 48.94% +[ Tue Sep 13 22:47:41 2022 ] Top5: 76.92% +[ Tue Sep 13 22:47:41 2022 ] Training epoch: 61 +[ Tue Sep 13 22:48:26 2022 ] Batch(79/162) done. Loss: 0.2523 lr:0.010000 +[ Tue Sep 13 22:49:10 2022 ] Eval epoch: 61 +[ Tue Sep 13 22:52:03 2022 ] Mean test loss of 930 batches: 2.2626051902770996. +[ Tue Sep 13 22:52:04 2022 ] Top1: 58.74% +[ Tue Sep 13 22:52:04 2022 ] Top5: 84.23% +[ Tue Sep 13 22:52:05 2022 ] Training epoch: 62 +[ Tue Sep 13 22:52:17 2022 ] Batch(17/162) done. Loss: 0.0895 lr:0.010000 +[ Tue Sep 13 22:53:10 2022 ] Batch(117/162) done. Loss: 0.1551 lr:0.010000 +[ Tue Sep 13 22:53:33 2022 ] Eval epoch: 62 +[ Tue Sep 13 22:56:27 2022 ] Mean test loss of 930 batches: 2.2402141094207764. 
+[ Tue Sep 13 22:56:27 2022 ] Top1: 59.54% +[ Tue Sep 13 22:56:28 2022 ] Top5: 84.73% +[ Tue Sep 13 22:56:28 2022 ] Training epoch: 63 +[ Tue Sep 13 22:57:00 2022 ] Batch(55/162) done. Loss: 0.0543 lr:0.010000 +[ Tue Sep 13 22:57:53 2022 ] Batch(155/162) done. Loss: 0.0686 lr:0.010000 +[ Tue Sep 13 22:57:56 2022 ] Eval epoch: 63 +[ Tue Sep 13 23:00:50 2022 ] Mean test loss of 930 batches: 2.3638341426849365. +[ Tue Sep 13 23:00:50 2022 ] Top1: 58.94% +[ Tue Sep 13 23:00:50 2022 ] Top5: 84.32% +[ Tue Sep 13 23:00:51 2022 ] Training epoch: 64 +[ Tue Sep 13 23:01:43 2022 ] Batch(93/162) done. Loss: 0.0921 lr:0.010000 +[ Tue Sep 13 23:02:19 2022 ] Eval epoch: 64 +[ Tue Sep 13 23:05:13 2022 ] Mean test loss of 930 batches: 2.352214813232422. +[ Tue Sep 13 23:05:14 2022 ] Top1: 59.51% +[ Tue Sep 13 23:05:14 2022 ] Top5: 84.59% +[ Tue Sep 13 23:05:15 2022 ] Training epoch: 65 +[ Tue Sep 13 23:05:34 2022 ] Batch(31/162) done. Loss: 0.0716 lr:0.010000 +[ Tue Sep 13 23:06:27 2022 ] Batch(131/162) done. Loss: 0.0759 lr:0.010000 +[ Tue Sep 13 23:06:43 2022 ] Eval epoch: 65 +[ Tue Sep 13 23:09:36 2022 ] Mean test loss of 930 batches: 2.398852586746216. +[ Tue Sep 13 23:09:37 2022 ] Top1: 59.31% +[ Tue Sep 13 23:09:37 2022 ] Top5: 84.54% +[ Tue Sep 13 23:09:38 2022 ] Training epoch: 66 +[ Tue Sep 13 23:10:17 2022 ] Batch(69/162) done. Loss: 0.0377 lr:0.010000 +[ Tue Sep 13 23:11:06 2022 ] Eval epoch: 66 +[ Tue Sep 13 23:13:59 2022 ] Mean test loss of 930 batches: 2.3643901348114014. +[ Tue Sep 13 23:13:59 2022 ] Top1: 59.43% +[ Tue Sep 13 23:14:00 2022 ] Top5: 84.64% +[ Tue Sep 13 23:14:00 2022 ] Training epoch: 67 +[ Tue Sep 13 23:14:07 2022 ] Batch(7/162) done. Loss: 0.0666 lr:0.010000 +[ Tue Sep 13 23:15:00 2022 ] Batch(107/162) done. Loss: 0.1195 lr:0.010000 +[ Tue Sep 13 23:15:28 2022 ] Eval epoch: 67 +[ Tue Sep 13 23:18:21 2022 ] Mean test loss of 930 batches: 2.4129855632781982. 
+[ Tue Sep 13 23:18:22 2022 ] Top1: 59.50% +[ Tue Sep 13 23:18:23 2022 ] Top5: 84.60% +[ Tue Sep 13 23:18:23 2022 ] Training epoch: 68 +[ Tue Sep 13 23:18:50 2022 ] Batch(45/162) done. Loss: 0.0946 lr:0.010000 +[ Tue Sep 13 23:19:43 2022 ] Batch(145/162) done. Loss: 0.0837 lr:0.010000 +[ Tue Sep 13 23:19:52 2022 ] Eval epoch: 68 +[ Tue Sep 13 23:22:45 2022 ] Mean test loss of 930 batches: 2.399461269378662. +[ Tue Sep 13 23:22:46 2022 ] Top1: 59.54% +[ Tue Sep 13 23:22:46 2022 ] Top5: 84.72% +[ Tue Sep 13 23:22:46 2022 ] Training epoch: 69 +[ Tue Sep 13 23:23:34 2022 ] Batch(83/162) done. Loss: 0.0429 lr:0.010000 +[ Tue Sep 13 23:24:15 2022 ] Eval epoch: 69 +[ Tue Sep 13 23:27:09 2022 ] Mean test loss of 930 batches: 2.4148502349853516. +[ Tue Sep 13 23:27:10 2022 ] Top1: 59.44% +[ Tue Sep 13 23:27:10 2022 ] Top5: 84.70% +[ Tue Sep 13 23:27:10 2022 ] Training epoch: 70 +[ Tue Sep 13 23:27:25 2022 ] Batch(21/162) done. Loss: 0.1547 lr:0.010000 +[ Tue Sep 13 23:28:18 2022 ] Batch(121/162) done. Loss: 0.0320 lr:0.010000 +[ Tue Sep 13 23:28:39 2022 ] Eval epoch: 70 +[ Tue Sep 13 23:31:32 2022 ] Mean test loss of 930 batches: 2.3825840950012207. +[ Tue Sep 13 23:31:33 2022 ] Top1: 59.81% +[ Tue Sep 13 23:31:33 2022 ] Top5: 84.77% +[ Tue Sep 13 23:31:34 2022 ] Training epoch: 71 +[ Tue Sep 13 23:32:08 2022 ] Batch(59/162) done. Loss: 0.1290 lr:0.010000 +[ Tue Sep 13 23:33:01 2022 ] Batch(159/162) done. Loss: 0.0549 lr:0.010000 +[ Tue Sep 13 23:33:02 2022 ] Eval epoch: 71 +[ Tue Sep 13 23:35:56 2022 ] Mean test loss of 930 batches: 2.3638386726379395. +[ Tue Sep 13 23:35:56 2022 ] Top1: 59.85% +[ Tue Sep 13 23:35:57 2022 ] Top5: 85.02% +[ Tue Sep 13 23:35:57 2022 ] Training epoch: 72 +[ Tue Sep 13 23:36:52 2022 ] Batch(97/162) done. Loss: 0.0311 lr:0.010000 +[ Tue Sep 13 23:37:26 2022 ] Eval epoch: 72 +[ Tue Sep 13 23:40:19 2022 ] Mean test loss of 930 batches: 2.419126510620117. 
+[ Tue Sep 13 23:40:19 2022 ] Top1: 59.65% +[ Tue Sep 13 23:40:20 2022 ] Top5: 84.89% +[ Tue Sep 13 23:40:20 2022 ] Training epoch: 73 +[ Tue Sep 13 23:40:42 2022 ] Batch(35/162) done. Loss: 0.0438 lr:0.010000 +[ Tue Sep 13 23:41:35 2022 ] Batch(135/162) done. Loss: 0.0685 lr:0.010000 +[ Tue Sep 13 23:41:49 2022 ] Eval epoch: 73 +[ Tue Sep 13 23:44:42 2022 ] Mean test loss of 930 batches: 2.403249979019165. +[ Tue Sep 13 23:44:42 2022 ] Top1: 59.70% +[ Tue Sep 13 23:44:43 2022 ] Top5: 84.84% +[ Tue Sep 13 23:44:43 2022 ] Training epoch: 74 +[ Tue Sep 13 23:45:25 2022 ] Batch(73/162) done. Loss: 0.0443 lr:0.010000 +[ Tue Sep 13 23:46:12 2022 ] Eval epoch: 74 +[ Tue Sep 13 23:49:06 2022 ] Mean test loss of 930 batches: 2.4638261795043945. +[ Tue Sep 13 23:49:06 2022 ] Top1: 59.42% +[ Tue Sep 13 23:49:07 2022 ] Top5: 84.60% +[ Tue Sep 13 23:49:07 2022 ] Training epoch: 75 +[ Tue Sep 13 23:49:16 2022 ] Batch(11/162) done. Loss: 0.0248 lr:0.010000 +[ Tue Sep 13 23:50:09 2022 ] Batch(111/162) done. Loss: 0.0414 lr:0.010000 +[ Tue Sep 13 23:50:35 2022 ] Eval epoch: 75 +[ Tue Sep 13 23:53:28 2022 ] Mean test loss of 930 batches: 2.473863363265991. +[ Tue Sep 13 23:53:29 2022 ] Top1: 59.64% +[ Tue Sep 13 23:53:29 2022 ] Top5: 84.75% +[ Tue Sep 13 23:53:29 2022 ] Training epoch: 76 +[ Tue Sep 13 23:53:58 2022 ] Batch(49/162) done. Loss: 0.0623 lr:0.010000 +[ Tue Sep 13 23:54:51 2022 ] Batch(149/162) done. Loss: 0.1217 lr:0.010000 +[ Tue Sep 13 23:54:58 2022 ] Eval epoch: 76 +[ Tue Sep 13 23:57:51 2022 ] Mean test loss of 930 batches: 2.529571294784546. +[ Tue Sep 13 23:57:52 2022 ] Top1: 59.31% +[ Tue Sep 13 23:57:52 2022 ] Top5: 84.50% +[ Tue Sep 13 23:57:52 2022 ] Training epoch: 77 +[ Tue Sep 13 23:58:41 2022 ] Batch(87/162) done. Loss: 0.1024 lr:0.010000 +[ Tue Sep 13 23:59:21 2022 ] Eval epoch: 77 +[ Wed Sep 14 00:02:14 2022 ] Mean test loss of 930 batches: 2.4722189903259277. 
+[ Wed Sep 14 00:02:14 2022 ] Top1: 59.78% +[ Wed Sep 14 00:02:15 2022 ] Top5: 84.88% +[ Wed Sep 14 00:02:15 2022 ] Training epoch: 78 +[ Wed Sep 14 00:02:32 2022 ] Batch(25/162) done. Loss: 0.0177 lr:0.010000 +[ Wed Sep 14 00:03:24 2022 ] Batch(125/162) done. Loss: 0.0622 lr:0.010000 +[ Wed Sep 14 00:03:44 2022 ] Eval epoch: 78 +[ Wed Sep 14 00:06:37 2022 ] Mean test loss of 930 batches: 2.524034261703491. +[ Wed Sep 14 00:06:38 2022 ] Top1: 59.45% +[ Wed Sep 14 00:06:39 2022 ] Top5: 84.39% +[ Wed Sep 14 00:06:39 2022 ] Training epoch: 79 +[ Wed Sep 14 00:07:16 2022 ] Batch(63/162) done. Loss: 0.0366 lr:0.010000 +[ Wed Sep 14 00:08:08 2022 ] Eval epoch: 79 +[ Wed Sep 14 00:11:01 2022 ] Mean test loss of 930 batches: 2.5322084426879883. +[ Wed Sep 14 00:11:02 2022 ] Top1: 59.45% +[ Wed Sep 14 00:11:02 2022 ] Top5: 84.63% +[ Wed Sep 14 00:11:03 2022 ] Training epoch: 80 +[ Wed Sep 14 00:11:07 2022 ] Batch(1/162) done. Loss: 0.0272 lr:0.010000 +[ Wed Sep 14 00:11:59 2022 ] Batch(101/162) done. Loss: 0.0790 lr:0.010000 +[ Wed Sep 14 00:12:31 2022 ] Eval epoch: 80 +[ Wed Sep 14 00:15:25 2022 ] Mean test loss of 930 batches: 2.545362949371338. +[ Wed Sep 14 00:15:25 2022 ] Top1: 59.38% +[ Wed Sep 14 00:15:26 2022 ] Top5: 84.49% +[ Wed Sep 14 00:15:26 2022 ] Training epoch: 81 +[ Wed Sep 14 00:15:50 2022 ] Batch(39/162) done. Loss: 0.0313 lr:0.001000 +[ Wed Sep 14 00:16:43 2022 ] Batch(139/162) done. Loss: 0.0836 lr:0.001000 +[ Wed Sep 14 00:16:55 2022 ] Eval epoch: 81 +[ Wed Sep 14 00:19:48 2022 ] Mean test loss of 930 batches: 2.5255229473114014. +[ Wed Sep 14 00:19:49 2022 ] Top1: 59.54% +[ Wed Sep 14 00:19:49 2022 ] Top5: 84.60% +[ Wed Sep 14 00:19:50 2022 ] Training epoch: 82 +[ Wed Sep 14 00:20:34 2022 ] Batch(77/162) done. Loss: 0.0274 lr:0.001000 +[ Wed Sep 14 00:21:18 2022 ] Eval epoch: 82 +[ Wed Sep 14 00:24:11 2022 ] Mean test loss of 930 batches: 2.5230913162231445. 
+[ Wed Sep 14 00:24:12 2022 ] Top1: 59.72% +[ Wed Sep 14 00:24:12 2022 ] Top5: 84.71% +[ Wed Sep 14 00:24:13 2022 ] Training epoch: 83 +[ Wed Sep 14 00:24:24 2022 ] Batch(15/162) done. Loss: 0.0518 lr:0.001000 +[ Wed Sep 14 00:25:17 2022 ] Batch(115/162) done. Loss: 0.0615 lr:0.001000 +[ Wed Sep 14 00:25:41 2022 ] Eval epoch: 83 +[ Wed Sep 14 00:28:34 2022 ] Mean test loss of 930 batches: 2.510268449783325. +[ Wed Sep 14 00:28:35 2022 ] Top1: 59.71% +[ Wed Sep 14 00:28:35 2022 ] Top5: 84.64% +[ Wed Sep 14 00:28:36 2022 ] Training epoch: 84 +[ Wed Sep 14 00:29:07 2022 ] Batch(53/162) done. Loss: 0.0111 lr:0.001000 +[ Wed Sep 14 00:30:00 2022 ] Batch(153/162) done. Loss: 0.0526 lr:0.001000 +[ Wed Sep 14 00:30:04 2022 ] Eval epoch: 84 +[ Wed Sep 14 00:32:58 2022 ] Mean test loss of 930 batches: 2.5461337566375732. +[ Wed Sep 14 00:32:58 2022 ] Top1: 59.51% +[ Wed Sep 14 00:32:59 2022 ] Top5: 84.61% +[ Wed Sep 14 00:32:59 2022 ] Training epoch: 85 +[ Wed Sep 14 00:33:51 2022 ] Batch(91/162) done. Loss: 0.0426 lr:0.001000 +[ Wed Sep 14 00:34:28 2022 ] Eval epoch: 85 +[ Wed Sep 14 00:37:21 2022 ] Mean test loss of 930 batches: 2.5131518840789795. +[ Wed Sep 14 00:37:22 2022 ] Top1: 59.86% +[ Wed Sep 14 00:37:22 2022 ] Top5: 84.93% +[ Wed Sep 14 00:37:22 2022 ] Training epoch: 86 +[ Wed Sep 14 00:37:41 2022 ] Batch(29/162) done. Loss: 0.1134 lr:0.001000 +[ Wed Sep 14 00:38:34 2022 ] Batch(129/162) done. Loss: 0.0646 lr:0.001000 +[ Wed Sep 14 00:38:51 2022 ] Eval epoch: 86 +[ Wed Sep 14 00:41:44 2022 ] Mean test loss of 930 batches: 2.4873545169830322. +[ Wed Sep 14 00:41:45 2022 ] Top1: 59.94% +[ Wed Sep 14 00:41:46 2022 ] Top5: 84.92% +[ Wed Sep 14 00:41:46 2022 ] Training epoch: 87 +[ Wed Sep 14 00:42:25 2022 ] Batch(67/162) done. Loss: 0.0480 lr:0.001000 +[ Wed Sep 14 00:43:14 2022 ] Eval epoch: 87 +[ Wed Sep 14 00:46:08 2022 ] Mean test loss of 930 batches: 2.504473924636841. 
+[ Wed Sep 14 00:46:08 2022 ] Top1: 59.91% +[ Wed Sep 14 00:46:09 2022 ] Top5: 84.94% +[ Wed Sep 14 00:46:09 2022 ] Training epoch: 88 +[ Wed Sep 14 00:46:15 2022 ] Batch(5/162) done. Loss: 0.0890 lr:0.001000 +[ Wed Sep 14 00:47:08 2022 ] Batch(105/162) done. Loss: 0.1213 lr:0.001000 +[ Wed Sep 14 00:47:38 2022 ] Eval epoch: 88 +[ Wed Sep 14 00:50:31 2022 ] Mean test loss of 930 batches: 2.4991095066070557. +[ Wed Sep 14 00:50:32 2022 ] Top1: 60.00% +[ Wed Sep 14 00:50:32 2022 ] Top5: 84.83% +[ Wed Sep 14 00:50:33 2022 ] Training epoch: 89 +[ Wed Sep 14 00:50:59 2022 ] Batch(43/162) done. Loss: 0.0259 lr:0.001000 +[ Wed Sep 14 00:51:52 2022 ] Batch(143/162) done. Loss: 0.1193 lr:0.001000 +[ Wed Sep 14 00:52:01 2022 ] Eval epoch: 89 +[ Wed Sep 14 00:54:55 2022 ] Mean test loss of 930 batches: 2.496248483657837. +[ Wed Sep 14 00:54:55 2022 ] Top1: 59.66% +[ Wed Sep 14 00:54:56 2022 ] Top5: 84.75% +[ Wed Sep 14 00:54:56 2022 ] Training epoch: 90 +[ Wed Sep 14 00:55:42 2022 ] Batch(81/162) done. Loss: 0.0691 lr:0.001000 +[ Wed Sep 14 00:56:25 2022 ] Eval epoch: 90 +[ Wed Sep 14 00:59:19 2022 ] Mean test loss of 930 batches: 2.4718194007873535. +[ Wed Sep 14 00:59:19 2022 ] Top1: 60.11% +[ Wed Sep 14 00:59:20 2022 ] Top5: 84.91% +[ Wed Sep 14 00:59:20 2022 ] Training epoch: 91 +[ Wed Sep 14 00:59:33 2022 ] Batch(19/162) done. Loss: 0.0215 lr:0.001000 +[ Wed Sep 14 01:00:26 2022 ] Batch(119/162) done. Loss: 0.0698 lr:0.001000 +[ Wed Sep 14 01:00:49 2022 ] Eval epoch: 91 +[ Wed Sep 14 01:03:41 2022 ] Mean test loss of 930 batches: 2.5102038383483887. +[ Wed Sep 14 01:03:42 2022 ] Top1: 60.02% +[ Wed Sep 14 01:03:42 2022 ] Top5: 84.80% +[ Wed Sep 14 01:03:43 2022 ] Training epoch: 92 +[ Wed Sep 14 01:04:16 2022 ] Batch(57/162) done. Loss: 0.0306 lr:0.001000 +[ Wed Sep 14 01:05:09 2022 ] Batch(157/162) done. Loss: 0.1090 lr:0.001000 +[ Wed Sep 14 01:05:11 2022 ] Eval epoch: 92 +[ Wed Sep 14 01:08:05 2022 ] Mean test loss of 930 batches: 2.5122852325439453. 
+[ Wed Sep 14 01:08:05 2022 ] Top1: 59.70% +[ Wed Sep 14 01:08:06 2022 ] Top5: 84.54% +[ Wed Sep 14 01:08:06 2022 ] Training epoch: 93 +[ Wed Sep 14 01:08:59 2022 ] Batch(95/162) done. Loss: 0.0734 lr:0.001000 +[ Wed Sep 14 01:09:34 2022 ] Eval epoch: 93 +[ Wed Sep 14 01:12:28 2022 ] Mean test loss of 930 batches: 2.5456082820892334. +[ Wed Sep 14 01:12:29 2022 ] Top1: 59.85% +[ Wed Sep 14 01:12:29 2022 ] Top5: 84.79% +[ Wed Sep 14 01:12:29 2022 ] Training epoch: 94 +[ Wed Sep 14 01:12:50 2022 ] Batch(33/162) done. Loss: 0.1057 lr:0.001000 +[ Wed Sep 14 01:13:43 2022 ] Batch(133/162) done. Loss: 0.0681 lr:0.001000 +[ Wed Sep 14 01:13:58 2022 ] Eval epoch: 94 +[ Wed Sep 14 01:16:51 2022 ] Mean test loss of 930 batches: 2.493978500366211. +[ Wed Sep 14 01:16:52 2022 ] Top1: 60.04% +[ Wed Sep 14 01:16:52 2022 ] Top5: 84.90% +[ Wed Sep 14 01:16:52 2022 ] Training epoch: 95 +[ Wed Sep 14 01:17:33 2022 ] Batch(71/162) done. Loss: 0.0440 lr:0.001000 +[ Wed Sep 14 01:18:21 2022 ] Eval epoch: 95 +[ Wed Sep 14 01:21:15 2022 ] Mean test loss of 930 batches: 2.4783060550689697. +[ Wed Sep 14 01:21:16 2022 ] Top1: 60.17% +[ Wed Sep 14 01:21:16 2022 ] Top5: 85.04% +[ Wed Sep 14 01:21:17 2022 ] Training epoch: 96 +[ Wed Sep 14 01:21:25 2022 ] Batch(9/162) done. Loss: 0.0118 lr:0.001000 +[ Wed Sep 14 01:22:18 2022 ] Batch(109/162) done. Loss: 0.1139 lr:0.001000 +[ Wed Sep 14 01:22:45 2022 ] Eval epoch: 96 +[ Wed Sep 14 01:25:38 2022 ] Mean test loss of 930 batches: 2.4734930992126465. +[ Wed Sep 14 01:25:39 2022 ] Top1: 59.92% +[ Wed Sep 14 01:25:39 2022 ] Top5: 84.71% +[ Wed Sep 14 01:25:40 2022 ] Training epoch: 97 +[ Wed Sep 14 01:26:08 2022 ] Batch(47/162) done. Loss: 0.0519 lr:0.001000 +[ Wed Sep 14 01:27:00 2022 ] Batch(147/162) done. Loss: 0.0832 lr:0.001000 +[ Wed Sep 14 01:27:08 2022 ] Eval epoch: 97 +[ Wed Sep 14 01:30:02 2022 ] Mean test loss of 930 batches: 2.523946762084961. 
+[ Wed Sep 14 01:30:02 2022 ] Top1: 59.79% +[ Wed Sep 14 01:30:03 2022 ] Top5: 84.84% +[ Wed Sep 14 01:30:03 2022 ] Training epoch: 98 +[ Wed Sep 14 01:30:52 2022 ] Batch(85/162) done. Loss: 0.0325 lr:0.001000 +[ Wed Sep 14 01:31:32 2022 ] Eval epoch: 98 +[ Wed Sep 14 01:34:25 2022 ] Mean test loss of 930 batches: 2.539496421813965. +[ Wed Sep 14 01:34:26 2022 ] Top1: 59.73% +[ Wed Sep 14 01:34:27 2022 ] Top5: 84.72% +[ Wed Sep 14 01:34:27 2022 ] Training epoch: 99 +[ Wed Sep 14 01:34:42 2022 ] Batch(23/162) done. Loss: 0.0896 lr:0.001000 +[ Wed Sep 14 01:35:35 2022 ] Batch(123/162) done. Loss: 0.1063 lr:0.001000 +[ Wed Sep 14 01:35:55 2022 ] Eval epoch: 99 +[ Wed Sep 14 01:38:49 2022 ] Mean test loss of 930 batches: 2.492906332015991. +[ Wed Sep 14 01:38:50 2022 ] Top1: 59.87% +[ Wed Sep 14 01:38:50 2022 ] Top5: 84.83% +[ Wed Sep 14 01:38:51 2022 ] Training epoch: 100 +[ Wed Sep 14 01:39:26 2022 ] Batch(61/162) done. Loss: 0.0529 lr:0.001000 +[ Wed Sep 14 01:40:19 2022 ] Batch(161/162) done. Loss: 0.0567 lr:0.001000 +[ Wed Sep 14 01:40:19 2022 ] Eval epoch: 100 +[ Wed Sep 14 01:43:12 2022 ] Mean test loss of 930 batches: 2.551042079925537. 
+[ Wed Sep 14 01:43:12 2022 ] Top1: 59.58% +[ Wed Sep 14 01:43:13 2022 ] Top5: 84.58% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2e1d38914b63b7902336eb1a120f0bd5516bd3f --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu120_bone_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_bone_motion.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_motion_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_motion_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/decouple_gcn.py 
b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = 
nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, 
self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, 
graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..5e591caa5aec273d5d078ef90b68e2635ee32ece --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d319d34cbeed08e8f709b4a01a345ff4a510879b95bf04b23ba714830db2d8e +size 29946137 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d06d2aed861c738f9dd912347b1c6e8e71ef470 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_motion_xsub/log.txt @@ -0,0 +1,746 @@ +[ Mon Sep 12 17:08:02 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_motion_xsub', 'model_saved_name': './save_models/ntu120_bone_motion_xsub', 'Experiment_name': 'ntu120_bone_motion_xsub', 'config': './config/ntu120_xsub/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 
'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Mon Sep 12 17:08:02 2022 ] Training epoch: 1 +[ Mon Sep 12 17:08:53 2022 ] Batch(99/243) done. Loss: 3.9996 lr:0.100000 +[ Mon Sep 12 17:09:37 2022 ] Batch(199/243) done. Loss: 3.4128 lr:0.100000 +[ Mon Sep 12 17:09:56 2022 ] Eval epoch: 1 +[ Mon Sep 12 17:12:26 2022 ] Mean test loss of 796 batches: 5.401659965515137. +[ Mon Sep 12 17:12:27 2022 ] Top1: 4.08% +[ Mon Sep 12 17:12:27 2022 ] Top5: 14.92% +[ Mon Sep 12 17:12:27 2022 ] Training epoch: 2 +[ Mon Sep 12 17:13:01 2022 ] Batch(56/243) done. Loss: 3.3313 lr:0.100000 +[ Mon Sep 12 17:13:54 2022 ] Batch(156/243) done. Loss: 2.7961 lr:0.100000 +[ Mon Sep 12 17:14:39 2022 ] Eval epoch: 2 +[ Mon Sep 12 17:17:10 2022 ] Mean test loss of 796 batches: 5.036680698394775. 
+[ Mon Sep 12 17:17:10 2022 ] Top1: 5.15% +[ Mon Sep 12 17:17:10 2022 ] Top5: 17.48% +[ Mon Sep 12 17:17:11 2022 ] Training epoch: 3 +[ Mon Sep 12 17:17:22 2022 ] Batch(13/243) done. Loss: 2.6744 lr:0.100000 +[ Mon Sep 12 17:18:15 2022 ] Batch(113/243) done. Loss: 2.3809 lr:0.100000 +[ Mon Sep 12 17:19:08 2022 ] Batch(213/243) done. Loss: 2.1960 lr:0.100000 +[ Mon Sep 12 17:19:23 2022 ] Eval epoch: 3 +[ Mon Sep 12 17:21:54 2022 ] Mean test loss of 796 batches: 3.8832848072052. +[ Mon Sep 12 17:21:54 2022 ] Top1: 13.53% +[ Mon Sep 12 17:21:54 2022 ] Top5: 34.60% +[ Mon Sep 12 17:21:55 2022 ] Training epoch: 4 +[ Mon Sep 12 17:22:36 2022 ] Batch(70/243) done. Loss: 1.9707 lr:0.100000 +[ Mon Sep 12 17:23:29 2022 ] Batch(170/243) done. Loss: 1.7194 lr:0.100000 +[ Mon Sep 12 17:24:08 2022 ] Eval epoch: 4 +[ Mon Sep 12 17:26:38 2022 ] Mean test loss of 796 batches: 5.208371162414551. +[ Mon Sep 12 17:26:39 2022 ] Top1: 9.02% +[ Mon Sep 12 17:26:39 2022 ] Top5: 27.73% +[ Mon Sep 12 17:26:39 2022 ] Training epoch: 5 +[ Mon Sep 12 17:26:58 2022 ] Batch(27/243) done. Loss: 1.9087 lr:0.100000 +[ Mon Sep 12 17:27:51 2022 ] Batch(127/243) done. Loss: 1.8244 lr:0.100000 +[ Mon Sep 12 17:28:44 2022 ] Batch(227/243) done. Loss: 1.6479 lr:0.100000 +[ Mon Sep 12 17:28:52 2022 ] Eval epoch: 5 +[ Mon Sep 12 17:31:22 2022 ] Mean test loss of 796 batches: 3.828791856765747. +[ Mon Sep 12 17:31:23 2022 ] Top1: 17.41% +[ Mon Sep 12 17:31:23 2022 ] Top5: 45.41% +[ Mon Sep 12 17:31:23 2022 ] Training epoch: 6 +[ Mon Sep 12 17:32:11 2022 ] Batch(84/243) done. Loss: 1.4387 lr:0.100000 +[ Mon Sep 12 17:33:04 2022 ] Batch(184/243) done. Loss: 1.5802 lr:0.100000 +[ Mon Sep 12 17:33:35 2022 ] Eval epoch: 6 +[ Mon Sep 12 17:36:05 2022 ] Mean test loss of 796 batches: 2.96073055267334. +[ Mon Sep 12 17:36:05 2022 ] Top1: 24.78% +[ Mon Sep 12 17:36:06 2022 ] Top5: 55.59% +[ Mon Sep 12 17:36:06 2022 ] Training epoch: 7 +[ Mon Sep 12 17:36:31 2022 ] Batch(41/243) done. 
Loss: 1.4074 lr:0.100000 +[ Mon Sep 12 17:37:24 2022 ] Batch(141/243) done. Loss: 1.2927 lr:0.100000 +[ Mon Sep 12 17:38:17 2022 ] Batch(241/243) done. Loss: 1.3877 lr:0.100000 +[ Mon Sep 12 17:38:17 2022 ] Eval epoch: 7 +[ Mon Sep 12 17:40:47 2022 ] Mean test loss of 796 batches: 3.244659662246704. +[ Mon Sep 12 17:40:48 2022 ] Top1: 24.42% +[ Mon Sep 12 17:40:48 2022 ] Top5: 55.99% +[ Mon Sep 12 17:40:48 2022 ] Training epoch: 8 +[ Mon Sep 12 17:41:44 2022 ] Batch(98/243) done. Loss: 1.2548 lr:0.100000 +[ Mon Sep 12 17:42:37 2022 ] Batch(198/243) done. Loss: 1.1425 lr:0.100000 +[ Mon Sep 12 17:43:00 2022 ] Eval epoch: 8 +[ Mon Sep 12 17:45:29 2022 ] Mean test loss of 796 batches: 3.560640811920166. +[ Mon Sep 12 17:45:30 2022 ] Top1: 23.09% +[ Mon Sep 12 17:45:30 2022 ] Top5: 54.99% +[ Mon Sep 12 17:45:31 2022 ] Training epoch: 9 +[ Mon Sep 12 17:46:03 2022 ] Batch(55/243) done. Loss: 1.1511 lr:0.100000 +[ Mon Sep 12 17:46:56 2022 ] Batch(155/243) done. Loss: 1.0868 lr:0.100000 +[ Mon Sep 12 17:47:42 2022 ] Eval epoch: 9 +[ Mon Sep 12 17:50:13 2022 ] Mean test loss of 796 batches: 3.479330539703369. +[ Mon Sep 12 17:50:13 2022 ] Top1: 28.47% +[ Mon Sep 12 17:50:14 2022 ] Top5: 62.28% +[ Mon Sep 12 17:50:14 2022 ] Training epoch: 10 +[ Mon Sep 12 17:50:24 2022 ] Batch(12/243) done. Loss: 1.1992 lr:0.100000 +[ Mon Sep 12 17:51:17 2022 ] Batch(112/243) done. Loss: 0.9321 lr:0.100000 +[ Mon Sep 12 17:52:10 2022 ] Batch(212/243) done. Loss: 1.1290 lr:0.100000 +[ Mon Sep 12 17:52:26 2022 ] Eval epoch: 10 +[ Mon Sep 12 17:54:55 2022 ] Mean test loss of 796 batches: 3.548513650894165. +[ Mon Sep 12 17:54:56 2022 ] Top1: 23.18% +[ Mon Sep 12 17:54:56 2022 ] Top5: 57.00% +[ Mon Sep 12 17:54:56 2022 ] Training epoch: 11 +[ Mon Sep 12 17:55:36 2022 ] Batch(69/243) done. Loss: 0.7892 lr:0.100000 +[ Mon Sep 12 17:56:29 2022 ] Batch(169/243) done. 
Loss: 0.8913 lr:0.100000 +[ Mon Sep 12 17:57:08 2022 ] Eval epoch: 11 +[ Mon Sep 12 17:59:38 2022 ] Mean test loss of 796 batches: 3.6393656730651855. +[ Mon Sep 12 17:59:38 2022 ] Top1: 29.19% +[ Mon Sep 12 17:59:39 2022 ] Top5: 64.37% +[ Mon Sep 12 17:59:39 2022 ] Training epoch: 12 +[ Mon Sep 12 17:59:56 2022 ] Batch(26/243) done. Loss: 1.1150 lr:0.100000 +[ Mon Sep 12 18:00:49 2022 ] Batch(126/243) done. Loss: 1.1215 lr:0.100000 +[ Mon Sep 12 18:01:42 2022 ] Batch(226/243) done. Loss: 0.8452 lr:0.100000 +[ Mon Sep 12 18:01:51 2022 ] Eval epoch: 12 +[ Mon Sep 12 18:04:20 2022 ] Mean test loss of 796 batches: 3.349566698074341. +[ Mon Sep 12 18:04:21 2022 ] Top1: 27.39% +[ Mon Sep 12 18:04:21 2022 ] Top5: 61.31% +[ Mon Sep 12 18:04:21 2022 ] Training epoch: 13 +[ Mon Sep 12 18:05:09 2022 ] Batch(83/243) done. Loss: 0.7315 lr:0.100000 +[ Mon Sep 12 18:06:02 2022 ] Batch(183/243) done. Loss: 0.9912 lr:0.100000 +[ Mon Sep 12 18:06:33 2022 ] Eval epoch: 13 +[ Mon Sep 12 18:09:03 2022 ] Mean test loss of 796 batches: 3.827038526535034. +[ Mon Sep 12 18:09:03 2022 ] Top1: 24.15% +[ Mon Sep 12 18:09:03 2022 ] Top5: 54.18% +[ Mon Sep 12 18:09:04 2022 ] Training epoch: 14 +[ Mon Sep 12 18:09:28 2022 ] Batch(40/243) done. Loss: 0.7388 lr:0.100000 +[ Mon Sep 12 18:10:21 2022 ] Batch(140/243) done. Loss: 0.9804 lr:0.100000 +[ Mon Sep 12 18:11:14 2022 ] Batch(240/243) done. Loss: 1.0120 lr:0.100000 +[ Mon Sep 12 18:11:15 2022 ] Eval epoch: 14 +[ Mon Sep 12 18:13:45 2022 ] Mean test loss of 796 batches: 2.664210081100464. +[ Mon Sep 12 18:13:46 2022 ] Top1: 38.95% +[ Mon Sep 12 18:13:46 2022 ] Top5: 74.01% +[ Mon Sep 12 18:13:46 2022 ] Training epoch: 15 +[ Mon Sep 12 18:14:41 2022 ] Batch(97/243) done. Loss: 0.6941 lr:0.100000 +[ Mon Sep 12 18:15:34 2022 ] Batch(197/243) done. Loss: 0.9211 lr:0.100000 +[ Mon Sep 12 18:15:58 2022 ] Eval epoch: 15 +[ Mon Sep 12 18:18:28 2022 ] Mean test loss of 796 batches: 3.5780069828033447. 
+[ Mon Sep 12 18:18:28 2022 ] Top1: 25.70% +[ Mon Sep 12 18:18:28 2022 ] Top5: 56.70% +[ Mon Sep 12 18:18:29 2022 ] Training epoch: 16 +[ Mon Sep 12 18:19:01 2022 ] Batch(54/243) done. Loss: 0.9974 lr:0.100000 +[ Mon Sep 12 18:19:54 2022 ] Batch(154/243) done. Loss: 0.7709 lr:0.100000 +[ Mon Sep 12 18:20:41 2022 ] Eval epoch: 16 +[ Mon Sep 12 18:23:10 2022 ] Mean test loss of 796 batches: 3.6213204860687256. +[ Mon Sep 12 18:23:10 2022 ] Top1: 29.02% +[ Mon Sep 12 18:23:11 2022 ] Top5: 64.59% +[ Mon Sep 12 18:23:11 2022 ] Training epoch: 17 +[ Mon Sep 12 18:23:20 2022 ] Batch(11/243) done. Loss: 0.6434 lr:0.100000 +[ Mon Sep 12 18:24:13 2022 ] Batch(111/243) done. Loss: 0.8069 lr:0.100000 +[ Mon Sep 12 18:25:06 2022 ] Batch(211/243) done. Loss: 0.7576 lr:0.100000 +[ Mon Sep 12 18:25:23 2022 ] Eval epoch: 17 +[ Mon Sep 12 18:27:52 2022 ] Mean test loss of 796 batches: 7.608621120452881. +[ Mon Sep 12 18:27:52 2022 ] Top1: 14.36% +[ Mon Sep 12 18:27:52 2022 ] Top5: 36.52% +[ Mon Sep 12 18:27:53 2022 ] Training epoch: 18 +[ Mon Sep 12 18:28:32 2022 ] Batch(68/243) done. Loss: 0.5996 lr:0.100000 +[ Mon Sep 12 18:29:25 2022 ] Batch(168/243) done. Loss: 0.7372 lr:0.100000 +[ Mon Sep 12 18:30:04 2022 ] Eval epoch: 18 +[ Mon Sep 12 18:32:34 2022 ] Mean test loss of 796 batches: 3.6257283687591553. +[ Mon Sep 12 18:32:34 2022 ] Top1: 32.63% +[ Mon Sep 12 18:32:35 2022 ] Top5: 68.10% +[ Mon Sep 12 18:32:35 2022 ] Training epoch: 19 +[ Mon Sep 12 18:32:52 2022 ] Batch(25/243) done. Loss: 0.7385 lr:0.100000 +[ Mon Sep 12 18:33:45 2022 ] Batch(125/243) done. Loss: 0.5734 lr:0.100000 +[ Mon Sep 12 18:34:37 2022 ] Batch(225/243) done. Loss: 0.9544 lr:0.100000 +[ Mon Sep 12 18:34:47 2022 ] Eval epoch: 19 +[ Mon Sep 12 18:37:15 2022 ] Mean test loss of 796 batches: 5.148682117462158. +[ Mon Sep 12 18:37:16 2022 ] Top1: 26.56% +[ Mon Sep 12 18:37:16 2022 ] Top5: 59.36% +[ Mon Sep 12 18:37:17 2022 ] Training epoch: 20 +[ Mon Sep 12 18:38:03 2022 ] Batch(82/243) done. 
Loss: 0.6392 lr:0.100000 +[ Mon Sep 12 18:38:56 2022 ] Batch(182/243) done. Loss: 0.6307 lr:0.100000 +[ Mon Sep 12 18:39:28 2022 ] Eval epoch: 20 +[ Mon Sep 12 18:41:57 2022 ] Mean test loss of 796 batches: 4.334875583648682. +[ Mon Sep 12 18:41:58 2022 ] Top1: 25.49% +[ Mon Sep 12 18:41:58 2022 ] Top5: 58.52% +[ Mon Sep 12 18:41:58 2022 ] Training epoch: 21 +[ Mon Sep 12 18:42:23 2022 ] Batch(39/243) done. Loss: 0.4598 lr:0.100000 +[ Mon Sep 12 18:43:15 2022 ] Batch(139/243) done. Loss: 0.6031 lr:0.100000 +[ Mon Sep 12 18:44:09 2022 ] Batch(239/243) done. Loss: 0.5268 lr:0.100000 +[ Mon Sep 12 18:44:10 2022 ] Eval epoch: 21 +[ Mon Sep 12 18:46:39 2022 ] Mean test loss of 796 batches: 3.004617929458618. +[ Mon Sep 12 18:46:40 2022 ] Top1: 37.08% +[ Mon Sep 12 18:46:40 2022 ] Top5: 71.58% +[ Mon Sep 12 18:46:40 2022 ] Training epoch: 22 +[ Mon Sep 12 18:47:35 2022 ] Batch(96/243) done. Loss: 0.5180 lr:0.100000 +[ Mon Sep 12 18:48:28 2022 ] Batch(196/243) done. Loss: 0.6307 lr:0.100000 +[ Mon Sep 12 18:48:53 2022 ] Eval epoch: 22 +[ Mon Sep 12 18:51:22 2022 ] Mean test loss of 796 batches: 4.371796607971191. +[ Mon Sep 12 18:51:22 2022 ] Top1: 27.89% +[ Mon Sep 12 18:51:23 2022 ] Top5: 64.40% +[ Mon Sep 12 18:51:23 2022 ] Training epoch: 23 +[ Mon Sep 12 18:51:54 2022 ] Batch(53/243) done. Loss: 0.5186 lr:0.100000 +[ Mon Sep 12 18:52:47 2022 ] Batch(153/243) done. Loss: 0.6799 lr:0.100000 +[ Mon Sep 12 18:53:34 2022 ] Eval epoch: 23 +[ Mon Sep 12 18:56:04 2022 ] Mean test loss of 796 batches: 2.7760326862335205. +[ Mon Sep 12 18:56:04 2022 ] Top1: 43.91% +[ Mon Sep 12 18:56:04 2022 ] Top5: 77.80% +[ Mon Sep 12 18:56:05 2022 ] Training epoch: 24 +[ Mon Sep 12 18:56:14 2022 ] Batch(10/243) done. Loss: 0.3992 lr:0.100000 +[ Mon Sep 12 18:57:06 2022 ] Batch(110/243) done. Loss: 0.5360 lr:0.100000 +[ Mon Sep 12 18:57:59 2022 ] Batch(210/243) done. 
Loss: 0.6967 lr:0.100000 +[ Mon Sep 12 18:58:16 2022 ] Eval epoch: 24 +[ Mon Sep 12 19:00:46 2022 ] Mean test loss of 796 batches: 3.683417797088623. +[ Mon Sep 12 19:00:46 2022 ] Top1: 32.41% +[ Mon Sep 12 19:00:47 2022 ] Top5: 69.33% +[ Mon Sep 12 19:00:47 2022 ] Training epoch: 25 +[ Mon Sep 12 19:01:26 2022 ] Batch(67/243) done. Loss: 0.3724 lr:0.100000 +[ Mon Sep 12 19:02:19 2022 ] Batch(167/243) done. Loss: 0.5667 lr:0.100000 +[ Mon Sep 12 19:02:59 2022 ] Eval epoch: 25 +[ Mon Sep 12 19:05:28 2022 ] Mean test loss of 796 batches: 3.7700467109680176. +[ Mon Sep 12 19:05:29 2022 ] Top1: 35.64% +[ Mon Sep 12 19:05:29 2022 ] Top5: 71.80% +[ Mon Sep 12 19:05:29 2022 ] Training epoch: 26 +[ Mon Sep 12 19:05:45 2022 ] Batch(24/243) done. Loss: 0.5268 lr:0.100000 +[ Mon Sep 12 19:06:38 2022 ] Batch(124/243) done. Loss: 0.5311 lr:0.100000 +[ Mon Sep 12 19:07:31 2022 ] Batch(224/243) done. Loss: 0.4188 lr:0.100000 +[ Mon Sep 12 19:07:41 2022 ] Eval epoch: 26 +[ Mon Sep 12 19:10:10 2022 ] Mean test loss of 796 batches: 3.242473602294922. +[ Mon Sep 12 19:10:10 2022 ] Top1: 36.40% +[ Mon Sep 12 19:10:11 2022 ] Top5: 71.52% +[ Mon Sep 12 19:10:11 2022 ] Training epoch: 27 +[ Mon Sep 12 19:10:58 2022 ] Batch(81/243) done. Loss: 0.5183 lr:0.100000 +[ Mon Sep 12 19:11:50 2022 ] Batch(181/243) done. Loss: 0.3025 lr:0.100000 +[ Mon Sep 12 19:12:23 2022 ] Eval epoch: 27 +[ Mon Sep 12 19:14:53 2022 ] Mean test loss of 796 batches: 2.796579599380493. +[ Mon Sep 12 19:14:53 2022 ] Top1: 39.60% +[ Mon Sep 12 19:14:54 2022 ] Top5: 76.10% +[ Mon Sep 12 19:14:54 2022 ] Training epoch: 28 +[ Mon Sep 12 19:15:18 2022 ] Batch(38/243) done. Loss: 0.3922 lr:0.100000 +[ Mon Sep 12 19:16:11 2022 ] Batch(138/243) done. Loss: 0.3928 lr:0.100000 +[ Mon Sep 12 19:17:04 2022 ] Batch(238/243) done. Loss: 0.4935 lr:0.100000 +[ Mon Sep 12 19:17:06 2022 ] Eval epoch: 28 +[ Mon Sep 12 19:19:35 2022 ] Mean test loss of 796 batches: 3.149681568145752. 
+[ Mon Sep 12 19:19:36 2022 ] Top1: 39.86% +[ Mon Sep 12 19:19:36 2022 ] Top5: 71.70% +[ Mon Sep 12 19:19:36 2022 ] Training epoch: 29 +[ Mon Sep 12 19:20:30 2022 ] Batch(95/243) done. Loss: 0.4636 lr:0.100000 +[ Mon Sep 12 19:21:23 2022 ] Batch(195/243) done. Loss: 0.4148 lr:0.100000 +[ Mon Sep 12 19:21:48 2022 ] Eval epoch: 29 +[ Mon Sep 12 19:24:18 2022 ] Mean test loss of 796 batches: 2.6393115520477295. +[ Mon Sep 12 19:24:18 2022 ] Top1: 44.02% +[ Mon Sep 12 19:24:19 2022 ] Top5: 76.26% +[ Mon Sep 12 19:24:19 2022 ] Training epoch: 30 +[ Mon Sep 12 19:24:50 2022 ] Batch(52/243) done. Loss: 0.2787 lr:0.100000 +[ Mon Sep 12 19:25:43 2022 ] Batch(152/243) done. Loss: 0.2371 lr:0.100000 +[ Mon Sep 12 19:26:31 2022 ] Eval epoch: 30 +[ Mon Sep 12 19:29:00 2022 ] Mean test loss of 796 batches: 4.372233867645264. +[ Mon Sep 12 19:29:01 2022 ] Top1: 32.23% +[ Mon Sep 12 19:29:01 2022 ] Top5: 66.81% +[ Mon Sep 12 19:29:01 2022 ] Training epoch: 31 +[ Mon Sep 12 19:29:09 2022 ] Batch(9/243) done. Loss: 0.3249 lr:0.100000 +[ Mon Sep 12 19:30:02 2022 ] Batch(109/243) done. Loss: 0.3918 lr:0.100000 +[ Mon Sep 12 19:30:55 2022 ] Batch(209/243) done. Loss: 0.4728 lr:0.100000 +[ Mon Sep 12 19:31:13 2022 ] Eval epoch: 31 +[ Mon Sep 12 19:33:42 2022 ] Mean test loss of 796 batches: 3.062802314758301. +[ Mon Sep 12 19:33:43 2022 ] Top1: 37.70% +[ Mon Sep 12 19:33:43 2022 ] Top5: 70.54% +[ Mon Sep 12 19:33:43 2022 ] Training epoch: 32 +[ Mon Sep 12 19:34:22 2022 ] Batch(66/243) done. Loss: 0.2712 lr:0.100000 +[ Mon Sep 12 19:35:14 2022 ] Batch(166/243) done. Loss: 0.3010 lr:0.100000 +[ Mon Sep 12 19:35:55 2022 ] Eval epoch: 32 +[ Mon Sep 12 19:38:24 2022 ] Mean test loss of 796 batches: 3.370488405227661. +[ Mon Sep 12 19:38:25 2022 ] Top1: 34.81% +[ Mon Sep 12 19:38:25 2022 ] Top5: 65.00% +[ Mon Sep 12 19:38:25 2022 ] Training epoch: 33 +[ Mon Sep 12 19:38:42 2022 ] Batch(23/243) done. Loss: 0.2757 lr:0.100000 +[ Mon Sep 12 19:39:35 2022 ] Batch(123/243) done. 
Loss: 0.4600 lr:0.100000 +[ Mon Sep 12 19:40:28 2022 ] Batch(223/243) done. Loss: 0.5677 lr:0.100000 +[ Mon Sep 12 19:40:38 2022 ] Eval epoch: 33 +[ Mon Sep 12 19:43:08 2022 ] Mean test loss of 796 batches: 16.024497985839844. +[ Mon Sep 12 19:43:08 2022 ] Top1: 6.50% +[ Mon Sep 12 19:43:09 2022 ] Top5: 26.47% +[ Mon Sep 12 19:43:09 2022 ] Training epoch: 34 +[ Mon Sep 12 19:43:56 2022 ] Batch(80/243) done. Loss: 0.6097 lr:0.100000 +[ Mon Sep 12 19:44:49 2022 ] Batch(180/243) done. Loss: 0.3325 lr:0.100000 +[ Mon Sep 12 19:45:22 2022 ] Eval epoch: 34 +[ Mon Sep 12 19:47:52 2022 ] Mean test loss of 796 batches: 3.6056463718414307. +[ Mon Sep 12 19:47:52 2022 ] Top1: 36.16% +[ Mon Sep 12 19:47:53 2022 ] Top5: 68.21% +[ Mon Sep 12 19:47:53 2022 ] Training epoch: 35 +[ Mon Sep 12 19:48:17 2022 ] Batch(37/243) done. Loss: 0.2264 lr:0.100000 +[ Mon Sep 12 19:49:10 2022 ] Batch(137/243) done. Loss: 0.4211 lr:0.100000 +[ Mon Sep 12 19:50:03 2022 ] Batch(237/243) done. Loss: 0.2841 lr:0.100000 +[ Mon Sep 12 19:50:06 2022 ] Eval epoch: 35 +[ Mon Sep 12 19:52:36 2022 ] Mean test loss of 796 batches: 2.8799333572387695. +[ Mon Sep 12 19:52:36 2022 ] Top1: 39.62% +[ Mon Sep 12 19:52:36 2022 ] Top5: 72.87% +[ Mon Sep 12 19:52:37 2022 ] Training epoch: 36 +[ Mon Sep 12 19:53:31 2022 ] Batch(94/243) done. Loss: 0.6247 lr:0.100000 +[ Mon Sep 12 19:54:24 2022 ] Batch(194/243) done. Loss: 0.4778 lr:0.100000 +[ Mon Sep 12 19:54:50 2022 ] Eval epoch: 36 +[ Mon Sep 12 19:57:19 2022 ] Mean test loss of 796 batches: 7.829808235168457. +[ Mon Sep 12 19:57:20 2022 ] Top1: 17.71% +[ Mon Sep 12 19:57:20 2022 ] Top5: 46.19% +[ Mon Sep 12 19:57:20 2022 ] Training epoch: 37 +[ Mon Sep 12 19:57:52 2022 ] Batch(51/243) done. Loss: 0.3482 lr:0.100000 +[ Mon Sep 12 19:58:45 2022 ] Batch(151/243) done. Loss: 0.4126 lr:0.100000 +[ Mon Sep 12 19:59:33 2022 ] Eval epoch: 37 +[ Mon Sep 12 20:02:03 2022 ] Mean test loss of 796 batches: 3.139714002609253. 
+[ Mon Sep 12 20:02:03 2022 ] Top1: 40.64% +[ Mon Sep 12 20:02:04 2022 ] Top5: 75.03% +[ Mon Sep 12 20:02:04 2022 ] Training epoch: 38 +[ Mon Sep 12 20:02:13 2022 ] Batch(8/243) done. Loss: 0.3178 lr:0.100000 +[ Mon Sep 12 20:03:06 2022 ] Batch(108/243) done. Loss: 0.4396 lr:0.100000 +[ Mon Sep 12 20:03:59 2022 ] Batch(208/243) done. Loss: 0.5314 lr:0.100000 +[ Mon Sep 12 20:04:17 2022 ] Eval epoch: 38 +[ Mon Sep 12 20:06:47 2022 ] Mean test loss of 796 batches: 3.982567310333252. +[ Mon Sep 12 20:06:47 2022 ] Top1: 34.04% +[ Mon Sep 12 20:06:47 2022 ] Top5: 65.26% +[ Mon Sep 12 20:06:48 2022 ] Training epoch: 39 +[ Mon Sep 12 20:07:26 2022 ] Batch(65/243) done. Loss: 0.4634 lr:0.100000 +[ Mon Sep 12 20:08:19 2022 ] Batch(165/243) done. Loss: 0.3818 lr:0.100000 +[ Mon Sep 12 20:09:00 2022 ] Eval epoch: 39 +[ Mon Sep 12 20:11:30 2022 ] Mean test loss of 796 batches: 16.22344970703125. +[ Mon Sep 12 20:11:30 2022 ] Top1: 8.39% +[ Mon Sep 12 20:11:30 2022 ] Top5: 24.31% +[ Mon Sep 12 20:11:31 2022 ] Training epoch: 40 +[ Mon Sep 12 20:11:47 2022 ] Batch(22/243) done. Loss: 0.1639 lr:0.100000 +[ Mon Sep 12 20:12:39 2022 ] Batch(122/243) done. Loss: 0.2482 lr:0.100000 +[ Mon Sep 12 20:13:32 2022 ] Batch(222/243) done. Loss: 0.4236 lr:0.100000 +[ Mon Sep 12 20:13:43 2022 ] Eval epoch: 40 +[ Mon Sep 12 20:16:13 2022 ] Mean test loss of 796 batches: 4.428019046783447. +[ Mon Sep 12 20:16:14 2022 ] Top1: 35.63% +[ Mon Sep 12 20:16:14 2022 ] Top5: 68.37% +[ Mon Sep 12 20:16:14 2022 ] Training epoch: 41 +[ Mon Sep 12 20:17:00 2022 ] Batch(79/243) done. Loss: 0.2300 lr:0.100000 +[ Mon Sep 12 20:17:53 2022 ] Batch(179/243) done. Loss: 0.2221 lr:0.100000 +[ Mon Sep 12 20:18:27 2022 ] Eval epoch: 41 +[ Mon Sep 12 20:20:57 2022 ] Mean test loss of 796 batches: 3.410799503326416. +[ Mon Sep 12 20:20:57 2022 ] Top1: 38.42% +[ Mon Sep 12 20:20:57 2022 ] Top5: 68.94% +[ Mon Sep 12 20:20:58 2022 ] Training epoch: 42 +[ Mon Sep 12 20:21:21 2022 ] Batch(36/243) done. 
Loss: 0.3772 lr:0.100000 +[ Mon Sep 12 20:22:14 2022 ] Batch(136/243) done. Loss: 0.4248 lr:0.100000 +[ Mon Sep 12 20:23:06 2022 ] Batch(236/243) done. Loss: 0.5014 lr:0.100000 +[ Mon Sep 12 20:23:10 2022 ] Eval epoch: 42 +[ Mon Sep 12 20:25:40 2022 ] Mean test loss of 796 batches: 5.514777660369873. +[ Mon Sep 12 20:25:40 2022 ] Top1: 26.86% +[ Mon Sep 12 20:25:40 2022 ] Top5: 56.38% +[ Mon Sep 12 20:25:41 2022 ] Training epoch: 43 +[ Mon Sep 12 20:26:34 2022 ] Batch(93/243) done. Loss: 0.3111 lr:0.100000 +[ Mon Sep 12 20:27:27 2022 ] Batch(193/243) done. Loss: 0.3367 lr:0.100000 +[ Mon Sep 12 20:27:53 2022 ] Eval epoch: 43 +[ Mon Sep 12 20:30:22 2022 ] Mean test loss of 796 batches: 4.449807643890381. +[ Mon Sep 12 20:30:23 2022 ] Top1: 31.61% +[ Mon Sep 12 20:30:23 2022 ] Top5: 61.38% +[ Mon Sep 12 20:30:23 2022 ] Training epoch: 44 +[ Mon Sep 12 20:30:54 2022 ] Batch(50/243) done. Loss: 0.2808 lr:0.100000 +[ Mon Sep 12 20:31:47 2022 ] Batch(150/243) done. Loss: 0.4795 lr:0.100000 +[ Mon Sep 12 20:32:35 2022 ] Eval epoch: 44 +[ Mon Sep 12 20:35:05 2022 ] Mean test loss of 796 batches: 3.5568363666534424. +[ Mon Sep 12 20:35:05 2022 ] Top1: 41.52% +[ Mon Sep 12 20:35:06 2022 ] Top5: 73.82% +[ Mon Sep 12 20:35:06 2022 ] Training epoch: 45 +[ Mon Sep 12 20:35:14 2022 ] Batch(7/243) done. Loss: 0.3210 lr:0.100000 +[ Mon Sep 12 20:36:07 2022 ] Batch(107/243) done. Loss: 0.4242 lr:0.100000 +[ Mon Sep 12 20:37:00 2022 ] Batch(207/243) done. Loss: 0.3617 lr:0.100000 +[ Mon Sep 12 20:37:18 2022 ] Eval epoch: 45 +[ Mon Sep 12 20:39:48 2022 ] Mean test loss of 796 batches: 4.839462757110596. +[ Mon Sep 12 20:39:49 2022 ] Top1: 34.52% +[ Mon Sep 12 20:39:49 2022 ] Top5: 65.73% +[ Mon Sep 12 20:39:49 2022 ] Training epoch: 46 +[ Mon Sep 12 20:40:27 2022 ] Batch(64/243) done. Loss: 0.2956 lr:0.100000 +[ Mon Sep 12 20:41:20 2022 ] Batch(164/243) done. 
Loss: 0.3429 lr:0.100000 +[ Mon Sep 12 20:42:02 2022 ] Eval epoch: 46 +[ Mon Sep 12 20:44:31 2022 ] Mean test loss of 796 batches: 3.5616369247436523. +[ Mon Sep 12 20:44:32 2022 ] Top1: 41.18% +[ Mon Sep 12 20:44:32 2022 ] Top5: 72.87% +[ Mon Sep 12 20:44:32 2022 ] Training epoch: 47 +[ Mon Sep 12 20:44:48 2022 ] Batch(21/243) done. Loss: 0.1968 lr:0.100000 +[ Mon Sep 12 20:45:41 2022 ] Batch(121/243) done. Loss: 0.1955 lr:0.100000 +[ Mon Sep 12 20:46:33 2022 ] Batch(221/243) done. Loss: 0.3667 lr:0.100000 +[ Mon Sep 12 20:46:45 2022 ] Eval epoch: 47 +[ Mon Sep 12 20:49:14 2022 ] Mean test loss of 796 batches: 3.485515594482422. +[ Mon Sep 12 20:49:15 2022 ] Top1: 38.04% +[ Mon Sep 12 20:49:15 2022 ] Top5: 70.00% +[ Mon Sep 12 20:49:15 2022 ] Training epoch: 48 +[ Mon Sep 12 20:50:01 2022 ] Batch(78/243) done. Loss: 0.3435 lr:0.100000 +[ Mon Sep 12 20:50:54 2022 ] Batch(178/243) done. Loss: 0.1893 lr:0.100000 +[ Mon Sep 12 20:51:28 2022 ] Eval epoch: 48 +[ Mon Sep 12 20:53:57 2022 ] Mean test loss of 796 batches: 4.158741474151611. +[ Mon Sep 12 20:53:58 2022 ] Top1: 38.58% +[ Mon Sep 12 20:53:58 2022 ] Top5: 71.56% +[ Mon Sep 12 20:53:58 2022 ] Training epoch: 49 +[ Mon Sep 12 20:54:21 2022 ] Batch(35/243) done. Loss: 0.3753 lr:0.100000 +[ Mon Sep 12 20:55:14 2022 ] Batch(135/243) done. Loss: 0.3850 lr:0.100000 +[ Mon Sep 12 20:56:07 2022 ] Batch(235/243) done. Loss: 0.3775 lr:0.100000 +[ Mon Sep 12 20:56:11 2022 ] Eval epoch: 49 +[ Mon Sep 12 20:58:41 2022 ] Mean test loss of 796 batches: 6.0918073654174805. +[ Mon Sep 12 20:58:41 2022 ] Top1: 24.65% +[ Mon Sep 12 20:58:41 2022 ] Top5: 52.45% +[ Mon Sep 12 20:58:42 2022 ] Training epoch: 50 +[ Mon Sep 12 20:59:35 2022 ] Batch(92/243) done. Loss: 0.3753 lr:0.100000 +[ Mon Sep 12 21:00:28 2022 ] Batch(192/243) done. Loss: 0.2491 lr:0.100000 +[ Mon Sep 12 21:00:54 2022 ] Eval epoch: 50 +[ Mon Sep 12 21:03:24 2022 ] Mean test loss of 796 batches: 4.761442184448242. 
+[ Mon Sep 12 21:03:24 2022 ] Top1: 29.23% +[ Mon Sep 12 21:03:24 2022 ] Top5: 61.77% +[ Mon Sep 12 21:03:25 2022 ] Training epoch: 51 +[ Mon Sep 12 21:03:55 2022 ] Batch(49/243) done. Loss: 0.2470 lr:0.100000 +[ Mon Sep 12 21:04:47 2022 ] Batch(149/243) done. Loss: 0.1682 lr:0.100000 +[ Mon Sep 12 21:05:37 2022 ] Eval epoch: 51 +[ Mon Sep 12 21:08:07 2022 ] Mean test loss of 796 batches: 4.20699405670166. +[ Mon Sep 12 21:08:07 2022 ] Top1: 39.81% +[ Mon Sep 12 21:08:07 2022 ] Top5: 68.85% +[ Mon Sep 12 21:08:08 2022 ] Training epoch: 52 +[ Mon Sep 12 21:08:15 2022 ] Batch(6/243) done. Loss: 0.3685 lr:0.100000 +[ Mon Sep 12 21:09:08 2022 ] Batch(106/243) done. Loss: 0.2466 lr:0.100000 +[ Mon Sep 12 21:10:01 2022 ] Batch(206/243) done. Loss: 0.3860 lr:0.100000 +[ Mon Sep 12 21:10:20 2022 ] Eval epoch: 52 +[ Mon Sep 12 21:12:49 2022 ] Mean test loss of 796 batches: 3.3168282508850098. +[ Mon Sep 12 21:12:50 2022 ] Top1: 42.73% +[ Mon Sep 12 21:12:50 2022 ] Top5: 76.45% +[ Mon Sep 12 21:12:50 2022 ] Training epoch: 53 +[ Mon Sep 12 21:13:28 2022 ] Batch(63/243) done. Loss: 0.4394 lr:0.100000 +[ Mon Sep 12 21:14:21 2022 ] Batch(163/243) done. Loss: 0.3355 lr:0.100000 +[ Mon Sep 12 21:15:03 2022 ] Eval epoch: 53 +[ Mon Sep 12 21:17:32 2022 ] Mean test loss of 796 batches: 3.2490127086639404. +[ Mon Sep 12 21:17:33 2022 ] Top1: 42.81% +[ Mon Sep 12 21:17:33 2022 ] Top5: 74.85% +[ Mon Sep 12 21:17:33 2022 ] Training epoch: 54 +[ Mon Sep 12 21:17:48 2022 ] Batch(20/243) done. Loss: 0.4547 lr:0.100000 +[ Mon Sep 12 21:18:41 2022 ] Batch(120/243) done. Loss: 0.2926 lr:0.100000 +[ Mon Sep 12 21:19:34 2022 ] Batch(220/243) done. Loss: 0.6257 lr:0.100000 +[ Mon Sep 12 21:19:46 2022 ] Eval epoch: 54 +[ Mon Sep 12 21:22:15 2022 ] Mean test loss of 796 batches: 4.4578704833984375. +[ Mon Sep 12 21:22:15 2022 ] Top1: 38.24% +[ Mon Sep 12 21:22:16 2022 ] Top5: 70.75% +[ Mon Sep 12 21:22:16 2022 ] Training epoch: 55 +[ Mon Sep 12 21:23:01 2022 ] Batch(77/243) done. 
Loss: 0.5628 lr:0.100000 +[ Mon Sep 12 21:23:54 2022 ] Batch(177/243) done. Loss: 0.6820 lr:0.100000 +[ Mon Sep 12 21:24:29 2022 ] Eval epoch: 55 +[ Mon Sep 12 21:26:59 2022 ] Mean test loss of 796 batches: 4.338413715362549. +[ Mon Sep 12 21:26:59 2022 ] Top1: 39.27% +[ Mon Sep 12 21:26:59 2022 ] Top5: 70.55% +[ Mon Sep 12 21:27:00 2022 ] Training epoch: 56 +[ Mon Sep 12 21:27:22 2022 ] Batch(34/243) done. Loss: 0.3782 lr:0.100000 +[ Mon Sep 12 21:28:15 2022 ] Batch(134/243) done. Loss: 0.1938 lr:0.100000 +[ Mon Sep 12 21:29:07 2022 ] Batch(234/243) done. Loss: 0.2396 lr:0.100000 +[ Mon Sep 12 21:29:12 2022 ] Eval epoch: 56 +[ Mon Sep 12 21:31:41 2022 ] Mean test loss of 796 batches: 4.485368251800537. +[ Mon Sep 12 21:31:42 2022 ] Top1: 35.11% +[ Mon Sep 12 21:31:42 2022 ] Top5: 67.80% +[ Mon Sep 12 21:31:42 2022 ] Training epoch: 57 +[ Mon Sep 12 21:32:35 2022 ] Batch(91/243) done. Loss: 0.3136 lr:0.100000 +[ Mon Sep 12 21:33:27 2022 ] Batch(191/243) done. Loss: 0.3970 lr:0.100000 +[ Mon Sep 12 21:33:55 2022 ] Eval epoch: 57 +[ Mon Sep 12 21:36:24 2022 ] Mean test loss of 796 batches: 4.196974754333496. +[ Mon Sep 12 21:36:24 2022 ] Top1: 35.71% +[ Mon Sep 12 21:36:25 2022 ] Top5: 69.00% +[ Mon Sep 12 21:36:25 2022 ] Training epoch: 58 +[ Mon Sep 12 21:36:55 2022 ] Batch(48/243) done. Loss: 0.2219 lr:0.100000 +[ Mon Sep 12 21:37:48 2022 ] Batch(148/243) done. Loss: 0.3556 lr:0.100000 +[ Mon Sep 12 21:38:38 2022 ] Eval epoch: 58 +[ Mon Sep 12 21:41:07 2022 ] Mean test loss of 796 batches: 6.65744686126709. +[ Mon Sep 12 21:41:07 2022 ] Top1: 20.99% +[ Mon Sep 12 21:41:08 2022 ] Top5: 47.98% +[ Mon Sep 12 21:41:08 2022 ] Training epoch: 59 +[ Mon Sep 12 21:41:15 2022 ] Batch(5/243) done. Loss: 0.1090 lr:0.100000 +[ Mon Sep 12 21:42:08 2022 ] Batch(105/243) done. Loss: 0.2668 lr:0.100000 +[ Mon Sep 12 21:43:01 2022 ] Batch(205/243) done. 
Loss: 0.6769 lr:0.100000 +[ Mon Sep 12 21:43:20 2022 ] Eval epoch: 59 +[ Mon Sep 12 21:45:50 2022 ] Mean test loss of 796 batches: 4.866194248199463. +[ Mon Sep 12 21:45:50 2022 ] Top1: 36.23% +[ Mon Sep 12 21:45:50 2022 ] Top5: 67.78% +[ Mon Sep 12 21:45:51 2022 ] Training epoch: 60 +[ Mon Sep 12 21:46:28 2022 ] Batch(62/243) done. Loss: 0.1987 lr:0.100000 +[ Mon Sep 12 21:47:20 2022 ] Batch(162/243) done. Loss: 0.3460 lr:0.100000 +[ Mon Sep 12 21:48:03 2022 ] Eval epoch: 60 +[ Mon Sep 12 21:50:33 2022 ] Mean test loss of 796 batches: 3.260272979736328. +[ Mon Sep 12 21:50:33 2022 ] Top1: 41.98% +[ Mon Sep 12 21:50:33 2022 ] Top5: 74.38% +[ Mon Sep 12 21:50:34 2022 ] Training epoch: 61 +[ Mon Sep 12 21:50:48 2022 ] Batch(19/243) done. Loss: 0.1512 lr:0.010000 +[ Mon Sep 12 21:51:41 2022 ] Batch(119/243) done. Loss: 0.1251 lr:0.010000 +[ Mon Sep 12 21:52:34 2022 ] Batch(219/243) done. Loss: 0.1873 lr:0.010000 +[ Mon Sep 12 21:52:46 2022 ] Eval epoch: 61 +[ Mon Sep 12 21:55:16 2022 ] Mean test loss of 796 batches: 2.871124029159546. +[ Mon Sep 12 21:55:16 2022 ] Top1: 49.90% +[ Mon Sep 12 21:55:17 2022 ] Top5: 80.83% +[ Mon Sep 12 21:55:17 2022 ] Training epoch: 62 +[ Mon Sep 12 21:56:01 2022 ] Batch(76/243) done. Loss: 0.0721 lr:0.010000 +[ Mon Sep 12 21:56:54 2022 ] Batch(176/243) done. Loss: 0.0414 lr:0.010000 +[ Mon Sep 12 21:57:29 2022 ] Eval epoch: 62 +[ Mon Sep 12 21:59:59 2022 ] Mean test loss of 796 batches: 2.853978157043457. +[ Mon Sep 12 21:59:59 2022 ] Top1: 50.46% +[ Mon Sep 12 22:00:00 2022 ] Top5: 81.38% +[ Mon Sep 12 22:00:00 2022 ] Training epoch: 63 +[ Mon Sep 12 22:00:22 2022 ] Batch(33/243) done. Loss: 0.0897 lr:0.010000 +[ Mon Sep 12 22:01:14 2022 ] Batch(133/243) done. Loss: 0.0934 lr:0.010000 +[ Mon Sep 12 22:02:07 2022 ] Batch(233/243) done. Loss: 0.1227 lr:0.010000 +[ Mon Sep 12 22:02:12 2022 ] Eval epoch: 63 +[ Mon Sep 12 22:04:42 2022 ] Mean test loss of 796 batches: 2.8253042697906494. 
+[ Mon Sep 12 22:04:42 2022 ] Top1: 51.65% +[ Mon Sep 12 22:04:43 2022 ] Top5: 82.35% +[ Mon Sep 12 22:04:43 2022 ] Training epoch: 64 +[ Mon Sep 12 22:05:35 2022 ] Batch(90/243) done. Loss: 0.0743 lr:0.010000 +[ Mon Sep 12 22:06:28 2022 ] Batch(190/243) done. Loss: 0.0875 lr:0.010000 +[ Mon Sep 12 22:06:55 2022 ] Eval epoch: 64 +[ Mon Sep 12 22:09:25 2022 ] Mean test loss of 796 batches: 2.9669079780578613. +[ Mon Sep 12 22:09:25 2022 ] Top1: 50.87% +[ Mon Sep 12 22:09:26 2022 ] Top5: 82.07% +[ Mon Sep 12 22:09:26 2022 ] Training epoch: 65 +[ Mon Sep 12 22:09:55 2022 ] Batch(47/243) done. Loss: 0.0185 lr:0.010000 +[ Mon Sep 12 22:10:48 2022 ] Batch(147/243) done. Loss: 0.0595 lr:0.010000 +[ Mon Sep 12 22:11:38 2022 ] Eval epoch: 65 +[ Mon Sep 12 22:14:08 2022 ] Mean test loss of 796 batches: 2.926304578781128. +[ Mon Sep 12 22:14:08 2022 ] Top1: 51.17% +[ Mon Sep 12 22:14:09 2022 ] Top5: 81.96% +[ Mon Sep 12 22:14:09 2022 ] Training epoch: 66 +[ Mon Sep 12 22:14:15 2022 ] Batch(4/243) done. Loss: 0.0893 lr:0.010000 +[ Mon Sep 12 22:15:08 2022 ] Batch(104/243) done. Loss: 0.0399 lr:0.010000 +[ Mon Sep 12 22:16:01 2022 ] Batch(204/243) done. Loss: 0.0083 lr:0.010000 +[ Mon Sep 12 22:16:21 2022 ] Eval epoch: 66 +[ Mon Sep 12 22:18:51 2022 ] Mean test loss of 796 batches: 3.071596622467041. +[ Mon Sep 12 22:18:52 2022 ] Top1: 51.42% +[ Mon Sep 12 22:18:52 2022 ] Top5: 82.05% +[ Mon Sep 12 22:18:52 2022 ] Training epoch: 67 +[ Mon Sep 12 22:19:29 2022 ] Batch(61/243) done. Loss: 0.0227 lr:0.010000 +[ Mon Sep 12 22:20:22 2022 ] Batch(161/243) done. Loss: 0.0420 lr:0.010000 +[ Mon Sep 12 22:21:05 2022 ] Eval epoch: 67 +[ Mon Sep 12 22:23:34 2022 ] Mean test loss of 796 batches: 3.0440354347229004. +[ Mon Sep 12 22:23:35 2022 ] Top1: 51.06% +[ Mon Sep 12 22:23:35 2022 ] Top5: 81.90% +[ Mon Sep 12 22:23:35 2022 ] Training epoch: 68 +[ Mon Sep 12 22:23:49 2022 ] Batch(18/243) done. Loss: 0.0644 lr:0.010000 +[ Mon Sep 12 22:24:42 2022 ] Batch(118/243) done. 
Loss: 0.0124 lr:0.010000 +[ Mon Sep 12 22:25:35 2022 ] Batch(218/243) done. Loss: 0.0345 lr:0.010000 +[ Mon Sep 12 22:25:48 2022 ] Eval epoch: 68 +[ Mon Sep 12 22:28:18 2022 ] Mean test loss of 796 batches: 3.002916097640991. +[ Mon Sep 12 22:28:18 2022 ] Top1: 51.48% +[ Mon Sep 12 22:28:19 2022 ] Top5: 82.18% +[ Mon Sep 12 22:28:19 2022 ] Training epoch: 69 +[ Mon Sep 12 22:29:03 2022 ] Batch(75/243) done. Loss: 0.0655 lr:0.010000 +[ Mon Sep 12 22:29:56 2022 ] Batch(175/243) done. Loss: 0.1207 lr:0.010000 +[ Mon Sep 12 22:30:31 2022 ] Eval epoch: 69 +[ Mon Sep 12 22:33:01 2022 ] Mean test loss of 796 batches: 3.0260205268859863. +[ Mon Sep 12 22:33:02 2022 ] Top1: 51.93% +[ Mon Sep 12 22:33:02 2022 ] Top5: 82.39% +[ Mon Sep 12 22:33:02 2022 ] Training epoch: 70 +[ Mon Sep 12 22:33:23 2022 ] Batch(32/243) done. Loss: 0.0986 lr:0.010000 +[ Mon Sep 12 22:34:16 2022 ] Batch(132/243) done. Loss: 0.0726 lr:0.010000 +[ Mon Sep 12 22:35:09 2022 ] Batch(232/243) done. Loss: 0.0555 lr:0.010000 +[ Mon Sep 12 22:35:15 2022 ] Eval epoch: 70 +[ Mon Sep 12 22:37:45 2022 ] Mean test loss of 796 batches: 2.9354891777038574. +[ Mon Sep 12 22:37:45 2022 ] Top1: 52.34% +[ Mon Sep 12 22:37:46 2022 ] Top5: 82.85% +[ Mon Sep 12 22:37:46 2022 ] Training epoch: 71 +[ Mon Sep 12 22:38:37 2022 ] Batch(89/243) done. Loss: 0.0333 lr:0.010000 +[ Mon Sep 12 22:39:30 2022 ] Batch(189/243) done. Loss: 0.0335 lr:0.010000 +[ Mon Sep 12 22:39:58 2022 ] Eval epoch: 71 +[ Mon Sep 12 22:42:29 2022 ] Mean test loss of 796 batches: 3.0900039672851562. +[ Mon Sep 12 22:42:29 2022 ] Top1: 51.37% +[ Mon Sep 12 22:42:29 2022 ] Top5: 82.26% +[ Mon Sep 12 22:42:30 2022 ] Training epoch: 72 +[ Mon Sep 12 22:42:58 2022 ] Batch(46/243) done. Loss: 0.0120 lr:0.010000 +[ Mon Sep 12 22:43:51 2022 ] Batch(146/243) done. Loss: 0.0276 lr:0.010000 +[ Mon Sep 12 22:44:42 2022 ] Eval epoch: 72 +[ Mon Sep 12 22:47:12 2022 ] Mean test loss of 796 batches: 3.181964874267578. 
+[ Mon Sep 12 22:47:12 2022 ] Top1: 50.43% +[ Mon Sep 12 22:47:13 2022 ] Top5: 81.50% +[ Mon Sep 12 22:47:13 2022 ] Training epoch: 73 +[ Mon Sep 12 22:47:19 2022 ] Batch(3/243) done. Loss: 0.0571 lr:0.010000 +[ Mon Sep 12 22:48:12 2022 ] Batch(103/243) done. Loss: 0.0637 lr:0.010000 +[ Mon Sep 12 22:49:04 2022 ] Batch(203/243) done. Loss: 0.0149 lr:0.010000 +[ Mon Sep 12 22:49:25 2022 ] Eval epoch: 73 +[ Mon Sep 12 22:51:55 2022 ] Mean test loss of 796 batches: 3.086237668991089. +[ Mon Sep 12 22:51:56 2022 ] Top1: 51.54% +[ Mon Sep 12 22:51:56 2022 ] Top5: 82.37% +[ Mon Sep 12 22:51:56 2022 ] Training epoch: 74 +[ Mon Sep 12 22:52:32 2022 ] Batch(60/243) done. Loss: 0.0192 lr:0.010000 +[ Mon Sep 12 22:53:25 2022 ] Batch(160/243) done. Loss: 0.0189 lr:0.010000 +[ Mon Sep 12 22:54:09 2022 ] Eval epoch: 74 +[ Mon Sep 12 22:56:39 2022 ] Mean test loss of 796 batches: 3.1431281566619873. +[ Mon Sep 12 22:56:39 2022 ] Top1: 51.25% +[ Mon Sep 12 22:56:39 2022 ] Top5: 82.38% +[ Mon Sep 12 22:56:40 2022 ] Training epoch: 75 +[ Mon Sep 12 22:56:52 2022 ] Batch(17/243) done. Loss: 0.0360 lr:0.010000 +[ Mon Sep 12 22:57:45 2022 ] Batch(117/243) done. Loss: 0.0251 lr:0.010000 +[ Mon Sep 12 22:58:38 2022 ] Batch(217/243) done. Loss: 0.0442 lr:0.010000 +[ Mon Sep 12 22:58:52 2022 ] Eval epoch: 75 +[ Mon Sep 12 23:01:21 2022 ] Mean test loss of 796 batches: 3.1644086837768555. +[ Mon Sep 12 23:01:22 2022 ] Top1: 51.29% +[ Mon Sep 12 23:01:22 2022 ] Top5: 82.37% +[ Mon Sep 12 23:01:22 2022 ] Training epoch: 76 +[ Mon Sep 12 23:02:06 2022 ] Batch(74/243) done. Loss: 0.1041 lr:0.010000 +[ Mon Sep 12 23:02:59 2022 ] Batch(174/243) done. Loss: 0.0044 lr:0.010000 +[ Mon Sep 12 23:03:35 2022 ] Eval epoch: 76 +[ Mon Sep 12 23:06:05 2022 ] Mean test loss of 796 batches: 3.1715903282165527. +[ Mon Sep 12 23:06:05 2022 ] Top1: 50.93% +[ Mon Sep 12 23:06:05 2022 ] Top5: 82.09% +[ Mon Sep 12 23:06:06 2022 ] Training epoch: 77 +[ Mon Sep 12 23:06:27 2022 ] Batch(31/243) done. 
Loss: 0.0590 lr:0.010000 +[ Mon Sep 12 23:07:19 2022 ] Batch(131/243) done. Loss: 0.0440 lr:0.010000 +[ Mon Sep 12 23:08:12 2022 ] Batch(231/243) done. Loss: 0.0496 lr:0.010000 +[ Mon Sep 12 23:08:18 2022 ] Eval epoch: 77 +[ Mon Sep 12 23:10:48 2022 ] Mean test loss of 796 batches: 3.0519044399261475. +[ Mon Sep 12 23:10:48 2022 ] Top1: 52.19% +[ Mon Sep 12 23:10:49 2022 ] Top5: 82.47% +[ Mon Sep 12 23:10:49 2022 ] Training epoch: 78 +[ Mon Sep 12 23:11:40 2022 ] Batch(88/243) done. Loss: 0.0440 lr:0.010000 +[ Mon Sep 12 23:12:33 2022 ] Batch(188/243) done. Loss: 0.0604 lr:0.010000 +[ Mon Sep 12 23:13:01 2022 ] Eval epoch: 78 +[ Mon Sep 12 23:15:31 2022 ] Mean test loss of 796 batches: 3.2088100910186768. +[ Mon Sep 12 23:15:31 2022 ] Top1: 51.09% +[ Mon Sep 12 23:15:32 2022 ] Top5: 81.82% +[ Mon Sep 12 23:15:32 2022 ] Training epoch: 79 +[ Mon Sep 12 23:16:00 2022 ] Batch(45/243) done. Loss: 0.0630 lr:0.010000 +[ Mon Sep 12 23:16:53 2022 ] Batch(145/243) done. Loss: 0.0380 lr:0.010000 +[ Mon Sep 12 23:17:44 2022 ] Eval epoch: 79 +[ Mon Sep 12 23:20:14 2022 ] Mean test loss of 796 batches: 3.236017942428589. +[ Mon Sep 12 23:20:14 2022 ] Top1: 51.50% +[ Mon Sep 12 23:20:14 2022 ] Top5: 82.01% +[ Mon Sep 12 23:20:15 2022 ] Training epoch: 80 +[ Mon Sep 12 23:20:20 2022 ] Batch(2/243) done. Loss: 0.0371 lr:0.010000 +[ Mon Sep 12 23:21:13 2022 ] Batch(102/243) done. Loss: 0.0451 lr:0.010000 +[ Mon Sep 12 23:22:06 2022 ] Batch(202/243) done. Loss: 0.0111 lr:0.010000 +[ Mon Sep 12 23:22:27 2022 ] Eval epoch: 80 +[ Mon Sep 12 23:24:57 2022 ] Mean test loss of 796 batches: 3.183028221130371. +[ Mon Sep 12 23:24:57 2022 ] Top1: 51.42% +[ Mon Sep 12 23:24:57 2022 ] Top5: 82.27% +[ Mon Sep 12 23:24:58 2022 ] Training epoch: 81 +[ Mon Sep 12 23:25:33 2022 ] Batch(59/243) done. Loss: 0.0484 lr:0.001000 +[ Mon Sep 12 23:26:26 2022 ] Batch(159/243) done. 
Loss: 0.0440 lr:0.001000 +[ Mon Sep 12 23:27:10 2022 ] Eval epoch: 81 +[ Mon Sep 12 23:29:40 2022 ] Mean test loss of 796 batches: 3.242577075958252. +[ Mon Sep 12 23:29:40 2022 ] Top1: 50.64% +[ Mon Sep 12 23:29:41 2022 ] Top5: 81.85% +[ Mon Sep 12 23:29:41 2022 ] Training epoch: 82 +[ Mon Sep 12 23:29:53 2022 ] Batch(16/243) done. Loss: 0.0190 lr:0.001000 +[ Mon Sep 12 23:30:46 2022 ] Batch(116/243) done. Loss: 0.0148 lr:0.001000 +[ Mon Sep 12 23:31:39 2022 ] Batch(216/243) done. Loss: 0.0409 lr:0.001000 +[ Mon Sep 12 23:31:53 2022 ] Eval epoch: 82 +[ Mon Sep 12 23:34:23 2022 ] Mean test loss of 796 batches: 3.20725154876709. +[ Mon Sep 12 23:34:23 2022 ] Top1: 51.45% +[ Mon Sep 12 23:34:24 2022 ] Top5: 82.32% +[ Mon Sep 12 23:34:24 2022 ] Training epoch: 83 +[ Mon Sep 12 23:35:07 2022 ] Batch(73/243) done. Loss: 0.0874 lr:0.001000 +[ Mon Sep 12 23:35:59 2022 ] Batch(173/243) done. Loss: 0.0335 lr:0.001000 +[ Mon Sep 12 23:36:36 2022 ] Eval epoch: 83 +[ Mon Sep 12 23:39:06 2022 ] Mean test loss of 796 batches: 3.1513242721557617. +[ Mon Sep 12 23:39:06 2022 ] Top1: 51.75% +[ Mon Sep 12 23:39:07 2022 ] Top5: 82.37% +[ Mon Sep 12 23:39:07 2022 ] Training epoch: 84 +[ Mon Sep 12 23:39:27 2022 ] Batch(30/243) done. Loss: 0.0181 lr:0.001000 +[ Mon Sep 12 23:40:20 2022 ] Batch(130/243) done. Loss: 0.0753 lr:0.001000 +[ Mon Sep 12 23:41:13 2022 ] Batch(230/243) done. Loss: 0.0129 lr:0.001000 +[ Mon Sep 12 23:41:20 2022 ] Eval epoch: 84 +[ Mon Sep 12 23:43:49 2022 ] Mean test loss of 796 batches: 3.1976230144500732. +[ Mon Sep 12 23:43:50 2022 ] Top1: 51.62% +[ Mon Sep 12 23:43:50 2022 ] Top5: 82.35% +[ Mon Sep 12 23:43:50 2022 ] Training epoch: 85 +[ Mon Sep 12 23:44:41 2022 ] Batch(87/243) done. Loss: 0.0158 lr:0.001000 +[ Mon Sep 12 23:45:34 2022 ] Batch(187/243) done. Loss: 0.0364 lr:0.001000 +[ Mon Sep 12 23:46:03 2022 ] Eval epoch: 85 +[ Mon Sep 12 23:48:32 2022 ] Mean test loss of 796 batches: 3.187116861343384. 
+[ Mon Sep 12 23:48:33 2022 ] Top1: 51.59% +[ Mon Sep 12 23:48:33 2022 ] Top5: 82.31% +[ Mon Sep 12 23:48:33 2022 ] Training epoch: 86 +[ Mon Sep 12 23:49:01 2022 ] Batch(44/243) done. Loss: 0.0126 lr:0.001000 +[ Mon Sep 12 23:49:54 2022 ] Batch(144/243) done. Loss: 0.0161 lr:0.001000 +[ Mon Sep 12 23:50:46 2022 ] Eval epoch: 86 +[ Mon Sep 12 23:53:15 2022 ] Mean test loss of 796 batches: 3.2308666706085205. +[ Mon Sep 12 23:53:16 2022 ] Top1: 51.43% +[ Mon Sep 12 23:53:16 2022 ] Top5: 82.06% +[ Mon Sep 12 23:53:16 2022 ] Training epoch: 87 +[ Mon Sep 12 23:53:21 2022 ] Batch(1/243) done. Loss: 0.0844 lr:0.001000 +[ Mon Sep 12 23:54:14 2022 ] Batch(101/243) done. Loss: 0.0450 lr:0.001000 +[ Mon Sep 12 23:55:07 2022 ] Batch(201/243) done. Loss: 0.0782 lr:0.001000 +[ Mon Sep 12 23:55:29 2022 ] Eval epoch: 87 +[ Mon Sep 12 23:57:59 2022 ] Mean test loss of 796 batches: 3.226652145385742. +[ Mon Sep 12 23:57:59 2022 ] Top1: 51.32% +[ Mon Sep 12 23:57:59 2022 ] Top5: 82.01% +[ Mon Sep 12 23:58:00 2022 ] Training epoch: 88 +[ Mon Sep 12 23:58:35 2022 ] Batch(58/243) done. Loss: 0.0215 lr:0.001000 +[ Mon Sep 12 23:59:28 2022 ] Batch(158/243) done. Loss: 0.0571 lr:0.001000 +[ Tue Sep 13 00:00:12 2022 ] Eval epoch: 88 +[ Tue Sep 13 00:02:41 2022 ] Mean test loss of 796 batches: 3.2637722492218018. +[ Tue Sep 13 00:02:42 2022 ] Top1: 50.40% +[ Tue Sep 13 00:02:42 2022 ] Top5: 81.56% +[ Tue Sep 13 00:02:42 2022 ] Training epoch: 89 +[ Tue Sep 13 00:02:55 2022 ] Batch(15/243) done. Loss: 0.0201 lr:0.001000 +[ Tue Sep 13 00:03:48 2022 ] Batch(115/243) done. Loss: 0.0867 lr:0.001000 +[ Tue Sep 13 00:04:40 2022 ] Batch(215/243) done. Loss: 0.0216 lr:0.001000 +[ Tue Sep 13 00:04:55 2022 ] Eval epoch: 89 +[ Tue Sep 13 00:07:24 2022 ] Mean test loss of 796 batches: 3.2266507148742676. +[ Tue Sep 13 00:07:24 2022 ] Top1: 50.92% +[ Tue Sep 13 00:07:25 2022 ] Top5: 81.87% +[ Tue Sep 13 00:07:25 2022 ] Training epoch: 90 +[ Tue Sep 13 00:08:08 2022 ] Batch(72/243) done. 
Loss: 0.0154 lr:0.001000 +[ Tue Sep 13 00:09:01 2022 ] Batch(172/243) done. Loss: 0.0784 lr:0.001000 +[ Tue Sep 13 00:09:38 2022 ] Eval epoch: 90 +[ Tue Sep 13 00:12:07 2022 ] Mean test loss of 796 batches: 3.225825071334839. +[ Tue Sep 13 00:12:08 2022 ] Top1: 51.16% +[ Tue Sep 13 00:12:08 2022 ] Top5: 81.81% +[ Tue Sep 13 00:12:08 2022 ] Training epoch: 91 +[ Tue Sep 13 00:12:28 2022 ] Batch(29/243) done. Loss: 0.0316 lr:0.001000 +[ Tue Sep 13 00:13:21 2022 ] Batch(129/243) done. Loss: 0.0128 lr:0.001000 +[ Tue Sep 13 00:14:14 2022 ] Batch(229/243) done. Loss: 0.0234 lr:0.001000 +[ Tue Sep 13 00:14:21 2022 ] Eval epoch: 91 +[ Tue Sep 13 00:16:51 2022 ] Mean test loss of 796 batches: 3.2642405033111572. +[ Tue Sep 13 00:16:51 2022 ] Top1: 50.66% +[ Tue Sep 13 00:16:52 2022 ] Top5: 81.73% +[ Tue Sep 13 00:16:52 2022 ] Training epoch: 92 +[ Tue Sep 13 00:17:42 2022 ] Batch(86/243) done. Loss: 0.0553 lr:0.001000 +[ Tue Sep 13 00:18:34 2022 ] Batch(186/243) done. Loss: 0.0227 lr:0.001000 +[ Tue Sep 13 00:19:04 2022 ] Eval epoch: 92 +[ Tue Sep 13 00:21:34 2022 ] Mean test loss of 796 batches: 3.32381010055542. +[ Tue Sep 13 00:21:34 2022 ] Top1: 49.58% +[ Tue Sep 13 00:21:35 2022 ] Top5: 81.17% +[ Tue Sep 13 00:21:35 2022 ] Training epoch: 93 +[ Tue Sep 13 00:22:02 2022 ] Batch(43/243) done. Loss: 0.0260 lr:0.001000 +[ Tue Sep 13 00:22:55 2022 ] Batch(143/243) done. Loss: 0.0413 lr:0.001000 +[ Tue Sep 13 00:23:47 2022 ] Eval epoch: 93 +[ Tue Sep 13 00:26:17 2022 ] Mean test loss of 796 batches: 3.227717638015747. +[ Tue Sep 13 00:26:17 2022 ] Top1: 51.69% +[ Tue Sep 13 00:26:18 2022 ] Top5: 82.23% +[ Tue Sep 13 00:26:18 2022 ] Training epoch: 94 +[ Tue Sep 13 00:26:22 2022 ] Batch(0/243) done. Loss: 0.0988 lr:0.001000 +[ Tue Sep 13 00:27:15 2022 ] Batch(100/243) done. Loss: 0.0238 lr:0.001000 +[ Tue Sep 13 00:28:08 2022 ] Batch(200/243) done. 
Loss: 0.0388 lr:0.001000 +[ Tue Sep 13 00:28:31 2022 ] Eval epoch: 94 +[ Tue Sep 13 00:31:00 2022 ] Mean test loss of 796 batches: 3.270937204360962. +[ Tue Sep 13 00:31:01 2022 ] Top1: 51.21% +[ Tue Sep 13 00:31:01 2022 ] Top5: 81.94% +[ Tue Sep 13 00:31:01 2022 ] Training epoch: 95 +[ Tue Sep 13 00:31:35 2022 ] Batch(57/243) done. Loss: 0.2001 lr:0.001000 +[ Tue Sep 13 00:32:28 2022 ] Batch(157/243) done. Loss: 0.0202 lr:0.001000 +[ Tue Sep 13 00:33:13 2022 ] Eval epoch: 95 +[ Tue Sep 13 00:35:43 2022 ] Mean test loss of 796 batches: 3.2287380695343018. +[ Tue Sep 13 00:35:44 2022 ] Top1: 50.79% +[ Tue Sep 13 00:35:44 2022 ] Top5: 81.82% +[ Tue Sep 13 00:35:44 2022 ] Training epoch: 96 +[ Tue Sep 13 00:35:56 2022 ] Batch(14/243) done. Loss: 0.0420 lr:0.001000 +[ Tue Sep 13 00:36:49 2022 ] Batch(114/243) done. Loss: 0.0295 lr:0.001000 +[ Tue Sep 13 00:37:42 2022 ] Batch(214/243) done. Loss: 0.0117 lr:0.001000 +[ Tue Sep 13 00:37:57 2022 ] Eval epoch: 96 +[ Tue Sep 13 00:40:26 2022 ] Mean test loss of 796 batches: 3.366297721862793. +[ Tue Sep 13 00:40:27 2022 ] Top1: 49.64% +[ Tue Sep 13 00:40:27 2022 ] Top5: 81.07% +[ Tue Sep 13 00:40:27 2022 ] Training epoch: 97 +[ Tue Sep 13 00:41:09 2022 ] Batch(71/243) done. Loss: 0.0625 lr:0.001000 +[ Tue Sep 13 00:42:02 2022 ] Batch(171/243) done. Loss: 0.0306 lr:0.001000 +[ Tue Sep 13 00:42:40 2022 ] Eval epoch: 97 +[ Tue Sep 13 00:45:10 2022 ] Mean test loss of 796 batches: 3.252760887145996. +[ Tue Sep 13 00:45:11 2022 ] Top1: 50.98% +[ Tue Sep 13 00:45:11 2022 ] Top5: 82.04% +[ Tue Sep 13 00:45:11 2022 ] Training epoch: 98 +[ Tue Sep 13 00:45:30 2022 ] Batch(28/243) done. Loss: 0.0788 lr:0.001000 +[ Tue Sep 13 00:46:23 2022 ] Batch(128/243) done. Loss: 0.0487 lr:0.001000 +[ Tue Sep 13 00:47:16 2022 ] Batch(228/243) done. Loss: 0.0122 lr:0.001000 +[ Tue Sep 13 00:47:24 2022 ] Eval epoch: 98 +[ Tue Sep 13 00:49:54 2022 ] Mean test loss of 796 batches: 3.223783016204834. 
+[ Tue Sep 13 00:49:54 2022 ] Top1: 51.54% +[ Tue Sep 13 00:49:54 2022 ] Top5: 82.20% +[ Tue Sep 13 00:49:55 2022 ] Training epoch: 99 +[ Tue Sep 13 00:50:44 2022 ] Batch(85/243) done. Loss: 0.1317 lr:0.001000 +[ Tue Sep 13 00:51:37 2022 ] Batch(185/243) done. Loss: 0.1158 lr:0.001000 +[ Tue Sep 13 00:52:07 2022 ] Eval epoch: 99 +[ Tue Sep 13 00:54:37 2022 ] Mean test loss of 796 batches: 3.285264015197754. +[ Tue Sep 13 00:54:38 2022 ] Top1: 50.84% +[ Tue Sep 13 00:54:38 2022 ] Top5: 81.73% +[ Tue Sep 13 00:54:38 2022 ] Training epoch: 100 +[ Tue Sep 13 00:55:05 2022 ] Batch(42/243) done. Loss: 0.0879 lr:0.001000 +[ Tue Sep 13 00:55:57 2022 ] Batch(142/243) done. Loss: 0.0337 lr:0.001000 +[ Tue Sep 13 00:56:50 2022 ] Batch(242/243) done. Loss: 0.0754 lr:0.001000 +[ Tue Sep 13 00:56:51 2022 ] Eval epoch: 100 +[ Tue Sep 13 00:59:21 2022 ] Mean test loss of 796 batches: 3.213009834289551. +[ Tue Sep 13 00:59:21 2022 ] Top1: 51.19% +[ Tue Sep 13 00:59:22 2022 ] Top5: 82.15% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fdd94cd54e51611121f7b8b0d0aa75bbc5e63420 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/config.yaml @@ -0,0 +1,61 @@ +Experiment_name: ntu120_bone_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_bone.yaml +device: +- 0 +- 1 +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 
+save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = 
nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, 
num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, 
stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..050cd3704c0b4b736b50a16284a3d9ffa3362701 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:403481d5eb96956925c0876602005017491fd110130aac32242ef00938ab8bb9 +size 29946137 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb1e41ddb2ae99d158ec330d762de441d9db1891 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_bone_xsub/log.txt @@ -0,0 +1,757 @@ +[ Mon Sep 12 17:07:56 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_xsub', 'model_saved_name': './save_models/ntu120_bone_xsub', 'Experiment_name': 'ntu120_bone_xsub', 'config': './config/ntu120_xsub/train_bone.yaml', 'phase': 
'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [0, 1], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Mon Sep 12 17:07:56 2022 ] Training epoch: 1 +[ Mon Sep 12 17:08:46 2022 ] Batch(99/243) done. Loss: 3.9044 lr:0.100000 +[ Mon Sep 12 17:09:31 2022 ] Batch(199/243) done. Loss: 3.1255 lr:0.100000 +[ Mon Sep 12 17:09:50 2022 ] Eval epoch: 1 +[ Mon Sep 12 17:12:22 2022 ] Mean test loss of 796 batches: 5.347392559051514. 
+[ Mon Sep 12 17:12:22 2022 ] Top1: 3.80% +[ Mon Sep 12 17:12:23 2022 ] Top5: 15.10% +[ Mon Sep 12 17:12:23 2022 ] Training epoch: 2 +[ Tue Sep 13 10:03:02 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_xsub', 'model_saved_name': './save_models/ntu120_bone_xsub', 'Experiment_name': 'ntu120_bone_xsub', 'config': './config/ntu120_xsub/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [0, 1, 2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 10:03:02 2022 ] Training epoch: 1 +[ Tue Sep 13 10:03:36 2022 ] Batch(99/243) done. Loss: 3.9577 lr:0.100000 +[ Tue Sep 13 10:04:01 2022 ] Batch(199/243) done. Loss: 3.2154 lr:0.100000 +[ Tue Sep 13 10:04:11 2022 ] Eval epoch: 1 +[ Tue Sep 13 10:05:41 2022 ] Mean test loss of 796 batches: 5.444550514221191. 
+[ Tue Sep 13 10:05:42 2022 ] Top1: 4.22% +[ Tue Sep 13 10:05:42 2022 ] Top5: 15.97% +[ Tue Sep 13 10:05:42 2022 ] Training epoch: 2 +[ Tue Sep 13 10:06:02 2022 ] Batch(56/243) done. Loss: 3.1889 lr:0.100000 +[ Tue Sep 13 10:06:32 2022 ] Batch(156/243) done. Loss: 2.5144 lr:0.100000 +[ Tue Sep 13 10:06:58 2022 ] Eval epoch: 2 +[ Tue Sep 13 10:08:29 2022 ] Mean test loss of 796 batches: 5.415019989013672. +[ Tue Sep 13 10:08:30 2022 ] Top1: 7.58% +[ Tue Sep 13 10:08:30 2022 ] Top5: 23.15% +[ Tue Sep 13 10:08:30 2022 ] Training epoch: 3 +[ Tue Sep 13 10:08:37 2022 ] Batch(13/243) done. Loss: 2.4849 lr:0.100000 +[ Tue Sep 13 10:09:07 2022 ] Batch(113/243) done. Loss: 2.2850 lr:0.100000 +[ Tue Sep 13 10:09:37 2022 ] Batch(213/243) done. Loss: 2.3303 lr:0.100000 +[ Tue Sep 13 10:09:46 2022 ] Eval epoch: 3 +[ Tue Sep 13 10:11:17 2022 ] Mean test loss of 796 batches: 3.962733030319214. +[ Tue Sep 13 10:11:17 2022 ] Top1: 11.96% +[ Tue Sep 13 10:11:17 2022 ] Top5: 33.16% +[ Tue Sep 13 10:11:18 2022 ] Training epoch: 4 +[ Tue Sep 13 10:11:42 2022 ] Batch(70/243) done. Loss: 1.9193 lr:0.100000 +[ Tue Sep 13 10:12:12 2022 ] Batch(170/243) done. Loss: 2.0948 lr:0.100000 +[ Tue Sep 13 10:12:33 2022 ] Eval epoch: 4 +[ Tue Sep 13 10:14:04 2022 ] Mean test loss of 796 batches: 3.8267576694488525. +[ Tue Sep 13 10:14:04 2022 ] Top1: 17.78% +[ Tue Sep 13 10:14:04 2022 ] Top5: 40.34% +[ Tue Sep 13 10:14:05 2022 ] Training epoch: 5 +[ Tue Sep 13 10:14:16 2022 ] Batch(27/243) done. Loss: 1.8219 lr:0.100000 +[ Tue Sep 13 10:14:46 2022 ] Batch(127/243) done. Loss: 2.0562 lr:0.100000 +[ Tue Sep 13 10:15:16 2022 ] Batch(227/243) done. Loss: 1.7730 lr:0.100000 +[ Tue Sep 13 10:15:21 2022 ] Eval epoch: 5 +[ Tue Sep 13 10:16:50 2022 ] Mean test loss of 796 batches: 3.385392427444458. +[ Tue Sep 13 10:16:51 2022 ] Top1: 19.85% +[ Tue Sep 13 10:16:51 2022 ] Top5: 45.67% +[ Tue Sep 13 10:16:51 2022 ] Training epoch: 6 +[ Tue Sep 13 10:17:20 2022 ] Batch(84/243) done. 
Loss: 1.7124 lr:0.100000 +[ Tue Sep 13 10:17:50 2022 ] Batch(184/243) done. Loss: 1.7753 lr:0.100000 +[ Tue Sep 13 10:18:07 2022 ] Eval epoch: 6 +[ Tue Sep 13 10:19:37 2022 ] Mean test loss of 796 batches: 3.1966803073883057. +[ Tue Sep 13 10:19:38 2022 ] Top1: 22.52% +[ Tue Sep 13 10:19:38 2022 ] Top5: 51.96% +[ Tue Sep 13 10:19:38 2022 ] Training epoch: 7 +[ Tue Sep 13 10:19:54 2022 ] Batch(41/243) done. Loss: 1.6634 lr:0.100000 +[ Tue Sep 13 10:20:24 2022 ] Batch(141/243) done. Loss: 1.2232 lr:0.100000 +[ Tue Sep 13 10:20:54 2022 ] Batch(241/243) done. Loss: 1.4194 lr:0.100000 +[ Tue Sep 13 10:20:55 2022 ] Eval epoch: 7 +[ Tue Sep 13 10:22:25 2022 ] Mean test loss of 796 batches: 3.186509132385254. +[ Tue Sep 13 10:22:25 2022 ] Top1: 25.69% +[ Tue Sep 13 10:22:26 2022 ] Top5: 58.77% +[ Tue Sep 13 10:22:26 2022 ] Training epoch: 8 +[ Tue Sep 13 10:22:59 2022 ] Batch(98/243) done. Loss: 1.3116 lr:0.100000 +[ Tue Sep 13 10:23:29 2022 ] Batch(198/243) done. Loss: 1.3068 lr:0.100000 +[ Tue Sep 13 10:23:43 2022 ] Eval epoch: 8 +[ Tue Sep 13 10:25:13 2022 ] Mean test loss of 796 batches: 2.9324722290039062. +[ Tue Sep 13 10:25:13 2022 ] Top1: 27.67% +[ Tue Sep 13 10:25:14 2022 ] Top5: 59.88% +[ Tue Sep 13 10:25:14 2022 ] Training epoch: 9 +[ Tue Sep 13 10:25:34 2022 ] Batch(55/243) done. Loss: 1.2367 lr:0.100000 +[ Tue Sep 13 10:26:05 2022 ] Batch(155/243) done. Loss: 1.4505 lr:0.100000 +[ Tue Sep 13 10:26:31 2022 ] Eval epoch: 9 +[ Tue Sep 13 10:28:01 2022 ] Mean test loss of 796 batches: 2.934443712234497. +[ Tue Sep 13 10:28:02 2022 ] Top1: 30.04% +[ Tue Sep 13 10:28:02 2022 ] Top5: 61.77% +[ Tue Sep 13 10:28:02 2022 ] Training epoch: 10 +[ Tue Sep 13 10:28:09 2022 ] Batch(12/243) done. Loss: 1.4612 lr:0.100000 +[ Tue Sep 13 10:28:39 2022 ] Batch(112/243) done. Loss: 0.9946 lr:0.100000 +[ Tue Sep 13 10:29:09 2022 ] Batch(212/243) done. 
Loss: 1.2065 lr:0.100000 +[ Tue Sep 13 10:29:18 2022 ] Eval epoch: 10 +[ Tue Sep 13 10:30:49 2022 ] Mean test loss of 796 batches: 2.912278652191162. +[ Tue Sep 13 10:30:49 2022 ] Top1: 29.90% +[ Tue Sep 13 10:30:49 2022 ] Top5: 61.19% +[ Tue Sep 13 10:30:50 2022 ] Training epoch: 11 +[ Tue Sep 13 10:31:14 2022 ] Batch(69/243) done. Loss: 0.7080 lr:0.100000 +[ Tue Sep 13 10:31:44 2022 ] Batch(169/243) done. Loss: 1.0251 lr:0.100000 +[ Tue Sep 13 10:32:06 2022 ] Eval epoch: 11 +[ Tue Sep 13 10:33:37 2022 ] Mean test loss of 796 batches: 3.0440070629119873. +[ Tue Sep 13 10:33:37 2022 ] Top1: 29.37% +[ Tue Sep 13 10:33:38 2022 ] Top5: 62.99% +[ Tue Sep 13 10:33:38 2022 ] Training epoch: 12 +[ Tue Sep 13 10:33:49 2022 ] Batch(26/243) done. Loss: 0.8521 lr:0.100000 +[ Tue Sep 13 10:34:19 2022 ] Batch(126/243) done. Loss: 1.3272 lr:0.100000 +[ Tue Sep 13 10:34:49 2022 ] Batch(226/243) done. Loss: 1.0632 lr:0.100000 +[ Tue Sep 13 10:34:54 2022 ] Eval epoch: 12 +[ Tue Sep 13 10:36:24 2022 ] Mean test loss of 796 batches: 3.03654146194458. +[ Tue Sep 13 10:36:25 2022 ] Top1: 32.36% +[ Tue Sep 13 10:36:25 2022 ] Top5: 65.46% +[ Tue Sep 13 10:36:25 2022 ] Training epoch: 13 +[ Tue Sep 13 10:36:53 2022 ] Batch(83/243) done. Loss: 1.0905 lr:0.100000 +[ Tue Sep 13 10:37:23 2022 ] Batch(183/243) done. Loss: 1.0714 lr:0.100000 +[ Tue Sep 13 10:37:41 2022 ] Eval epoch: 13 +[ Tue Sep 13 10:39:11 2022 ] Mean test loss of 796 batches: 2.899965524673462. +[ Tue Sep 13 10:39:12 2022 ] Top1: 33.87% +[ Tue Sep 13 10:39:12 2022 ] Top5: 68.04% +[ Tue Sep 13 10:39:12 2022 ] Training epoch: 14 +[ Tue Sep 13 10:39:27 2022 ] Batch(40/243) done. Loss: 1.0002 lr:0.100000 +[ Tue Sep 13 10:39:57 2022 ] Batch(140/243) done. Loss: 0.8327 lr:0.100000 +[ Tue Sep 13 10:40:27 2022 ] Batch(240/243) done. Loss: 1.0224 lr:0.100000 +[ Tue Sep 13 10:40:27 2022 ] Eval epoch: 14 +[ Tue Sep 13 10:41:58 2022 ] Mean test loss of 796 batches: 2.6744863986968994. 
+[ Tue Sep 13 10:41:58 2022 ] Top1: 36.46% +[ Tue Sep 13 10:41:58 2022 ] Top5: 71.09% +[ Tue Sep 13 10:41:59 2022 ] Training epoch: 15 +[ Tue Sep 13 10:42:31 2022 ] Batch(97/243) done. Loss: 0.7399 lr:0.100000 +[ Tue Sep 13 10:43:01 2022 ] Batch(197/243) done. Loss: 1.1364 lr:0.100000 +[ Tue Sep 13 10:43:15 2022 ] Eval epoch: 15 +[ Tue Sep 13 10:44:45 2022 ] Mean test loss of 796 batches: 2.4184412956237793. +[ Tue Sep 13 10:44:45 2022 ] Top1: 39.46% +[ Tue Sep 13 10:44:46 2022 ] Top5: 74.47% +[ Tue Sep 13 10:44:46 2022 ] Training epoch: 16 +[ Tue Sep 13 10:45:05 2022 ] Batch(54/243) done. Loss: 1.0675 lr:0.100000 +[ Tue Sep 13 10:45:35 2022 ] Batch(154/243) done. Loss: 0.7498 lr:0.100000 +[ Tue Sep 13 10:46:02 2022 ] Eval epoch: 16 +[ Tue Sep 13 10:47:32 2022 ] Mean test loss of 796 batches: 2.5964293479919434. +[ Tue Sep 13 10:47:32 2022 ] Top1: 38.40% +[ Tue Sep 13 10:47:33 2022 ] Top5: 72.80% +[ Tue Sep 13 10:47:33 2022 ] Training epoch: 17 +[ Tue Sep 13 10:47:40 2022 ] Batch(11/243) done. Loss: 0.7426 lr:0.100000 +[ Tue Sep 13 10:48:10 2022 ] Batch(111/243) done. Loss: 0.9214 lr:0.100000 +[ Tue Sep 13 10:48:40 2022 ] Batch(211/243) done. Loss: 0.9561 lr:0.100000 +[ Tue Sep 13 10:48:49 2022 ] Eval epoch: 17 +[ Tue Sep 13 10:50:20 2022 ] Mean test loss of 796 batches: 2.5357229709625244. +[ Tue Sep 13 10:50:20 2022 ] Top1: 40.98% +[ Tue Sep 13 10:50:21 2022 ] Top5: 75.79% +[ Tue Sep 13 10:50:21 2022 ] Training epoch: 18 +[ Tue Sep 13 10:50:45 2022 ] Batch(68/243) done. Loss: 0.6866 lr:0.100000 +[ Tue Sep 13 10:51:15 2022 ] Batch(168/243) done. Loss: 0.7612 lr:0.100000 +[ Tue Sep 13 10:51:37 2022 ] Eval epoch: 18 +[ Tue Sep 13 10:53:07 2022 ] Mean test loss of 796 batches: 2.3321893215179443. +[ Tue Sep 13 10:53:08 2022 ] Top1: 39.66% +[ Tue Sep 13 10:53:08 2022 ] Top5: 76.27% +[ Tue Sep 13 10:53:08 2022 ] Training epoch: 19 +[ Tue Sep 13 10:53:19 2022 ] Batch(25/243) done. Loss: 0.7873 lr:0.100000 +[ Tue Sep 13 10:53:49 2022 ] Batch(125/243) done. 
Loss: 0.6658 lr:0.100000 +[ Tue Sep 13 10:54:19 2022 ] Batch(225/243) done. Loss: 0.8716 lr:0.100000 +[ Tue Sep 13 10:54:25 2022 ] Eval epoch: 19 +[ Tue Sep 13 10:55:54 2022 ] Mean test loss of 796 batches: 2.633082628250122. +[ Tue Sep 13 10:55:55 2022 ] Top1: 38.99% +[ Tue Sep 13 10:55:55 2022 ] Top5: 74.64% +[ Tue Sep 13 10:55:55 2022 ] Training epoch: 20 +[ Tue Sep 13 10:56:24 2022 ] Batch(82/243) done. Loss: 0.7991 lr:0.100000 +[ Tue Sep 13 10:56:54 2022 ] Batch(182/243) done. Loss: 0.7026 lr:0.100000 +[ Tue Sep 13 10:57:12 2022 ] Eval epoch: 20 +[ Tue Sep 13 10:58:42 2022 ] Mean test loss of 796 batches: 2.5945448875427246. +[ Tue Sep 13 10:58:42 2022 ] Top1: 41.06% +[ Tue Sep 13 10:58:43 2022 ] Top5: 77.05% +[ Tue Sep 13 10:58:43 2022 ] Training epoch: 21 +[ Tue Sep 13 10:58:58 2022 ] Batch(39/243) done. Loss: 0.6007 lr:0.100000 +[ Tue Sep 13 10:59:28 2022 ] Batch(139/243) done. Loss: 0.8379 lr:0.100000 +[ Tue Sep 13 10:59:58 2022 ] Batch(239/243) done. Loss: 0.6651 lr:0.100000 +[ Tue Sep 13 10:59:59 2022 ] Eval epoch: 21 +[ Tue Sep 13 11:01:29 2022 ] Mean test loss of 796 batches: 2.3643059730529785. +[ Tue Sep 13 11:01:30 2022 ] Top1: 44.31% +[ Tue Sep 13 11:01:30 2022 ] Top5: 77.77% +[ Tue Sep 13 11:01:30 2022 ] Training epoch: 22 +[ Tue Sep 13 11:02:03 2022 ] Batch(96/243) done. Loss: 0.7236 lr:0.100000 +[ Tue Sep 13 11:02:33 2022 ] Batch(196/243) done. Loss: 0.6861 lr:0.100000 +[ Tue Sep 13 11:02:47 2022 ] Eval epoch: 22 +[ Tue Sep 13 11:04:17 2022 ] Mean test loss of 796 batches: 2.5485894680023193. +[ Tue Sep 13 11:04:17 2022 ] Top1: 41.21% +[ Tue Sep 13 11:04:18 2022 ] Top5: 76.45% +[ Tue Sep 13 11:04:18 2022 ] Training epoch: 23 +[ Tue Sep 13 11:04:37 2022 ] Batch(53/243) done. Loss: 0.4884 lr:0.100000 +[ Tue Sep 13 11:05:07 2022 ] Batch(153/243) done. Loss: 0.6766 lr:0.100000 +[ Tue Sep 13 11:05:34 2022 ] Eval epoch: 23 +[ Tue Sep 13 11:07:04 2022 ] Mean test loss of 796 batches: 2.6978600025177. 
+[ Tue Sep 13 11:07:04 2022 ] Top1: 41.24% +[ Tue Sep 13 11:07:05 2022 ] Top5: 77.83% +[ Tue Sep 13 11:07:05 2022 ] Training epoch: 24 +[ Tue Sep 13 11:07:11 2022 ] Batch(10/243) done. Loss: 0.4772 lr:0.100000 +[ Tue Sep 13 11:07:41 2022 ] Batch(110/243) done. Loss: 0.4487 lr:0.100000 +[ Tue Sep 13 11:08:11 2022 ] Batch(210/243) done. Loss: 0.7073 lr:0.100000 +[ Tue Sep 13 11:08:21 2022 ] Eval epoch: 24 +[ Tue Sep 13 11:09:51 2022 ] Mean test loss of 796 batches: 9.196643829345703. +[ Tue Sep 13 11:09:51 2022 ] Top1: 9.55% +[ Tue Sep 13 11:09:51 2022 ] Top5: 31.08% +[ Tue Sep 13 11:09:52 2022 ] Training epoch: 25 +[ Tue Sep 13 11:10:15 2022 ] Batch(67/243) done. Loss: 0.5691 lr:0.100000 +[ Tue Sep 13 11:10:45 2022 ] Batch(167/243) done. Loss: 0.7838 lr:0.100000 +[ Tue Sep 13 11:11:08 2022 ] Eval epoch: 25 +[ Tue Sep 13 11:12:38 2022 ] Mean test loss of 796 batches: 2.3803958892822266. +[ Tue Sep 13 11:12:39 2022 ] Top1: 44.11% +[ Tue Sep 13 11:12:39 2022 ] Top5: 79.53% +[ Tue Sep 13 11:12:39 2022 ] Training epoch: 26 +[ Tue Sep 13 11:12:50 2022 ] Batch(24/243) done. Loss: 0.6260 lr:0.100000 +[ Tue Sep 13 11:13:20 2022 ] Batch(124/243) done. Loss: 0.5755 lr:0.100000 +[ Tue Sep 13 11:13:50 2022 ] Batch(224/243) done. Loss: 0.5419 lr:0.100000 +[ Tue Sep 13 11:13:55 2022 ] Eval epoch: 26 +[ Tue Sep 13 11:15:25 2022 ] Mean test loss of 796 batches: 2.658560037612915. +[ Tue Sep 13 11:15:25 2022 ] Top1: 42.29% +[ Tue Sep 13 11:15:26 2022 ] Top5: 75.35% +[ Tue Sep 13 11:15:26 2022 ] Training epoch: 27 +[ Tue Sep 13 11:15:53 2022 ] Batch(81/243) done. Loss: 0.6493 lr:0.100000 +[ Tue Sep 13 11:16:23 2022 ] Batch(181/243) done. Loss: 0.4724 lr:0.100000 +[ Tue Sep 13 11:16:42 2022 ] Eval epoch: 27 +[ Tue Sep 13 11:18:11 2022 ] Mean test loss of 796 batches: 2.3870038986206055. +[ Tue Sep 13 11:18:12 2022 ] Top1: 45.12% +[ Tue Sep 13 11:18:12 2022 ] Top5: 79.03% +[ Tue Sep 13 11:18:12 2022 ] Training epoch: 28 +[ Tue Sep 13 11:18:28 2022 ] Batch(38/243) done. 
Loss: 0.5939 lr:0.100000 +[ Tue Sep 13 11:18:57 2022 ] Batch(138/243) done. Loss: 0.4462 lr:0.100000 +[ Tue Sep 13 11:19:27 2022 ] Batch(238/243) done. Loss: 0.4096 lr:0.100000 +[ Tue Sep 13 11:19:29 2022 ] Eval epoch: 28 +[ Tue Sep 13 11:20:59 2022 ] Mean test loss of 796 batches: 2.431807279586792. +[ Tue Sep 13 11:21:00 2022 ] Top1: 45.66% +[ Tue Sep 13 11:21:00 2022 ] Top5: 78.43% +[ Tue Sep 13 11:21:00 2022 ] Training epoch: 29 +[ Tue Sep 13 11:21:32 2022 ] Batch(95/243) done. Loss: 0.5137 lr:0.100000 +[ Tue Sep 13 11:22:02 2022 ] Batch(195/243) done. Loss: 0.4970 lr:0.100000 +[ Tue Sep 13 11:22:16 2022 ] Eval epoch: 29 +[ Tue Sep 13 11:23:46 2022 ] Mean test loss of 796 batches: 2.3383309841156006. +[ Tue Sep 13 11:23:47 2022 ] Top1: 45.98% +[ Tue Sep 13 11:23:47 2022 ] Top5: 78.55% +[ Tue Sep 13 11:23:47 2022 ] Training epoch: 30 +[ Tue Sep 13 11:24:06 2022 ] Batch(52/243) done. Loss: 0.5574 lr:0.100000 +[ Tue Sep 13 11:24:36 2022 ] Batch(152/243) done. Loss: 0.3848 lr:0.100000 +[ Tue Sep 13 11:25:03 2022 ] Eval epoch: 30 +[ Tue Sep 13 11:26:32 2022 ] Mean test loss of 796 batches: 2.407961368560791. +[ Tue Sep 13 11:26:33 2022 ] Top1: 45.24% +[ Tue Sep 13 11:26:33 2022 ] Top5: 79.51% +[ Tue Sep 13 11:26:33 2022 ] Training epoch: 31 +[ Tue Sep 13 11:26:39 2022 ] Batch(9/243) done. Loss: 0.5381 lr:0.100000 +[ Tue Sep 13 11:27:09 2022 ] Batch(109/243) done. Loss: 0.7146 lr:0.100000 +[ Tue Sep 13 11:27:39 2022 ] Batch(209/243) done. Loss: 0.7419 lr:0.100000 +[ Tue Sep 13 11:27:49 2022 ] Eval epoch: 31 +[ Tue Sep 13 11:29:19 2022 ] Mean test loss of 796 batches: 2.7233266830444336. +[ Tue Sep 13 11:29:19 2022 ] Top1: 42.41% +[ Tue Sep 13 11:29:20 2022 ] Top5: 77.12% +[ Tue Sep 13 11:29:20 2022 ] Training epoch: 32 +[ Tue Sep 13 11:29:43 2022 ] Batch(66/243) done. Loss: 0.3441 lr:0.100000 +[ Tue Sep 13 11:30:13 2022 ] Batch(166/243) done. 
Loss: 0.3795 lr:0.100000 +[ Tue Sep 13 11:30:36 2022 ] Eval epoch: 32 +[ Tue Sep 13 11:32:05 2022 ] Mean test loss of 796 batches: 2.897885322570801. +[ Tue Sep 13 11:32:06 2022 ] Top1: 42.48% +[ Tue Sep 13 11:32:06 2022 ] Top5: 78.75% +[ Tue Sep 13 11:32:06 2022 ] Training epoch: 33 +[ Tue Sep 13 11:32:17 2022 ] Batch(23/243) done. Loss: 0.3501 lr:0.100000 +[ Tue Sep 13 11:32:47 2022 ] Batch(123/243) done. Loss: 0.6153 lr:0.100000 +[ Tue Sep 13 11:33:17 2022 ] Batch(223/243) done. Loss: 0.6015 lr:0.100000 +[ Tue Sep 13 11:33:23 2022 ] Eval epoch: 33 +[ Tue Sep 13 11:34:52 2022 ] Mean test loss of 796 batches: 2.5278916358947754. +[ Tue Sep 13 11:34:53 2022 ] Top1: 45.33% +[ Tue Sep 13 11:34:53 2022 ] Top5: 79.27% +[ Tue Sep 13 11:34:53 2022 ] Training epoch: 34 +[ Tue Sep 13 11:35:21 2022 ] Batch(80/243) done. Loss: 0.7118 lr:0.100000 +[ Tue Sep 13 11:35:51 2022 ] Batch(180/243) done. Loss: 0.3857 lr:0.100000 +[ Tue Sep 13 11:36:10 2022 ] Eval epoch: 34 +[ Tue Sep 13 11:37:39 2022 ] Mean test loss of 796 batches: 2.578415870666504. +[ Tue Sep 13 11:37:40 2022 ] Top1: 44.19% +[ Tue Sep 13 11:37:40 2022 ] Top5: 77.54% +[ Tue Sep 13 11:37:40 2022 ] Training epoch: 35 +[ Tue Sep 13 11:37:55 2022 ] Batch(37/243) done. Loss: 0.3720 lr:0.100000 +[ Tue Sep 13 11:38:25 2022 ] Batch(137/243) done. Loss: 0.4876 lr:0.100000 +[ Tue Sep 13 11:38:55 2022 ] Batch(237/243) done. Loss: 0.5665 lr:0.100000 +[ Tue Sep 13 11:38:56 2022 ] Eval epoch: 35 +[ Tue Sep 13 11:40:27 2022 ] Mean test loss of 796 batches: 2.3181099891662598. +[ Tue Sep 13 11:40:27 2022 ] Top1: 47.44% +[ Tue Sep 13 11:40:28 2022 ] Top5: 80.46% +[ Tue Sep 13 11:40:28 2022 ] Training epoch: 36 +[ Tue Sep 13 11:41:00 2022 ] Batch(94/243) done. Loss: 0.7410 lr:0.100000 +[ Tue Sep 13 11:41:30 2022 ] Batch(194/243) done. Loss: 0.6214 lr:0.100000 +[ Tue Sep 13 11:41:44 2022 ] Eval epoch: 36 +[ Tue Sep 13 11:43:15 2022 ] Mean test loss of 796 batches: 3.2811484336853027. 
+[ Tue Sep 13 11:43:15 2022 ] Top1: 41.43% +[ Tue Sep 13 11:43:15 2022 ] Top5: 76.30% +[ Tue Sep 13 11:43:16 2022 ] Training epoch: 37 +[ Tue Sep 13 11:43:35 2022 ] Batch(51/243) done. Loss: 0.5524 lr:0.100000 +[ Tue Sep 13 11:44:05 2022 ] Batch(151/243) done. Loss: 0.6357 lr:0.100000 +[ Tue Sep 13 11:44:32 2022 ] Eval epoch: 37 +[ Tue Sep 13 11:46:02 2022 ] Mean test loss of 796 batches: 2.4665732383728027. +[ Tue Sep 13 11:46:03 2022 ] Top1: 47.75% +[ Tue Sep 13 11:46:03 2022 ] Top5: 79.85% +[ Tue Sep 13 11:46:03 2022 ] Training epoch: 38 +[ Tue Sep 13 11:46:09 2022 ] Batch(8/243) done. Loss: 0.5600 lr:0.100000 +[ Tue Sep 13 11:46:39 2022 ] Batch(108/243) done. Loss: 0.4920 lr:0.100000 +[ Tue Sep 13 11:47:09 2022 ] Batch(208/243) done. Loss: 0.5169 lr:0.100000 +[ Tue Sep 13 11:47:19 2022 ] Eval epoch: 38 +[ Tue Sep 13 11:48:49 2022 ] Mean test loss of 796 batches: 2.9676082134246826. +[ Tue Sep 13 11:48:50 2022 ] Top1: 43.14% +[ Tue Sep 13 11:48:50 2022 ] Top5: 78.24% +[ Tue Sep 13 11:48:50 2022 ] Training epoch: 39 +[ Tue Sep 13 11:49:13 2022 ] Batch(65/243) done. Loss: 0.4457 lr:0.100000 +[ Tue Sep 13 11:49:43 2022 ] Batch(165/243) done. Loss: 0.4225 lr:0.100000 +[ Tue Sep 13 11:50:06 2022 ] Eval epoch: 39 +[ Tue Sep 13 11:51:36 2022 ] Mean test loss of 796 batches: 2.3777058124542236. +[ Tue Sep 13 11:51:36 2022 ] Top1: 48.60% +[ Tue Sep 13 11:51:37 2022 ] Top5: 81.73% +[ Tue Sep 13 11:51:37 2022 ] Training epoch: 40 +[ Tue Sep 13 11:51:47 2022 ] Batch(22/243) done. Loss: 0.2437 lr:0.100000 +[ Tue Sep 13 11:52:17 2022 ] Batch(122/243) done. Loss: 0.6209 lr:0.100000 +[ Tue Sep 13 11:52:46 2022 ] Batch(222/243) done. Loss: 0.4602 lr:0.100000 +[ Tue Sep 13 11:52:53 2022 ] Eval epoch: 40 +[ Tue Sep 13 11:54:23 2022 ] Mean test loss of 796 batches: 2.2834651470184326. +[ Tue Sep 13 11:54:23 2022 ] Top1: 48.18% +[ Tue Sep 13 11:54:23 2022 ] Top5: 81.19% +[ Tue Sep 13 11:54:24 2022 ] Training epoch: 41 +[ Tue Sep 13 11:54:51 2022 ] Batch(79/243) done. 
Loss: 0.3084 lr:0.100000 +[ Tue Sep 13 11:55:21 2022 ] Batch(179/243) done. Loss: 0.4477 lr:0.100000 +[ Tue Sep 13 11:55:40 2022 ] Eval epoch: 41 +[ Tue Sep 13 11:57:11 2022 ] Mean test loss of 796 batches: 2.2875711917877197. +[ Tue Sep 13 11:57:12 2022 ] Top1: 47.92% +[ Tue Sep 13 11:57:12 2022 ] Top5: 81.64% +[ Tue Sep 13 11:57:12 2022 ] Training epoch: 42 +[ Tue Sep 13 11:57:27 2022 ] Batch(36/243) done. Loss: 0.5149 lr:0.100000 +[ Tue Sep 13 11:57:56 2022 ] Batch(136/243) done. Loss: 0.3281 lr:0.100000 +[ Tue Sep 13 11:58:26 2022 ] Batch(236/243) done. Loss: 0.5555 lr:0.100000 +[ Tue Sep 13 11:58:28 2022 ] Eval epoch: 42 +[ Tue Sep 13 11:59:59 2022 ] Mean test loss of 796 batches: 2.9600913524627686. +[ Tue Sep 13 11:59:59 2022 ] Top1: 42.34% +[ Tue Sep 13 11:59:59 2022 ] Top5: 76.59% +[ Tue Sep 13 12:00:00 2022 ] Training epoch: 43 +[ Tue Sep 13 12:00:31 2022 ] Batch(93/243) done. Loss: 0.5678 lr:0.100000 +[ Tue Sep 13 12:01:01 2022 ] Batch(193/243) done. Loss: 0.2838 lr:0.100000 +[ Tue Sep 13 12:01:16 2022 ] Eval epoch: 43 +[ Tue Sep 13 12:02:46 2022 ] Mean test loss of 796 batches: 2.416943311691284. +[ Tue Sep 13 12:02:47 2022 ] Top1: 48.39% +[ Tue Sep 13 12:02:47 2022 ] Top5: 80.53% +[ Tue Sep 13 12:02:47 2022 ] Training epoch: 44 +[ Tue Sep 13 12:03:06 2022 ] Batch(50/243) done. Loss: 0.4872 lr:0.100000 +[ Tue Sep 13 12:03:36 2022 ] Batch(150/243) done. Loss: 0.5165 lr:0.100000 +[ Tue Sep 13 12:04:03 2022 ] Eval epoch: 44 +[ Tue Sep 13 12:05:34 2022 ] Mean test loss of 796 batches: 2.670891284942627. +[ Tue Sep 13 12:05:34 2022 ] Top1: 46.07% +[ Tue Sep 13 12:05:34 2022 ] Top5: 79.14% +[ Tue Sep 13 12:05:35 2022 ] Training epoch: 45 +[ Tue Sep 13 12:05:41 2022 ] Batch(7/243) done. Loss: 0.4505 lr:0.100000 +[ Tue Sep 13 12:06:11 2022 ] Batch(107/243) done. Loss: 0.5073 lr:0.100000 +[ Tue Sep 13 12:06:41 2022 ] Batch(207/243) done. 
Loss: 0.3428 lr:0.100000 +[ Tue Sep 13 12:06:51 2022 ] Eval epoch: 45 +[ Tue Sep 13 12:08:21 2022 ] Mean test loss of 796 batches: 2.291255474090576. +[ Tue Sep 13 12:08:22 2022 ] Top1: 47.96% +[ Tue Sep 13 12:08:22 2022 ] Top5: 81.30% +[ Tue Sep 13 12:08:22 2022 ] Training epoch: 46 +[ Tue Sep 13 12:08:45 2022 ] Batch(64/243) done. Loss: 0.3668 lr:0.100000 +[ Tue Sep 13 12:09:15 2022 ] Batch(164/243) done. Loss: 0.3639 lr:0.100000 +[ Tue Sep 13 12:09:38 2022 ] Eval epoch: 46 +[ Tue Sep 13 12:11:09 2022 ] Mean test loss of 796 batches: 3.0566868782043457. +[ Tue Sep 13 12:11:09 2022 ] Top1: 43.69% +[ Tue Sep 13 12:11:09 2022 ] Top5: 76.28% +[ Tue Sep 13 12:11:10 2022 ] Training epoch: 47 +[ Tue Sep 13 12:11:19 2022 ] Batch(21/243) done. Loss: 0.3062 lr:0.100000 +[ Tue Sep 13 12:11:49 2022 ] Batch(121/243) done. Loss: 0.4540 lr:0.100000 +[ Tue Sep 13 12:12:19 2022 ] Batch(221/243) done. Loss: 0.5952 lr:0.100000 +[ Tue Sep 13 12:12:26 2022 ] Eval epoch: 47 +[ Tue Sep 13 12:13:55 2022 ] Mean test loss of 796 batches: 14.264094352722168. +[ Tue Sep 13 12:13:56 2022 ] Top1: 5.31% +[ Tue Sep 13 12:13:56 2022 ] Top5: 16.96% +[ Tue Sep 13 12:13:56 2022 ] Training epoch: 48 +[ Tue Sep 13 12:14:23 2022 ] Batch(78/243) done. Loss: 0.3861 lr:0.100000 +[ Tue Sep 13 12:14:53 2022 ] Batch(178/243) done. Loss: 0.5330 lr:0.100000 +[ Tue Sep 13 12:15:12 2022 ] Eval epoch: 48 +[ Tue Sep 13 12:16:42 2022 ] Mean test loss of 796 batches: 2.734037399291992. +[ Tue Sep 13 12:16:42 2022 ] Top1: 42.76% +[ Tue Sep 13 12:16:43 2022 ] Top5: 77.40% +[ Tue Sep 13 12:16:43 2022 ] Training epoch: 49 +[ Tue Sep 13 12:16:57 2022 ] Batch(35/243) done. Loss: 0.3180 lr:0.100000 +[ Tue Sep 13 12:17:27 2022 ] Batch(135/243) done. Loss: 0.5142 lr:0.100000 +[ Tue Sep 13 12:17:58 2022 ] Batch(235/243) done. Loss: 0.4537 lr:0.100000 +[ Tue Sep 13 12:18:00 2022 ] Eval epoch: 49 +[ Tue Sep 13 12:19:30 2022 ] Mean test loss of 796 batches: 2.4061684608459473. 
+[ Tue Sep 13 12:19:30 2022 ] Top1: 47.99% +[ Tue Sep 13 12:19:31 2022 ] Top5: 81.06% +[ Tue Sep 13 12:19:31 2022 ] Training epoch: 50 +[ Tue Sep 13 12:20:02 2022 ] Batch(92/243) done. Loss: 0.5954 lr:0.100000 +[ Tue Sep 13 12:20:32 2022 ] Batch(192/243) done. Loss: 0.6014 lr:0.100000 +[ Tue Sep 13 12:20:47 2022 ] Eval epoch: 50 +[ Tue Sep 13 12:22:17 2022 ] Mean test loss of 796 batches: 2.785815954208374. +[ Tue Sep 13 12:22:18 2022 ] Top1: 45.54% +[ Tue Sep 13 12:22:18 2022 ] Top5: 78.75% +[ Tue Sep 13 12:22:18 2022 ] Training epoch: 51 +[ Tue Sep 13 12:22:36 2022 ] Batch(49/243) done. Loss: 0.4449 lr:0.100000 +[ Tue Sep 13 12:23:06 2022 ] Batch(149/243) done. Loss: 0.2752 lr:0.100000 +[ Tue Sep 13 12:23:34 2022 ] Eval epoch: 51 +[ Tue Sep 13 12:25:05 2022 ] Mean test loss of 796 batches: 2.936903476715088. +[ Tue Sep 13 12:25:05 2022 ] Top1: 43.72% +[ Tue Sep 13 12:25:05 2022 ] Top5: 76.89% +[ Tue Sep 13 12:25:06 2022 ] Training epoch: 52 +[ Tue Sep 13 12:25:11 2022 ] Batch(6/243) done. Loss: 0.1977 lr:0.100000 +[ Tue Sep 13 12:25:41 2022 ] Batch(106/243) done. Loss: 0.3755 lr:0.100000 +[ Tue Sep 13 12:26:10 2022 ] Batch(206/243) done. Loss: 0.4178 lr:0.100000 +[ Tue Sep 13 12:26:21 2022 ] Eval epoch: 52 +[ Tue Sep 13 12:27:52 2022 ] Mean test loss of 796 batches: 2.6056711673736572. +[ Tue Sep 13 12:27:53 2022 ] Top1: 47.38% +[ Tue Sep 13 12:27:53 2022 ] Top5: 80.59% +[ Tue Sep 13 12:27:53 2022 ] Training epoch: 53 +[ Tue Sep 13 12:28:15 2022 ] Batch(63/243) done. Loss: 0.5516 lr:0.100000 +[ Tue Sep 13 12:28:46 2022 ] Batch(163/243) done. Loss: 0.4919 lr:0.100000 +[ Tue Sep 13 12:29:10 2022 ] Eval epoch: 53 +[ Tue Sep 13 12:30:40 2022 ] Mean test loss of 796 batches: 2.7067835330963135. +[ Tue Sep 13 12:30:40 2022 ] Top1: 46.74% +[ Tue Sep 13 12:30:40 2022 ] Top5: 80.81% +[ Tue Sep 13 12:30:41 2022 ] Training epoch: 54 +[ Tue Sep 13 12:30:50 2022 ] Batch(20/243) done. Loss: 0.2022 lr:0.100000 +[ Tue Sep 13 12:31:20 2022 ] Batch(120/243) done. 
Loss: 0.3183 lr:0.100000 +[ Tue Sep 13 12:31:50 2022 ] Batch(220/243) done. Loss: 0.4903 lr:0.100000 +[ Tue Sep 13 12:31:57 2022 ] Eval epoch: 54 +[ Tue Sep 13 12:33:27 2022 ] Mean test loss of 796 batches: 2.815014362335205. +[ Tue Sep 13 12:33:27 2022 ] Top1: 45.23% +[ Tue Sep 13 12:33:27 2022 ] Top5: 78.11% +[ Tue Sep 13 12:33:28 2022 ] Training epoch: 55 +[ Tue Sep 13 12:33:54 2022 ] Batch(77/243) done. Loss: 0.4042 lr:0.100000 +[ Tue Sep 13 12:34:24 2022 ] Batch(177/243) done. Loss: 0.5307 lr:0.100000 +[ Tue Sep 13 12:34:44 2022 ] Eval epoch: 55 +[ Tue Sep 13 12:36:13 2022 ] Mean test loss of 796 batches: 2.895005941390991. +[ Tue Sep 13 12:36:13 2022 ] Top1: 45.16% +[ Tue Sep 13 12:36:14 2022 ] Top5: 77.35% +[ Tue Sep 13 12:36:14 2022 ] Training epoch: 56 +[ Tue Sep 13 12:36:28 2022 ] Batch(34/243) done. Loss: 0.6129 lr:0.100000 +[ Tue Sep 13 12:36:58 2022 ] Batch(134/243) done. Loss: 0.3129 lr:0.100000 +[ Tue Sep 13 12:37:28 2022 ] Batch(234/243) done. Loss: 0.5452 lr:0.100000 +[ Tue Sep 13 12:37:30 2022 ] Eval epoch: 56 +[ Tue Sep 13 12:39:00 2022 ] Mean test loss of 796 batches: 3.1033451557159424. +[ Tue Sep 13 12:39:01 2022 ] Top1: 44.08% +[ Tue Sep 13 12:39:01 2022 ] Top5: 78.70% +[ Tue Sep 13 12:39:01 2022 ] Training epoch: 57 +[ Tue Sep 13 12:39:32 2022 ] Batch(91/243) done. Loss: 0.2330 lr:0.100000 +[ Tue Sep 13 12:40:02 2022 ] Batch(191/243) done. Loss: 0.5911 lr:0.100000 +[ Tue Sep 13 12:40:17 2022 ] Eval epoch: 57 +[ Tue Sep 13 12:41:47 2022 ] Mean test loss of 796 batches: 28.345247268676758. +[ Tue Sep 13 12:41:47 2022 ] Top1: 5.01% +[ Tue Sep 13 12:41:47 2022 ] Top5: 13.06% +[ Tue Sep 13 12:41:48 2022 ] Training epoch: 58 +[ Tue Sep 13 12:42:05 2022 ] Batch(48/243) done. Loss: 0.3507 lr:0.100000 +[ Tue Sep 13 12:42:35 2022 ] Batch(148/243) done. Loss: 0.3725 lr:0.100000 +[ Tue Sep 13 12:43:03 2022 ] Eval epoch: 58 +[ Tue Sep 13 12:44:34 2022 ] Mean test loss of 796 batches: 2.8092286586761475. 
+[ Tue Sep 13 12:44:34 2022 ] Top1: 46.88% +[ Tue Sep 13 12:44:35 2022 ] Top5: 79.48% +[ Tue Sep 13 12:44:35 2022 ] Training epoch: 59 +[ Tue Sep 13 12:44:40 2022 ] Batch(5/243) done. Loss: 0.3029 lr:0.100000 +[ Tue Sep 13 12:45:10 2022 ] Batch(105/243) done. Loss: 0.2342 lr:0.100000 +[ Tue Sep 13 12:45:40 2022 ] Batch(205/243) done. Loss: 0.6002 lr:0.100000 +[ Tue Sep 13 12:45:51 2022 ] Eval epoch: 59 +[ Tue Sep 13 12:47:21 2022 ] Mean test loss of 796 batches: 2.6384665966033936. +[ Tue Sep 13 12:47:21 2022 ] Top1: 46.77% +[ Tue Sep 13 12:47:22 2022 ] Top5: 79.31% +[ Tue Sep 13 12:47:22 2022 ] Training epoch: 60 +[ Tue Sep 13 12:47:44 2022 ] Batch(62/243) done. Loss: 0.2674 lr:0.100000 +[ Tue Sep 13 12:48:14 2022 ] Batch(162/243) done. Loss: 0.3390 lr:0.100000 +[ Tue Sep 13 12:48:38 2022 ] Eval epoch: 60 +[ Tue Sep 13 12:50:07 2022 ] Mean test loss of 796 batches: 3.1235268115997314. +[ Tue Sep 13 12:50:08 2022 ] Top1: 43.82% +[ Tue Sep 13 12:50:08 2022 ] Top5: 77.60% +[ Tue Sep 13 12:50:08 2022 ] Training epoch: 61 +[ Tue Sep 13 12:50:17 2022 ] Batch(19/243) done. Loss: 0.2343 lr:0.010000 +[ Tue Sep 13 12:50:47 2022 ] Batch(119/243) done. Loss: 0.2469 lr:0.010000 +[ Tue Sep 13 12:51:17 2022 ] Batch(219/243) done. Loss: 0.1518 lr:0.010000 +[ Tue Sep 13 12:51:25 2022 ] Eval epoch: 61 +[ Tue Sep 13 12:52:54 2022 ] Mean test loss of 796 batches: 2.237048864364624. +[ Tue Sep 13 12:52:55 2022 ] Top1: 55.25% +[ Tue Sep 13 12:52:55 2022 ] Top5: 85.63% +[ Tue Sep 13 12:52:55 2022 ] Training epoch: 62 +[ Tue Sep 13 12:53:21 2022 ] Batch(76/243) done. Loss: 0.1355 lr:0.010000 +[ Tue Sep 13 12:53:51 2022 ] Batch(176/243) done. Loss: 0.0464 lr:0.010000 +[ Tue Sep 13 12:54:11 2022 ] Eval epoch: 62 +[ Tue Sep 13 12:55:41 2022 ] Mean test loss of 796 batches: 2.2395992279052734. +[ Tue Sep 13 12:55:42 2022 ] Top1: 55.93% +[ Tue Sep 13 12:55:42 2022 ] Top5: 85.76% +[ Tue Sep 13 12:55:42 2022 ] Training epoch: 63 +[ Tue Sep 13 12:55:56 2022 ] Batch(33/243) done. 
Loss: 0.0812 lr:0.010000 +[ Tue Sep 13 12:56:26 2022 ] Batch(133/243) done. Loss: 0.0868 lr:0.010000 +[ Tue Sep 13 12:56:56 2022 ] Batch(233/243) done. Loss: 0.1094 lr:0.010000 +[ Tue Sep 13 12:56:58 2022 ] Eval epoch: 63 +[ Tue Sep 13 12:58:29 2022 ] Mean test loss of 796 batches: 2.2251181602478027. +[ Tue Sep 13 12:58:29 2022 ] Top1: 56.46% +[ Tue Sep 13 12:58:30 2022 ] Top5: 86.06% +[ Tue Sep 13 12:58:30 2022 ] Training epoch: 64 +[ Tue Sep 13 12:59:00 2022 ] Batch(90/243) done. Loss: 0.1751 lr:0.010000 +[ Tue Sep 13 12:59:30 2022 ] Batch(190/243) done. Loss: 0.0929 lr:0.010000 +[ Tue Sep 13 12:59:46 2022 ] Eval epoch: 64 +[ Tue Sep 13 13:01:16 2022 ] Mean test loss of 796 batches: 2.337252378463745. +[ Tue Sep 13 13:01:17 2022 ] Top1: 55.47% +[ Tue Sep 13 13:01:17 2022 ] Top5: 85.48% +[ Tue Sep 13 13:01:17 2022 ] Training epoch: 65 +[ Tue Sep 13 13:01:35 2022 ] Batch(47/243) done. Loss: 0.0395 lr:0.010000 +[ Tue Sep 13 13:02:05 2022 ] Batch(147/243) done. Loss: 0.1822 lr:0.010000 +[ Tue Sep 13 13:02:34 2022 ] Eval epoch: 65 +[ Tue Sep 13 13:04:04 2022 ] Mean test loss of 796 batches: 2.358794927597046. +[ Tue Sep 13 13:04:04 2022 ] Top1: 55.68% +[ Tue Sep 13 13:04:04 2022 ] Top5: 85.67% +[ Tue Sep 13 13:04:05 2022 ] Training epoch: 66 +[ Tue Sep 13 13:04:10 2022 ] Batch(4/243) done. Loss: 0.1156 lr:0.010000 +[ Tue Sep 13 13:04:40 2022 ] Batch(104/243) done. Loss: 0.0366 lr:0.010000 +[ Tue Sep 13 13:05:10 2022 ] Batch(204/243) done. Loss: 0.1222 lr:0.010000 +[ Tue Sep 13 13:05:22 2022 ] Eval epoch: 66 +[ Tue Sep 13 13:06:52 2022 ] Mean test loss of 796 batches: 2.4481406211853027. +[ Tue Sep 13 13:06:53 2022 ] Top1: 55.74% +[ Tue Sep 13 13:06:53 2022 ] Top5: 85.42% +[ Tue Sep 13 13:06:53 2022 ] Training epoch: 67 +[ Tue Sep 13 13:07:16 2022 ] Batch(61/243) done. Loss: 0.0795 lr:0.010000 +[ Tue Sep 13 13:07:46 2022 ] Batch(161/243) done. 
Loss: 0.0534 lr:0.010000 +[ Tue Sep 13 13:08:10 2022 ] Eval epoch: 67 +[ Tue Sep 13 13:09:40 2022 ] Mean test loss of 796 batches: 2.4075491428375244. +[ Tue Sep 13 13:09:40 2022 ] Top1: 55.97% +[ Tue Sep 13 13:09:41 2022 ] Top5: 85.72% +[ Tue Sep 13 13:09:41 2022 ] Training epoch: 68 +[ Tue Sep 13 13:09:50 2022 ] Batch(18/243) done. Loss: 0.1113 lr:0.010000 +[ Tue Sep 13 13:10:20 2022 ] Batch(118/243) done. Loss: 0.0638 lr:0.010000 +[ Tue Sep 13 13:10:50 2022 ] Batch(218/243) done. Loss: 0.0918 lr:0.010000 +[ Tue Sep 13 13:10:58 2022 ] Eval epoch: 68 +[ Tue Sep 13 13:12:27 2022 ] Mean test loss of 796 batches: 2.3931756019592285. +[ Tue Sep 13 13:12:28 2022 ] Top1: 56.16% +[ Tue Sep 13 13:12:28 2022 ] Top5: 85.73% +[ Tue Sep 13 13:12:28 2022 ] Training epoch: 69 +[ Tue Sep 13 13:12:54 2022 ] Batch(75/243) done. Loss: 0.1031 lr:0.010000 +[ Tue Sep 13 13:13:24 2022 ] Batch(175/243) done. Loss: 0.0616 lr:0.010000 +[ Tue Sep 13 13:13:45 2022 ] Eval epoch: 69 +[ Tue Sep 13 13:15:14 2022 ] Mean test loss of 796 batches: 2.514704942703247. +[ Tue Sep 13 13:15:15 2022 ] Top1: 55.21% +[ Tue Sep 13 13:15:15 2022 ] Top5: 85.25% +[ Tue Sep 13 13:15:15 2022 ] Training epoch: 70 +[ Tue Sep 13 13:15:29 2022 ] Batch(32/243) done. Loss: 0.0328 lr:0.010000 +[ Tue Sep 13 13:15:58 2022 ] Batch(132/243) done. Loss: 0.0665 lr:0.010000 +[ Tue Sep 13 13:16:29 2022 ] Batch(232/243) done. Loss: 0.1057 lr:0.010000 +[ Tue Sep 13 13:16:32 2022 ] Eval epoch: 70 +[ Tue Sep 13 13:18:03 2022 ] Mean test loss of 796 batches: 2.4123992919921875. +[ Tue Sep 13 13:18:03 2022 ] Top1: 55.97% +[ Tue Sep 13 13:18:03 2022 ] Top5: 85.94% +[ Tue Sep 13 13:18:04 2022 ] Training epoch: 71 +[ Tue Sep 13 13:18:34 2022 ] Batch(89/243) done. Loss: 0.0821 lr:0.010000 +[ Tue Sep 13 13:19:04 2022 ] Batch(189/243) done. Loss: 0.0117 lr:0.010000 +[ Tue Sep 13 13:19:20 2022 ] Eval epoch: 71 +[ Tue Sep 13 13:20:50 2022 ] Mean test loss of 796 batches: 2.4410130977630615. 
+[ Tue Sep 13 13:20:50 2022 ] Top1: 56.26% +[ Tue Sep 13 13:20:51 2022 ] Top5: 86.02% +[ Tue Sep 13 13:20:51 2022 ] Training epoch: 72 +[ Tue Sep 13 13:21:08 2022 ] Batch(46/243) done. Loss: 0.0303 lr:0.010000 +[ Tue Sep 13 13:21:39 2022 ] Batch(146/243) done. Loss: 0.0665 lr:0.010000 +[ Tue Sep 13 13:22:08 2022 ] Eval epoch: 72 +[ Tue Sep 13 13:23:37 2022 ] Mean test loss of 796 batches: 2.438089370727539. +[ Tue Sep 13 13:23:37 2022 ] Top1: 56.12% +[ Tue Sep 13 13:23:37 2022 ] Top5: 85.96% +[ Tue Sep 13 13:23:38 2022 ] Training epoch: 73 +[ Tue Sep 13 13:23:42 2022 ] Batch(3/243) done. Loss: 0.0759 lr:0.010000 +[ Tue Sep 13 13:24:12 2022 ] Batch(103/243) done. Loss: 0.1013 lr:0.010000 +[ Tue Sep 13 13:24:42 2022 ] Batch(203/243) done. Loss: 0.0546 lr:0.010000 +[ Tue Sep 13 13:24:54 2022 ] Eval epoch: 73 +[ Tue Sep 13 13:26:24 2022 ] Mean test loss of 796 batches: 2.5160343647003174. +[ Tue Sep 13 13:26:24 2022 ] Top1: 55.85% +[ Tue Sep 13 13:26:24 2022 ] Top5: 85.90% +[ Tue Sep 13 13:26:25 2022 ] Training epoch: 74 +[ Tue Sep 13 13:26:46 2022 ] Batch(60/243) done. Loss: 0.0480 lr:0.010000 +[ Tue Sep 13 13:27:16 2022 ] Batch(160/243) done. Loss: 0.0445 lr:0.010000 +[ Tue Sep 13 13:27:41 2022 ] Eval epoch: 74 +[ Tue Sep 13 13:29:11 2022 ] Mean test loss of 796 batches: 2.597693681716919. +[ Tue Sep 13 13:29:11 2022 ] Top1: 55.46% +[ Tue Sep 13 13:29:11 2022 ] Top5: 85.56% +[ Tue Sep 13 13:29:12 2022 ] Training epoch: 75 +[ Tue Sep 13 13:29:20 2022 ] Batch(17/243) done. Loss: 0.0589 lr:0.010000 +[ Tue Sep 13 13:29:50 2022 ] Batch(117/243) done. Loss: 0.0458 lr:0.010000 +[ Tue Sep 13 13:30:20 2022 ] Batch(217/243) done. Loss: 0.0797 lr:0.010000 +[ Tue Sep 13 13:30:28 2022 ] Eval epoch: 75 +[ Tue Sep 13 13:31:58 2022 ] Mean test loss of 796 batches: 2.51838755607605. +[ Tue Sep 13 13:31:59 2022 ] Top1: 56.59% +[ Tue Sep 13 13:31:59 2022 ] Top5: 86.06% +[ Tue Sep 13 13:31:59 2022 ] Training epoch: 76 +[ Tue Sep 13 13:32:25 2022 ] Batch(74/243) done. 
Loss: 0.0864 lr:0.010000 +[ Tue Sep 13 13:32:54 2022 ] Batch(174/243) done. Loss: 0.0252 lr:0.010000 +[ Tue Sep 13 13:33:15 2022 ] Eval epoch: 76 +[ Tue Sep 13 13:34:46 2022 ] Mean test loss of 796 batches: 2.5579476356506348. +[ Tue Sep 13 13:34:46 2022 ] Top1: 55.87% +[ Tue Sep 13 13:34:47 2022 ] Top5: 85.64% +[ Tue Sep 13 13:34:47 2022 ] Training epoch: 77 +[ Tue Sep 13 13:35:00 2022 ] Batch(31/243) done. Loss: 0.0412 lr:0.010000 +[ Tue Sep 13 13:35:30 2022 ] Batch(131/243) done. Loss: 0.0723 lr:0.010000 +[ Tue Sep 13 13:36:00 2022 ] Batch(231/243) done. Loss: 0.1002 lr:0.010000 +[ Tue Sep 13 13:36:03 2022 ] Eval epoch: 77 +[ Tue Sep 13 13:37:34 2022 ] Mean test loss of 796 batches: 2.5376739501953125. +[ Tue Sep 13 13:37:35 2022 ] Top1: 56.41% +[ Tue Sep 13 13:37:35 2022 ] Top5: 85.96% +[ Tue Sep 13 13:37:35 2022 ] Training epoch: 78 +[ Tue Sep 13 13:38:05 2022 ] Batch(88/243) done. Loss: 0.0484 lr:0.010000 +[ Tue Sep 13 13:38:35 2022 ] Batch(188/243) done. Loss: 0.0305 lr:0.010000 +[ Tue Sep 13 13:38:51 2022 ] Eval epoch: 78 +[ Tue Sep 13 13:40:21 2022 ] Mean test loss of 796 batches: 2.5792503356933594. +[ Tue Sep 13 13:40:21 2022 ] Top1: 55.75% +[ Tue Sep 13 13:40:22 2022 ] Top5: 85.50% +[ Tue Sep 13 13:40:22 2022 ] Training epoch: 79 +[ Tue Sep 13 13:40:39 2022 ] Batch(45/243) done. Loss: 0.0676 lr:0.010000 +[ Tue Sep 13 13:41:09 2022 ] Batch(145/243) done. Loss: 0.0301 lr:0.010000 +[ Tue Sep 13 13:41:38 2022 ] Eval epoch: 79 +[ Tue Sep 13 13:43:08 2022 ] Mean test loss of 796 batches: 2.5704731941223145. +[ Tue Sep 13 13:43:09 2022 ] Top1: 56.07% +[ Tue Sep 13 13:43:09 2022 ] Top5: 85.69% +[ Tue Sep 13 13:43:09 2022 ] Training epoch: 80 +[ Tue Sep 13 13:43:14 2022 ] Batch(2/243) done. Loss: 0.0936 lr:0.010000 +[ Tue Sep 13 13:43:43 2022 ] Batch(102/243) done. Loss: 0.0752 lr:0.010000 +[ Tue Sep 13 13:44:13 2022 ] Batch(202/243) done. 
Loss: 0.0291 lr:0.010000 +[ Tue Sep 13 13:44:26 2022 ] Eval epoch: 80 +[ Tue Sep 13 13:45:56 2022 ] Mean test loss of 796 batches: 2.6504592895507812. +[ Tue Sep 13 13:45:56 2022 ] Top1: 55.93% +[ Tue Sep 13 13:45:56 2022 ] Top5: 85.44% +[ Tue Sep 13 13:45:57 2022 ] Training epoch: 81 +[ Tue Sep 13 13:46:18 2022 ] Batch(59/243) done. Loss: 0.0546 lr:0.001000 +[ Tue Sep 13 13:46:48 2022 ] Batch(159/243) done. Loss: 0.1036 lr:0.001000 +[ Tue Sep 13 13:47:13 2022 ] Eval epoch: 81 +[ Tue Sep 13 13:48:42 2022 ] Mean test loss of 796 batches: 2.6293492317199707. +[ Tue Sep 13 13:48:42 2022 ] Top1: 55.86% +[ Tue Sep 13 13:48:42 2022 ] Top5: 85.53% +[ Tue Sep 13 13:48:43 2022 ] Training epoch: 82 +[ Tue Sep 13 13:48:51 2022 ] Batch(16/243) done. Loss: 0.0634 lr:0.001000 +[ Tue Sep 13 13:49:21 2022 ] Batch(116/243) done. Loss: 0.0092 lr:0.001000 +[ Tue Sep 13 13:49:51 2022 ] Batch(216/243) done. Loss: 0.0490 lr:0.001000 +[ Tue Sep 13 13:49:59 2022 ] Eval epoch: 82 +[ Tue Sep 13 13:51:30 2022 ] Mean test loss of 796 batches: 2.653411388397217. +[ Tue Sep 13 13:51:30 2022 ] Top1: 55.83% +[ Tue Sep 13 13:51:31 2022 ] Top5: 85.50% +[ Tue Sep 13 13:51:31 2022 ] Training epoch: 83 +[ Tue Sep 13 13:51:57 2022 ] Batch(73/243) done. Loss: 0.0265 lr:0.001000 +[ Tue Sep 13 13:52:27 2022 ] Batch(173/243) done. Loss: 0.0194 lr:0.001000 +[ Tue Sep 13 13:52:48 2022 ] Eval epoch: 83 +[ Tue Sep 13 13:54:17 2022 ] Mean test loss of 796 batches: 2.6267552375793457. +[ Tue Sep 13 13:54:17 2022 ] Top1: 55.94% +[ Tue Sep 13 13:54:17 2022 ] Top5: 85.60% +[ Tue Sep 13 13:54:18 2022 ] Training epoch: 84 +[ Tue Sep 13 13:54:30 2022 ] Batch(30/243) done. Loss: 0.0394 lr:0.001000 +[ Tue Sep 13 13:55:00 2022 ] Batch(130/243) done. Loss: 0.0827 lr:0.001000 +[ Tue Sep 13 13:55:30 2022 ] Batch(230/243) done. Loss: 0.0686 lr:0.001000 +[ Tue Sep 13 13:55:34 2022 ] Eval epoch: 84 +[ Tue Sep 13 13:57:03 2022 ] Mean test loss of 796 batches: 2.5898735523223877. 
+[ Tue Sep 13 13:57:03 2022 ] Top1: 56.22% +[ Tue Sep 13 13:57:04 2022 ] Top5: 85.74% +[ Tue Sep 13 13:57:04 2022 ] Training epoch: 85 +[ Tue Sep 13 13:57:33 2022 ] Batch(87/243) done. Loss: 0.0559 lr:0.001000 +[ Tue Sep 13 13:58:03 2022 ] Batch(187/243) done. Loss: 0.0420 lr:0.001000 +[ Tue Sep 13 13:58:20 2022 ] Eval epoch: 85 +[ Tue Sep 13 13:59:49 2022 ] Mean test loss of 796 batches: 2.5916905403137207. +[ Tue Sep 13 13:59:50 2022 ] Top1: 56.13% +[ Tue Sep 13 13:59:50 2022 ] Top5: 85.68% +[ Tue Sep 13 13:59:50 2022 ] Training epoch: 86 +[ Tue Sep 13 14:00:07 2022 ] Batch(44/243) done. Loss: 0.0396 lr:0.001000 +[ Tue Sep 13 14:00:37 2022 ] Batch(144/243) done. Loss: 0.0288 lr:0.001000 +[ Tue Sep 13 14:01:07 2022 ] Eval epoch: 86 +[ Tue Sep 13 14:02:37 2022 ] Mean test loss of 796 batches: 2.5492138862609863. +[ Tue Sep 13 14:02:37 2022 ] Top1: 56.50% +[ Tue Sep 13 14:02:38 2022 ] Top5: 85.82% +[ Tue Sep 13 14:02:38 2022 ] Training epoch: 87 +[ Tue Sep 13 14:02:42 2022 ] Batch(1/243) done. Loss: 0.0766 lr:0.001000 +[ Tue Sep 13 14:03:12 2022 ] Batch(101/243) done. Loss: 0.0827 lr:0.001000 +[ Tue Sep 13 14:03:41 2022 ] Batch(201/243) done. Loss: 0.0746 lr:0.001000 +[ Tue Sep 13 14:03:54 2022 ] Eval epoch: 87 +[ Tue Sep 13 14:05:24 2022 ] Mean test loss of 796 batches: 2.5682311058044434. +[ Tue Sep 13 14:05:24 2022 ] Top1: 56.08% +[ Tue Sep 13 14:05:24 2022 ] Top5: 85.66% +[ Tue Sep 13 14:05:25 2022 ] Training epoch: 88 +[ Tue Sep 13 14:05:45 2022 ] Batch(58/243) done. Loss: 0.0400 lr:0.001000 +[ Tue Sep 13 14:06:15 2022 ] Batch(158/243) done. Loss: 0.0518 lr:0.001000 +[ Tue Sep 13 14:06:41 2022 ] Eval epoch: 88 +[ Tue Sep 13 14:08:13 2022 ] Mean test loss of 796 batches: 2.5862810611724854. +[ Tue Sep 13 14:08:14 2022 ] Top1: 56.15% +[ Tue Sep 13 14:08:14 2022 ] Top5: 85.86% +[ Tue Sep 13 14:08:14 2022 ] Training epoch: 89 +[ Tue Sep 13 14:08:22 2022 ] Batch(15/243) done. Loss: 0.0697 lr:0.001000 +[ Tue Sep 13 14:08:52 2022 ] Batch(115/243) done. 
Loss: 0.0347 lr:0.001000 +[ Tue Sep 13 14:09:22 2022 ] Batch(215/243) done. Loss: 0.0376 lr:0.001000 +[ Tue Sep 13 14:09:30 2022 ] Eval epoch: 89 +[ Tue Sep 13 14:11:02 2022 ] Mean test loss of 796 batches: 2.6040351390838623. +[ Tue Sep 13 14:11:02 2022 ] Top1: 55.94% +[ Tue Sep 13 14:11:02 2022 ] Top5: 85.50% +[ Tue Sep 13 14:11:03 2022 ] Training epoch: 90 +[ Tue Sep 13 14:11:30 2022 ] Batch(72/243) done. Loss: 0.0266 lr:0.001000 +[ Tue Sep 13 14:12:00 2022 ] Batch(172/243) done. Loss: 0.0592 lr:0.001000 +[ Tue Sep 13 14:12:21 2022 ] Eval epoch: 90 +[ Tue Sep 13 14:13:52 2022 ] Mean test loss of 796 batches: 2.6085898876190186. +[ Tue Sep 13 14:13:53 2022 ] Top1: 55.85% +[ Tue Sep 13 14:13:53 2022 ] Top5: 85.52% +[ Tue Sep 13 14:13:53 2022 ] Training epoch: 91 +[ Tue Sep 13 14:14:06 2022 ] Batch(29/243) done. Loss: 0.0219 lr:0.001000 +[ Tue Sep 13 14:14:36 2022 ] Batch(129/243) done. Loss: 0.0494 lr:0.001000 +[ Tue Sep 13 14:15:06 2022 ] Batch(229/243) done. Loss: 0.0842 lr:0.001000 +[ Tue Sep 13 14:15:10 2022 ] Eval epoch: 91 +[ Tue Sep 13 14:16:41 2022 ] Mean test loss of 796 batches: 2.601207971572876. +[ Tue Sep 13 14:16:41 2022 ] Top1: 55.89% +[ Tue Sep 13 14:16:42 2022 ] Top5: 85.64% +[ Tue Sep 13 14:16:42 2022 ] Training epoch: 92 +[ Tue Sep 13 14:17:11 2022 ] Batch(86/243) done. Loss: 0.0986 lr:0.001000 +[ Tue Sep 13 14:17:41 2022 ] Batch(186/243) done. Loss: 0.1233 lr:0.001000 +[ Tue Sep 13 14:17:58 2022 ] Eval epoch: 92 +[ Tue Sep 13 14:19:28 2022 ] Mean test loss of 796 batches: 2.6894383430480957. +[ Tue Sep 13 14:19:28 2022 ] Top1: 55.42% +[ Tue Sep 13 14:19:29 2022 ] Top5: 85.34% +[ Tue Sep 13 14:19:29 2022 ] Training epoch: 93 +[ Tue Sep 13 14:19:46 2022 ] Batch(43/243) done. Loss: 0.0524 lr:0.001000 +[ Tue Sep 13 14:20:16 2022 ] Batch(143/243) done. Loss: 0.0981 lr:0.001000 +[ Tue Sep 13 14:20:45 2022 ] Eval epoch: 93 +[ Tue Sep 13 14:22:16 2022 ] Mean test loss of 796 batches: 2.6224026679992676. 
+[ Tue Sep 13 14:22:16 2022 ] Top1: 56.13% +[ Tue Sep 13 14:22:17 2022 ] Top5: 85.60% +[ Tue Sep 13 14:22:17 2022 ] Training epoch: 94 +[ Tue Sep 13 14:22:21 2022 ] Batch(0/243) done. Loss: 0.0663 lr:0.001000 +[ Tue Sep 13 14:22:51 2022 ] Batch(100/243) done. Loss: 0.0486 lr:0.001000 +[ Tue Sep 13 14:23:20 2022 ] Batch(200/243) done. Loss: 0.0635 lr:0.001000 +[ Tue Sep 13 14:23:33 2022 ] Eval epoch: 94 +[ Tue Sep 13 14:25:03 2022 ] Mean test loss of 796 batches: 2.6107020378112793. +[ Tue Sep 13 14:25:04 2022 ] Top1: 56.09% +[ Tue Sep 13 14:25:04 2022 ] Top5: 85.62% +[ Tue Sep 13 14:25:04 2022 ] Training epoch: 95 +[ Tue Sep 13 14:25:25 2022 ] Batch(57/243) done. Loss: 0.1862 lr:0.001000 +[ Tue Sep 13 14:25:55 2022 ] Batch(157/243) done. Loss: 0.0362 lr:0.001000 +[ Tue Sep 13 14:26:21 2022 ] Eval epoch: 95 +[ Tue Sep 13 14:27:51 2022 ] Mean test loss of 796 batches: 2.630995035171509. +[ Tue Sep 13 14:27:51 2022 ] Top1: 56.15% +[ Tue Sep 13 14:27:51 2022 ] Top5: 85.78% +[ Tue Sep 13 14:27:52 2022 ] Training epoch: 96 +[ Tue Sep 13 14:28:00 2022 ] Batch(14/243) done. Loss: 0.0357 lr:0.001000 +[ Tue Sep 13 14:28:30 2022 ] Batch(114/243) done. Loss: 0.0910 lr:0.001000 +[ Tue Sep 13 14:28:59 2022 ] Batch(214/243) done. Loss: 0.0290 lr:0.001000 +[ Tue Sep 13 14:29:08 2022 ] Eval epoch: 96 +[ Tue Sep 13 14:30:37 2022 ] Mean test loss of 796 batches: 2.5851011276245117. +[ Tue Sep 13 14:30:38 2022 ] Top1: 56.15% +[ Tue Sep 13 14:30:38 2022 ] Top5: 85.95% +[ Tue Sep 13 14:30:38 2022 ] Training epoch: 97 +[ Tue Sep 13 14:31:03 2022 ] Batch(71/243) done. Loss: 0.0985 lr:0.001000 +[ Tue Sep 13 14:31:33 2022 ] Batch(171/243) done. Loss: 0.0368 lr:0.001000 +[ Tue Sep 13 14:31:55 2022 ] Eval epoch: 97 +[ Tue Sep 13 14:33:25 2022 ] Mean test loss of 796 batches: 2.600297689437866. +[ Tue Sep 13 14:33:25 2022 ] Top1: 56.21% +[ Tue Sep 13 14:33:25 2022 ] Top5: 85.83% +[ Tue Sep 13 14:33:26 2022 ] Training epoch: 98 +[ Tue Sep 13 14:33:38 2022 ] Batch(28/243) done. 
Loss: 0.1044 lr:0.001000 +[ Tue Sep 13 14:34:07 2022 ] Batch(128/243) done. Loss: 0.0899 lr:0.001000 +[ Tue Sep 13 14:34:37 2022 ] Batch(228/243) done. Loss: 0.0325 lr:0.001000 +[ Tue Sep 13 14:34:42 2022 ] Eval epoch: 98 +[ Tue Sep 13 14:36:11 2022 ] Mean test loss of 796 batches: 2.640855073928833. +[ Tue Sep 13 14:36:11 2022 ] Top1: 56.13% +[ Tue Sep 13 14:36:12 2022 ] Top5: 85.73% +[ Tue Sep 13 14:36:12 2022 ] Training epoch: 99 +[ Tue Sep 13 14:36:41 2022 ] Batch(85/243) done. Loss: 0.0878 lr:0.001000 +[ Tue Sep 13 14:37:11 2022 ] Batch(185/243) done. Loss: 0.0576 lr:0.001000 +[ Tue Sep 13 14:37:29 2022 ] Eval epoch: 99 +[ Tue Sep 13 14:38:58 2022 ] Mean test loss of 796 batches: 2.7020773887634277. +[ Tue Sep 13 14:38:58 2022 ] Top1: 55.70% +[ Tue Sep 13 14:38:58 2022 ] Top5: 85.29% +[ Tue Sep 13 14:38:59 2022 ] Training epoch: 100 +[ Tue Sep 13 14:39:15 2022 ] Batch(42/243) done. Loss: 0.0629 lr:0.001000 +[ Tue Sep 13 14:39:45 2022 ] Batch(142/243) done. Loss: 0.0454 lr:0.001000 +[ Tue Sep 13 14:40:15 2022 ] Batch(242/243) done. Loss: 0.0956 lr:0.001000 +[ Tue Sep 13 14:40:15 2022 ] Eval epoch: 100 +[ Tue Sep 13 14:41:44 2022 ] Mean test loss of 796 batches: 2.707759380340576. 
+[ Tue Sep 13 14:41:45 2022 ] Top1: 55.49% +[ Tue Sep 13 14:41:45 2022 ] Top5: 85.48% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9404db9d09190dcb6b52daa8ee82bc3894eca11 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu120_joint_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_joint_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_motion_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_motion_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/decouple_gcn.py 
b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = 
nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, 
self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, 
graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c92b2f6d162ede3fb1871ccb936f20f37ed68191 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:947c4edde0efb481f67a7c241ab8cf025f7d42583324823dd917eedc526fe344 +size 29946137 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae24d808df887962cbcab55123281bc0b68948cc --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_motion_xsub/log.txt @@ -0,0 +1,746 @@ +[ Mon Sep 12 17:08:18 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_motion_xsub', 'model_saved_name': './save_models/ntu120_joint_motion_xsub', 'Experiment_name': 'ntu120_joint_motion_xsub', 'config': './config/ntu120_xsub/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 
'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Mon Sep 12 17:08:18 2022 ] Training epoch: 1 +[ Mon Sep 12 17:09:08 2022 ] Batch(99/243) done. Loss: 3.9828 lr:0.100000 +[ Mon Sep 12 17:09:53 2022 ] Batch(199/243) done. Loss: 3.5107 lr:0.100000 +[ Mon Sep 12 17:10:13 2022 ] Eval epoch: 1 +[ Mon Sep 12 17:12:45 2022 ] Mean test loss of 796 batches: 5.743283271789551. +[ Mon Sep 12 17:12:46 2022 ] Top1: 4.45% +[ Mon Sep 12 17:12:46 2022 ] Top5: 12.60% +[ Mon Sep 12 17:12:46 2022 ] Training epoch: 2 +[ Mon Sep 12 17:13:20 2022 ] Batch(56/243) done. Loss: 3.4010 lr:0.100000 +[ Mon Sep 12 17:14:13 2022 ] Batch(156/243) done. Loss: 2.9352 lr:0.100000 +[ Mon Sep 12 17:15:00 2022 ] Eval epoch: 2 +[ Mon Sep 12 17:17:32 2022 ] Mean test loss of 796 batches: 5.4759979248046875. 
+[ Mon Sep 12 17:17:32 2022 ] Top1: 6.80% +[ Mon Sep 12 17:17:33 2022 ] Top5: 21.29% +[ Mon Sep 12 17:17:33 2022 ] Training epoch: 3 +[ Mon Sep 12 17:17:43 2022 ] Batch(13/243) done. Loss: 2.8028 lr:0.100000 +[ Mon Sep 12 17:18:36 2022 ] Batch(113/243) done. Loss: 2.3965 lr:0.100000 +[ Mon Sep 12 17:19:30 2022 ] Batch(213/243) done. Loss: 2.2430 lr:0.100000 +[ Mon Sep 12 17:19:46 2022 ] Eval epoch: 3 +[ Mon Sep 12 17:22:17 2022 ] Mean test loss of 796 batches: 4.726529598236084. +[ Mon Sep 12 17:22:17 2022 ] Top1: 9.04% +[ Mon Sep 12 17:22:18 2022 ] Top5: 24.75% +[ Mon Sep 12 17:22:18 2022 ] Training epoch: 4 +[ Mon Sep 12 17:22:58 2022 ] Batch(70/243) done. Loss: 2.0684 lr:0.100000 +[ Mon Sep 12 17:23:52 2022 ] Batch(170/243) done. Loss: 1.6667 lr:0.100000 +[ Mon Sep 12 17:24:30 2022 ] Eval epoch: 4 +[ Mon Sep 12 17:27:01 2022 ] Mean test loss of 796 batches: 4.703661918640137. +[ Mon Sep 12 17:27:02 2022 ] Top1: 13.90% +[ Mon Sep 12 17:27:02 2022 ] Top5: 34.63% +[ Mon Sep 12 17:27:02 2022 ] Training epoch: 5 +[ Mon Sep 12 17:27:20 2022 ] Batch(27/243) done. Loss: 2.0104 lr:0.100000 +[ Mon Sep 12 17:28:13 2022 ] Batch(127/243) done. Loss: 1.8591 lr:0.100000 +[ Mon Sep 12 17:29:07 2022 ] Batch(227/243) done. Loss: 1.4994 lr:0.100000 +[ Mon Sep 12 17:29:15 2022 ] Eval epoch: 5 +[ Mon Sep 12 17:31:45 2022 ] Mean test loss of 796 batches: 3.6374218463897705. +[ Mon Sep 12 17:31:46 2022 ] Top1: 16.08% +[ Mon Sep 12 17:31:46 2022 ] Top5: 42.62% +[ Mon Sep 12 17:31:46 2022 ] Training epoch: 6 +[ Mon Sep 12 17:32:34 2022 ] Batch(84/243) done. Loss: 1.6156 lr:0.100000 +[ Mon Sep 12 17:33:28 2022 ] Batch(184/243) done. Loss: 1.4599 lr:0.100000 +[ Mon Sep 12 17:33:59 2022 ] Eval epoch: 6 +[ Mon Sep 12 17:36:30 2022 ] Mean test loss of 796 batches: 4.115126609802246. +[ Mon Sep 12 17:36:31 2022 ] Top1: 19.99% +[ Mon Sep 12 17:36:31 2022 ] Top5: 49.28% +[ Mon Sep 12 17:36:31 2022 ] Training epoch: 7 +[ Mon Sep 12 17:36:56 2022 ] Batch(41/243) done. 
Loss: 1.2778 lr:0.100000 +[ Mon Sep 12 17:37:50 2022 ] Batch(141/243) done. Loss: 1.1354 lr:0.100000 +[ Mon Sep 12 17:38:43 2022 ] Batch(241/243) done. Loss: 1.4545 lr:0.100000 +[ Mon Sep 12 17:38:44 2022 ] Eval epoch: 7 +[ Mon Sep 12 17:41:15 2022 ] Mean test loss of 796 batches: 3.3611857891082764. +[ Mon Sep 12 17:41:15 2022 ] Top1: 25.49% +[ Mon Sep 12 17:41:15 2022 ] Top5: 55.62% +[ Mon Sep 12 17:41:16 2022 ] Training epoch: 8 +[ Mon Sep 12 17:42:11 2022 ] Batch(98/243) done. Loss: 1.1384 lr:0.100000 +[ Mon Sep 12 17:43:04 2022 ] Batch(198/243) done. Loss: 1.0976 lr:0.100000 +[ Mon Sep 12 17:43:28 2022 ] Eval epoch: 8 +[ Mon Sep 12 17:45:59 2022 ] Mean test loss of 796 batches: 4.76453971862793. +[ Mon Sep 12 17:45:59 2022 ] Top1: 20.45% +[ Mon Sep 12 17:46:00 2022 ] Top5: 47.84% +[ Mon Sep 12 17:46:00 2022 ] Training epoch: 9 +[ Mon Sep 12 17:46:32 2022 ] Batch(55/243) done. Loss: 0.9948 lr:0.100000 +[ Mon Sep 12 17:47:26 2022 ] Batch(155/243) done. Loss: 1.2244 lr:0.100000 +[ Mon Sep 12 17:48:12 2022 ] Eval epoch: 9 +[ Mon Sep 12 17:50:43 2022 ] Mean test loss of 796 batches: 3.4281227588653564. +[ Mon Sep 12 17:50:44 2022 ] Top1: 28.01% +[ Mon Sep 12 17:50:44 2022 ] Top5: 58.91% +[ Mon Sep 12 17:50:44 2022 ] Training epoch: 10 +[ Mon Sep 12 17:50:54 2022 ] Batch(12/243) done. Loss: 1.1751 lr:0.100000 +[ Mon Sep 12 17:51:47 2022 ] Batch(112/243) done. Loss: 0.9554 lr:0.100000 +[ Mon Sep 12 17:52:41 2022 ] Batch(212/243) done. Loss: 1.1116 lr:0.100000 +[ Mon Sep 12 17:52:57 2022 ] Eval epoch: 10 +[ Mon Sep 12 17:55:28 2022 ] Mean test loss of 796 batches: 3.9069652557373047. +[ Mon Sep 12 17:55:28 2022 ] Top1: 26.39% +[ Mon Sep 12 17:55:29 2022 ] Top5: 59.63% +[ Mon Sep 12 17:55:29 2022 ] Training epoch: 11 +[ Mon Sep 12 17:56:09 2022 ] Batch(69/243) done. Loss: 0.8494 lr:0.100000 +[ Mon Sep 12 17:57:02 2022 ] Batch(169/243) done. 
Loss: 1.1055 lr:0.100000 +[ Mon Sep 12 17:57:41 2022 ] Eval epoch: 11 +[ Mon Sep 12 18:00:12 2022 ] Mean test loss of 796 batches: 3.2280941009521484. +[ Mon Sep 12 18:00:13 2022 ] Top1: 31.25% +[ Mon Sep 12 18:00:13 2022 ] Top5: 64.52% +[ Mon Sep 12 18:00:13 2022 ] Training epoch: 12 +[ Mon Sep 12 18:00:30 2022 ] Batch(26/243) done. Loss: 0.9573 lr:0.100000 +[ Mon Sep 12 18:01:24 2022 ] Batch(126/243) done. Loss: 1.2943 lr:0.100000 +[ Mon Sep 12 18:02:17 2022 ] Batch(226/243) done. Loss: 0.9637 lr:0.100000 +[ Mon Sep 12 18:02:26 2022 ] Eval epoch: 12 +[ Mon Sep 12 18:04:57 2022 ] Mean test loss of 796 batches: 4.624372482299805. +[ Mon Sep 12 18:04:58 2022 ] Top1: 19.33% +[ Mon Sep 12 18:04:58 2022 ] Top5: 47.97% +[ Mon Sep 12 18:04:59 2022 ] Training epoch: 13 +[ Mon Sep 12 18:05:46 2022 ] Batch(83/243) done. Loss: 1.0534 lr:0.100000 +[ Mon Sep 12 18:06:39 2022 ] Batch(183/243) done. Loss: 1.0734 lr:0.100000 +[ Mon Sep 12 18:07:11 2022 ] Eval epoch: 13 +[ Mon Sep 12 18:09:41 2022 ] Mean test loss of 796 batches: 3.4322330951690674. +[ Mon Sep 12 18:09:42 2022 ] Top1: 29.96% +[ Mon Sep 12 18:09:42 2022 ] Top5: 65.63% +[ Mon Sep 12 18:09:42 2022 ] Training epoch: 14 +[ Mon Sep 12 18:10:07 2022 ] Batch(40/243) done. Loss: 0.9375 lr:0.100000 +[ Mon Sep 12 18:11:00 2022 ] Batch(140/243) done. Loss: 0.8313 lr:0.100000 +[ Mon Sep 12 18:11:54 2022 ] Batch(240/243) done. Loss: 1.1521 lr:0.100000 +[ Mon Sep 12 18:11:55 2022 ] Eval epoch: 14 +[ Mon Sep 12 18:14:26 2022 ] Mean test loss of 796 batches: 2.9193553924560547. +[ Mon Sep 12 18:14:26 2022 ] Top1: 35.86% +[ Mon Sep 12 18:14:27 2022 ] Top5: 72.78% +[ Mon Sep 12 18:14:27 2022 ] Training epoch: 15 +[ Mon Sep 12 18:15:22 2022 ] Batch(97/243) done. Loss: 0.7764 lr:0.100000 +[ Mon Sep 12 18:16:15 2022 ] Batch(197/243) done. Loss: 0.8692 lr:0.100000 +[ Mon Sep 12 18:16:39 2022 ] Eval epoch: 15 +[ Mon Sep 12 18:19:10 2022 ] Mean test loss of 796 batches: 2.9887285232543945. 
+[ Mon Sep 12 18:19:11 2022 ] Top1: 31.90% +[ Mon Sep 12 18:19:11 2022 ] Top5: 68.86% +[ Mon Sep 12 18:19:11 2022 ] Training epoch: 16 +[ Mon Sep 12 18:19:43 2022 ] Batch(54/243) done. Loss: 1.1223 lr:0.100000 +[ Mon Sep 12 18:20:36 2022 ] Batch(154/243) done. Loss: 0.9452 lr:0.100000 +[ Mon Sep 12 18:21:24 2022 ] Eval epoch: 16 +[ Mon Sep 12 18:23:55 2022 ] Mean test loss of 796 batches: 3.189964771270752. +[ Mon Sep 12 18:23:55 2022 ] Top1: 31.94% +[ Mon Sep 12 18:23:56 2022 ] Top5: 66.61% +[ Mon Sep 12 18:23:56 2022 ] Training epoch: 17 +[ Mon Sep 12 18:24:05 2022 ] Batch(11/243) done. Loss: 0.7831 lr:0.100000 +[ Mon Sep 12 18:24:58 2022 ] Batch(111/243) done. Loss: 0.9609 lr:0.100000 +[ Mon Sep 12 18:25:52 2022 ] Batch(211/243) done. Loss: 0.8769 lr:0.100000 +[ Mon Sep 12 18:26:09 2022 ] Eval epoch: 17 +[ Mon Sep 12 18:28:39 2022 ] Mean test loss of 796 batches: 2.984318494796753. +[ Mon Sep 12 18:28:40 2022 ] Top1: 32.78% +[ Mon Sep 12 18:28:40 2022 ] Top5: 67.75% +[ Mon Sep 12 18:28:40 2022 ] Training epoch: 18 +[ Mon Sep 12 18:29:20 2022 ] Batch(68/243) done. Loss: 0.6888 lr:0.100000 +[ Mon Sep 12 18:30:13 2022 ] Batch(168/243) done. Loss: 0.8678 lr:0.100000 +[ Mon Sep 12 18:30:53 2022 ] Eval epoch: 18 +[ Mon Sep 12 18:33:24 2022 ] Mean test loss of 796 batches: 3.6525020599365234. +[ Mon Sep 12 18:33:24 2022 ] Top1: 26.64% +[ Mon Sep 12 18:33:24 2022 ] Top5: 60.06% +[ Mon Sep 12 18:33:25 2022 ] Training epoch: 19 +[ Mon Sep 12 18:33:41 2022 ] Batch(25/243) done. Loss: 0.6830 lr:0.100000 +[ Mon Sep 12 18:34:35 2022 ] Batch(125/243) done. Loss: 0.5905 lr:0.100000 +[ Mon Sep 12 18:35:28 2022 ] Batch(225/243) done. Loss: 0.9013 lr:0.100000 +[ Mon Sep 12 18:35:37 2022 ] Eval epoch: 19 +[ Mon Sep 12 18:38:09 2022 ] Mean test loss of 796 batches: 4.428297519683838. +[ Mon Sep 12 18:38:09 2022 ] Top1: 23.64% +[ Mon Sep 12 18:38:09 2022 ] Top5: 57.18% +[ Mon Sep 12 18:38:10 2022 ] Training epoch: 20 +[ Mon Sep 12 18:38:56 2022 ] Batch(82/243) done. 
Loss: 0.7506 lr:0.100000 +[ Mon Sep 12 18:39:50 2022 ] Batch(182/243) done. Loss: 0.6540 lr:0.100000 +[ Mon Sep 12 18:40:22 2022 ] Eval epoch: 20 +[ Mon Sep 12 18:42:53 2022 ] Mean test loss of 796 batches: 6.080588340759277. +[ Mon Sep 12 18:42:54 2022 ] Top1: 10.31% +[ Mon Sep 12 18:42:54 2022 ] Top5: 32.39% +[ Mon Sep 12 18:42:54 2022 ] Training epoch: 21 +[ Mon Sep 12 18:43:18 2022 ] Batch(39/243) done. Loss: 0.5286 lr:0.100000 +[ Mon Sep 12 18:44:11 2022 ] Batch(139/243) done. Loss: 0.5689 lr:0.100000 +[ Mon Sep 12 18:45:05 2022 ] Batch(239/243) done. Loss: 0.6534 lr:0.100000 +[ Mon Sep 12 18:45:07 2022 ] Eval epoch: 21 +[ Mon Sep 12 18:47:38 2022 ] Mean test loss of 796 batches: 3.9268245697021484. +[ Mon Sep 12 18:47:38 2022 ] Top1: 25.51% +[ Mon Sep 12 18:47:38 2022 ] Top5: 58.81% +[ Mon Sep 12 18:47:39 2022 ] Training epoch: 22 +[ Mon Sep 12 18:48:33 2022 ] Batch(96/243) done. Loss: 0.7457 lr:0.100000 +[ Mon Sep 12 18:49:26 2022 ] Batch(196/243) done. Loss: 0.8362 lr:0.100000 +[ Mon Sep 12 18:49:51 2022 ] Eval epoch: 22 +[ Mon Sep 12 18:52:22 2022 ] Mean test loss of 796 batches: 3.1748151779174805. +[ Mon Sep 12 18:52:22 2022 ] Top1: 35.90% +[ Mon Sep 12 18:52:23 2022 ] Top5: 69.46% +[ Mon Sep 12 18:52:23 2022 ] Training epoch: 23 +[ Mon Sep 12 18:52:54 2022 ] Batch(53/243) done. Loss: 0.7031 lr:0.100000 +[ Mon Sep 12 18:53:48 2022 ] Batch(153/243) done. Loss: 0.7082 lr:0.100000 +[ Mon Sep 12 18:54:36 2022 ] Eval epoch: 23 +[ Mon Sep 12 18:57:07 2022 ] Mean test loss of 796 batches: 3.1078102588653564. +[ Mon Sep 12 18:57:07 2022 ] Top1: 34.99% +[ Mon Sep 12 18:57:07 2022 ] Top5: 69.51% +[ Mon Sep 12 18:57:08 2022 ] Training epoch: 24 +[ Mon Sep 12 18:57:16 2022 ] Batch(10/243) done. Loss: 0.4838 lr:0.100000 +[ Mon Sep 12 18:58:09 2022 ] Batch(110/243) done. Loss: 0.6159 lr:0.100000 +[ Mon Sep 12 18:59:03 2022 ] Batch(210/243) done. 
Loss: 0.8235 lr:0.100000 +[ Mon Sep 12 18:59:20 2022 ] Eval epoch: 24 +[ Mon Sep 12 19:01:50 2022 ] Mean test loss of 796 batches: 3.1660282611846924. +[ Mon Sep 12 19:01:51 2022 ] Top1: 30.58% +[ Mon Sep 12 19:01:51 2022 ] Top5: 63.14% +[ Mon Sep 12 19:01:51 2022 ] Training epoch: 25 +[ Mon Sep 12 19:02:30 2022 ] Batch(67/243) done. Loss: 0.5538 lr:0.100000 +[ Mon Sep 12 19:03:24 2022 ] Batch(167/243) done. Loss: 0.7944 lr:0.100000 +[ Mon Sep 12 19:04:04 2022 ] Eval epoch: 25 +[ Mon Sep 12 19:06:35 2022 ] Mean test loss of 796 batches: 3.2890923023223877. +[ Mon Sep 12 19:06:35 2022 ] Top1: 35.01% +[ Mon Sep 12 19:06:36 2022 ] Top5: 70.55% +[ Mon Sep 12 19:06:36 2022 ] Training epoch: 26 +[ Mon Sep 12 19:06:52 2022 ] Batch(24/243) done. Loss: 0.6708 lr:0.100000 +[ Mon Sep 12 19:07:45 2022 ] Batch(124/243) done. Loss: 0.5950 lr:0.100000 +[ Mon Sep 12 19:08:39 2022 ] Batch(224/243) done. Loss: 0.5146 lr:0.100000 +[ Mon Sep 12 19:08:49 2022 ] Eval epoch: 26 +[ Mon Sep 12 19:11:20 2022 ] Mean test loss of 796 batches: 3.0431575775146484. +[ Mon Sep 12 19:11:20 2022 ] Top1: 39.31% +[ Mon Sep 12 19:11:21 2022 ] Top5: 73.00% +[ Mon Sep 12 19:11:21 2022 ] Training epoch: 27 +[ Mon Sep 12 19:12:07 2022 ] Batch(81/243) done. Loss: 0.5579 lr:0.100000 +[ Mon Sep 12 19:13:01 2022 ] Batch(181/243) done. Loss: 0.4032 lr:0.100000 +[ Mon Sep 12 19:13:34 2022 ] Eval epoch: 27 +[ Mon Sep 12 19:16:05 2022 ] Mean test loss of 796 batches: 2.9798786640167236. +[ Mon Sep 12 19:16:05 2022 ] Top1: 39.50% +[ Mon Sep 12 19:16:06 2022 ] Top5: 72.99% +[ Mon Sep 12 19:16:06 2022 ] Training epoch: 28 +[ Mon Sep 12 19:16:29 2022 ] Batch(38/243) done. Loss: 0.6820 lr:0.100000 +[ Mon Sep 12 19:17:23 2022 ] Batch(138/243) done. Loss: 0.4468 lr:0.100000 +[ Mon Sep 12 19:18:16 2022 ] Batch(238/243) done. Loss: 0.5453 lr:0.100000 +[ Mon Sep 12 19:18:18 2022 ] Eval epoch: 28 +[ Mon Sep 12 19:20:49 2022 ] Mean test loss of 796 batches: 3.0454938411712646. 
+[ Mon Sep 12 19:20:49 2022 ] Top1: 38.20% +[ Mon Sep 12 19:20:50 2022 ] Top5: 71.75% +[ Mon Sep 12 19:20:50 2022 ] Training epoch: 29 +[ Mon Sep 12 19:21:44 2022 ] Batch(95/243) done. Loss: 0.4766 lr:0.100000 +[ Mon Sep 12 19:22:38 2022 ] Batch(195/243) done. Loss: 0.3668 lr:0.100000 +[ Mon Sep 12 19:23:03 2022 ] Eval epoch: 29 +[ Mon Sep 12 19:25:33 2022 ] Mean test loss of 796 batches: 2.752485513687134. +[ Mon Sep 12 19:25:34 2022 ] Top1: 43.64% +[ Mon Sep 12 19:25:34 2022 ] Top5: 77.60% +[ Mon Sep 12 19:25:34 2022 ] Training epoch: 30 +[ Mon Sep 12 19:26:05 2022 ] Batch(52/243) done. Loss: 0.5540 lr:0.100000 +[ Mon Sep 12 19:26:59 2022 ] Batch(152/243) done. Loss: 0.3592 lr:0.100000 +[ Mon Sep 12 19:27:47 2022 ] Eval epoch: 30 +[ Mon Sep 12 19:30:18 2022 ] Mean test loss of 796 batches: 2.984344720840454. +[ Mon Sep 12 19:30:18 2022 ] Top1: 39.46% +[ Mon Sep 12 19:30:19 2022 ] Top5: 73.23% +[ Mon Sep 12 19:30:19 2022 ] Training epoch: 31 +[ Mon Sep 12 19:30:27 2022 ] Batch(9/243) done. Loss: 0.4950 lr:0.100000 +[ Mon Sep 12 19:31:20 2022 ] Batch(109/243) done. Loss: 0.4737 lr:0.100000 +[ Mon Sep 12 19:32:14 2022 ] Batch(209/243) done. Loss: 0.4834 lr:0.100000 +[ Mon Sep 12 19:32:32 2022 ] Eval epoch: 31 +[ Mon Sep 12 19:35:03 2022 ] Mean test loss of 796 batches: 3.027348279953003. +[ Mon Sep 12 19:35:03 2022 ] Top1: 37.86% +[ Mon Sep 12 19:35:04 2022 ] Top5: 71.76% +[ Mon Sep 12 19:35:04 2022 ] Training epoch: 32 +[ Mon Sep 12 19:35:42 2022 ] Batch(66/243) done. Loss: 0.3802 lr:0.100000 +[ Mon Sep 12 19:36:36 2022 ] Batch(166/243) done. Loss: 0.2585 lr:0.100000 +[ Mon Sep 12 19:37:17 2022 ] Eval epoch: 32 +[ Mon Sep 12 19:39:47 2022 ] Mean test loss of 796 batches: 4.240810871124268. +[ Mon Sep 12 19:39:47 2022 ] Top1: 30.77% +[ Mon Sep 12 19:39:48 2022 ] Top5: 63.77% +[ Mon Sep 12 19:39:48 2022 ] Training epoch: 33 +[ Mon Sep 12 19:40:03 2022 ] Batch(23/243) done. Loss: 0.3715 lr:0.100000 +[ Mon Sep 12 19:40:57 2022 ] Batch(123/243) done. 
Loss: 0.5635 lr:0.100000 +[ Mon Sep 12 19:41:50 2022 ] Batch(223/243) done. Loss: 0.5962 lr:0.100000 +[ Mon Sep 12 19:42:01 2022 ] Eval epoch: 33 +[ Mon Sep 12 19:44:31 2022 ] Mean test loss of 796 batches: 2.9045612812042236. +[ Mon Sep 12 19:44:31 2022 ] Top1: 39.17% +[ Mon Sep 12 19:44:32 2022 ] Top5: 74.38% +[ Mon Sep 12 19:44:32 2022 ] Training epoch: 34 +[ Mon Sep 12 19:45:18 2022 ] Batch(80/243) done. Loss: 0.5678 lr:0.100000 +[ Mon Sep 12 19:46:11 2022 ] Batch(180/243) done. Loss: 0.4247 lr:0.100000 +[ Mon Sep 12 19:46:45 2022 ] Eval epoch: 34 +[ Mon Sep 12 19:49:15 2022 ] Mean test loss of 796 batches: 3.31986403465271. +[ Mon Sep 12 19:49:15 2022 ] Top1: 39.28% +[ Mon Sep 12 19:49:16 2022 ] Top5: 70.77% +[ Mon Sep 12 19:49:16 2022 ] Training epoch: 35 +[ Mon Sep 12 19:49:39 2022 ] Batch(37/243) done. Loss: 0.4078 lr:0.100000 +[ Mon Sep 12 19:50:32 2022 ] Batch(137/243) done. Loss: 0.5305 lr:0.100000 +[ Mon Sep 12 19:51:26 2022 ] Batch(237/243) done. Loss: 0.4316 lr:0.100000 +[ Mon Sep 12 19:51:28 2022 ] Eval epoch: 35 +[ Mon Sep 12 19:54:00 2022 ] Mean test loss of 796 batches: 2.73232102394104. +[ Mon Sep 12 19:54:00 2022 ] Top1: 43.68% +[ Mon Sep 12 19:54:00 2022 ] Top5: 76.74% +[ Mon Sep 12 19:54:01 2022 ] Training epoch: 36 +[ Mon Sep 12 19:54:54 2022 ] Batch(94/243) done. Loss: 0.6683 lr:0.100000 +[ Mon Sep 12 19:55:47 2022 ] Batch(194/243) done. Loss: 0.5727 lr:0.100000 +[ Mon Sep 12 19:56:13 2022 ] Eval epoch: 36 +[ Mon Sep 12 19:58:44 2022 ] Mean test loss of 796 batches: 3.422870635986328. +[ Mon Sep 12 19:58:44 2022 ] Top1: 37.66% +[ Mon Sep 12 19:58:44 2022 ] Top5: 70.87% +[ Mon Sep 12 19:58:45 2022 ] Training epoch: 37 +[ Mon Sep 12 19:59:15 2022 ] Batch(51/243) done. Loss: 0.4763 lr:0.100000 +[ Mon Sep 12 20:00:08 2022 ] Batch(151/243) done. Loss: 0.5658 lr:0.100000 +[ Mon Sep 12 20:00:57 2022 ] Eval epoch: 37 +[ Mon Sep 12 20:03:28 2022 ] Mean test loss of 796 batches: 4.982846260070801. 
+[ Mon Sep 12 20:03:28 2022 ] Top1: 26.71% +[ Mon Sep 12 20:03:28 2022 ] Top5: 56.68% +[ Mon Sep 12 20:03:29 2022 ] Training epoch: 38 +[ Mon Sep 12 20:03:36 2022 ] Batch(8/243) done. Loss: 0.3946 lr:0.100000 +[ Mon Sep 12 20:04:29 2022 ] Batch(108/243) done. Loss: 0.5022 lr:0.100000 +[ Mon Sep 12 20:05:23 2022 ] Batch(208/243) done. Loss: 0.4371 lr:0.100000 +[ Mon Sep 12 20:05:41 2022 ] Eval epoch: 38 +[ Mon Sep 12 20:08:12 2022 ] Mean test loss of 796 batches: 4.276895999908447. +[ Mon Sep 12 20:08:12 2022 ] Top1: 35.45% +[ Mon Sep 12 20:08:13 2022 ] Top5: 66.57% +[ Mon Sep 12 20:08:13 2022 ] Training epoch: 39 +[ Mon Sep 12 20:08:50 2022 ] Batch(65/243) done. Loss: 0.8096 lr:0.100000 +[ Mon Sep 12 20:09:44 2022 ] Batch(165/243) done. Loss: 0.4515 lr:0.100000 +[ Mon Sep 12 20:10:25 2022 ] Eval epoch: 39 +[ Mon Sep 12 20:12:56 2022 ] Mean test loss of 796 batches: 3.5807816982269287. +[ Mon Sep 12 20:12:56 2022 ] Top1: 36.28% +[ Mon Sep 12 20:12:57 2022 ] Top5: 68.17% +[ Mon Sep 12 20:12:57 2022 ] Training epoch: 40 +[ Mon Sep 12 20:13:12 2022 ] Batch(22/243) done. Loss: 0.2295 lr:0.100000 +[ Mon Sep 12 20:14:05 2022 ] Batch(122/243) done. Loss: 0.3097 lr:0.100000 +[ Mon Sep 12 20:14:59 2022 ] Batch(222/243) done. Loss: 0.3564 lr:0.100000 +[ Mon Sep 12 20:15:10 2022 ] Eval epoch: 40 +[ Mon Sep 12 20:17:41 2022 ] Mean test loss of 796 batches: 2.955436944961548. +[ Mon Sep 12 20:17:41 2022 ] Top1: 44.20% +[ Mon Sep 12 20:17:41 2022 ] Top5: 75.99% +[ Mon Sep 12 20:17:42 2022 ] Training epoch: 41 +[ Mon Sep 12 20:18:27 2022 ] Batch(79/243) done. Loss: 0.5303 lr:0.100000 +[ Mon Sep 12 20:19:20 2022 ] Batch(179/243) done. Loss: 0.3567 lr:0.100000 +[ Mon Sep 12 20:19:54 2022 ] Eval epoch: 41 +[ Mon Sep 12 20:22:25 2022 ] Mean test loss of 796 batches: 3.0165064334869385. +[ Mon Sep 12 20:22:25 2022 ] Top1: 41.44% +[ Mon Sep 12 20:22:26 2022 ] Top5: 74.77% +[ Mon Sep 12 20:22:26 2022 ] Training epoch: 42 +[ Mon Sep 12 20:22:48 2022 ] Batch(36/243) done. 
Loss: 0.2645 lr:0.100000 +[ Mon Sep 12 20:23:42 2022 ] Batch(136/243) done. Loss: 0.4502 lr:0.100000 +[ Mon Sep 12 20:24:35 2022 ] Batch(236/243) done. Loss: 0.5138 lr:0.100000 +[ Mon Sep 12 20:24:39 2022 ] Eval epoch: 42 +[ Mon Sep 12 20:27:10 2022 ] Mean test loss of 796 batches: 3.1357388496398926. +[ Mon Sep 12 20:27:10 2022 ] Top1: 41.20% +[ Mon Sep 12 20:27:11 2022 ] Top5: 74.33% +[ Mon Sep 12 20:27:11 2022 ] Training epoch: 43 +[ Mon Sep 12 20:28:04 2022 ] Batch(93/243) done. Loss: 0.5577 lr:0.100000 +[ Mon Sep 12 20:28:57 2022 ] Batch(193/243) done. Loss: 0.4311 lr:0.100000 +[ Mon Sep 12 20:29:24 2022 ] Eval epoch: 43 +[ Mon Sep 12 20:31:55 2022 ] Mean test loss of 796 batches: 4.484673023223877. +[ Mon Sep 12 20:31:55 2022 ] Top1: 34.84% +[ Mon Sep 12 20:31:56 2022 ] Top5: 67.95% +[ Mon Sep 12 20:31:56 2022 ] Training epoch: 44 +[ Mon Sep 12 20:32:26 2022 ] Batch(50/243) done. Loss: 0.5115 lr:0.100000 +[ Mon Sep 12 20:33:19 2022 ] Batch(150/243) done. Loss: 0.6260 lr:0.100000 +[ Mon Sep 12 20:34:09 2022 ] Eval epoch: 44 +[ Mon Sep 12 20:36:40 2022 ] Mean test loss of 796 batches: 3.408787488937378. +[ Mon Sep 12 20:36:40 2022 ] Top1: 41.38% +[ Mon Sep 12 20:36:41 2022 ] Top5: 75.54% +[ Mon Sep 12 20:36:41 2022 ] Training epoch: 45 +[ Mon Sep 12 20:36:48 2022 ] Batch(7/243) done. Loss: 0.4537 lr:0.100000 +[ Mon Sep 12 20:37:41 2022 ] Batch(107/243) done. Loss: 0.4634 lr:0.100000 +[ Mon Sep 12 20:38:35 2022 ] Batch(207/243) done. Loss: 0.3688 lr:0.100000 +[ Mon Sep 12 20:38:54 2022 ] Eval epoch: 45 +[ Mon Sep 12 20:41:25 2022 ] Mean test loss of 796 batches: 3.4471657276153564. +[ Mon Sep 12 20:41:25 2022 ] Top1: 39.65% +[ Mon Sep 12 20:41:26 2022 ] Top5: 72.64% +[ Mon Sep 12 20:41:26 2022 ] Training epoch: 46 +[ Mon Sep 12 20:42:03 2022 ] Batch(64/243) done. Loss: 0.2999 lr:0.100000 +[ Mon Sep 12 20:42:57 2022 ] Batch(164/243) done. 
Loss: 0.4602 lr:0.100000 +[ Mon Sep 12 20:43:39 2022 ] Eval epoch: 46 +[ Mon Sep 12 20:46:10 2022 ] Mean test loss of 796 batches: 3.1922051906585693. +[ Mon Sep 12 20:46:10 2022 ] Top1: 38.30% +[ Mon Sep 12 20:46:11 2022 ] Top5: 71.94% +[ Mon Sep 12 20:46:11 2022 ] Training epoch: 47 +[ Mon Sep 12 20:46:25 2022 ] Batch(21/243) done. Loss: 0.4091 lr:0.100000 +[ Mon Sep 12 20:47:18 2022 ] Batch(121/243) done. Loss: 0.2515 lr:0.100000 +[ Mon Sep 12 20:48:12 2022 ] Batch(221/243) done. Loss: 0.5852 lr:0.100000 +[ Mon Sep 12 20:48:24 2022 ] Eval epoch: 47 +[ Mon Sep 12 20:50:54 2022 ] Mean test loss of 796 batches: 3.2509422302246094. +[ Mon Sep 12 20:50:55 2022 ] Top1: 39.08% +[ Mon Sep 12 20:50:55 2022 ] Top5: 74.01% +[ Mon Sep 12 20:50:55 2022 ] Training epoch: 48 +[ Mon Sep 12 20:51:40 2022 ] Batch(78/243) done. Loss: 0.4491 lr:0.100000 +[ Mon Sep 12 20:52:33 2022 ] Batch(178/243) done. Loss: 0.4859 lr:0.100000 +[ Mon Sep 12 20:53:08 2022 ] Eval epoch: 48 +[ Mon Sep 12 20:55:39 2022 ] Mean test loss of 796 batches: 4.199170112609863. +[ Mon Sep 12 20:55:39 2022 ] Top1: 36.34% +[ Mon Sep 12 20:55:40 2022 ] Top5: 68.84% +[ Mon Sep 12 20:55:40 2022 ] Training epoch: 49 +[ Mon Sep 12 20:56:02 2022 ] Batch(35/243) done. Loss: 0.5163 lr:0.100000 +[ Mon Sep 12 20:56:55 2022 ] Batch(135/243) done. Loss: 0.5192 lr:0.100000 +[ Mon Sep 12 20:57:49 2022 ] Batch(235/243) done. Loss: 0.2901 lr:0.100000 +[ Mon Sep 12 20:57:53 2022 ] Eval epoch: 49 +[ Mon Sep 12 21:00:24 2022 ] Mean test loss of 796 batches: 4.176433563232422. +[ Mon Sep 12 21:00:24 2022 ] Top1: 39.08% +[ Mon Sep 12 21:00:25 2022 ] Top5: 70.88% +[ Mon Sep 12 21:00:25 2022 ] Training epoch: 50 +[ Mon Sep 12 21:01:17 2022 ] Batch(92/243) done. Loss: 0.3617 lr:0.100000 +[ Mon Sep 12 21:02:11 2022 ] Batch(192/243) done. Loss: 0.3877 lr:0.100000 +[ Mon Sep 12 21:02:38 2022 ] Eval epoch: 50 +[ Mon Sep 12 21:05:09 2022 ] Mean test loss of 796 batches: 3.0291857719421387. 
+[ Mon Sep 12 21:05:09 2022 ] Top1: 41.97% +[ Mon Sep 12 21:05:09 2022 ] Top5: 76.29% +[ Mon Sep 12 21:05:10 2022 ] Training epoch: 51 +[ Mon Sep 12 21:05:39 2022 ] Batch(49/243) done. Loss: 0.3313 lr:0.100000 +[ Mon Sep 12 21:06:32 2022 ] Batch(149/243) done. Loss: 0.4774 lr:0.100000 +[ Mon Sep 12 21:07:23 2022 ] Eval epoch: 51 +[ Mon Sep 12 21:09:54 2022 ] Mean test loss of 796 batches: 4.730879306793213. +[ Mon Sep 12 21:09:54 2022 ] Top1: 29.74% +[ Mon Sep 12 21:09:54 2022 ] Top5: 62.17% +[ Mon Sep 12 21:09:55 2022 ] Training epoch: 52 +[ Mon Sep 12 21:10:01 2022 ] Batch(6/243) done. Loss: 0.2274 lr:0.100000 +[ Mon Sep 12 21:10:54 2022 ] Batch(106/243) done. Loss: 0.1653 lr:0.100000 +[ Mon Sep 12 21:11:48 2022 ] Batch(206/243) done. Loss: 0.5538 lr:0.100000 +[ Mon Sep 12 21:12:08 2022 ] Eval epoch: 52 +[ Mon Sep 12 21:14:39 2022 ] Mean test loss of 796 batches: 3.6701395511627197. +[ Mon Sep 12 21:14:39 2022 ] Top1: 40.39% +[ Mon Sep 12 21:14:39 2022 ] Top5: 71.54% +[ Mon Sep 12 21:14:40 2022 ] Training epoch: 53 +[ Mon Sep 12 21:15:16 2022 ] Batch(63/243) done. Loss: 0.3948 lr:0.100000 +[ Mon Sep 12 21:16:10 2022 ] Batch(163/243) done. Loss: 0.4890 lr:0.100000 +[ Mon Sep 12 21:16:52 2022 ] Eval epoch: 53 +[ Mon Sep 12 21:19:23 2022 ] Mean test loss of 796 batches: 3.335566759109497. +[ Mon Sep 12 21:19:24 2022 ] Top1: 39.80% +[ Mon Sep 12 21:19:24 2022 ] Top5: 71.85% +[ Mon Sep 12 21:19:24 2022 ] Training epoch: 54 +[ Mon Sep 12 21:19:38 2022 ] Batch(20/243) done. Loss: 0.3850 lr:0.100000 +[ Mon Sep 12 21:20:32 2022 ] Batch(120/243) done. Loss: 0.5277 lr:0.100000 +[ Mon Sep 12 21:21:25 2022 ] Batch(220/243) done. Loss: 0.4992 lr:0.100000 +[ Mon Sep 12 21:21:37 2022 ] Eval epoch: 54 +[ Mon Sep 12 21:24:09 2022 ] Mean test loss of 796 batches: 3.5634169578552246. +[ Mon Sep 12 21:24:09 2022 ] Top1: 43.06% +[ Mon Sep 12 21:24:09 2022 ] Top5: 74.42% +[ Mon Sep 12 21:24:10 2022 ] Training epoch: 55 +[ Mon Sep 12 21:24:54 2022 ] Batch(77/243) done. 
Loss: 0.4606 lr:0.100000 +[ Mon Sep 12 21:25:47 2022 ] Batch(177/243) done. Loss: 0.8160 lr:0.100000 +[ Mon Sep 12 21:26:22 2022 ] Eval epoch: 55 +[ Mon Sep 12 21:28:53 2022 ] Mean test loss of 796 batches: 3.872100353240967. +[ Mon Sep 12 21:28:54 2022 ] Top1: 39.43% +[ Mon Sep 12 21:28:54 2022 ] Top5: 73.59% +[ Mon Sep 12 21:28:54 2022 ] Training epoch: 56 +[ Mon Sep 12 21:29:15 2022 ] Batch(34/243) done. Loss: 0.5868 lr:0.100000 +[ Mon Sep 12 21:30:09 2022 ] Batch(134/243) done. Loss: 0.2790 lr:0.100000 +[ Mon Sep 12 21:31:02 2022 ] Batch(234/243) done. Loss: 0.3649 lr:0.100000 +[ Mon Sep 12 21:31:07 2022 ] Eval epoch: 56 +[ Mon Sep 12 21:33:38 2022 ] Mean test loss of 796 batches: 3.6488142013549805. +[ Mon Sep 12 21:33:39 2022 ] Top1: 39.34% +[ Mon Sep 12 21:33:39 2022 ] Top5: 71.31% +[ Mon Sep 12 21:33:39 2022 ] Training epoch: 57 +[ Mon Sep 12 21:34:31 2022 ] Batch(91/243) done. Loss: 0.2369 lr:0.100000 +[ Mon Sep 12 21:35:24 2022 ] Batch(191/243) done. Loss: 0.4638 lr:0.100000 +[ Mon Sep 12 21:35:52 2022 ] Eval epoch: 57 +[ Mon Sep 12 21:38:23 2022 ] Mean test loss of 796 batches: 4.045055866241455. +[ Mon Sep 12 21:38:23 2022 ] Top1: 39.11% +[ Mon Sep 12 21:38:24 2022 ] Top5: 71.53% +[ Mon Sep 12 21:38:24 2022 ] Training epoch: 58 +[ Mon Sep 12 21:38:53 2022 ] Batch(48/243) done. Loss: 0.2509 lr:0.100000 +[ Mon Sep 12 21:39:46 2022 ] Batch(148/243) done. Loss: 0.4252 lr:0.100000 +[ Mon Sep 12 21:40:37 2022 ] Eval epoch: 58 +[ Mon Sep 12 21:43:08 2022 ] Mean test loss of 796 batches: 3.6366567611694336. +[ Mon Sep 12 21:43:08 2022 ] Top1: 40.15% +[ Mon Sep 12 21:43:09 2022 ] Top5: 73.47% +[ Mon Sep 12 21:43:09 2022 ] Training epoch: 59 +[ Mon Sep 12 21:43:14 2022 ] Batch(5/243) done. Loss: 0.1725 lr:0.100000 +[ Mon Sep 12 21:44:08 2022 ] Batch(105/243) done. Loss: 0.1798 lr:0.100000 +[ Mon Sep 12 21:45:01 2022 ] Batch(205/243) done. 
Loss: 0.5699 lr:0.100000 +[ Mon Sep 12 21:45:21 2022 ] Eval epoch: 59 +[ Mon Sep 12 21:47:52 2022 ] Mean test loss of 796 batches: 4.1476850509643555. +[ Mon Sep 12 21:47:53 2022 ] Top1: 36.29% +[ Mon Sep 12 21:47:53 2022 ] Top5: 68.46% +[ Mon Sep 12 21:47:53 2022 ] Training epoch: 60 +[ Mon Sep 12 21:48:30 2022 ] Batch(62/243) done. Loss: 0.1565 lr:0.100000 +[ Mon Sep 12 21:49:23 2022 ] Batch(162/243) done. Loss: 0.3137 lr:0.100000 +[ Mon Sep 12 21:50:06 2022 ] Eval epoch: 60 +[ Mon Sep 12 21:52:38 2022 ] Mean test loss of 796 batches: 3.4781711101531982. +[ Mon Sep 12 21:52:38 2022 ] Top1: 44.30% +[ Mon Sep 12 21:52:39 2022 ] Top5: 76.36% +[ Mon Sep 12 21:52:39 2022 ] Training epoch: 61 +[ Mon Sep 12 21:52:52 2022 ] Batch(19/243) done. Loss: 0.2289 lr:0.010000 +[ Mon Sep 12 21:53:46 2022 ] Batch(119/243) done. Loss: 0.1061 lr:0.010000 +[ Mon Sep 12 21:54:39 2022 ] Batch(219/243) done. Loss: 0.1921 lr:0.010000 +[ Mon Sep 12 21:54:52 2022 ] Eval epoch: 61 +[ Mon Sep 12 21:57:23 2022 ] Mean test loss of 796 batches: 2.852508068084717. +[ Mon Sep 12 21:57:23 2022 ] Top1: 49.14% +[ Mon Sep 12 21:57:24 2022 ] Top5: 80.56% +[ Mon Sep 12 21:57:24 2022 ] Training epoch: 62 +[ Mon Sep 12 21:58:07 2022 ] Batch(76/243) done. Loss: 0.2023 lr:0.010000 +[ Mon Sep 12 21:59:01 2022 ] Batch(176/243) done. Loss: 0.0368 lr:0.010000 +[ Mon Sep 12 21:59:36 2022 ] Eval epoch: 62 +[ Mon Sep 12 22:02:07 2022 ] Mean test loss of 796 batches: 3.0011351108551025. +[ Mon Sep 12 22:02:08 2022 ] Top1: 47.58% +[ Mon Sep 12 22:02:08 2022 ] Top5: 79.43% +[ Mon Sep 12 22:02:08 2022 ] Training epoch: 63 +[ Mon Sep 12 22:02:29 2022 ] Batch(33/243) done. Loss: 0.1184 lr:0.010000 +[ Mon Sep 12 22:03:22 2022 ] Batch(133/243) done. Loss: 0.1296 lr:0.010000 +[ Mon Sep 12 22:04:16 2022 ] Batch(233/243) done. Loss: 0.2059 lr:0.010000 +[ Mon Sep 12 22:04:21 2022 ] Eval epoch: 63 +[ Mon Sep 12 22:06:52 2022 ] Mean test loss of 796 batches: 2.870868682861328. 
+[ Mon Sep 12 22:06:53 2022 ] Top1: 51.09% +[ Mon Sep 12 22:06:53 2022 ] Top5: 81.82% +[ Mon Sep 12 22:06:53 2022 ] Training epoch: 64 +[ Mon Sep 12 22:07:44 2022 ] Batch(90/243) done. Loss: 0.1001 lr:0.010000 +[ Mon Sep 12 22:08:38 2022 ] Batch(190/243) done. Loss: 0.1054 lr:0.010000 +[ Mon Sep 12 22:09:06 2022 ] Eval epoch: 64 +[ Mon Sep 12 22:11:38 2022 ] Mean test loss of 796 batches: 2.920163631439209. +[ Mon Sep 12 22:11:38 2022 ] Top1: 51.21% +[ Mon Sep 12 22:11:39 2022 ] Top5: 81.85% +[ Mon Sep 12 22:11:39 2022 ] Training epoch: 65 +[ Mon Sep 12 22:12:07 2022 ] Batch(47/243) done. Loss: 0.0761 lr:0.010000 +[ Mon Sep 12 22:13:01 2022 ] Batch(147/243) done. Loss: 0.0371 lr:0.010000 +[ Mon Sep 12 22:13:52 2022 ] Eval epoch: 65 +[ Mon Sep 12 22:16:23 2022 ] Mean test loss of 796 batches: 3.0438802242279053. +[ Mon Sep 12 22:16:23 2022 ] Top1: 49.43% +[ Mon Sep 12 22:16:24 2022 ] Top5: 80.56% +[ Mon Sep 12 22:16:24 2022 ] Training epoch: 66 +[ Mon Sep 12 22:16:29 2022 ] Batch(4/243) done. Loss: 0.0809 lr:0.010000 +[ Mon Sep 12 22:17:23 2022 ] Batch(104/243) done. Loss: 0.0946 lr:0.010000 +[ Mon Sep 12 22:18:16 2022 ] Batch(204/243) done. Loss: 0.0191 lr:0.010000 +[ Mon Sep 12 22:18:37 2022 ] Eval epoch: 66 +[ Mon Sep 12 22:21:08 2022 ] Mean test loss of 796 batches: 3.0824527740478516. +[ Mon Sep 12 22:21:09 2022 ] Top1: 51.53% +[ Mon Sep 12 22:21:09 2022 ] Top5: 81.71% +[ Mon Sep 12 22:21:09 2022 ] Training epoch: 67 +[ Mon Sep 12 22:21:45 2022 ] Batch(61/243) done. Loss: 0.0327 lr:0.010000 +[ Mon Sep 12 22:22:39 2022 ] Batch(161/243) done. Loss: 0.0732 lr:0.010000 +[ Mon Sep 12 22:23:22 2022 ] Eval epoch: 67 +[ Mon Sep 12 22:25:53 2022 ] Mean test loss of 796 batches: 3.083371162414551. +[ Mon Sep 12 22:25:54 2022 ] Top1: 51.02% +[ Mon Sep 12 22:25:54 2022 ] Top5: 81.61% +[ Mon Sep 12 22:25:54 2022 ] Training epoch: 68 +[ Mon Sep 12 22:26:07 2022 ] Batch(18/243) done. Loss: 0.0614 lr:0.010000 +[ Mon Sep 12 22:27:00 2022 ] Batch(118/243) done. 
Loss: 0.0235 lr:0.010000 +[ Mon Sep 12 22:27:54 2022 ] Batch(218/243) done. Loss: 0.0443 lr:0.010000 +[ Mon Sep 12 22:28:07 2022 ] Eval epoch: 68 +[ Mon Sep 12 22:30:38 2022 ] Mean test loss of 796 batches: 3.059816360473633. +[ Mon Sep 12 22:30:39 2022 ] Top1: 51.32% +[ Mon Sep 12 22:30:39 2022 ] Top5: 81.60% +[ Mon Sep 12 22:30:39 2022 ] Training epoch: 69 +[ Mon Sep 12 22:31:22 2022 ] Batch(75/243) done. Loss: 0.0619 lr:0.010000 +[ Mon Sep 12 22:32:16 2022 ] Batch(175/243) done. Loss: 0.2430 lr:0.010000 +[ Mon Sep 12 22:32:52 2022 ] Eval epoch: 69 +[ Mon Sep 12 22:35:24 2022 ] Mean test loss of 796 batches: 3.0981345176696777. +[ Mon Sep 12 22:35:24 2022 ] Top1: 51.92% +[ Mon Sep 12 22:35:24 2022 ] Top5: 82.02% +[ Mon Sep 12 22:35:25 2022 ] Training epoch: 70 +[ Mon Sep 12 22:35:45 2022 ] Batch(32/243) done. Loss: 0.0729 lr:0.010000 +[ Mon Sep 12 22:36:38 2022 ] Batch(132/243) done. Loss: 0.0404 lr:0.010000 +[ Mon Sep 12 22:37:32 2022 ] Batch(232/243) done. Loss: 0.0802 lr:0.010000 +[ Mon Sep 12 22:37:38 2022 ] Eval epoch: 70 +[ Mon Sep 12 22:40:08 2022 ] Mean test loss of 796 batches: 3.0480141639709473. +[ Mon Sep 12 22:40:09 2022 ] Top1: 51.89% +[ Mon Sep 12 22:40:09 2022 ] Top5: 81.99% +[ Mon Sep 12 22:40:09 2022 ] Training epoch: 71 +[ Mon Sep 12 22:41:00 2022 ] Batch(89/243) done. Loss: 0.0413 lr:0.010000 +[ Mon Sep 12 22:41:54 2022 ] Batch(189/243) done. Loss: 0.0038 lr:0.010000 +[ Mon Sep 12 22:42:22 2022 ] Eval epoch: 71 +[ Mon Sep 12 22:44:53 2022 ] Mean test loss of 796 batches: 3.1876227855682373. +[ Mon Sep 12 22:44:54 2022 ] Top1: 51.41% +[ Mon Sep 12 22:44:54 2022 ] Top5: 81.72% +[ Mon Sep 12 22:44:54 2022 ] Training epoch: 72 +[ Mon Sep 12 22:45:22 2022 ] Batch(46/243) done. Loss: 0.0979 lr:0.010000 +[ Mon Sep 12 22:46:15 2022 ] Batch(146/243) done. Loss: 0.0448 lr:0.010000 +[ Mon Sep 12 22:47:07 2022 ] Eval epoch: 72 +[ Mon Sep 12 22:49:37 2022 ] Mean test loss of 796 batches: 3.284703254699707. 
+[ Mon Sep 12 22:49:38 2022 ] Top1: 49.42% +[ Mon Sep 12 22:49:38 2022 ] Top5: 80.76% +[ Mon Sep 12 22:49:38 2022 ] Training epoch: 73 +[ Mon Sep 12 22:49:43 2022 ] Batch(3/243) done. Loss: 0.0261 lr:0.010000 +[ Mon Sep 12 22:50:36 2022 ] Batch(103/243) done. Loss: 0.0331 lr:0.010000 +[ Mon Sep 12 22:51:30 2022 ] Batch(203/243) done. Loss: 0.0445 lr:0.010000 +[ Mon Sep 12 22:51:51 2022 ] Eval epoch: 73 +[ Mon Sep 12 22:54:22 2022 ] Mean test loss of 796 batches: 3.1807875633239746. +[ Mon Sep 12 22:54:23 2022 ] Top1: 51.38% +[ Mon Sep 12 22:54:23 2022 ] Top5: 81.59% +[ Mon Sep 12 22:54:23 2022 ] Training epoch: 74 +[ Mon Sep 12 22:54:58 2022 ] Batch(60/243) done. Loss: 0.0416 lr:0.010000 +[ Mon Sep 12 22:55:52 2022 ] Batch(160/243) done. Loss: 0.0468 lr:0.010000 +[ Mon Sep 12 22:56:36 2022 ] Eval epoch: 74 +[ Mon Sep 12 22:59:07 2022 ] Mean test loss of 796 batches: 3.26816725730896. +[ Mon Sep 12 22:59:07 2022 ] Top1: 50.86% +[ Mon Sep 12 22:59:08 2022 ] Top5: 81.36% +[ Mon Sep 12 22:59:08 2022 ] Training epoch: 75 +[ Mon Sep 12 22:59:20 2022 ] Batch(17/243) done. Loss: 0.0461 lr:0.010000 +[ Mon Sep 12 23:00:14 2022 ] Batch(117/243) done. Loss: 0.0289 lr:0.010000 +[ Mon Sep 12 23:01:07 2022 ] Batch(217/243) done. Loss: 0.0427 lr:0.010000 +[ Mon Sep 12 23:01:21 2022 ] Eval epoch: 75 +[ Mon Sep 12 23:03:52 2022 ] Mean test loss of 796 batches: 3.1034815311431885. +[ Mon Sep 12 23:03:52 2022 ] Top1: 52.44% +[ Mon Sep 12 23:03:53 2022 ] Top5: 82.28% +[ Mon Sep 12 23:03:53 2022 ] Training epoch: 76 +[ Mon Sep 12 23:04:36 2022 ] Batch(74/243) done. Loss: 0.0704 lr:0.010000 +[ Mon Sep 12 23:05:29 2022 ] Batch(174/243) done. Loss: 0.0075 lr:0.010000 +[ Mon Sep 12 23:06:06 2022 ] Eval epoch: 76 +[ Mon Sep 12 23:08:37 2022 ] Mean test loss of 796 batches: 3.2890477180480957. +[ Mon Sep 12 23:08:37 2022 ] Top1: 49.28% +[ Mon Sep 12 23:08:37 2022 ] Top5: 80.70% +[ Mon Sep 12 23:08:38 2022 ] Training epoch: 77 +[ Mon Sep 12 23:08:57 2022 ] Batch(31/243) done. 
Loss: 0.0824 lr:0.010000 +[ Mon Sep 12 23:09:51 2022 ] Batch(131/243) done. Loss: 0.0392 lr:0.010000 +[ Mon Sep 12 23:10:44 2022 ] Batch(231/243) done. Loss: 0.0310 lr:0.010000 +[ Mon Sep 12 23:10:51 2022 ] Eval epoch: 77 +[ Mon Sep 12 23:13:21 2022 ] Mean test loss of 796 batches: 3.286661386489868. +[ Mon Sep 12 23:13:22 2022 ] Top1: 51.69% +[ Mon Sep 12 23:13:22 2022 ] Top5: 82.00% +[ Mon Sep 12 23:13:22 2022 ] Training epoch: 78 +[ Mon Sep 12 23:14:12 2022 ] Batch(88/243) done. Loss: 0.0495 lr:0.010000 +[ Mon Sep 12 23:15:06 2022 ] Batch(188/243) done. Loss: 0.0499 lr:0.010000 +[ Mon Sep 12 23:15:35 2022 ] Eval epoch: 78 +[ Mon Sep 12 23:18:06 2022 ] Mean test loss of 796 batches: 3.410419464111328. +[ Mon Sep 12 23:18:06 2022 ] Top1: 50.12% +[ Mon Sep 12 23:18:07 2022 ] Top5: 80.87% +[ Mon Sep 12 23:18:07 2022 ] Training epoch: 79 +[ Mon Sep 12 23:18:34 2022 ] Batch(45/243) done. Loss: 0.0719 lr:0.010000 +[ Mon Sep 12 23:19:28 2022 ] Batch(145/243) done. Loss: 0.0136 lr:0.010000 +[ Mon Sep 12 23:20:20 2022 ] Eval epoch: 79 +[ Mon Sep 12 23:22:51 2022 ] Mean test loss of 796 batches: 3.340691566467285. +[ Mon Sep 12 23:22:51 2022 ] Top1: 51.51% +[ Mon Sep 12 23:22:52 2022 ] Top5: 81.60% +[ Mon Sep 12 23:22:52 2022 ] Training epoch: 80 +[ Mon Sep 12 23:22:56 2022 ] Batch(2/243) done. Loss: 0.0512 lr:0.010000 +[ Mon Sep 12 23:23:49 2022 ] Batch(102/243) done. Loss: 0.0190 lr:0.010000 +[ Mon Sep 12 23:24:43 2022 ] Batch(202/243) done. Loss: 0.0227 lr:0.010000 +[ Mon Sep 12 23:25:05 2022 ] Eval epoch: 80 +[ Mon Sep 12 23:27:36 2022 ] Mean test loss of 796 batches: 3.4196999073028564. +[ Mon Sep 12 23:27:36 2022 ] Top1: 50.91% +[ Mon Sep 12 23:27:37 2022 ] Top5: 81.32% +[ Mon Sep 12 23:27:37 2022 ] Training epoch: 81 +[ Mon Sep 12 23:28:12 2022 ] Batch(59/243) done. Loss: 0.1012 lr:0.001000 +[ Mon Sep 12 23:29:05 2022 ] Batch(159/243) done. 
Loss: 0.0594 lr:0.001000 +[ Mon Sep 12 23:29:50 2022 ] Eval epoch: 81 +[ Mon Sep 12 23:32:21 2022 ] Mean test loss of 796 batches: 3.4129106998443604. +[ Mon Sep 12 23:32:21 2022 ] Top1: 49.94% +[ Mon Sep 12 23:32:22 2022 ] Top5: 80.92% +[ Mon Sep 12 23:32:22 2022 ] Training epoch: 82 +[ Mon Sep 12 23:32:33 2022 ] Batch(16/243) done. Loss: 0.0366 lr:0.001000 +[ Mon Sep 12 23:33:27 2022 ] Batch(116/243) done. Loss: 0.0414 lr:0.001000 +[ Mon Sep 12 23:34:20 2022 ] Batch(216/243) done. Loss: 0.0294 lr:0.001000 +[ Mon Sep 12 23:34:35 2022 ] Eval epoch: 82 +[ Mon Sep 12 23:37:05 2022 ] Mean test loss of 796 batches: 3.4732067584991455. +[ Mon Sep 12 23:37:06 2022 ] Top1: 50.00% +[ Mon Sep 12 23:37:06 2022 ] Top5: 80.94% +[ Mon Sep 12 23:37:06 2022 ] Training epoch: 83 +[ Mon Sep 12 23:37:49 2022 ] Batch(73/243) done. Loss: 0.0921 lr:0.001000 +[ Mon Sep 12 23:38:42 2022 ] Batch(173/243) done. Loss: 0.0280 lr:0.001000 +[ Mon Sep 12 23:39:19 2022 ] Eval epoch: 83 +[ Mon Sep 12 23:41:50 2022 ] Mean test loss of 796 batches: 3.2555062770843506. +[ Mon Sep 12 23:41:50 2022 ] Top1: 51.72% +[ Mon Sep 12 23:41:51 2022 ] Top5: 82.01% +[ Mon Sep 12 23:41:51 2022 ] Training epoch: 84 +[ Mon Sep 12 23:42:10 2022 ] Batch(30/243) done. Loss: 0.0519 lr:0.001000 +[ Mon Sep 12 23:43:03 2022 ] Batch(130/243) done. Loss: 0.0613 lr:0.001000 +[ Mon Sep 12 23:43:57 2022 ] Batch(230/243) done. Loss: 0.0280 lr:0.001000 +[ Mon Sep 12 23:44:03 2022 ] Eval epoch: 84 +[ Mon Sep 12 23:46:34 2022 ] Mean test loss of 796 batches: 3.338073968887329. +[ Mon Sep 12 23:46:35 2022 ] Top1: 51.04% +[ Mon Sep 12 23:46:35 2022 ] Top5: 81.55% +[ Mon Sep 12 23:46:35 2022 ] Training epoch: 85 +[ Mon Sep 12 23:47:25 2022 ] Batch(87/243) done. Loss: 0.0245 lr:0.001000 +[ Mon Sep 12 23:48:19 2022 ] Batch(187/243) done. Loss: 0.0188 lr:0.001000 +[ Mon Sep 12 23:48:48 2022 ] Eval epoch: 85 +[ Mon Sep 12 23:51:19 2022 ] Mean test loss of 796 batches: 3.3033416271209717. 
+[ Mon Sep 12 23:51:19 2022 ] Top1: 51.45% +[ Mon Sep 12 23:51:20 2022 ] Top5: 81.72% +[ Mon Sep 12 23:51:20 2022 ] Training epoch: 86 +[ Mon Sep 12 23:51:47 2022 ] Batch(44/243) done. Loss: 0.0434 lr:0.001000 +[ Mon Sep 12 23:52:40 2022 ] Batch(144/243) done. Loss: 0.0236 lr:0.001000 +[ Mon Sep 12 23:53:33 2022 ] Eval epoch: 86 +[ Mon Sep 12 23:56:04 2022 ] Mean test loss of 796 batches: 3.372443199157715. +[ Mon Sep 12 23:56:04 2022 ] Top1: 51.01% +[ Mon Sep 12 23:56:05 2022 ] Top5: 81.37% +[ Mon Sep 12 23:56:05 2022 ] Training epoch: 87 +[ Mon Sep 12 23:56:08 2022 ] Batch(1/243) done. Loss: 0.0489 lr:0.001000 +[ Mon Sep 12 23:57:02 2022 ] Batch(101/243) done. Loss: 0.0604 lr:0.001000 +[ Mon Sep 12 23:57:55 2022 ] Batch(201/243) done. Loss: 0.0703 lr:0.001000 +[ Mon Sep 12 23:58:17 2022 ] Eval epoch: 87 +[ Tue Sep 13 00:00:48 2022 ] Mean test loss of 796 batches: 3.3413257598876953. +[ Tue Sep 13 00:00:48 2022 ] Top1: 51.24% +[ Tue Sep 13 00:00:49 2022 ] Top5: 81.68% +[ Tue Sep 13 00:00:49 2022 ] Training epoch: 88 +[ Tue Sep 13 00:01:23 2022 ] Batch(58/243) done. Loss: 0.0489 lr:0.001000 +[ Tue Sep 13 00:02:17 2022 ] Batch(158/243) done. Loss: 0.0582 lr:0.001000 +[ Tue Sep 13 00:03:02 2022 ] Eval epoch: 88 +[ Tue Sep 13 00:05:33 2022 ] Mean test loss of 796 batches: 3.504627227783203. +[ Tue Sep 13 00:05:33 2022 ] Top1: 48.36% +[ Tue Sep 13 00:05:33 2022 ] Top5: 79.56% +[ Tue Sep 13 00:05:34 2022 ] Training epoch: 89 +[ Tue Sep 13 00:05:45 2022 ] Batch(15/243) done. Loss: 0.1064 lr:0.001000 +[ Tue Sep 13 00:06:38 2022 ] Batch(115/243) done. Loss: 0.0455 lr:0.001000 +[ Tue Sep 13 00:07:32 2022 ] Batch(215/243) done. Loss: 0.0330 lr:0.001000 +[ Tue Sep 13 00:07:47 2022 ] Eval epoch: 89 +[ Tue Sep 13 00:10:17 2022 ] Mean test loss of 796 batches: 3.399523973464966. +[ Tue Sep 13 00:10:18 2022 ] Top1: 49.74% +[ Tue Sep 13 00:10:18 2022 ] Top5: 80.94% +[ Tue Sep 13 00:10:18 2022 ] Training epoch: 90 +[ Tue Sep 13 00:11:00 2022 ] Batch(72/243) done. 
Loss: 0.0055 lr:0.001000 +[ Tue Sep 13 00:11:54 2022 ] Batch(172/243) done. Loss: 0.1521 lr:0.001000 +[ Tue Sep 13 00:12:31 2022 ] Eval epoch: 90 +[ Tue Sep 13 00:15:02 2022 ] Mean test loss of 796 batches: 3.3444113731384277. +[ Tue Sep 13 00:15:02 2022 ] Top1: 51.10% +[ Tue Sep 13 00:15:03 2022 ] Top5: 81.42% +[ Tue Sep 13 00:15:03 2022 ] Training epoch: 91 +[ Tue Sep 13 00:15:21 2022 ] Batch(29/243) done. Loss: 0.0395 lr:0.001000 +[ Tue Sep 13 00:16:15 2022 ] Batch(129/243) done. Loss: 0.0075 lr:0.001000 +[ Tue Sep 13 00:17:08 2022 ] Batch(229/243) done. Loss: 0.0953 lr:0.001000 +[ Tue Sep 13 00:17:16 2022 ] Eval epoch: 91 +[ Tue Sep 13 00:19:47 2022 ] Mean test loss of 796 batches: 3.4517605304718018. +[ Tue Sep 13 00:19:47 2022 ] Top1: 49.49% +[ Tue Sep 13 00:19:47 2022 ] Top5: 80.58% +[ Tue Sep 13 00:19:48 2022 ] Training epoch: 92 +[ Tue Sep 13 00:20:37 2022 ] Batch(86/243) done. Loss: 0.0396 lr:0.001000 +[ Tue Sep 13 00:21:30 2022 ] Batch(186/243) done. Loss: 0.0267 lr:0.001000 +[ Tue Sep 13 00:22:01 2022 ] Eval epoch: 92 +[ Tue Sep 13 00:24:32 2022 ] Mean test loss of 796 batches: 3.5669608116149902. +[ Tue Sep 13 00:24:32 2022 ] Top1: 47.84% +[ Tue Sep 13 00:24:33 2022 ] Top5: 79.81% +[ Tue Sep 13 00:24:33 2022 ] Training epoch: 93 +[ Tue Sep 13 00:24:59 2022 ] Batch(43/243) done. Loss: 0.0418 lr:0.001000 +[ Tue Sep 13 00:25:52 2022 ] Batch(143/243) done. Loss: 0.0722 lr:0.001000 +[ Tue Sep 13 00:26:46 2022 ] Eval epoch: 93 +[ Tue Sep 13 00:29:16 2022 ] Mean test loss of 796 batches: 3.3956525325775146. +[ Tue Sep 13 00:29:17 2022 ] Top1: 51.18% +[ Tue Sep 13 00:29:17 2022 ] Top5: 81.62% +[ Tue Sep 13 00:29:17 2022 ] Training epoch: 94 +[ Tue Sep 13 00:29:20 2022 ] Batch(0/243) done. Loss: 0.0832 lr:0.001000 +[ Tue Sep 13 00:30:14 2022 ] Batch(100/243) done. Loss: 0.0261 lr:0.001000 +[ Tue Sep 13 00:31:07 2022 ] Batch(200/243) done. 
Loss: 0.0256 lr:0.001000 +[ Tue Sep 13 00:31:30 2022 ] Eval epoch: 94 +[ Tue Sep 13 00:34:00 2022 ] Mean test loss of 796 batches: 3.412087917327881. +[ Tue Sep 13 00:34:01 2022 ] Top1: 50.98% +[ Tue Sep 13 00:34:01 2022 ] Top5: 81.43% +[ Tue Sep 13 00:34:01 2022 ] Training epoch: 95 +[ Tue Sep 13 00:34:35 2022 ] Batch(57/243) done. Loss: 0.2316 lr:0.001000 +[ Tue Sep 13 00:35:28 2022 ] Batch(157/243) done. Loss: 0.0360 lr:0.001000 +[ Tue Sep 13 00:36:14 2022 ] Eval epoch: 95 +[ Tue Sep 13 00:38:45 2022 ] Mean test loss of 796 batches: 3.4245078563690186. +[ Tue Sep 13 00:38:45 2022 ] Top1: 50.40% +[ Tue Sep 13 00:38:45 2022 ] Top5: 81.34% +[ Tue Sep 13 00:38:46 2022 ] Training epoch: 96 +[ Tue Sep 13 00:38:56 2022 ] Batch(14/243) done. Loss: 0.0458 lr:0.001000 +[ Tue Sep 13 00:39:50 2022 ] Batch(114/243) done. Loss: 0.0580 lr:0.001000 +[ Tue Sep 13 00:40:43 2022 ] Batch(214/243) done. Loss: 0.0146 lr:0.001000 +[ Tue Sep 13 00:40:58 2022 ] Eval epoch: 96 +[ Tue Sep 13 00:43:29 2022 ] Mean test loss of 796 batches: 3.6255176067352295. +[ Tue Sep 13 00:43:29 2022 ] Top1: 47.00% +[ Tue Sep 13 00:43:30 2022 ] Top5: 79.52% +[ Tue Sep 13 00:43:30 2022 ] Training epoch: 97 +[ Tue Sep 13 00:44:11 2022 ] Batch(71/243) done. Loss: 0.0982 lr:0.001000 +[ Tue Sep 13 00:45:05 2022 ] Batch(171/243) done. Loss: 0.0173 lr:0.001000 +[ Tue Sep 13 00:45:43 2022 ] Eval epoch: 97 +[ Tue Sep 13 00:48:14 2022 ] Mean test loss of 796 batches: 3.4557762145996094. +[ Tue Sep 13 00:48:14 2022 ] Top1: 50.08% +[ Tue Sep 13 00:48:14 2022 ] Top5: 80.97% +[ Tue Sep 13 00:48:15 2022 ] Training epoch: 98 +[ Tue Sep 13 00:48:33 2022 ] Batch(28/243) done. Loss: 0.0425 lr:0.001000 +[ Tue Sep 13 00:49:26 2022 ] Batch(128/243) done. Loss: 0.0405 lr:0.001000 +[ Tue Sep 13 00:50:20 2022 ] Batch(228/243) done. Loss: 0.0122 lr:0.001000 +[ Tue Sep 13 00:50:27 2022 ] Eval epoch: 98 +[ Tue Sep 13 00:52:58 2022 ] Mean test loss of 796 batches: 3.3831961154937744. 
+[ Tue Sep 13 00:52:59 2022 ] Top1: 51.53% +[ Tue Sep 13 00:52:59 2022 ] Top5: 81.86% +[ Tue Sep 13 00:52:59 2022 ] Training epoch: 99 +[ Tue Sep 13 00:53:48 2022 ] Batch(85/243) done. Loss: 0.0917 lr:0.001000 +[ Tue Sep 13 00:54:41 2022 ] Batch(185/243) done. Loss: 0.1321 lr:0.001000 +[ Tue Sep 13 00:55:12 2022 ] Eval epoch: 99 +[ Tue Sep 13 00:57:43 2022 ] Mean test loss of 796 batches: 3.407810926437378. +[ Tue Sep 13 00:57:44 2022 ] Top1: 50.55% +[ Tue Sep 13 00:57:44 2022 ] Top5: 81.20% +[ Tue Sep 13 00:57:44 2022 ] Training epoch: 100 +[ Tue Sep 13 00:58:09 2022 ] Batch(42/243) done. Loss: 0.0693 lr:0.001000 +[ Tue Sep 13 00:59:03 2022 ] Batch(142/243) done. Loss: 0.0106 lr:0.001000 +[ Tue Sep 13 00:59:56 2022 ] Batch(242/243) done. Loss: 0.0785 lr:0.001000 +[ Tue Sep 13 00:59:57 2022 ] Eval epoch: 100 +[ Tue Sep 13 01:02:27 2022 ] Mean test loss of 796 batches: 3.423464298248291. +[ Tue Sep 13 01:02:27 2022 ] Top1: 50.07% +[ Tue Sep 13 01:02:27 2022 ] Top5: 81.12% diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70724ac7be972860fe4922ee9d461743ea86b570 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu120_joint_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 
+save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = 
nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, 
num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, 
stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..4b55af3127502eb1910c772bfd9dfb33b165d3db --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7357c0977e0bbbe458e55546b83a0cb9e5690d38fe02c45612fe69dcb289fe38 +size 29946137 diff --git a/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..3ac1c27ac60e6a5d835520eeb2bfd974d7dcf698 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu120_xsub/ntu120_joint_xsub/log.txt @@ -0,0 +1,746 @@ +[ Mon Sep 12 17:08:11 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_xsub', 'model_saved_name': './save_models/ntu120_joint_xsub', 'Experiment_name': 'ntu120_joint_xsub', 'config': 
'./config/ntu120_xsub/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Mon Sep 12 17:08:11 2022 ] Training epoch: 1 +[ Mon Sep 12 17:09:01 2022 ] Batch(99/243) done. Loss: 3.7190 lr:0.100000 +[ Mon Sep 12 17:09:46 2022 ] Batch(199/243) done. Loss: 3.1809 lr:0.100000 +[ Mon Sep 12 17:10:05 2022 ] Eval epoch: 1 +[ Mon Sep 12 17:12:34 2022 ] Mean test loss of 796 batches: 5.361838340759277. +[ Mon Sep 12 17:12:35 2022 ] Top1: 4.77% +[ Mon Sep 12 17:12:35 2022 ] Top5: 17.25% +[ Mon Sep 12 17:12:35 2022 ] Training epoch: 2 +[ Mon Sep 12 17:13:08 2022 ] Batch(56/243) done. Loss: 3.0372 lr:0.100000 +[ Mon Sep 12 17:14:01 2022 ] Batch(156/243) done. 
Loss: 2.4914 lr:0.100000 +[ Mon Sep 12 17:14:46 2022 ] Eval epoch: 2 +[ Mon Sep 12 17:17:15 2022 ] Mean test loss of 796 batches: 4.679137229919434. +[ Mon Sep 12 17:17:16 2022 ] Top1: 10.01% +[ Mon Sep 12 17:17:16 2022 ] Top5: 25.83% +[ Mon Sep 12 17:17:16 2022 ] Training epoch: 3 +[ Mon Sep 12 17:17:26 2022 ] Batch(13/243) done. Loss: 2.5587 lr:0.100000 +[ Mon Sep 12 17:18:19 2022 ] Batch(113/243) done. Loss: 2.1768 lr:0.100000 +[ Mon Sep 12 17:19:12 2022 ] Batch(213/243) done. Loss: 2.3778 lr:0.100000 +[ Mon Sep 12 17:19:27 2022 ] Eval epoch: 3 +[ Mon Sep 12 17:21:56 2022 ] Mean test loss of 796 batches: 4.291816234588623. +[ Mon Sep 12 17:21:56 2022 ] Top1: 11.05% +[ Mon Sep 12 17:21:57 2022 ] Top5: 31.20% +[ Mon Sep 12 17:21:57 2022 ] Training epoch: 4 +[ Mon Sep 12 17:22:37 2022 ] Batch(70/243) done. Loss: 2.0107 lr:0.100000 +[ Mon Sep 12 17:23:30 2022 ] Batch(170/243) done. Loss: 2.0625 lr:0.100000 +[ Mon Sep 12 17:24:08 2022 ] Eval epoch: 4 +[ Mon Sep 12 17:26:37 2022 ] Mean test loss of 796 batches: 4.092510223388672. +[ Mon Sep 12 17:26:38 2022 ] Top1: 14.37% +[ Mon Sep 12 17:26:38 2022 ] Top5: 36.23% +[ Mon Sep 12 17:26:38 2022 ] Training epoch: 5 +[ Mon Sep 12 17:26:56 2022 ] Batch(27/243) done. Loss: 1.7477 lr:0.100000 +[ Mon Sep 12 17:27:49 2022 ] Batch(127/243) done. Loss: 1.9247 lr:0.100000 +[ Mon Sep 12 17:28:41 2022 ] Batch(227/243) done. Loss: 1.6601 lr:0.100000 +[ Mon Sep 12 17:28:49 2022 ] Eval epoch: 5 +[ Mon Sep 12 17:31:18 2022 ] Mean test loss of 796 batches: 3.604010581970215. +[ Mon Sep 12 17:31:19 2022 ] Top1: 18.38% +[ Mon Sep 12 17:31:19 2022 ] Top5: 44.76% +[ Mon Sep 12 17:31:19 2022 ] Training epoch: 6 +[ Mon Sep 12 17:32:07 2022 ] Batch(84/243) done. Loss: 1.8004 lr:0.100000 +[ Mon Sep 12 17:32:59 2022 ] Batch(184/243) done. Loss: 1.7991 lr:0.100000 +[ Mon Sep 12 17:33:30 2022 ] Eval epoch: 6 +[ Mon Sep 12 17:35:59 2022 ] Mean test loss of 796 batches: 3.3129656314849854. 
+[ Mon Sep 12 17:35:59 2022 ] Top1: 21.87% +[ Mon Sep 12 17:35:59 2022 ] Top5: 50.46% +[ Mon Sep 12 17:35:59 2022 ] Training epoch: 7 +[ Mon Sep 12 17:36:24 2022 ] Batch(41/243) done. Loss: 1.6051 lr:0.100000 +[ Mon Sep 12 17:37:17 2022 ] Batch(141/243) done. Loss: 1.1784 lr:0.100000 +[ Mon Sep 12 17:38:09 2022 ] Batch(241/243) done. Loss: 1.3763 lr:0.100000 +[ Mon Sep 12 17:38:10 2022 ] Eval epoch: 7 +[ Mon Sep 12 17:40:38 2022 ] Mean test loss of 796 batches: 2.904848575592041. +[ Mon Sep 12 17:40:39 2022 ] Top1: 27.32% +[ Mon Sep 12 17:40:39 2022 ] Top5: 61.45% +[ Mon Sep 12 17:40:39 2022 ] Training epoch: 8 +[ Mon Sep 12 17:41:34 2022 ] Batch(98/243) done. Loss: 1.2853 lr:0.100000 +[ Mon Sep 12 17:42:27 2022 ] Batch(198/243) done. Loss: 1.2021 lr:0.100000 +[ Mon Sep 12 17:42:50 2022 ] Eval epoch: 8 +[ Mon Sep 12 17:45:19 2022 ] Mean test loss of 796 batches: 3.138392210006714. +[ Mon Sep 12 17:45:19 2022 ] Top1: 26.41% +[ Mon Sep 12 17:45:20 2022 ] Top5: 57.90% +[ Mon Sep 12 17:45:20 2022 ] Training epoch: 9 +[ Mon Sep 12 17:45:52 2022 ] Batch(55/243) done. Loss: 1.0583 lr:0.100000 +[ Mon Sep 12 17:46:45 2022 ] Batch(155/243) done. Loss: 1.0930 lr:0.100000 +[ Mon Sep 12 17:47:31 2022 ] Eval epoch: 9 +[ Mon Sep 12 17:50:00 2022 ] Mean test loss of 796 batches: 2.994957685470581. +[ Mon Sep 12 17:50:00 2022 ] Top1: 29.95% +[ Mon Sep 12 17:50:00 2022 ] Top5: 61.35% +[ Mon Sep 12 17:50:01 2022 ] Training epoch: 10 +[ Mon Sep 12 17:50:10 2022 ] Batch(12/243) done. Loss: 1.3079 lr:0.100000 +[ Mon Sep 12 17:51:03 2022 ] Batch(112/243) done. Loss: 1.0368 lr:0.100000 +[ Mon Sep 12 17:51:56 2022 ] Batch(212/243) done. Loss: 1.2674 lr:0.100000 +[ Mon Sep 12 17:52:12 2022 ] Eval epoch: 10 +[ Mon Sep 12 17:54:40 2022 ] Mean test loss of 796 batches: 3.029468059539795. +[ Mon Sep 12 17:54:41 2022 ] Top1: 28.90% +[ Mon Sep 12 17:54:41 2022 ] Top5: 60.87% +[ Mon Sep 12 17:54:41 2022 ] Training epoch: 11 +[ Mon Sep 12 17:55:21 2022 ] Batch(69/243) done. 
Loss: 0.8488 lr:0.100000 +[ Mon Sep 12 17:56:14 2022 ] Batch(169/243) done. Loss: 1.1231 lr:0.100000 +[ Mon Sep 12 17:56:52 2022 ] Eval epoch: 11 +[ Mon Sep 12 17:59:21 2022 ] Mean test loss of 796 batches: 2.9211504459381104. +[ Mon Sep 12 17:59:21 2022 ] Top1: 31.10% +[ Mon Sep 12 17:59:22 2022 ] Top5: 65.88% +[ Mon Sep 12 17:59:22 2022 ] Training epoch: 12 +[ Mon Sep 12 17:59:39 2022 ] Batch(26/243) done. Loss: 0.8822 lr:0.100000 +[ Mon Sep 12 18:00:32 2022 ] Batch(126/243) done. Loss: 1.1657 lr:0.100000 +[ Mon Sep 12 18:01:24 2022 ] Batch(226/243) done. Loss: 0.9553 lr:0.100000 +[ Mon Sep 12 18:01:33 2022 ] Eval epoch: 12 +[ Mon Sep 12 18:04:01 2022 ] Mean test loss of 796 batches: 3.866846799850464. +[ Mon Sep 12 18:04:01 2022 ] Top1: 25.76% +[ Mon Sep 12 18:04:02 2022 ] Top5: 56.17% +[ Mon Sep 12 18:04:02 2022 ] Training epoch: 13 +[ Mon Sep 12 18:04:48 2022 ] Batch(83/243) done. Loss: 1.1388 lr:0.100000 +[ Mon Sep 12 18:05:41 2022 ] Batch(183/243) done. Loss: 1.1985 lr:0.100000 +[ Mon Sep 12 18:06:12 2022 ] Eval epoch: 13 +[ Mon Sep 12 18:08:41 2022 ] Mean test loss of 796 batches: 2.716705560684204. +[ Mon Sep 12 18:08:41 2022 ] Top1: 37.91% +[ Mon Sep 12 18:08:42 2022 ] Top5: 74.10% +[ Mon Sep 12 18:08:42 2022 ] Training epoch: 14 +[ Mon Sep 12 18:09:06 2022 ] Batch(40/243) done. Loss: 0.8329 lr:0.100000 +[ Mon Sep 12 18:09:59 2022 ] Batch(140/243) done. Loss: 0.8772 lr:0.100000 +[ Mon Sep 12 18:10:52 2022 ] Batch(240/243) done. Loss: 1.2715 lr:0.100000 +[ Mon Sep 12 18:10:53 2022 ] Eval epoch: 14 +[ Mon Sep 12 18:13:22 2022 ] Mean test loss of 796 batches: 2.4711339473724365. +[ Mon Sep 12 18:13:22 2022 ] Top1: 36.33% +[ Mon Sep 12 18:13:22 2022 ] Top5: 72.33% +[ Mon Sep 12 18:13:23 2022 ] Training epoch: 15 +[ Mon Sep 12 18:14:17 2022 ] Batch(97/243) done. Loss: 0.9205 lr:0.100000 +[ Mon Sep 12 18:15:10 2022 ] Batch(197/243) done. 
Loss: 1.1311 lr:0.100000 +[ Mon Sep 12 18:15:34 2022 ] Eval epoch: 15 +[ Mon Sep 12 18:18:03 2022 ] Mean test loss of 796 batches: 2.4340267181396484. +[ Mon Sep 12 18:18:03 2022 ] Top1: 41.34% +[ Mon Sep 12 18:18:03 2022 ] Top5: 74.69% +[ Mon Sep 12 18:18:03 2022 ] Training epoch: 16 +[ Mon Sep 12 18:18:35 2022 ] Batch(54/243) done. Loss: 1.0560 lr:0.100000 +[ Mon Sep 12 18:19:28 2022 ] Batch(154/243) done. Loss: 0.7556 lr:0.100000 +[ Mon Sep 12 18:20:14 2022 ] Eval epoch: 16 +[ Mon Sep 12 18:22:42 2022 ] Mean test loss of 796 batches: 2.4173901081085205. +[ Mon Sep 12 18:22:42 2022 ] Top1: 38.66% +[ Mon Sep 12 18:22:43 2022 ] Top5: 75.33% +[ Mon Sep 12 18:22:43 2022 ] Training epoch: 17 +[ Mon Sep 12 18:22:52 2022 ] Batch(11/243) done. Loss: 0.5570 lr:0.100000 +[ Mon Sep 12 18:23:45 2022 ] Batch(111/243) done. Loss: 0.7542 lr:0.100000 +[ Mon Sep 12 18:24:38 2022 ] Batch(211/243) done. Loss: 0.7955 lr:0.100000 +[ Mon Sep 12 18:24:54 2022 ] Eval epoch: 17 +[ Mon Sep 12 18:27:23 2022 ] Mean test loss of 796 batches: 2.4517364501953125. +[ Mon Sep 12 18:27:23 2022 ] Top1: 40.98% +[ Mon Sep 12 18:27:23 2022 ] Top5: 74.23% +[ Mon Sep 12 18:27:24 2022 ] Training epoch: 18 +[ Mon Sep 12 18:28:03 2022 ] Batch(68/243) done. Loss: 0.6977 lr:0.100000 +[ Mon Sep 12 18:28:55 2022 ] Batch(168/243) done. Loss: 0.7670 lr:0.100000 +[ Mon Sep 12 18:29:34 2022 ] Eval epoch: 18 +[ Mon Sep 12 18:32:03 2022 ] Mean test loss of 796 batches: 2.8006997108459473. +[ Mon Sep 12 18:32:03 2022 ] Top1: 37.71% +[ Mon Sep 12 18:32:03 2022 ] Top5: 74.40% +[ Mon Sep 12 18:32:04 2022 ] Training epoch: 19 +[ Mon Sep 12 18:32:20 2022 ] Batch(25/243) done. Loss: 0.8622 lr:0.100000 +[ Mon Sep 12 18:33:12 2022 ] Batch(125/243) done. Loss: 0.6451 lr:0.100000 +[ Mon Sep 12 18:34:05 2022 ] Batch(225/243) done. Loss: 0.7216 lr:0.100000 +[ Mon Sep 12 18:34:14 2022 ] Eval epoch: 19 +[ Mon Sep 12 18:36:43 2022 ] Mean test loss of 796 batches: 3.1234707832336426. 
+[ Mon Sep 12 18:36:43 2022 ] Top1: 33.68% +[ Mon Sep 12 18:36:44 2022 ] Top5: 67.92% +[ Mon Sep 12 18:36:44 2022 ] Training epoch: 20 +[ Mon Sep 12 18:37:30 2022 ] Batch(82/243) done. Loss: 0.7481 lr:0.100000 +[ Mon Sep 12 18:38:23 2022 ] Batch(182/243) done. Loss: 0.6089 lr:0.100000 +[ Mon Sep 12 18:38:55 2022 ] Eval epoch: 20 +[ Mon Sep 12 18:41:24 2022 ] Mean test loss of 796 batches: 2.6673736572265625. +[ Mon Sep 12 18:41:24 2022 ] Top1: 38.96% +[ Mon Sep 12 18:41:24 2022 ] Top5: 74.41% +[ Mon Sep 12 18:41:24 2022 ] Training epoch: 21 +[ Mon Sep 12 18:41:48 2022 ] Batch(39/243) done. Loss: 0.4091 lr:0.100000 +[ Mon Sep 12 18:42:41 2022 ] Batch(139/243) done. Loss: 0.6318 lr:0.100000 +[ Mon Sep 12 18:43:34 2022 ] Batch(239/243) done. Loss: 0.7306 lr:0.100000 +[ Mon Sep 12 18:43:36 2022 ] Eval epoch: 21 +[ Mon Sep 12 18:46:04 2022 ] Mean test loss of 796 batches: 2.3942465782165527. +[ Mon Sep 12 18:46:04 2022 ] Top1: 43.69% +[ Mon Sep 12 18:46:05 2022 ] Top5: 78.36% +[ Mon Sep 12 18:46:05 2022 ] Training epoch: 22 +[ Mon Sep 12 18:46:59 2022 ] Batch(96/243) done. Loss: 0.6980 lr:0.100000 +[ Mon Sep 12 18:47:51 2022 ] Batch(196/243) done. Loss: 0.6690 lr:0.100000 +[ Mon Sep 12 18:48:16 2022 ] Eval epoch: 22 +[ Mon Sep 12 18:50:44 2022 ] Mean test loss of 796 batches: 2.5864007472991943. +[ Mon Sep 12 18:50:44 2022 ] Top1: 39.41% +[ Mon Sep 12 18:50:44 2022 ] Top5: 75.04% +[ Mon Sep 12 18:50:45 2022 ] Training epoch: 23 +[ Mon Sep 12 18:51:16 2022 ] Batch(53/243) done. Loss: 0.4845 lr:0.100000 +[ Mon Sep 12 18:52:08 2022 ] Batch(153/243) done. Loss: 0.7480 lr:0.100000 +[ Mon Sep 12 18:52:56 2022 ] Eval epoch: 23 +[ Mon Sep 12 18:55:25 2022 ] Mean test loss of 796 batches: 2.513529062271118. +[ Mon Sep 12 18:55:25 2022 ] Top1: 43.67% +[ Mon Sep 12 18:55:25 2022 ] Top5: 78.55% +[ Mon Sep 12 18:55:26 2022 ] Training epoch: 24 +[ Mon Sep 12 18:55:34 2022 ] Batch(10/243) done. Loss: 0.4489 lr:0.100000 +[ Mon Sep 12 18:56:27 2022 ] Batch(110/243) done. 
Loss: 0.5259 lr:0.100000 +[ Mon Sep 12 18:57:20 2022 ] Batch(210/243) done. Loss: 0.9508 lr:0.100000 +[ Mon Sep 12 18:57:37 2022 ] Eval epoch: 24 +[ Mon Sep 12 19:00:05 2022 ] Mean test loss of 796 batches: 2.2841122150421143. +[ Mon Sep 12 19:00:05 2022 ] Top1: 43.42% +[ Mon Sep 12 19:00:06 2022 ] Top5: 78.57% +[ Mon Sep 12 19:00:06 2022 ] Training epoch: 25 +[ Mon Sep 12 19:00:44 2022 ] Batch(67/243) done. Loss: 0.5586 lr:0.100000 +[ Mon Sep 12 19:01:37 2022 ] Batch(167/243) done. Loss: 0.7997 lr:0.100000 +[ Mon Sep 12 19:02:17 2022 ] Eval epoch: 25 +[ Mon Sep 12 19:04:45 2022 ] Mean test loss of 796 batches: 2.3876681327819824. +[ Mon Sep 12 19:04:46 2022 ] Top1: 44.44% +[ Mon Sep 12 19:04:46 2022 ] Top5: 77.63% +[ Mon Sep 12 19:04:46 2022 ] Training epoch: 26 +[ Mon Sep 12 19:05:02 2022 ] Batch(24/243) done. Loss: 0.6328 lr:0.100000 +[ Mon Sep 12 19:05:55 2022 ] Batch(124/243) done. Loss: 0.8385 lr:0.100000 +[ Mon Sep 12 19:06:48 2022 ] Batch(224/243) done. Loss: 0.4274 lr:0.100000 +[ Mon Sep 12 19:06:57 2022 ] Eval epoch: 26 +[ Mon Sep 12 19:09:26 2022 ] Mean test loss of 796 batches: 2.849940538406372. +[ Mon Sep 12 19:09:26 2022 ] Top1: 41.05% +[ Mon Sep 12 19:09:27 2022 ] Top5: 75.96% +[ Mon Sep 12 19:09:27 2022 ] Training epoch: 27 +[ Mon Sep 12 19:10:13 2022 ] Batch(81/243) done. Loss: 0.4895 lr:0.100000 +[ Mon Sep 12 19:11:05 2022 ] Batch(181/243) done. Loss: 0.4380 lr:0.100000 +[ Mon Sep 12 19:11:38 2022 ] Eval epoch: 27 +[ Mon Sep 12 19:14:06 2022 ] Mean test loss of 796 batches: 2.4651989936828613. +[ Mon Sep 12 19:14:07 2022 ] Top1: 43.31% +[ Mon Sep 12 19:14:07 2022 ] Top5: 78.15% +[ Mon Sep 12 19:14:07 2022 ] Training epoch: 28 +[ Mon Sep 12 19:14:31 2022 ] Batch(38/243) done. Loss: 0.5685 lr:0.100000 +[ Mon Sep 12 19:15:23 2022 ] Batch(138/243) done. Loss: 0.4821 lr:0.100000 +[ Mon Sep 12 19:16:16 2022 ] Batch(238/243) done. 
Loss: 0.3740 lr:0.100000 +[ Mon Sep 12 19:16:18 2022 ] Eval epoch: 28 +[ Mon Sep 12 19:18:47 2022 ] Mean test loss of 796 batches: 2.620584011077881. +[ Mon Sep 12 19:18:47 2022 ] Top1: 42.39% +[ Mon Sep 12 19:18:47 2022 ] Top5: 76.08% +[ Mon Sep 12 19:18:48 2022 ] Training epoch: 29 +[ Mon Sep 12 19:19:41 2022 ] Batch(95/243) done. Loss: 0.6795 lr:0.100000 +[ Mon Sep 12 19:20:34 2022 ] Batch(195/243) done. Loss: 0.5652 lr:0.100000 +[ Mon Sep 12 19:20:59 2022 ] Eval epoch: 29 +[ Mon Sep 12 19:23:27 2022 ] Mean test loss of 796 batches: 2.435502767562866. +[ Mon Sep 12 19:23:27 2022 ] Top1: 43.64% +[ Mon Sep 12 19:23:28 2022 ] Top5: 78.20% +[ Mon Sep 12 19:23:28 2022 ] Training epoch: 30 +[ Mon Sep 12 19:23:58 2022 ] Batch(52/243) done. Loss: 0.3756 lr:0.100000 +[ Mon Sep 12 19:24:51 2022 ] Batch(152/243) done. Loss: 0.2534 lr:0.100000 +[ Mon Sep 12 19:25:39 2022 ] Eval epoch: 30 +[ Mon Sep 12 19:28:07 2022 ] Mean test loss of 796 batches: 2.9882123470306396. +[ Mon Sep 12 19:28:07 2022 ] Top1: 40.10% +[ Mon Sep 12 19:28:07 2022 ] Top5: 73.79% +[ Mon Sep 12 19:28:08 2022 ] Training epoch: 31 +[ Mon Sep 12 19:28:16 2022 ] Batch(9/243) done. Loss: 0.3546 lr:0.100000 +[ Mon Sep 12 19:29:09 2022 ] Batch(109/243) done. Loss: 0.4524 lr:0.100000 +[ Mon Sep 12 19:30:01 2022 ] Batch(209/243) done. Loss: 0.5393 lr:0.100000 +[ Mon Sep 12 19:30:19 2022 ] Eval epoch: 31 +[ Mon Sep 12 19:32:47 2022 ] Mean test loss of 796 batches: 2.3977699279785156. +[ Mon Sep 12 19:32:48 2022 ] Top1: 44.57% +[ Mon Sep 12 19:32:48 2022 ] Top5: 78.84% +[ Mon Sep 12 19:32:48 2022 ] Training epoch: 32 +[ Mon Sep 12 19:33:26 2022 ] Batch(66/243) done. Loss: 0.2371 lr:0.100000 +[ Mon Sep 12 19:34:19 2022 ] Batch(166/243) done. Loss: 0.3477 lr:0.100000 +[ Mon Sep 12 19:35:00 2022 ] Eval epoch: 32 +[ Mon Sep 12 19:37:28 2022 ] Mean test loss of 796 batches: 2.502819061279297. 
+[ Mon Sep 12 19:37:28 2022 ] Top1: 44.61% +[ Mon Sep 12 19:37:29 2022 ] Top5: 79.77% +[ Mon Sep 12 19:37:29 2022 ] Training epoch: 33 +[ Mon Sep 12 19:37:44 2022 ] Batch(23/243) done. Loss: 0.3132 lr:0.100000 +[ Mon Sep 12 19:38:37 2022 ] Batch(123/243) done. Loss: 0.6401 lr:0.100000 +[ Mon Sep 12 19:39:29 2022 ] Batch(223/243) done. Loss: 0.4580 lr:0.100000 +[ Mon Sep 12 19:39:39 2022 ] Eval epoch: 33 +[ Mon Sep 12 19:42:08 2022 ] Mean test loss of 796 batches: 3.035712957382202. +[ Mon Sep 12 19:42:09 2022 ] Top1: 41.71% +[ Mon Sep 12 19:42:09 2022 ] Top5: 75.48% +[ Mon Sep 12 19:42:09 2022 ] Training epoch: 34 +[ Mon Sep 12 19:42:54 2022 ] Batch(80/243) done. Loss: 0.5582 lr:0.100000 +[ Mon Sep 12 19:43:47 2022 ] Batch(180/243) done. Loss: 0.3260 lr:0.100000 +[ Mon Sep 12 19:44:20 2022 ] Eval epoch: 34 +[ Mon Sep 12 19:46:49 2022 ] Mean test loss of 796 batches: 2.2783524990081787. +[ Mon Sep 12 19:46:49 2022 ] Top1: 46.67% +[ Mon Sep 12 19:46:50 2022 ] Top5: 79.04% +[ Mon Sep 12 19:46:50 2022 ] Training epoch: 35 +[ Mon Sep 12 19:47:12 2022 ] Batch(37/243) done. Loss: 0.2910 lr:0.100000 +[ Mon Sep 12 19:48:05 2022 ] Batch(137/243) done. Loss: 0.5316 lr:0.100000 +[ Mon Sep 12 19:48:58 2022 ] Batch(237/243) done. Loss: 0.3858 lr:0.100000 +[ Mon Sep 12 19:49:01 2022 ] Eval epoch: 35 +[ Mon Sep 12 19:51:29 2022 ] Mean test loss of 796 batches: 2.168560028076172. +[ Mon Sep 12 19:51:30 2022 ] Top1: 47.69% +[ Mon Sep 12 19:51:30 2022 ] Top5: 82.49% +[ Mon Sep 12 19:51:30 2022 ] Training epoch: 36 +[ Mon Sep 12 19:52:23 2022 ] Batch(94/243) done. Loss: 0.7607 lr:0.100000 +[ Mon Sep 12 19:53:16 2022 ] Batch(194/243) done. Loss: 0.6586 lr:0.100000 +[ Mon Sep 12 19:53:41 2022 ] Eval epoch: 36 +[ Mon Sep 12 19:56:10 2022 ] Mean test loss of 796 batches: 2.7959372997283936. +[ Mon Sep 12 19:56:10 2022 ] Top1: 43.37% +[ Mon Sep 12 19:56:10 2022 ] Top5: 77.52% +[ Mon Sep 12 19:56:11 2022 ] Training epoch: 37 +[ Mon Sep 12 19:56:41 2022 ] Batch(51/243) done. 
Loss: 0.5353 lr:0.100000 +[ Mon Sep 12 19:57:34 2022 ] Batch(151/243) done. Loss: 0.4690 lr:0.100000 +[ Mon Sep 12 19:58:22 2022 ] Eval epoch: 37 +[ Mon Sep 12 20:00:50 2022 ] Mean test loss of 796 batches: 2.983930826187134. +[ Mon Sep 12 20:00:51 2022 ] Top1: 45.45% +[ Mon Sep 12 20:00:51 2022 ] Top5: 78.63% +[ Mon Sep 12 20:00:51 2022 ] Training epoch: 38 +[ Mon Sep 12 20:00:59 2022 ] Batch(8/243) done. Loss: 0.4120 lr:0.100000 +[ Mon Sep 12 20:01:52 2022 ] Batch(108/243) done. Loss: 0.5134 lr:0.100000 +[ Mon Sep 12 20:02:44 2022 ] Batch(208/243) done. Loss: 0.4337 lr:0.100000 +[ Mon Sep 12 20:03:02 2022 ] Eval epoch: 38 +[ Mon Sep 12 20:05:31 2022 ] Mean test loss of 796 batches: 2.54374623298645. +[ Mon Sep 12 20:05:31 2022 ] Top1: 46.76% +[ Mon Sep 12 20:05:32 2022 ] Top5: 80.62% +[ Mon Sep 12 20:05:32 2022 ] Training epoch: 39 +[ Mon Sep 12 20:06:09 2022 ] Batch(65/243) done. Loss: 0.1754 lr:0.100000 +[ Mon Sep 12 20:07:02 2022 ] Batch(165/243) done. Loss: 0.4286 lr:0.100000 +[ Mon Sep 12 20:07:43 2022 ] Eval epoch: 39 +[ Mon Sep 12 20:10:12 2022 ] Mean test loss of 796 batches: 2.713900327682495. +[ Mon Sep 12 20:10:13 2022 ] Top1: 43.11% +[ Mon Sep 12 20:10:13 2022 ] Top5: 78.05% +[ Mon Sep 12 20:10:13 2022 ] Training epoch: 40 +[ Mon Sep 12 20:10:28 2022 ] Batch(22/243) done. Loss: 0.2603 lr:0.100000 +[ Mon Sep 12 20:11:21 2022 ] Batch(122/243) done. Loss: 0.3049 lr:0.100000 +[ Mon Sep 12 20:12:14 2022 ] Batch(222/243) done. Loss: 0.5437 lr:0.100000 +[ Mon Sep 12 20:12:24 2022 ] Eval epoch: 40 +[ Mon Sep 12 20:14:53 2022 ] Mean test loss of 796 batches: 2.497074604034424. +[ Mon Sep 12 20:14:53 2022 ] Top1: 48.94% +[ Mon Sep 12 20:14:54 2022 ] Top5: 80.40% +[ Mon Sep 12 20:14:54 2022 ] Training epoch: 41 +[ Mon Sep 12 20:15:39 2022 ] Batch(79/243) done. Loss: 0.4706 lr:0.100000 +[ Mon Sep 12 20:16:32 2022 ] Batch(179/243) done. 
Loss: 0.5966 lr:0.100000 +[ Mon Sep 12 20:17:05 2022 ] Eval epoch: 41 +[ Mon Sep 12 20:19:34 2022 ] Mean test loss of 796 batches: 2.306436777114868. +[ Mon Sep 12 20:19:34 2022 ] Top1: 49.38% +[ Mon Sep 12 20:19:35 2022 ] Top5: 81.72% +[ Mon Sep 12 20:19:35 2022 ] Training epoch: 42 +[ Mon Sep 12 20:19:58 2022 ] Batch(36/243) done. Loss: 0.4055 lr:0.100000 +[ Mon Sep 12 20:20:51 2022 ] Batch(136/243) done. Loss: 0.4943 lr:0.100000 +[ Mon Sep 12 20:21:44 2022 ] Batch(236/243) done. Loss: 0.4343 lr:0.100000 +[ Mon Sep 12 20:21:47 2022 ] Eval epoch: 42 +[ Mon Sep 12 20:24:18 2022 ] Mean test loss of 796 batches: 2.6163108348846436. +[ Mon Sep 12 20:24:18 2022 ] Top1: 45.79% +[ Mon Sep 12 20:24:18 2022 ] Top5: 78.65% +[ Mon Sep 12 20:24:19 2022 ] Training epoch: 43 +[ Mon Sep 12 20:25:12 2022 ] Batch(93/243) done. Loss: 0.4468 lr:0.100000 +[ Mon Sep 12 20:26:05 2022 ] Batch(193/243) done. Loss: 0.5667 lr:0.100000 +[ Mon Sep 12 20:26:31 2022 ] Eval epoch: 43 +[ Mon Sep 12 20:29:01 2022 ] Mean test loss of 796 batches: 2.8471124172210693. +[ Mon Sep 12 20:29:01 2022 ] Top1: 44.80% +[ Mon Sep 12 20:29:02 2022 ] Top5: 77.51% +[ Mon Sep 12 20:29:02 2022 ] Training epoch: 44 +[ Mon Sep 12 20:29:32 2022 ] Batch(50/243) done. Loss: 0.3060 lr:0.100000 +[ Mon Sep 12 20:30:25 2022 ] Batch(150/243) done. Loss: 0.5914 lr:0.100000 +[ Mon Sep 12 20:31:14 2022 ] Eval epoch: 44 +[ Mon Sep 12 20:33:44 2022 ] Mean test loss of 796 batches: 2.6193630695343018. +[ Mon Sep 12 20:33:45 2022 ] Top1: 47.18% +[ Mon Sep 12 20:33:45 2022 ] Top5: 80.08% +[ Mon Sep 12 20:33:46 2022 ] Training epoch: 45 +[ Mon Sep 12 20:33:53 2022 ] Batch(7/243) done. Loss: 0.3902 lr:0.100000 +[ Mon Sep 12 20:34:46 2022 ] Batch(107/243) done. Loss: 0.5657 lr:0.100000 +[ Mon Sep 12 20:35:39 2022 ] Batch(207/243) done. Loss: 0.2205 lr:0.100000 +[ Mon Sep 12 20:35:58 2022 ] Eval epoch: 45 +[ Mon Sep 12 20:38:28 2022 ] Mean test loss of 796 batches: 2.66359806060791. 
+[ Mon Sep 12 20:38:29 2022 ] Top1: 44.93% +[ Mon Sep 12 20:38:29 2022 ] Top5: 78.03% +[ Mon Sep 12 20:38:29 2022 ] Training epoch: 46 +[ Mon Sep 12 20:39:07 2022 ] Batch(64/243) done. Loss: 0.1533 lr:0.100000 +[ Mon Sep 12 20:40:00 2022 ] Batch(164/243) done. Loss: 0.3249 lr:0.100000 +[ Mon Sep 12 20:40:42 2022 ] Eval epoch: 46 +[ Mon Sep 12 20:43:12 2022 ] Mean test loss of 796 batches: 2.7212274074554443. +[ Mon Sep 12 20:43:12 2022 ] Top1: 43.07% +[ Mon Sep 12 20:43:13 2022 ] Top5: 76.16% +[ Mon Sep 12 20:43:13 2022 ] Training epoch: 47 +[ Mon Sep 12 20:43:28 2022 ] Batch(21/243) done. Loss: 0.3087 lr:0.100000 +[ Mon Sep 12 20:44:21 2022 ] Batch(121/243) done. Loss: 0.4566 lr:0.100000 +[ Mon Sep 12 20:45:14 2022 ] Batch(221/243) done. Loss: 0.2735 lr:0.100000 +[ Mon Sep 12 20:45:25 2022 ] Eval epoch: 47 +[ Mon Sep 12 20:47:56 2022 ] Mean test loss of 796 batches: 2.5096700191497803. +[ Mon Sep 12 20:47:56 2022 ] Top1: 48.75% +[ Mon Sep 12 20:47:56 2022 ] Top5: 81.11% +[ Mon Sep 12 20:47:57 2022 ] Training epoch: 48 +[ Mon Sep 12 20:48:42 2022 ] Batch(78/243) done. Loss: 0.3789 lr:0.100000 +[ Mon Sep 12 20:49:35 2022 ] Batch(178/243) done. Loss: 0.3563 lr:0.100000 +[ Mon Sep 12 20:50:09 2022 ] Eval epoch: 48 +[ Mon Sep 12 20:52:39 2022 ] Mean test loss of 796 batches: 2.9559221267700195. +[ Mon Sep 12 20:52:39 2022 ] Top1: 44.30% +[ Mon Sep 12 20:52:40 2022 ] Top5: 76.90% +[ Mon Sep 12 20:52:40 2022 ] Training epoch: 49 +[ Mon Sep 12 20:53:02 2022 ] Batch(35/243) done. Loss: 0.3420 lr:0.100000 +[ Mon Sep 12 20:53:55 2022 ] Batch(135/243) done. Loss: 0.6071 lr:0.100000 +[ Mon Sep 12 20:54:48 2022 ] Batch(235/243) done. Loss: 0.2530 lr:0.100000 +[ Mon Sep 12 20:54:52 2022 ] Eval epoch: 49 +[ Mon Sep 12 20:57:22 2022 ] Mean test loss of 796 batches: 3.282691478729248. +[ Mon Sep 12 20:57:23 2022 ] Top1: 42.52% +[ Mon Sep 12 20:57:23 2022 ] Top5: 74.37% +[ Mon Sep 12 20:57:23 2022 ] Training epoch: 50 +[ Mon Sep 12 20:58:16 2022 ] Batch(92/243) done. 
Loss: 0.4806 lr:0.100000 +[ Mon Sep 12 20:59:09 2022 ] Batch(192/243) done. Loss: 0.3802 lr:0.100000 +[ Mon Sep 12 20:59:36 2022 ] Eval epoch: 50 +[ Mon Sep 12 21:02:06 2022 ] Mean test loss of 796 batches: 3.1574456691741943. +[ Mon Sep 12 21:02:06 2022 ] Top1: 43.83% +[ Mon Sep 12 21:02:07 2022 ] Top5: 76.17% +[ Mon Sep 12 21:02:07 2022 ] Training epoch: 51 +[ Mon Sep 12 21:02:37 2022 ] Batch(49/243) done. Loss: 0.2924 lr:0.100000 +[ Mon Sep 12 21:03:30 2022 ] Batch(149/243) done. Loss: 0.3555 lr:0.100000 +[ Mon Sep 12 21:04:20 2022 ] Eval epoch: 51 +[ Mon Sep 12 21:06:50 2022 ] Mean test loss of 796 batches: 2.7255911827087402. +[ Mon Sep 12 21:06:50 2022 ] Top1: 45.44% +[ Mon Sep 12 21:06:51 2022 ] Top5: 80.12% +[ Mon Sep 12 21:06:51 2022 ] Training epoch: 52 +[ Mon Sep 12 21:06:58 2022 ] Batch(6/243) done. Loss: 0.2800 lr:0.100000 +[ Mon Sep 12 21:07:51 2022 ] Batch(106/243) done. Loss: 0.4368 lr:0.100000 +[ Mon Sep 12 21:08:44 2022 ] Batch(206/243) done. Loss: 0.3939 lr:0.100000 +[ Mon Sep 12 21:09:03 2022 ] Eval epoch: 52 +[ Mon Sep 12 21:11:33 2022 ] Mean test loss of 796 batches: 2.8366401195526123. +[ Mon Sep 12 21:11:33 2022 ] Top1: 45.62% +[ Mon Sep 12 21:11:34 2022 ] Top5: 78.17% +[ Mon Sep 12 21:11:34 2022 ] Training epoch: 53 +[ Mon Sep 12 21:12:12 2022 ] Batch(63/243) done. Loss: 0.2960 lr:0.100000 +[ Mon Sep 12 21:13:04 2022 ] Batch(163/243) done. Loss: 0.3067 lr:0.100000 +[ Mon Sep 12 21:13:46 2022 ] Eval epoch: 53 +[ Mon Sep 12 21:16:17 2022 ] Mean test loss of 796 batches: 2.702021837234497. +[ Mon Sep 12 21:16:17 2022 ] Top1: 48.21% +[ Mon Sep 12 21:16:18 2022 ] Top5: 80.06% +[ Mon Sep 12 21:16:18 2022 ] Training epoch: 54 +[ Mon Sep 12 21:16:32 2022 ] Batch(20/243) done. Loss: 0.2548 lr:0.100000 +[ Mon Sep 12 21:17:25 2022 ] Batch(120/243) done. Loss: 0.3072 lr:0.100000 +[ Mon Sep 12 21:18:18 2022 ] Batch(220/243) done. 
Loss: 0.4308 lr:0.100000 +[ Mon Sep 12 21:18:30 2022 ] Eval epoch: 54 +[ Mon Sep 12 21:21:00 2022 ] Mean test loss of 796 batches: 2.8809432983398438. +[ Mon Sep 12 21:21:01 2022 ] Top1: 44.77% +[ Mon Sep 12 21:21:01 2022 ] Top5: 76.19% +[ Mon Sep 12 21:21:02 2022 ] Training epoch: 55 +[ Mon Sep 12 21:21:46 2022 ] Batch(77/243) done. Loss: 0.5762 lr:0.100000 +[ Mon Sep 12 21:22:39 2022 ] Batch(177/243) done. Loss: 0.5683 lr:0.100000 +[ Mon Sep 12 21:23:14 2022 ] Eval epoch: 55 +[ Mon Sep 12 21:25:44 2022 ] Mean test loss of 796 batches: 2.5277981758117676. +[ Mon Sep 12 21:25:44 2022 ] Top1: 48.81% +[ Mon Sep 12 21:25:44 2022 ] Top5: 80.88% +[ Mon Sep 12 21:25:45 2022 ] Training epoch: 56 +[ Mon Sep 12 21:26:07 2022 ] Batch(34/243) done. Loss: 0.5117 lr:0.100000 +[ Mon Sep 12 21:27:00 2022 ] Batch(134/243) done. Loss: 0.1882 lr:0.100000 +[ Mon Sep 12 21:27:53 2022 ] Batch(234/243) done. Loss: 0.3638 lr:0.100000 +[ Mon Sep 12 21:27:57 2022 ] Eval epoch: 56 +[ Mon Sep 12 21:30:27 2022 ] Mean test loss of 796 batches: 3.64302396774292. +[ Mon Sep 12 21:30:27 2022 ] Top1: 40.97% +[ Mon Sep 12 21:30:28 2022 ] Top5: 72.24% +[ Mon Sep 12 21:30:28 2022 ] Training epoch: 57 +[ Mon Sep 12 21:31:20 2022 ] Batch(91/243) done. Loss: 0.3306 lr:0.100000 +[ Mon Sep 12 21:32:13 2022 ] Batch(191/243) done. Loss: 0.5866 lr:0.100000 +[ Mon Sep 12 21:32:40 2022 ] Eval epoch: 57 +[ Mon Sep 12 21:35:10 2022 ] Mean test loss of 796 batches: 2.9249634742736816. +[ Mon Sep 12 21:35:10 2022 ] Top1: 45.85% +[ Mon Sep 12 21:35:11 2022 ] Top5: 77.70% +[ Mon Sep 12 21:35:11 2022 ] Training epoch: 58 +[ Mon Sep 12 21:35:40 2022 ] Batch(48/243) done. Loss: 0.1647 lr:0.100000 +[ Mon Sep 12 21:36:33 2022 ] Batch(148/243) done. Loss: 0.5696 lr:0.100000 +[ Mon Sep 12 21:37:23 2022 ] Eval epoch: 58 +[ Mon Sep 12 21:39:54 2022 ] Mean test loss of 796 batches: 3.296038866043091. 
+[ Mon Sep 12 21:39:54 2022 ] Top1: 44.77% +[ Mon Sep 12 21:39:54 2022 ] Top5: 75.72% +[ Mon Sep 12 21:39:55 2022 ] Training epoch: 59 +[ Mon Sep 12 21:40:01 2022 ] Batch(5/243) done. Loss: 0.1390 lr:0.100000 +[ Mon Sep 12 21:40:54 2022 ] Batch(105/243) done. Loss: 0.1552 lr:0.100000 +[ Mon Sep 12 21:41:47 2022 ] Batch(205/243) done. Loss: 0.6132 lr:0.100000 +[ Mon Sep 12 21:42:07 2022 ] Eval epoch: 59 +[ Mon Sep 12 21:44:37 2022 ] Mean test loss of 796 batches: 4.113931655883789. +[ Mon Sep 12 21:44:37 2022 ] Top1: 31.77% +[ Mon Sep 12 21:44:38 2022 ] Top5: 66.28% +[ Mon Sep 12 21:44:38 2022 ] Training epoch: 60 +[ Mon Sep 12 21:45:15 2022 ] Batch(62/243) done. Loss: 0.3674 lr:0.100000 +[ Mon Sep 12 21:46:08 2022 ] Batch(162/243) done. Loss: 0.5138 lr:0.100000 +[ Mon Sep 12 21:46:50 2022 ] Eval epoch: 60 +[ Mon Sep 12 21:49:20 2022 ] Mean test loss of 796 batches: 2.752439498901367. +[ Mon Sep 12 21:49:21 2022 ] Top1: 48.44% +[ Mon Sep 12 21:49:21 2022 ] Top5: 79.91% +[ Mon Sep 12 21:49:21 2022 ] Training epoch: 61 +[ Mon Sep 12 21:49:35 2022 ] Batch(19/243) done. Loss: 0.1299 lr:0.010000 +[ Mon Sep 12 21:50:28 2022 ] Batch(119/243) done. Loss: 0.2408 lr:0.010000 +[ Mon Sep 12 21:51:21 2022 ] Batch(219/243) done. Loss: 0.2369 lr:0.010000 +[ Mon Sep 12 21:51:34 2022 ] Eval epoch: 61 +[ Mon Sep 12 21:54:05 2022 ] Mean test loss of 796 batches: 2.21579909324646. +[ Mon Sep 12 21:54:05 2022 ] Top1: 55.86% +[ Mon Sep 12 21:54:05 2022 ] Top5: 85.32% +[ Mon Sep 12 21:54:05 2022 ] Training epoch: 62 +[ Mon Sep 12 21:54:50 2022 ] Batch(76/243) done. Loss: 0.0834 lr:0.010000 +[ Mon Sep 12 21:55:43 2022 ] Batch(176/243) done. Loss: 0.0547 lr:0.010000 +[ Mon Sep 12 21:56:18 2022 ] Eval epoch: 62 +[ Mon Sep 12 21:58:49 2022 ] Mean test loss of 796 batches: 2.238507032394409. +[ Mon Sep 12 21:58:49 2022 ] Top1: 56.63% +[ Mon Sep 12 21:58:49 2022 ] Top5: 85.71% +[ Mon Sep 12 21:58:50 2022 ] Training epoch: 63 +[ Mon Sep 12 21:59:11 2022 ] Batch(33/243) done. 
Loss: 0.0954 lr:0.010000 +[ Mon Sep 12 22:00:04 2022 ] Batch(133/243) done. Loss: 0.1910 lr:0.010000 +[ Mon Sep 12 22:00:57 2022 ] Batch(233/243) done. Loss: 0.0895 lr:0.010000 +[ Mon Sep 12 22:01:02 2022 ] Eval epoch: 63 +[ Mon Sep 12 22:03:32 2022 ] Mean test loss of 796 batches: 2.265852928161621. +[ Mon Sep 12 22:03:32 2022 ] Top1: 56.58% +[ Mon Sep 12 22:03:33 2022 ] Top5: 85.87% +[ Mon Sep 12 22:03:33 2022 ] Training epoch: 64 +[ Mon Sep 12 22:04:24 2022 ] Batch(90/243) done. Loss: 0.1179 lr:0.010000 +[ Mon Sep 12 22:05:17 2022 ] Batch(190/243) done. Loss: 0.1409 lr:0.010000 +[ Mon Sep 12 22:05:45 2022 ] Eval epoch: 64 +[ Mon Sep 12 22:08:15 2022 ] Mean test loss of 796 batches: 2.3048434257507324. +[ Mon Sep 12 22:08:15 2022 ] Top1: 56.37% +[ Mon Sep 12 22:08:16 2022 ] Top5: 85.68% +[ Mon Sep 12 22:08:16 2022 ] Training epoch: 65 +[ Mon Sep 12 22:08:45 2022 ] Batch(47/243) done. Loss: 0.0954 lr:0.010000 +[ Mon Sep 12 22:09:38 2022 ] Batch(147/243) done. Loss: 0.0779 lr:0.010000 +[ Mon Sep 12 22:10:28 2022 ] Eval epoch: 65 +[ Mon Sep 12 22:12:59 2022 ] Mean test loss of 796 batches: 2.3024234771728516. +[ Mon Sep 12 22:12:59 2022 ] Top1: 56.74% +[ Mon Sep 12 22:12:59 2022 ] Top5: 85.74% +[ Mon Sep 12 22:13:00 2022 ] Training epoch: 66 +[ Mon Sep 12 22:13:06 2022 ] Batch(4/243) done. Loss: 0.1016 lr:0.010000 +[ Mon Sep 12 22:13:58 2022 ] Batch(104/243) done. Loss: 0.0461 lr:0.010000 +[ Mon Sep 12 22:14:51 2022 ] Batch(204/243) done. Loss: 0.0116 lr:0.010000 +[ Mon Sep 12 22:15:12 2022 ] Eval epoch: 66 +[ Mon Sep 12 22:17:41 2022 ] Mean test loss of 796 batches: 2.4010677337646484. +[ Mon Sep 12 22:17:42 2022 ] Top1: 56.48% +[ Mon Sep 12 22:17:42 2022 ] Top5: 85.45% +[ Mon Sep 12 22:17:42 2022 ] Training epoch: 67 +[ Mon Sep 12 22:18:19 2022 ] Batch(61/243) done. Loss: 0.0483 lr:0.010000 +[ Mon Sep 12 22:19:12 2022 ] Batch(161/243) done. 
Loss: 0.0378 lr:0.010000 +[ Mon Sep 12 22:19:55 2022 ] Eval epoch: 67 +[ Mon Sep 12 22:22:25 2022 ] Mean test loss of 796 batches: 2.4693613052368164. +[ Mon Sep 12 22:22:25 2022 ] Top1: 55.82% +[ Mon Sep 12 22:22:25 2022 ] Top5: 85.19% +[ Mon Sep 12 22:22:26 2022 ] Training epoch: 68 +[ Mon Sep 12 22:22:39 2022 ] Batch(18/243) done. Loss: 0.0381 lr:0.010000 +[ Mon Sep 12 22:23:32 2022 ] Batch(118/243) done. Loss: 0.0438 lr:0.010000 +[ Mon Sep 12 22:24:25 2022 ] Batch(218/243) done. Loss: 0.0356 lr:0.010000 +[ Mon Sep 12 22:24:38 2022 ] Eval epoch: 68 +[ Mon Sep 12 22:27:08 2022 ] Mean test loss of 796 batches: 2.441556692123413. +[ Mon Sep 12 22:27:09 2022 ] Top1: 56.56% +[ Mon Sep 12 22:27:09 2022 ] Top5: 85.39% +[ Mon Sep 12 22:27:09 2022 ] Training epoch: 69 +[ Mon Sep 12 22:27:53 2022 ] Batch(75/243) done. Loss: 0.0639 lr:0.010000 +[ Mon Sep 12 22:28:46 2022 ] Batch(175/243) done. Loss: 0.0246 lr:0.010000 +[ Mon Sep 12 22:29:21 2022 ] Eval epoch: 69 +[ Mon Sep 12 22:31:50 2022 ] Mean test loss of 796 batches: 2.452775239944458. +[ Mon Sep 12 22:31:50 2022 ] Top1: 56.46% +[ Mon Sep 12 22:31:50 2022 ] Top5: 85.42% +[ Mon Sep 12 22:31:51 2022 ] Training epoch: 70 +[ Mon Sep 12 22:32:11 2022 ] Batch(32/243) done. Loss: 0.0645 lr:0.010000 +[ Mon Sep 12 22:33:03 2022 ] Batch(132/243) done. Loss: 0.0337 lr:0.010000 +[ Mon Sep 12 22:33:57 2022 ] Batch(232/243) done. Loss: 0.1710 lr:0.010000 +[ Mon Sep 12 22:34:02 2022 ] Eval epoch: 70 +[ Mon Sep 12 22:36:30 2022 ] Mean test loss of 796 batches: 2.438685178756714. +[ Mon Sep 12 22:36:31 2022 ] Top1: 56.90% +[ Mon Sep 12 22:36:31 2022 ] Top5: 85.70% +[ Mon Sep 12 22:36:31 2022 ] Training epoch: 71 +[ Mon Sep 12 22:37:22 2022 ] Batch(89/243) done. Loss: 0.0785 lr:0.010000 +[ Mon Sep 12 22:38:14 2022 ] Batch(189/243) done. Loss: 0.0159 lr:0.010000 +[ Mon Sep 12 22:38:43 2022 ] Eval epoch: 71 +[ Mon Sep 12 22:41:11 2022 ] Mean test loss of 796 batches: 2.4772839546203613. 
+[ Mon Sep 12 22:41:11 2022 ] Top1: 56.68% +[ Mon Sep 12 22:41:12 2022 ] Top5: 85.52% +[ Mon Sep 12 22:41:12 2022 ] Training epoch: 72 +[ Mon Sep 12 22:41:39 2022 ] Batch(46/243) done. Loss: 0.0166 lr:0.010000 +[ Mon Sep 12 22:42:32 2022 ] Batch(146/243) done. Loss: 0.1071 lr:0.010000 +[ Mon Sep 12 22:43:23 2022 ] Eval epoch: 72 +[ Mon Sep 12 22:45:51 2022 ] Mean test loss of 796 batches: 2.5877044200897217. +[ Mon Sep 12 22:45:51 2022 ] Top1: 55.97% +[ Mon Sep 12 22:45:52 2022 ] Top5: 85.10% +[ Mon Sep 12 22:45:52 2022 ] Training epoch: 73 +[ Mon Sep 12 22:45:57 2022 ] Batch(3/243) done. Loss: 0.0275 lr:0.010000 +[ Mon Sep 12 22:46:49 2022 ] Batch(103/243) done. Loss: 0.0644 lr:0.010000 +[ Mon Sep 12 22:47:42 2022 ] Batch(203/243) done. Loss: 0.0433 lr:0.010000 +[ Mon Sep 12 22:48:03 2022 ] Eval epoch: 73 +[ Mon Sep 12 22:50:32 2022 ] Mean test loss of 796 batches: 2.651918649673462. +[ Mon Sep 12 22:50:32 2022 ] Top1: 55.48% +[ Mon Sep 12 22:50:32 2022 ] Top5: 84.69% +[ Mon Sep 12 22:50:33 2022 ] Training epoch: 74 +[ Mon Sep 12 22:51:07 2022 ] Batch(60/243) done. Loss: 0.0305 lr:0.010000 +[ Mon Sep 12 22:52:00 2022 ] Batch(160/243) done. Loss: 0.0308 lr:0.010000 +[ Mon Sep 12 22:52:43 2022 ] Eval epoch: 74 +[ Mon Sep 12 22:55:12 2022 ] Mean test loss of 796 batches: 2.5842037200927734. +[ Mon Sep 12 22:55:12 2022 ] Top1: 56.39% +[ Mon Sep 12 22:55:12 2022 ] Top5: 85.13% +[ Mon Sep 12 22:55:13 2022 ] Training epoch: 75 +[ Mon Sep 12 22:55:25 2022 ] Batch(17/243) done. Loss: 0.0366 lr:0.010000 +[ Mon Sep 12 22:56:18 2022 ] Batch(117/243) done. Loss: 0.0302 lr:0.010000 +[ Mon Sep 12 22:57:10 2022 ] Batch(217/243) done. Loss: 0.0747 lr:0.010000 +[ Mon Sep 12 22:57:24 2022 ] Eval epoch: 75 +[ Mon Sep 12 22:59:52 2022 ] Mean test loss of 796 batches: 2.570859909057617. +[ Mon Sep 12 22:59:53 2022 ] Top1: 56.14% +[ Mon Sep 12 22:59:53 2022 ] Top5: 85.25% +[ Mon Sep 12 22:59:53 2022 ] Training epoch: 76 +[ Mon Sep 12 23:00:36 2022 ] Batch(74/243) done. 
Loss: 0.0796 lr:0.010000 +[ Mon Sep 12 23:01:28 2022 ] Batch(174/243) done. Loss: 0.0257 lr:0.010000 +[ Mon Sep 12 23:02:05 2022 ] Eval epoch: 76 +[ Mon Sep 12 23:04:33 2022 ] Mean test loss of 796 batches: 2.592128038406372. +[ Mon Sep 12 23:04:34 2022 ] Top1: 55.84% +[ Mon Sep 12 23:04:34 2022 ] Top5: 85.30% +[ Mon Sep 12 23:04:34 2022 ] Training epoch: 77 +[ Mon Sep 12 23:04:54 2022 ] Batch(31/243) done. Loss: 0.0744 lr:0.010000 +[ Mon Sep 12 23:05:46 2022 ] Batch(131/243) done. Loss: 0.1931 lr:0.010000 +[ Mon Sep 12 23:06:39 2022 ] Batch(231/243) done. Loss: 0.1079 lr:0.010000 +[ Mon Sep 12 23:06:45 2022 ] Eval epoch: 77 +[ Mon Sep 12 23:09:14 2022 ] Mean test loss of 796 batches: 2.520390033721924. +[ Mon Sep 12 23:09:15 2022 ] Top1: 57.01% +[ Mon Sep 12 23:09:15 2022 ] Top5: 85.66% +[ Mon Sep 12 23:09:15 2022 ] Training epoch: 78 +[ Mon Sep 12 23:10:05 2022 ] Batch(88/243) done. Loss: 0.0603 lr:0.010000 +[ Mon Sep 12 23:10:57 2022 ] Batch(188/243) done. Loss: 0.0310 lr:0.010000 +[ Mon Sep 12 23:11:26 2022 ] Eval epoch: 78 +[ Mon Sep 12 23:13:55 2022 ] Mean test loss of 796 batches: 2.6386399269104004. +[ Mon Sep 12 23:13:55 2022 ] Top1: 56.20% +[ Mon Sep 12 23:13:55 2022 ] Top5: 85.21% +[ Mon Sep 12 23:13:56 2022 ] Training epoch: 79 +[ Mon Sep 12 23:14:22 2022 ] Batch(45/243) done. Loss: 0.0765 lr:0.010000 +[ Mon Sep 12 23:15:15 2022 ] Batch(145/243) done. Loss: 0.0186 lr:0.010000 +[ Mon Sep 12 23:16:06 2022 ] Eval epoch: 79 +[ Mon Sep 12 23:18:35 2022 ] Mean test loss of 796 batches: 2.681422233581543. +[ Mon Sep 12 23:18:36 2022 ] Top1: 55.56% +[ Mon Sep 12 23:18:36 2022 ] Top5: 84.93% +[ Mon Sep 12 23:18:36 2022 ] Training epoch: 80 +[ Mon Sep 12 23:18:40 2022 ] Batch(2/243) done. Loss: 0.0715 lr:0.010000 +[ Mon Sep 12 23:19:33 2022 ] Batch(102/243) done. Loss: 0.0329 lr:0.010000 +[ Mon Sep 12 23:20:26 2022 ] Batch(202/243) done. 
Loss: 0.0207 lr:0.010000 +[ Mon Sep 12 23:20:47 2022 ] Eval epoch: 80 +[ Mon Sep 12 23:23:16 2022 ] Mean test loss of 796 batches: 2.7165815830230713. +[ Mon Sep 12 23:23:16 2022 ] Top1: 55.60% +[ Mon Sep 12 23:23:16 2022 ] Top5: 84.92% +[ Mon Sep 12 23:23:17 2022 ] Training epoch: 81 +[ Mon Sep 12 23:23:51 2022 ] Batch(59/243) done. Loss: 0.0494 lr:0.001000 +[ Mon Sep 12 23:24:44 2022 ] Batch(159/243) done. Loss: 0.0567 lr:0.001000 +[ Mon Sep 12 23:25:28 2022 ] Eval epoch: 81 +[ Mon Sep 12 23:27:56 2022 ] Mean test loss of 796 batches: 2.6882925033569336. +[ Mon Sep 12 23:27:56 2022 ] Top1: 55.76% +[ Mon Sep 12 23:27:57 2022 ] Top5: 85.11% +[ Mon Sep 12 23:27:57 2022 ] Training epoch: 82 +[ Mon Sep 12 23:28:08 2022 ] Batch(16/243) done. Loss: 0.0430 lr:0.001000 +[ Mon Sep 12 23:29:01 2022 ] Batch(116/243) done. Loss: 0.0271 lr:0.001000 +[ Mon Sep 12 23:29:54 2022 ] Batch(216/243) done. Loss: 0.0803 lr:0.001000 +[ Mon Sep 12 23:30:08 2022 ] Eval epoch: 82 +[ Mon Sep 12 23:32:37 2022 ] Mean test loss of 796 batches: 2.646885633468628. +[ Mon Sep 12 23:32:37 2022 ] Top1: 56.28% +[ Mon Sep 12 23:32:38 2022 ] Top5: 85.33% +[ Mon Sep 12 23:32:38 2022 ] Training epoch: 83 +[ Mon Sep 12 23:33:20 2022 ] Batch(73/243) done. Loss: 0.1074 lr:0.001000 +[ Mon Sep 12 23:34:12 2022 ] Batch(173/243) done. Loss: 0.0116 lr:0.001000 +[ Mon Sep 12 23:34:49 2022 ] Eval epoch: 83 +[ Mon Sep 12 23:37:18 2022 ] Mean test loss of 796 batches: 2.597583770751953. +[ Mon Sep 12 23:37:19 2022 ] Top1: 56.53% +[ Mon Sep 12 23:37:19 2022 ] Top5: 85.37% +[ Mon Sep 12 23:37:19 2022 ] Training epoch: 84 +[ Mon Sep 12 23:37:38 2022 ] Batch(30/243) done. Loss: 0.0481 lr:0.001000 +[ Mon Sep 12 23:38:31 2022 ] Batch(130/243) done. Loss: 0.0732 lr:0.001000 +[ Mon Sep 12 23:39:24 2022 ] Batch(230/243) done. Loss: 0.0687 lr:0.001000 +[ Mon Sep 12 23:39:31 2022 ] Eval epoch: 84 +[ Mon Sep 12 23:41:59 2022 ] Mean test loss of 796 batches: 2.664794683456421. 
+[ Mon Sep 12 23:41:59 2022 ] Top1: 56.09% +[ Mon Sep 12 23:42:00 2022 ] Top5: 85.21% +[ Mon Sep 12 23:42:00 2022 ] Training epoch: 85 +[ Mon Sep 12 23:42:49 2022 ] Batch(87/243) done. Loss: 0.0488 lr:0.001000 +[ Mon Sep 12 23:43:42 2022 ] Batch(187/243) done. Loss: 0.0218 lr:0.001000 +[ Mon Sep 12 23:44:11 2022 ] Eval epoch: 85 +[ Mon Sep 12 23:46:40 2022 ] Mean test loss of 796 batches: 2.662086009979248. +[ Mon Sep 12 23:46:40 2022 ] Top1: 56.15% +[ Mon Sep 12 23:46:40 2022 ] Top5: 85.20% +[ Mon Sep 12 23:46:41 2022 ] Training epoch: 86 +[ Mon Sep 12 23:47:07 2022 ] Batch(44/243) done. Loss: 0.0633 lr:0.001000 +[ Mon Sep 12 23:48:00 2022 ] Batch(144/243) done. Loss: 0.0310 lr:0.001000 +[ Mon Sep 12 23:48:52 2022 ] Eval epoch: 86 +[ Mon Sep 12 23:51:20 2022 ] Mean test loss of 796 batches: 2.6869003772735596. +[ Mon Sep 12 23:51:21 2022 ] Top1: 56.17% +[ Mon Sep 12 23:51:21 2022 ] Top5: 85.22% +[ Mon Sep 12 23:51:21 2022 ] Training epoch: 87 +[ Mon Sep 12 23:51:25 2022 ] Batch(1/243) done. Loss: 0.1027 lr:0.001000 +[ Mon Sep 12 23:52:18 2022 ] Batch(101/243) done. Loss: 0.0944 lr:0.001000 +[ Mon Sep 12 23:53:11 2022 ] Batch(201/243) done. Loss: 0.1088 lr:0.001000 +[ Mon Sep 12 23:53:32 2022 ] Eval epoch: 87 +[ Mon Sep 12 23:56:00 2022 ] Mean test loss of 796 batches: 2.6837778091430664. +[ Mon Sep 12 23:56:01 2022 ] Top1: 56.26% +[ Mon Sep 12 23:56:01 2022 ] Top5: 85.23% +[ Mon Sep 12 23:56:01 2022 ] Training epoch: 88 +[ Mon Sep 12 23:56:35 2022 ] Batch(58/243) done. Loss: 0.0394 lr:0.001000 +[ Mon Sep 12 23:57:28 2022 ] Batch(158/243) done. Loss: 0.0608 lr:0.001000 +[ Mon Sep 12 23:58:12 2022 ] Eval epoch: 88 +[ Tue Sep 13 00:00:41 2022 ] Mean test loss of 796 batches: 2.6194639205932617. +[ Tue Sep 13 00:00:41 2022 ] Top1: 56.45% +[ Tue Sep 13 00:00:41 2022 ] Top5: 85.51% +[ Tue Sep 13 00:00:42 2022 ] Training epoch: 89 +[ Tue Sep 13 00:00:53 2022 ] Batch(15/243) done. Loss: 0.0463 lr:0.001000 +[ Tue Sep 13 00:01:46 2022 ] Batch(115/243) done. 
Loss: 0.0355 lr:0.001000 +[ Tue Sep 13 00:02:39 2022 ] Batch(215/243) done. Loss: 0.0294 lr:0.001000 +[ Tue Sep 13 00:02:53 2022 ] Eval epoch: 89 +[ Tue Sep 13 00:05:23 2022 ] Mean test loss of 796 batches: 2.6501572132110596. +[ Tue Sep 13 00:05:24 2022 ] Top1: 56.37% +[ Tue Sep 13 00:05:24 2022 ] Top5: 85.40% +[ Tue Sep 13 00:05:24 2022 ] Training epoch: 90 +[ Tue Sep 13 00:06:05 2022 ] Batch(72/243) done. Loss: 0.0114 lr:0.001000 +[ Tue Sep 13 00:06:58 2022 ] Batch(172/243) done. Loss: 0.1095 lr:0.001000 +[ Tue Sep 13 00:07:35 2022 ] Eval epoch: 90 +[ Tue Sep 13 00:10:03 2022 ] Mean test loss of 796 batches: 2.677971363067627. +[ Tue Sep 13 00:10:04 2022 ] Top1: 56.01% +[ Tue Sep 13 00:10:04 2022 ] Top5: 85.13% +[ Tue Sep 13 00:10:04 2022 ] Training epoch: 91 +[ Tue Sep 13 00:10:23 2022 ] Batch(29/243) done. Loss: 0.0262 lr:0.001000 +[ Tue Sep 13 00:11:15 2022 ] Batch(129/243) done. Loss: 0.0242 lr:0.001000 +[ Tue Sep 13 00:12:08 2022 ] Batch(229/243) done. Loss: 0.0689 lr:0.001000 +[ Tue Sep 13 00:12:15 2022 ] Eval epoch: 91 +[ Tue Sep 13 00:14:44 2022 ] Mean test loss of 796 batches: 2.676117420196533. +[ Tue Sep 13 00:14:44 2022 ] Top1: 56.30% +[ Tue Sep 13 00:14:44 2022 ] Top5: 85.33% +[ Tue Sep 13 00:14:45 2022 ] Training epoch: 92 +[ Tue Sep 13 00:15:33 2022 ] Batch(86/243) done. Loss: 0.0536 lr:0.001000 +[ Tue Sep 13 00:16:26 2022 ] Batch(186/243) done. Loss: 0.0407 lr:0.001000 +[ Tue Sep 13 00:16:56 2022 ] Eval epoch: 92 +[ Tue Sep 13 00:19:24 2022 ] Mean test loss of 796 batches: 2.6445698738098145. +[ Tue Sep 13 00:19:25 2022 ] Top1: 56.10% +[ Tue Sep 13 00:19:25 2022 ] Top5: 85.17% +[ Tue Sep 13 00:19:25 2022 ] Training epoch: 93 +[ Tue Sep 13 00:19:51 2022 ] Batch(43/243) done. Loss: 0.0268 lr:0.001000 +[ Tue Sep 13 00:20:44 2022 ] Batch(143/243) done. Loss: 0.0451 lr:0.001000 +[ Tue Sep 13 00:21:36 2022 ] Eval epoch: 93 +[ Tue Sep 13 00:24:04 2022 ] Mean test loss of 796 batches: 2.6552574634552. 
+[ Tue Sep 13 00:24:05 2022 ] Top1: 56.38% +[ Tue Sep 13 00:24:05 2022 ] Top5: 85.42% +[ Tue Sep 13 00:24:05 2022 ] Training epoch: 94 +[ Tue Sep 13 00:24:08 2022 ] Batch(0/243) done. Loss: 0.1094 lr:0.001000 +[ Tue Sep 13 00:25:01 2022 ] Batch(100/243) done. Loss: 0.0419 lr:0.001000 +[ Tue Sep 13 00:25:53 2022 ] Batch(200/243) done. Loss: 0.0359 lr:0.001000 +[ Tue Sep 13 00:26:16 2022 ] Eval epoch: 94 +[ Tue Sep 13 00:28:44 2022 ] Mean test loss of 796 batches: 2.687640428543091. +[ Tue Sep 13 00:28:44 2022 ] Top1: 56.05% +[ Tue Sep 13 00:28:44 2022 ] Top5: 85.21% +[ Tue Sep 13 00:28:45 2022 ] Training epoch: 95 +[ Tue Sep 13 00:29:18 2022 ] Batch(57/243) done. Loss: 0.1638 lr:0.001000 +[ Tue Sep 13 00:30:11 2022 ] Batch(157/243) done. Loss: 0.0316 lr:0.001000 +[ Tue Sep 13 00:30:55 2022 ] Eval epoch: 95 +[ Tue Sep 13 00:33:24 2022 ] Mean test loss of 796 batches: 2.674198865890503. +[ Tue Sep 13 00:33:25 2022 ] Top1: 56.44% +[ Tue Sep 13 00:33:25 2022 ] Top5: 85.35% +[ Tue Sep 13 00:33:25 2022 ] Training epoch: 96 +[ Tue Sep 13 00:33:36 2022 ] Batch(14/243) done. Loss: 0.0519 lr:0.001000 +[ Tue Sep 13 00:34:29 2022 ] Batch(114/243) done. Loss: 0.0252 lr:0.001000 +[ Tue Sep 13 00:35:21 2022 ] Batch(214/243) done. Loss: 0.0330 lr:0.001000 +[ Tue Sep 13 00:35:36 2022 ] Eval epoch: 96 +[ Tue Sep 13 00:38:05 2022 ] Mean test loss of 796 batches: 2.697723865509033. +[ Tue Sep 13 00:38:05 2022 ] Top1: 55.86% +[ Tue Sep 13 00:38:05 2022 ] Top5: 85.19% +[ Tue Sep 13 00:38:06 2022 ] Training epoch: 97 +[ Tue Sep 13 00:38:46 2022 ] Batch(71/243) done. Loss: 0.0756 lr:0.001000 +[ Tue Sep 13 00:39:39 2022 ] Batch(171/243) done. Loss: 0.0225 lr:0.001000 +[ Tue Sep 13 00:40:16 2022 ] Eval epoch: 97 +[ Tue Sep 13 00:42:45 2022 ] Mean test loss of 796 batches: 2.7128243446350098. +[ Tue Sep 13 00:42:45 2022 ] Top1: 55.78% +[ Tue Sep 13 00:42:45 2022 ] Top5: 85.03% +[ Tue Sep 13 00:42:46 2022 ] Training epoch: 98 +[ Tue Sep 13 00:43:04 2022 ] Batch(28/243) done. 
Loss: 0.0676 lr:0.001000 +[ Tue Sep 13 00:43:57 2022 ] Batch(128/243) done. Loss: 0.0412 lr:0.001000 +[ Tue Sep 13 00:44:49 2022 ] Batch(228/243) done. Loss: 0.0327 lr:0.001000 +[ Tue Sep 13 00:44:57 2022 ] Eval epoch: 98 +[ Tue Sep 13 00:47:25 2022 ] Mean test loss of 796 batches: 2.72189998626709. +[ Tue Sep 13 00:47:26 2022 ] Top1: 55.88% +[ Tue Sep 13 00:47:26 2022 ] Top5: 84.96% +[ Tue Sep 13 00:47:26 2022 ] Training epoch: 99 +[ Tue Sep 13 00:48:15 2022 ] Batch(85/243) done. Loss: 0.0767 lr:0.001000 +[ Tue Sep 13 00:49:07 2022 ] Batch(185/243) done. Loss: 0.1553 lr:0.001000 +[ Tue Sep 13 00:49:37 2022 ] Eval epoch: 99 +[ Tue Sep 13 00:52:06 2022 ] Mean test loss of 796 batches: 2.7023370265960693. +[ Tue Sep 13 00:52:06 2022 ] Top1: 56.12% +[ Tue Sep 13 00:52:07 2022 ] Top5: 84.94% +[ Tue Sep 13 00:52:07 2022 ] Training epoch: 100 +[ Tue Sep 13 00:52:33 2022 ] Batch(42/243) done. Loss: 0.0636 lr:0.001000 +[ Tue Sep 13 00:53:25 2022 ] Batch(142/243) done. Loss: 0.0703 lr:0.001000 +[ Tue Sep 13 00:54:18 2022 ] Batch(242/243) done. Loss: 0.0472 lr:0.001000 +[ Tue Sep 13 00:54:19 2022 ] Eval epoch: 100 +[ Tue Sep 13 00:56:47 2022 ] Mean test loss of 796 batches: 2.7367427349090576. 
+[ Tue Sep 13 00:56:48 2022 ] Top1: 56.10% +[ Tue Sep 13 00:56:48 2022 ] Top5: 85.17% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d320d7ab7867633c8673fbc8d76ce1847cca53e --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_bone_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-subject/train_bone_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_bone_motion_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_bone_motion_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/decouple_gcn.py 
b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, 
out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = 
torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), 
in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..157999eec74a0f5626c1d09acbbb59ff5e421fbd --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3791fa834c3f09decb16e0f7ec4152d246b53aaa6cb3ba55b7804744365641bd +size 4979902 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d1cbacf0f669119eab6d0acb455c5b1dbc62501 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_motion_xsub/log.txt @@ -0,0 +1,626 @@ +[ Tue Sep 13 10:03:55 2022 ] Parameters: +{'work_dir': './work_dir/ntu_bone_motion_xsub', 'model_saved_name': './save_models/ntu_bone_motion_xsub', 'Experiment_name': 'ntu_bone_motion_xsub', 'config': './config/nturgbd-cross-subject/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 
32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 10:03:55 2022 ] Training epoch: 1 +[ Tue Sep 13 10:04:44 2022 ] Batch(99/123) done. Loss: 2.9891 lr:0.100000 +[ Tue Sep 13 10:04:55 2022 ] Eval epoch: 1 +[ Tue Sep 13 10:05:45 2022 ] Mean test loss of 258 batches: 5.3130412101745605. +[ Tue Sep 13 10:05:45 2022 ] Top1: 4.62% +[ Tue Sep 13 10:05:45 2022 ] Top5: 17.12% +[ Tue Sep 13 10:05:46 2022 ] Training epoch: 2 +[ Tue Sep 13 10:06:29 2022 ] Batch(76/123) done. Loss: 2.5252 lr:0.100000 +[ Tue Sep 13 10:06:54 2022 ] Eval epoch: 2 +[ Tue Sep 13 10:07:45 2022 ] Mean test loss of 258 batches: 4.381324291229248. +[ Tue Sep 13 10:07:45 2022 ] Top1: 8.96% +[ Tue Sep 13 10:07:45 2022 ] Top5: 27.62% +[ Tue Sep 13 10:07:45 2022 ] Training epoch: 3 +[ Tue Sep 13 10:08:16 2022 ] Batch(53/123) done. 
Loss: 2.8846 lr:0.100000 +[ Tue Sep 13 10:08:54 2022 ] Eval epoch: 3 +[ Tue Sep 13 10:09:44 2022 ] Mean test loss of 258 batches: 4.099518299102783. +[ Tue Sep 13 10:09:44 2022 ] Top1: 13.61% +[ Tue Sep 13 10:09:44 2022 ] Top5: 42.49% +[ Tue Sep 13 10:09:44 2022 ] Training epoch: 4 +[ Tue Sep 13 10:10:03 2022 ] Batch(30/123) done. Loss: 1.9974 lr:0.100000 +[ Tue Sep 13 10:10:53 2022 ] Eval epoch: 4 +[ Tue Sep 13 10:11:43 2022 ] Mean test loss of 258 batches: 3.317542791366577. +[ Tue Sep 13 10:11:43 2022 ] Top1: 17.44% +[ Tue Sep 13 10:11:43 2022 ] Top5: 48.21% +[ Tue Sep 13 10:11:43 2022 ] Training epoch: 5 +[ Tue Sep 13 10:11:50 2022 ] Batch(7/123) done. Loss: 1.7207 lr:0.100000 +[ Tue Sep 13 10:12:44 2022 ] Batch(107/123) done. Loss: 1.3655 lr:0.100000 +[ Tue Sep 13 10:12:52 2022 ] Eval epoch: 5 +[ Tue Sep 13 10:13:42 2022 ] Mean test loss of 258 batches: 3.480855703353882. +[ Tue Sep 13 10:13:42 2022 ] Top1: 22.24% +[ Tue Sep 13 10:13:42 2022 ] Top5: 59.80% +[ Tue Sep 13 10:13:42 2022 ] Training epoch: 6 +[ Tue Sep 13 10:14:30 2022 ] Batch(84/123) done. Loss: 1.3335 lr:0.100000 +[ Tue Sep 13 10:14:50 2022 ] Eval epoch: 6 +[ Tue Sep 13 10:15:41 2022 ] Mean test loss of 258 batches: 6.322797775268555. +[ Tue Sep 13 10:15:41 2022 ] Top1: 12.08% +[ Tue Sep 13 10:15:41 2022 ] Top5: 34.04% +[ Tue Sep 13 10:15:41 2022 ] Training epoch: 7 +[ Tue Sep 13 10:16:16 2022 ] Batch(61/123) done. Loss: 1.4482 lr:0.100000 +[ Tue Sep 13 10:16:49 2022 ] Eval epoch: 7 +[ Tue Sep 13 10:17:40 2022 ] Mean test loss of 258 batches: 3.362936496734619. +[ Tue Sep 13 10:17:40 2022 ] Top1: 24.81% +[ Tue Sep 13 10:17:40 2022 ] Top5: 59.66% +[ Tue Sep 13 10:17:40 2022 ] Training epoch: 8 +[ Tue Sep 13 10:18:03 2022 ] Batch(38/123) done. Loss: 1.2326 lr:0.100000 +[ Tue Sep 13 10:18:48 2022 ] Eval epoch: 8 +[ Tue Sep 13 10:19:38 2022 ] Mean test loss of 258 batches: 2.9007482528686523. 
+[ Tue Sep 13 10:19:38 2022 ] Top1: 30.63% +[ Tue Sep 13 10:19:39 2022 ] Top5: 68.46% +[ Tue Sep 13 10:19:39 2022 ] Training epoch: 9 +[ Tue Sep 13 10:19:50 2022 ] Batch(15/123) done. Loss: 1.0663 lr:0.100000 +[ Tue Sep 13 10:20:43 2022 ] Batch(115/123) done. Loss: 0.8887 lr:0.100000 +[ Tue Sep 13 10:20:47 2022 ] Eval epoch: 9 +[ Tue Sep 13 10:21:38 2022 ] Mean test loss of 258 batches: 3.1942429542541504. +[ Tue Sep 13 10:21:38 2022 ] Top1: 28.76% +[ Tue Sep 13 10:21:38 2022 ] Top5: 64.62% +[ Tue Sep 13 10:21:38 2022 ] Training epoch: 10 +[ Tue Sep 13 10:22:30 2022 ] Batch(92/123) done. Loss: 0.9215 lr:0.100000 +[ Tue Sep 13 10:22:46 2022 ] Eval epoch: 10 +[ Tue Sep 13 10:23:36 2022 ] Mean test loss of 258 batches: 3.464066982269287. +[ Tue Sep 13 10:23:36 2022 ] Top1: 24.20% +[ Tue Sep 13 10:23:37 2022 ] Top5: 57.41% +[ Tue Sep 13 10:23:37 2022 ] Training epoch: 11 +[ Tue Sep 13 10:24:17 2022 ] Batch(69/123) done. Loss: 1.1655 lr:0.100000 +[ Tue Sep 13 10:24:45 2022 ] Eval epoch: 11 +[ Tue Sep 13 10:25:35 2022 ] Mean test loss of 258 batches: 4.239729881286621. +[ Tue Sep 13 10:25:35 2022 ] Top1: 26.44% +[ Tue Sep 13 10:25:36 2022 ] Top5: 61.07% +[ Tue Sep 13 10:25:36 2022 ] Training epoch: 12 +[ Tue Sep 13 10:26:03 2022 ] Batch(46/123) done. Loss: 0.8782 lr:0.100000 +[ Tue Sep 13 10:26:44 2022 ] Eval epoch: 12 +[ Tue Sep 13 10:27:34 2022 ] Mean test loss of 258 batches: 2.6224913597106934. +[ Tue Sep 13 10:27:34 2022 ] Top1: 40.12% +[ Tue Sep 13 10:27:35 2022 ] Top5: 77.62% +[ Tue Sep 13 10:27:35 2022 ] Training epoch: 13 +[ Tue Sep 13 10:27:50 2022 ] Batch(23/123) done. Loss: 1.0548 lr:0.100000 +[ Tue Sep 13 10:28:43 2022 ] Eval epoch: 13 +[ Tue Sep 13 10:29:33 2022 ] Mean test loss of 258 batches: 2.6538028717041016. +[ Tue Sep 13 10:29:33 2022 ] Top1: 34.23% +[ Tue Sep 13 10:29:34 2022 ] Top5: 70.93% +[ Tue Sep 13 10:29:34 2022 ] Training epoch: 14 +[ Tue Sep 13 10:29:37 2022 ] Batch(0/123) done. 
Loss: 0.8820 lr:0.100000 +[ Tue Sep 13 10:30:30 2022 ] Batch(100/123) done. Loss: 1.2009 lr:0.100000 +[ Tue Sep 13 10:30:42 2022 ] Eval epoch: 14 +[ Tue Sep 13 10:31:32 2022 ] Mean test loss of 258 batches: 3.0920660495758057. +[ Tue Sep 13 10:31:32 2022 ] Top1: 30.11% +[ Tue Sep 13 10:31:32 2022 ] Top5: 65.30% +[ Tue Sep 13 10:31:33 2022 ] Training epoch: 15 +[ Tue Sep 13 10:32:17 2022 ] Batch(77/123) done. Loss: 0.8715 lr:0.100000 +[ Tue Sep 13 10:32:41 2022 ] Eval epoch: 15 +[ Tue Sep 13 10:33:31 2022 ] Mean test loss of 258 batches: 2.1878817081451416. +[ Tue Sep 13 10:33:31 2022 ] Top1: 40.13% +[ Tue Sep 13 10:33:31 2022 ] Top5: 78.24% +[ Tue Sep 13 10:33:31 2022 ] Training epoch: 16 +[ Tue Sep 13 10:34:03 2022 ] Batch(54/123) done. Loss: 0.6270 lr:0.100000 +[ Tue Sep 13 10:34:40 2022 ] Eval epoch: 16 +[ Tue Sep 13 10:35:30 2022 ] Mean test loss of 258 batches: 2.6736347675323486. +[ Tue Sep 13 10:35:30 2022 ] Top1: 37.96% +[ Tue Sep 13 10:35:30 2022 ] Top5: 75.54% +[ Tue Sep 13 10:35:30 2022 ] Training epoch: 17 +[ Tue Sep 13 10:35:50 2022 ] Batch(31/123) done. Loss: 0.6887 lr:0.100000 +[ Tue Sep 13 10:36:38 2022 ] Eval epoch: 17 +[ Tue Sep 13 10:37:29 2022 ] Mean test loss of 258 batches: 3.669713258743286. +[ Tue Sep 13 10:37:29 2022 ] Top1: 37.02% +[ Tue Sep 13 10:37:29 2022 ] Top5: 76.65% +[ Tue Sep 13 10:37:29 2022 ] Training epoch: 18 +[ Tue Sep 13 10:37:36 2022 ] Batch(8/123) done. Loss: 0.8408 lr:0.100000 +[ Tue Sep 13 10:38:29 2022 ] Batch(108/123) done. Loss: 0.8188 lr:0.100000 +[ Tue Sep 13 10:38:37 2022 ] Eval epoch: 18 +[ Tue Sep 13 10:39:28 2022 ] Mean test loss of 258 batches: 3.194699764251709. +[ Tue Sep 13 10:39:28 2022 ] Top1: 42.10% +[ Tue Sep 13 10:39:28 2022 ] Top5: 79.03% +[ Tue Sep 13 10:39:28 2022 ] Training epoch: 19 +[ Tue Sep 13 10:40:16 2022 ] Batch(85/123) done. Loss: 0.8010 lr:0.100000 +[ Tue Sep 13 10:40:36 2022 ] Eval epoch: 19 +[ Tue Sep 13 10:41:27 2022 ] Mean test loss of 258 batches: 2.3766753673553467. 
+[ Tue Sep 13 10:41:27 2022 ] Top1: 42.75% +[ Tue Sep 13 10:41:27 2022 ] Top5: 80.23% +[ Tue Sep 13 10:41:27 2022 ] Training epoch: 20 +[ Tue Sep 13 10:42:03 2022 ] Batch(62/123) done. Loss: 1.0108 lr:0.100000 +[ Tue Sep 13 10:42:35 2022 ] Eval epoch: 20 +[ Tue Sep 13 10:43:25 2022 ] Mean test loss of 258 batches: 3.601966142654419. +[ Tue Sep 13 10:43:25 2022 ] Top1: 39.66% +[ Tue Sep 13 10:43:25 2022 ] Top5: 77.24% +[ Tue Sep 13 10:43:26 2022 ] Training epoch: 21 +[ Tue Sep 13 10:43:49 2022 ] Batch(39/123) done. Loss: 0.4037 lr:0.100000 +[ Tue Sep 13 10:44:34 2022 ] Eval epoch: 21 +[ Tue Sep 13 10:45:24 2022 ] Mean test loss of 258 batches: 2.5317587852478027. +[ Tue Sep 13 10:45:24 2022 ] Top1: 40.53% +[ Tue Sep 13 10:45:24 2022 ] Top5: 82.08% +[ Tue Sep 13 10:45:24 2022 ] Training epoch: 22 +[ Tue Sep 13 10:45:36 2022 ] Batch(16/123) done. Loss: 0.6688 lr:0.100000 +[ Tue Sep 13 10:46:29 2022 ] Batch(116/123) done. Loss: 0.6076 lr:0.100000 +[ Tue Sep 13 10:46:33 2022 ] Eval epoch: 22 +[ Tue Sep 13 10:47:23 2022 ] Mean test loss of 258 batches: 2.8892345428466797. +[ Tue Sep 13 10:47:23 2022 ] Top1: 39.92% +[ Tue Sep 13 10:47:23 2022 ] Top5: 76.73% +[ Tue Sep 13 10:47:23 2022 ] Training epoch: 23 +[ Tue Sep 13 10:48:15 2022 ] Batch(93/123) done. Loss: 0.5551 lr:0.100000 +[ Tue Sep 13 10:48:31 2022 ] Eval epoch: 23 +[ Tue Sep 13 10:49:21 2022 ] Mean test loss of 258 batches: 3.017786741256714. +[ Tue Sep 13 10:49:21 2022 ] Top1: 41.66% +[ Tue Sep 13 10:49:22 2022 ] Top5: 77.39% +[ Tue Sep 13 10:49:22 2022 ] Training epoch: 24 +[ Tue Sep 13 10:50:02 2022 ] Batch(70/123) done. Loss: 0.5009 lr:0.100000 +[ Tue Sep 13 10:50:30 2022 ] Eval epoch: 24 +[ Tue Sep 13 10:51:20 2022 ] Mean test loss of 258 batches: 3.0127227306365967. +[ Tue Sep 13 10:51:20 2022 ] Top1: 37.76% +[ Tue Sep 13 10:51:20 2022 ] Top5: 76.32% +[ Tue Sep 13 10:51:20 2022 ] Training epoch: 25 +[ Tue Sep 13 10:51:49 2022 ] Batch(47/123) done. 
Loss: 0.5799 lr:0.100000 +[ Tue Sep 13 10:52:29 2022 ] Eval epoch: 25 +[ Tue Sep 13 10:53:19 2022 ] Mean test loss of 258 batches: 2.429211139678955. +[ Tue Sep 13 10:53:19 2022 ] Top1: 47.58% +[ Tue Sep 13 10:53:19 2022 ] Top5: 82.33% +[ Tue Sep 13 10:53:19 2022 ] Training epoch: 26 +[ Tue Sep 13 10:53:35 2022 ] Batch(24/123) done. Loss: 0.3484 lr:0.100000 +[ Tue Sep 13 10:54:28 2022 ] Eval epoch: 26 +[ Tue Sep 13 10:55:18 2022 ] Mean test loss of 258 batches: 2.745936393737793. +[ Tue Sep 13 10:55:18 2022 ] Top1: 44.03% +[ Tue Sep 13 10:55:18 2022 ] Top5: 80.53% +[ Tue Sep 13 10:55:18 2022 ] Training epoch: 27 +[ Tue Sep 13 10:55:22 2022 ] Batch(1/123) done. Loss: 0.5052 lr:0.100000 +[ Tue Sep 13 10:56:15 2022 ] Batch(101/123) done. Loss: 0.4921 lr:0.100000 +[ Tue Sep 13 10:56:27 2022 ] Eval epoch: 27 +[ Tue Sep 13 10:57:17 2022 ] Mean test loss of 258 batches: 2.7233641147613525. +[ Tue Sep 13 10:57:17 2022 ] Top1: 43.19% +[ Tue Sep 13 10:57:17 2022 ] Top5: 82.20% +[ Tue Sep 13 10:57:17 2022 ] Training epoch: 28 +[ Tue Sep 13 10:58:02 2022 ] Batch(78/123) done. Loss: 0.5670 lr:0.100000 +[ Tue Sep 13 10:58:25 2022 ] Eval epoch: 28 +[ Tue Sep 13 10:59:16 2022 ] Mean test loss of 258 batches: 39.702354431152344. +[ Tue Sep 13 10:59:16 2022 ] Top1: 3.87% +[ Tue Sep 13 10:59:16 2022 ] Top5: 15.35% +[ Tue Sep 13 10:59:16 2022 ] Training epoch: 29 +[ Tue Sep 13 10:59:48 2022 ] Batch(55/123) done. Loss: 0.5970 lr:0.100000 +[ Tue Sep 13 11:00:24 2022 ] Eval epoch: 29 +[ Tue Sep 13 11:01:14 2022 ] Mean test loss of 258 batches: 4.118558883666992. +[ Tue Sep 13 11:01:15 2022 ] Top1: 34.68% +[ Tue Sep 13 11:01:15 2022 ] Top5: 68.70% +[ Tue Sep 13 11:01:15 2022 ] Training epoch: 30 +[ Tue Sep 13 11:01:35 2022 ] Batch(32/123) done. Loss: 0.3338 lr:0.100000 +[ Tue Sep 13 11:02:23 2022 ] Eval epoch: 30 +[ Tue Sep 13 11:03:13 2022 ] Mean test loss of 258 batches: 3.561509847640991. 
+[ Tue Sep 13 11:03:13 2022 ] Top1: 42.97% +[ Tue Sep 13 11:03:13 2022 ] Top5: 80.15% +[ Tue Sep 13 11:03:14 2022 ] Training epoch: 31 +[ Tue Sep 13 11:03:21 2022 ] Batch(9/123) done. Loss: 0.2743 lr:0.100000 +[ Tue Sep 13 11:04:15 2022 ] Batch(109/123) done. Loss: 0.3693 lr:0.100000 +[ Tue Sep 13 11:04:22 2022 ] Eval epoch: 31 +[ Tue Sep 13 11:05:12 2022 ] Mean test loss of 258 batches: 2.743839740753174. +[ Tue Sep 13 11:05:12 2022 ] Top1: 40.47% +[ Tue Sep 13 11:05:12 2022 ] Top5: 80.02% +[ Tue Sep 13 11:05:12 2022 ] Training epoch: 32 +[ Tue Sep 13 11:06:01 2022 ] Batch(86/123) done. Loss: 0.3306 lr:0.100000 +[ Tue Sep 13 11:06:21 2022 ] Eval epoch: 32 +[ Tue Sep 13 11:07:11 2022 ] Mean test loss of 258 batches: 6.932523727416992. +[ Tue Sep 13 11:07:11 2022 ] Top1: 25.40% +[ Tue Sep 13 11:07:11 2022 ] Top5: 58.93% +[ Tue Sep 13 11:07:11 2022 ] Training epoch: 33 +[ Tue Sep 13 11:07:48 2022 ] Batch(63/123) done. Loss: 0.2861 lr:0.100000 +[ Tue Sep 13 11:08:20 2022 ] Eval epoch: 33 +[ Tue Sep 13 11:09:10 2022 ] Mean test loss of 258 batches: 2.9469125270843506. +[ Tue Sep 13 11:09:10 2022 ] Top1: 47.30% +[ Tue Sep 13 11:09:11 2022 ] Top5: 84.76% +[ Tue Sep 13 11:09:11 2022 ] Training epoch: 34 +[ Tue Sep 13 11:09:35 2022 ] Batch(40/123) done. Loss: 0.3769 lr:0.100000 +[ Tue Sep 13 11:10:19 2022 ] Eval epoch: 34 +[ Tue Sep 13 11:11:09 2022 ] Mean test loss of 258 batches: 2.7189626693725586. +[ Tue Sep 13 11:11:09 2022 ] Top1: 45.64% +[ Tue Sep 13 11:11:09 2022 ] Top5: 81.31% +[ Tue Sep 13 11:11:09 2022 ] Training epoch: 35 +[ Tue Sep 13 11:11:22 2022 ] Batch(17/123) done. Loss: 0.7309 lr:0.100000 +[ Tue Sep 13 11:12:15 2022 ] Batch(117/123) done. Loss: 0.3021 lr:0.100000 +[ Tue Sep 13 11:12:18 2022 ] Eval epoch: 35 +[ Tue Sep 13 11:13:08 2022 ] Mean test loss of 258 batches: 4.895280838012695. 
+[ Tue Sep 13 11:13:08 2022 ] Top1: 29.75% +[ Tue Sep 13 11:13:08 2022 ] Top5: 58.14% +[ Tue Sep 13 11:13:08 2022 ] Training epoch: 36 +[ Tue Sep 13 11:14:01 2022 ] Batch(94/123) done. Loss: 0.3497 lr:0.100000 +[ Tue Sep 13 11:14:16 2022 ] Eval epoch: 36 +[ Tue Sep 13 11:15:06 2022 ] Mean test loss of 258 batches: 3.470442056655884. +[ Tue Sep 13 11:15:06 2022 ] Top1: 37.30% +[ Tue Sep 13 11:15:06 2022 ] Top5: 76.48% +[ Tue Sep 13 11:15:07 2022 ] Training epoch: 37 +[ Tue Sep 13 11:15:47 2022 ] Batch(71/123) done. Loss: 0.2233 lr:0.100000 +[ Tue Sep 13 11:16:15 2022 ] Eval epoch: 37 +[ Tue Sep 13 11:17:05 2022 ] Mean test loss of 258 batches: 8.42135238647461. +[ Tue Sep 13 11:17:05 2022 ] Top1: 20.71% +[ Tue Sep 13 11:17:05 2022 ] Top5: 52.13% +[ Tue Sep 13 11:17:05 2022 ] Training epoch: 38 +[ Tue Sep 13 11:17:34 2022 ] Batch(48/123) done. Loss: 0.3567 lr:0.100000 +[ Tue Sep 13 11:18:13 2022 ] Eval epoch: 38 +[ Tue Sep 13 11:19:04 2022 ] Mean test loss of 258 batches: 6.156604766845703. +[ Tue Sep 13 11:19:04 2022 ] Top1: 32.16% +[ Tue Sep 13 11:19:04 2022 ] Top5: 61.78% +[ Tue Sep 13 11:19:04 2022 ] Training epoch: 39 +[ Tue Sep 13 11:19:21 2022 ] Batch(25/123) done. Loss: 0.2740 lr:0.100000 +[ Tue Sep 13 11:20:13 2022 ] Eval epoch: 39 +[ Tue Sep 13 11:21:03 2022 ] Mean test loss of 258 batches: 3.301473379135132. +[ Tue Sep 13 11:21:03 2022 ] Top1: 44.66% +[ Tue Sep 13 11:21:04 2022 ] Top5: 79.78% +[ Tue Sep 13 11:21:04 2022 ] Training epoch: 40 +[ Tue Sep 13 11:21:08 2022 ] Batch(2/123) done. Loss: 0.4649 lr:0.100000 +[ Tue Sep 13 11:22:01 2022 ] Batch(102/123) done. Loss: 0.4007 lr:0.100000 +[ Tue Sep 13 11:22:11 2022 ] Eval epoch: 40 +[ Tue Sep 13 11:23:02 2022 ] Mean test loss of 258 batches: 4.057832717895508. +[ Tue Sep 13 11:23:02 2022 ] Top1: 42.69% +[ Tue Sep 13 11:23:02 2022 ] Top5: 79.61% +[ Tue Sep 13 11:23:02 2022 ] Training epoch: 41 +[ Tue Sep 13 11:23:47 2022 ] Batch(79/123) done. 
Loss: 0.3378 lr:0.100000 +[ Tue Sep 13 11:24:11 2022 ] Eval epoch: 41 +[ Tue Sep 13 11:25:01 2022 ] Mean test loss of 258 batches: 4.960639476776123. +[ Tue Sep 13 11:25:01 2022 ] Top1: 34.41% +[ Tue Sep 13 11:25:01 2022 ] Top5: 74.06% +[ Tue Sep 13 11:25:01 2022 ] Training epoch: 42 +[ Tue Sep 13 11:25:34 2022 ] Batch(56/123) done. Loss: 0.0757 lr:0.100000 +[ Tue Sep 13 11:26:10 2022 ] Eval epoch: 42 +[ Tue Sep 13 11:27:00 2022 ] Mean test loss of 258 batches: 6.401731491088867. +[ Tue Sep 13 11:27:00 2022 ] Top1: 36.89% +[ Tue Sep 13 11:27:00 2022 ] Top5: 75.11% +[ Tue Sep 13 11:27:01 2022 ] Training epoch: 43 +[ Tue Sep 13 11:27:21 2022 ] Batch(33/123) done. Loss: 0.2113 lr:0.100000 +[ Tue Sep 13 11:28:09 2022 ] Eval epoch: 43 +[ Tue Sep 13 11:28:59 2022 ] Mean test loss of 258 batches: 2.578129768371582. +[ Tue Sep 13 11:28:59 2022 ] Top1: 49.45% +[ Tue Sep 13 11:28:59 2022 ] Top5: 84.18% +[ Tue Sep 13 11:28:59 2022 ] Training epoch: 44 +[ Tue Sep 13 11:29:08 2022 ] Batch(10/123) done. Loss: 0.1640 lr:0.100000 +[ Tue Sep 13 11:30:01 2022 ] Batch(110/123) done. Loss: 0.2625 lr:0.100000 +[ Tue Sep 13 11:30:07 2022 ] Eval epoch: 44 +[ Tue Sep 13 11:30:58 2022 ] Mean test loss of 258 batches: 3.3866872787475586. +[ Tue Sep 13 11:30:58 2022 ] Top1: 46.43% +[ Tue Sep 13 11:30:58 2022 ] Top5: 78.41% +[ Tue Sep 13 11:30:58 2022 ] Training epoch: 45 +[ Tue Sep 13 11:31:47 2022 ] Batch(87/123) done. Loss: 0.2016 lr:0.100000 +[ Tue Sep 13 11:32:06 2022 ] Eval epoch: 45 +[ Tue Sep 13 11:32:56 2022 ] Mean test loss of 258 batches: 2.310946226119995. +[ Tue Sep 13 11:32:56 2022 ] Top1: 47.83% +[ Tue Sep 13 11:32:56 2022 ] Top5: 82.22% +[ Tue Sep 13 11:32:57 2022 ] Training epoch: 46 +[ Tue Sep 13 11:33:34 2022 ] Batch(64/123) done. Loss: 0.1365 lr:0.100000 +[ Tue Sep 13 11:34:05 2022 ] Eval epoch: 46 +[ Tue Sep 13 11:34:55 2022 ] Mean test loss of 258 batches: 3.6921119689941406. 
+[ Tue Sep 13 11:34:55 2022 ] Top1: 45.54% +[ Tue Sep 13 11:34:55 2022 ] Top5: 78.01% +[ Tue Sep 13 11:34:55 2022 ] Training epoch: 47 +[ Tue Sep 13 11:35:20 2022 ] Batch(41/123) done. Loss: 0.1992 lr:0.100000 +[ Tue Sep 13 11:36:04 2022 ] Eval epoch: 47 +[ Tue Sep 13 11:36:54 2022 ] Mean test loss of 258 batches: 3.4982993602752686. +[ Tue Sep 13 11:36:54 2022 ] Top1: 41.20% +[ Tue Sep 13 11:36:54 2022 ] Top5: 78.97% +[ Tue Sep 13 11:36:54 2022 ] Training epoch: 48 +[ Tue Sep 13 11:37:07 2022 ] Batch(18/123) done. Loss: 0.1265 lr:0.100000 +[ Tue Sep 13 11:38:01 2022 ] Batch(118/123) done. Loss: 0.3362 lr:0.100000 +[ Tue Sep 13 11:38:03 2022 ] Eval epoch: 48 +[ Tue Sep 13 11:38:53 2022 ] Mean test loss of 258 batches: 6.748397350311279. +[ Tue Sep 13 11:38:53 2022 ] Top1: 31.89% +[ Tue Sep 13 11:38:53 2022 ] Top5: 63.62% +[ Tue Sep 13 11:38:53 2022 ] Training epoch: 49 +[ Tue Sep 13 11:39:47 2022 ] Batch(95/123) done. Loss: 0.3824 lr:0.100000 +[ Tue Sep 13 11:40:01 2022 ] Eval epoch: 49 +[ Tue Sep 13 11:40:52 2022 ] Mean test loss of 258 batches: 4.883387565612793. +[ Tue Sep 13 11:40:52 2022 ] Top1: 31.64% +[ Tue Sep 13 11:40:52 2022 ] Top5: 64.11% +[ Tue Sep 13 11:40:52 2022 ] Training epoch: 50 +[ Tue Sep 13 11:41:34 2022 ] Batch(72/123) done. Loss: 0.0751 lr:0.100000 +[ Tue Sep 13 11:42:00 2022 ] Eval epoch: 50 +[ Tue Sep 13 11:42:50 2022 ] Mean test loss of 258 batches: 3.0714542865753174. +[ Tue Sep 13 11:42:51 2022 ] Top1: 49.65% +[ Tue Sep 13 11:42:51 2022 ] Top5: 82.91% +[ Tue Sep 13 11:42:51 2022 ] Training epoch: 51 +[ Tue Sep 13 11:43:20 2022 ] Batch(49/123) done. Loss: 0.1486 lr:0.100000 +[ Tue Sep 13 11:43:59 2022 ] Eval epoch: 51 +[ Tue Sep 13 11:44:49 2022 ] Mean test loss of 258 batches: 2.816194772720337. +[ Tue Sep 13 11:44:49 2022 ] Top1: 53.70% +[ Tue Sep 13 11:44:49 2022 ] Top5: 86.68% +[ Tue Sep 13 11:44:49 2022 ] Training epoch: 52 +[ Tue Sep 13 11:45:06 2022 ] Batch(26/123) done. 
Loss: 0.2205 lr:0.100000 +[ Tue Sep 13 11:45:57 2022 ] Eval epoch: 52 +[ Tue Sep 13 11:46:48 2022 ] Mean test loss of 258 batches: 3.8996641635894775. +[ Tue Sep 13 11:46:48 2022 ] Top1: 43.43% +[ Tue Sep 13 11:46:48 2022 ] Top5: 74.88% +[ Tue Sep 13 11:46:48 2022 ] Training epoch: 53 +[ Tue Sep 13 11:46:53 2022 ] Batch(3/123) done. Loss: 0.1119 lr:0.100000 +[ Tue Sep 13 11:47:46 2022 ] Batch(103/123) done. Loss: 0.0956 lr:0.100000 +[ Tue Sep 13 11:47:56 2022 ] Eval epoch: 53 +[ Tue Sep 13 11:48:46 2022 ] Mean test loss of 258 batches: 8.677329063415527. +[ Tue Sep 13 11:48:46 2022 ] Top1: 29.01% +[ Tue Sep 13 11:48:47 2022 ] Top5: 61.95% +[ Tue Sep 13 11:48:47 2022 ] Training epoch: 54 +[ Tue Sep 13 11:49:33 2022 ] Batch(80/123) done. Loss: 0.1684 lr:0.100000 +[ Tue Sep 13 11:49:55 2022 ] Eval epoch: 54 +[ Tue Sep 13 11:50:46 2022 ] Mean test loss of 258 batches: 3.663032293319702. +[ Tue Sep 13 11:50:46 2022 ] Top1: 40.10% +[ Tue Sep 13 11:50:46 2022 ] Top5: 73.03% +[ Tue Sep 13 11:50:46 2022 ] Training epoch: 55 +[ Tue Sep 13 11:51:19 2022 ] Batch(57/123) done. Loss: 0.4345 lr:0.100000 +[ Tue Sep 13 11:51:54 2022 ] Eval epoch: 55 +[ Tue Sep 13 11:52:44 2022 ] Mean test loss of 258 batches: 5.364523410797119. +[ Tue Sep 13 11:52:44 2022 ] Top1: 44.34% +[ Tue Sep 13 11:52:45 2022 ] Top5: 76.92% +[ Tue Sep 13 11:52:45 2022 ] Training epoch: 56 +[ Tue Sep 13 11:53:06 2022 ] Batch(34/123) done. Loss: 0.1112 lr:0.100000 +[ Tue Sep 13 11:53:53 2022 ] Eval epoch: 56 +[ Tue Sep 13 11:54:43 2022 ] Mean test loss of 258 batches: 6.318780422210693. +[ Tue Sep 13 11:54:43 2022 ] Top1: 35.79% +[ Tue Sep 13 11:54:43 2022 ] Top5: 69.65% +[ Tue Sep 13 11:54:43 2022 ] Training epoch: 57 +[ Tue Sep 13 11:54:52 2022 ] Batch(11/123) done. Loss: 0.2920 lr:0.100000 +[ Tue Sep 13 11:55:46 2022 ] Batch(111/123) done. Loss: 0.2156 lr:0.100000 +[ Tue Sep 13 11:55:52 2022 ] Eval epoch: 57 +[ Tue Sep 13 11:56:42 2022 ] Mean test loss of 258 batches: 3.506568670272827. 
+[ Tue Sep 13 11:56:42 2022 ] Top1: 45.98% +[ Tue Sep 13 11:56:42 2022 ] Top5: 79.67% +[ Tue Sep 13 11:56:42 2022 ] Training epoch: 58 +[ Tue Sep 13 11:57:32 2022 ] Batch(88/123) done. Loss: 0.2862 lr:0.100000 +[ Tue Sep 13 11:57:50 2022 ] Eval epoch: 58 +[ Tue Sep 13 11:58:41 2022 ] Mean test loss of 258 batches: 2.384162425994873. +[ Tue Sep 13 11:58:41 2022 ] Top1: 51.17% +[ Tue Sep 13 11:58:41 2022 ] Top5: 84.31% +[ Tue Sep 13 11:58:41 2022 ] Training epoch: 59 +[ Tue Sep 13 11:59:19 2022 ] Batch(65/123) done. Loss: 0.2303 lr:0.100000 +[ Tue Sep 13 11:59:50 2022 ] Eval epoch: 59 +[ Tue Sep 13 12:00:40 2022 ] Mean test loss of 258 batches: 49.66264724731445. +[ Tue Sep 13 12:00:40 2022 ] Top1: 5.65% +[ Tue Sep 13 12:00:40 2022 ] Top5: 19.45% +[ Tue Sep 13 12:00:40 2022 ] Training epoch: 60 +[ Tue Sep 13 12:01:06 2022 ] Batch(42/123) done. Loss: 0.2216 lr:0.100000 +[ Tue Sep 13 12:01:49 2022 ] Eval epoch: 60 +[ Tue Sep 13 12:02:39 2022 ] Mean test loss of 258 batches: 4.438194751739502. +[ Tue Sep 13 12:02:39 2022 ] Top1: 45.27% +[ Tue Sep 13 12:02:39 2022 ] Top5: 78.47% +[ Tue Sep 13 12:02:39 2022 ] Training epoch: 61 +[ Tue Sep 13 12:02:52 2022 ] Batch(19/123) done. Loss: 0.1225 lr:0.010000 +[ Tue Sep 13 12:03:46 2022 ] Batch(119/123) done. Loss: 0.0254 lr:0.010000 +[ Tue Sep 13 12:03:47 2022 ] Eval epoch: 61 +[ Tue Sep 13 12:04:38 2022 ] Mean test loss of 258 batches: 2.1404409408569336. +[ Tue Sep 13 12:04:38 2022 ] Top1: 60.50% +[ Tue Sep 13 12:04:38 2022 ] Top5: 89.84% +[ Tue Sep 13 12:04:38 2022 ] Training epoch: 62 +[ Tue Sep 13 12:05:33 2022 ] Batch(96/123) done. Loss: 0.0363 lr:0.010000 +[ Tue Sep 13 12:05:47 2022 ] Eval epoch: 62 +[ Tue Sep 13 12:06:37 2022 ] Mean test loss of 258 batches: 1.9845335483551025. +[ Tue Sep 13 12:06:37 2022 ] Top1: 62.73% +[ Tue Sep 13 12:06:37 2022 ] Top5: 90.69% +[ Tue Sep 13 12:06:37 2022 ] Training epoch: 63 +[ Tue Sep 13 12:07:19 2022 ] Batch(73/123) done. 
Loss: 0.0227 lr:0.010000 +[ Tue Sep 13 12:07:46 2022 ] Eval epoch: 63 +[ Tue Sep 13 12:08:36 2022 ] Mean test loss of 258 batches: 2.0647549629211426. +[ Tue Sep 13 12:08:36 2022 ] Top1: 62.84% +[ Tue Sep 13 12:08:36 2022 ] Top5: 90.76% +[ Tue Sep 13 12:08:36 2022 ] Training epoch: 64 +[ Tue Sep 13 12:09:06 2022 ] Batch(50/123) done. Loss: 0.0432 lr:0.010000 +[ Tue Sep 13 12:09:45 2022 ] Eval epoch: 64 +[ Tue Sep 13 12:10:35 2022 ] Mean test loss of 258 batches: 2.042346477508545. +[ Tue Sep 13 12:10:35 2022 ] Top1: 63.35% +[ Tue Sep 13 12:10:35 2022 ] Top5: 90.82% +[ Tue Sep 13 12:10:35 2022 ] Training epoch: 65 +[ Tue Sep 13 12:10:53 2022 ] Batch(27/123) done. Loss: 0.0294 lr:0.010000 +[ Tue Sep 13 12:11:43 2022 ] Eval epoch: 65 +[ Tue Sep 13 12:12:34 2022 ] Mean test loss of 258 batches: 2.1357216835021973. +[ Tue Sep 13 12:12:34 2022 ] Top1: 62.56% +[ Tue Sep 13 12:12:34 2022 ] Top5: 90.68% +[ Tue Sep 13 12:12:34 2022 ] Training epoch: 66 +[ Tue Sep 13 12:12:39 2022 ] Batch(4/123) done. Loss: 0.0282 lr:0.010000 +[ Tue Sep 13 12:13:32 2022 ] Batch(104/123) done. Loss: 0.0211 lr:0.010000 +[ Tue Sep 13 12:13:42 2022 ] Eval epoch: 66 +[ Tue Sep 13 12:14:32 2022 ] Mean test loss of 258 batches: 2.2517709732055664. +[ Tue Sep 13 12:14:32 2022 ] Top1: 60.81% +[ Tue Sep 13 12:14:33 2022 ] Top5: 89.84% +[ Tue Sep 13 12:14:33 2022 ] Training epoch: 67 +[ Tue Sep 13 12:15:19 2022 ] Batch(81/123) done. Loss: 0.0229 lr:0.010000 +[ Tue Sep 13 12:15:41 2022 ] Eval epoch: 67 +[ Tue Sep 13 12:16:31 2022 ] Mean test loss of 258 batches: 2.1569643020629883. +[ Tue Sep 13 12:16:31 2022 ] Top1: 62.36% +[ Tue Sep 13 12:16:31 2022 ] Top5: 90.28% +[ Tue Sep 13 12:16:31 2022 ] Training epoch: 68 +[ Tue Sep 13 12:17:05 2022 ] Batch(58/123) done. Loss: 0.0267 lr:0.010000 +[ Tue Sep 13 12:17:39 2022 ] Eval epoch: 68 +[ Tue Sep 13 12:18:30 2022 ] Mean test loss of 258 batches: 2.168156385421753. 
+[ Tue Sep 13 12:18:30 2022 ] Top1: 62.16% +[ Tue Sep 13 12:18:30 2022 ] Top5: 90.57% +[ Tue Sep 13 12:18:30 2022 ] Training epoch: 69 +[ Tue Sep 13 12:18:52 2022 ] Batch(35/123) done. Loss: 0.0098 lr:0.010000 +[ Tue Sep 13 12:19:38 2022 ] Eval epoch: 69 +[ Tue Sep 13 12:20:28 2022 ] Mean test loss of 258 batches: 2.1420187950134277. +[ Tue Sep 13 12:20:28 2022 ] Top1: 62.91% +[ Tue Sep 13 12:20:29 2022 ] Top5: 90.71% +[ Tue Sep 13 12:20:29 2022 ] Training epoch: 70 +[ Tue Sep 13 12:20:38 2022 ] Batch(12/123) done. Loss: 0.0279 lr:0.010000 +[ Tue Sep 13 12:21:31 2022 ] Batch(112/123) done. Loss: 0.0426 lr:0.010000 +[ Tue Sep 13 12:21:37 2022 ] Eval epoch: 70 +[ Tue Sep 13 12:22:27 2022 ] Mean test loss of 258 batches: 2.2135446071624756. +[ Tue Sep 13 12:22:27 2022 ] Top1: 62.32% +[ Tue Sep 13 12:22:27 2022 ] Top5: 90.07% +[ Tue Sep 13 12:22:27 2022 ] Training epoch: 71 +[ Tue Sep 13 12:23:18 2022 ] Batch(89/123) done. Loss: 0.0542 lr:0.010000 +[ Tue Sep 13 12:23:36 2022 ] Eval epoch: 71 +[ Tue Sep 13 12:24:26 2022 ] Mean test loss of 258 batches: 2.23406720161438. +[ Tue Sep 13 12:24:26 2022 ] Top1: 62.39% +[ Tue Sep 13 12:24:26 2022 ] Top5: 90.33% +[ Tue Sep 13 12:24:27 2022 ] Training epoch: 72 +[ Tue Sep 13 12:25:05 2022 ] Batch(66/123) done. Loss: 0.0167 lr:0.010000 +[ Tue Sep 13 12:25:35 2022 ] Eval epoch: 72 +[ Tue Sep 13 12:26:25 2022 ] Mean test loss of 258 batches: 2.200518846511841. +[ Tue Sep 13 12:26:25 2022 ] Top1: 62.86% +[ Tue Sep 13 12:26:25 2022 ] Top5: 90.67% +[ Tue Sep 13 12:26:25 2022 ] Training epoch: 73 +[ Tue Sep 13 12:26:51 2022 ] Batch(43/123) done. Loss: 0.0226 lr:0.010000 +[ Tue Sep 13 12:27:33 2022 ] Eval epoch: 73 +[ Tue Sep 13 12:28:24 2022 ] Mean test loss of 258 batches: 2.1740310192108154. +[ Tue Sep 13 12:28:24 2022 ] Top1: 63.28% +[ Tue Sep 13 12:28:24 2022 ] Top5: 90.87% +[ Tue Sep 13 12:28:24 2022 ] Training epoch: 74 +[ Tue Sep 13 12:28:38 2022 ] Batch(20/123) done. 
Loss: 0.0171 lr:0.010000 +[ Tue Sep 13 12:29:31 2022 ] Batch(120/123) done. Loss: 0.0205 lr:0.010000 +[ Tue Sep 13 12:29:32 2022 ] Eval epoch: 74 +[ Tue Sep 13 12:30:22 2022 ] Mean test loss of 258 batches: 2.2767016887664795. +[ Tue Sep 13 12:30:22 2022 ] Top1: 62.21% +[ Tue Sep 13 12:30:23 2022 ] Top5: 90.45% +[ Tue Sep 13 12:30:23 2022 ] Training epoch: 75 +[ Tue Sep 13 12:31:17 2022 ] Batch(97/123) done. Loss: 0.0200 lr:0.010000 +[ Tue Sep 13 12:31:31 2022 ] Eval epoch: 75 +[ Tue Sep 13 12:32:21 2022 ] Mean test loss of 258 batches: 2.21061372756958. +[ Tue Sep 13 12:32:21 2022 ] Top1: 62.63% +[ Tue Sep 13 12:32:21 2022 ] Top5: 90.77% +[ Tue Sep 13 12:32:21 2022 ] Training epoch: 76 +[ Tue Sep 13 12:33:04 2022 ] Batch(74/123) done. Loss: 0.0267 lr:0.010000 +[ Tue Sep 13 12:33:30 2022 ] Eval epoch: 76 +[ Tue Sep 13 12:34:20 2022 ] Mean test loss of 258 batches: 2.37528920173645. +[ Tue Sep 13 12:34:20 2022 ] Top1: 62.09% +[ Tue Sep 13 12:34:20 2022 ] Top5: 90.17% +[ Tue Sep 13 12:34:20 2022 ] Training epoch: 77 +[ Tue Sep 13 12:34:51 2022 ] Batch(51/123) done. Loss: 0.0355 lr:0.010000 +[ Tue Sep 13 12:35:29 2022 ] Eval epoch: 77 +[ Tue Sep 13 12:36:19 2022 ] Mean test loss of 258 batches: 2.359511375427246. +[ Tue Sep 13 12:36:19 2022 ] Top1: 61.38% +[ Tue Sep 13 12:36:19 2022 ] Top5: 90.07% +[ Tue Sep 13 12:36:19 2022 ] Training epoch: 78 +[ Tue Sep 13 12:36:37 2022 ] Batch(28/123) done. Loss: 0.0080 lr:0.010000 +[ Tue Sep 13 12:37:27 2022 ] Eval epoch: 78 +[ Tue Sep 13 12:38:18 2022 ] Mean test loss of 258 batches: 2.328934907913208. +[ Tue Sep 13 12:38:18 2022 ] Top1: 62.75% +[ Tue Sep 13 12:38:18 2022 ] Top5: 90.51% +[ Tue Sep 13 12:38:18 2022 ] Training epoch: 79 +[ Tue Sep 13 12:38:23 2022 ] Batch(5/123) done. Loss: 0.0317 lr:0.010000 +[ Tue Sep 13 12:39:17 2022 ] Batch(105/123) done. Loss: 0.0051 lr:0.010000 +[ Tue Sep 13 12:39:26 2022 ] Eval epoch: 79 +[ Tue Sep 13 12:40:16 2022 ] Mean test loss of 258 batches: 2.375866651535034. 
+[ Tue Sep 13 12:40:16 2022 ] Top1: 62.17% +[ Tue Sep 13 12:40:16 2022 ] Top5: 90.34% +[ Tue Sep 13 12:40:16 2022 ] Training epoch: 80 +[ Tue Sep 13 12:41:03 2022 ] Batch(82/123) done. Loss: 0.0211 lr:0.010000 +[ Tue Sep 13 12:41:25 2022 ] Eval epoch: 80 +[ Tue Sep 13 12:42:16 2022 ] Mean test loss of 258 batches: 2.460627555847168. +[ Tue Sep 13 12:42:16 2022 ] Top1: 61.86% +[ Tue Sep 13 12:42:16 2022 ] Top5: 90.10% +[ Tue Sep 13 12:42:16 2022 ] Training epoch: 81 +[ Tue Sep 13 12:42:50 2022 ] Batch(59/123) done. Loss: 0.0085 lr:0.001000 +[ Tue Sep 13 12:43:24 2022 ] Eval epoch: 81 +[ Tue Sep 13 12:44:15 2022 ] Mean test loss of 258 batches: 2.350675106048584. +[ Tue Sep 13 12:44:15 2022 ] Top1: 62.21% +[ Tue Sep 13 12:44:15 2022 ] Top5: 90.43% +[ Tue Sep 13 12:44:15 2022 ] Training epoch: 82 +[ Tue Sep 13 12:44:37 2022 ] Batch(36/123) done. Loss: 0.0070 lr:0.001000 +[ Tue Sep 13 12:45:23 2022 ] Eval epoch: 82 +[ Tue Sep 13 12:46:14 2022 ] Mean test loss of 258 batches: 2.3446762561798096. +[ Tue Sep 13 12:46:14 2022 ] Top1: 62.28% +[ Tue Sep 13 12:46:14 2022 ] Top5: 90.34% +[ Tue Sep 13 12:46:14 2022 ] Training epoch: 83 +[ Tue Sep 13 12:46:24 2022 ] Batch(13/123) done. Loss: 0.0161 lr:0.001000 +[ Tue Sep 13 12:47:17 2022 ] Batch(113/123) done. Loss: 0.0418 lr:0.001000 +[ Tue Sep 13 12:47:22 2022 ] Eval epoch: 83 +[ Tue Sep 13 12:48:13 2022 ] Mean test loss of 258 batches: 2.4073634147644043. +[ Tue Sep 13 12:48:13 2022 ] Top1: 61.75% +[ Tue Sep 13 12:48:13 2022 ] Top5: 90.11% +[ Tue Sep 13 12:48:13 2022 ] Training epoch: 84 +[ Tue Sep 13 12:49:04 2022 ] Batch(90/123) done. Loss: 0.0404 lr:0.001000 +[ Tue Sep 13 12:49:21 2022 ] Eval epoch: 84 +[ Tue Sep 13 12:50:12 2022 ] Mean test loss of 258 batches: 2.3524913787841797. +[ Tue Sep 13 12:50:12 2022 ] Top1: 62.50% +[ Tue Sep 13 12:50:12 2022 ] Top5: 90.44% +[ Tue Sep 13 12:50:12 2022 ] Training epoch: 85 +[ Tue Sep 13 12:50:51 2022 ] Batch(67/123) done. 
Loss: 0.0288 lr:0.001000 +[ Tue Sep 13 12:51:20 2022 ] Eval epoch: 85 +[ Tue Sep 13 12:52:11 2022 ] Mean test loss of 258 batches: 2.361574172973633. +[ Tue Sep 13 12:52:11 2022 ] Top1: 62.50% +[ Tue Sep 13 12:52:11 2022 ] Top5: 90.63% +[ Tue Sep 13 12:52:11 2022 ] Training epoch: 86 +[ Tue Sep 13 12:52:37 2022 ] Batch(44/123) done. Loss: 0.0119 lr:0.001000 +[ Tue Sep 13 12:53:19 2022 ] Eval epoch: 86 +[ Tue Sep 13 12:54:09 2022 ] Mean test loss of 258 batches: 2.2960431575775146. +[ Tue Sep 13 12:54:09 2022 ] Top1: 62.96% +[ Tue Sep 13 12:54:09 2022 ] Top5: 90.70% +[ Tue Sep 13 12:54:09 2022 ] Training epoch: 87 +[ Tue Sep 13 12:54:24 2022 ] Batch(21/123) done. Loss: 0.0492 lr:0.001000 +[ Tue Sep 13 12:55:17 2022 ] Batch(121/123) done. Loss: 0.0159 lr:0.001000 +[ Tue Sep 13 12:55:18 2022 ] Eval epoch: 87 +[ Tue Sep 13 12:56:08 2022 ] Mean test loss of 258 batches: 2.3140199184417725. +[ Tue Sep 13 12:56:08 2022 ] Top1: 62.86% +[ Tue Sep 13 12:56:08 2022 ] Top5: 90.69% +[ Tue Sep 13 12:56:08 2022 ] Training epoch: 88 +[ Tue Sep 13 12:57:03 2022 ] Batch(98/123) done. Loss: 0.0238 lr:0.001000 +[ Tue Sep 13 12:57:16 2022 ] Eval epoch: 88 +[ Tue Sep 13 12:58:07 2022 ] Mean test loss of 258 batches: 2.533504009246826. +[ Tue Sep 13 12:58:07 2022 ] Top1: 60.68% +[ Tue Sep 13 12:58:07 2022 ] Top5: 89.55% +[ Tue Sep 13 12:58:07 2022 ] Training epoch: 89 +[ Tue Sep 13 12:58:50 2022 ] Batch(75/123) done. Loss: 0.0200 lr:0.001000 +[ Tue Sep 13 12:59:15 2022 ] Eval epoch: 89 +[ Tue Sep 13 13:00:05 2022 ] Mean test loss of 258 batches: 2.3764359951019287. +[ Tue Sep 13 13:00:06 2022 ] Top1: 62.40% +[ Tue Sep 13 13:00:06 2022 ] Top5: 90.54% +[ Tue Sep 13 13:00:06 2022 ] Training epoch: 90 +[ Tue Sep 13 13:00:36 2022 ] Batch(52/123) done. Loss: 0.0210 lr:0.001000 +[ Tue Sep 13 13:01:14 2022 ] Eval epoch: 90 +[ Tue Sep 13 13:02:05 2022 ] Mean test loss of 258 batches: 2.293139934539795. 
+[ Tue Sep 13 13:02:05 2022 ] Top1: 63.02% +[ Tue Sep 13 13:02:05 2022 ] Top5: 90.78% +[ Tue Sep 13 13:02:05 2022 ] Training epoch: 91 +[ Tue Sep 13 13:02:23 2022 ] Batch(29/123) done. Loss: 0.0316 lr:0.001000 +[ Tue Sep 13 13:03:13 2022 ] Eval epoch: 91 +[ Tue Sep 13 13:04:03 2022 ] Mean test loss of 258 batches: 2.3484678268432617. +[ Tue Sep 13 13:04:03 2022 ] Top1: 62.70% +[ Tue Sep 13 13:04:04 2022 ] Top5: 90.44% +[ Tue Sep 13 13:04:04 2022 ] Training epoch: 92 +[ Tue Sep 13 13:04:10 2022 ] Batch(6/123) done. Loss: 0.0527 lr:0.001000 +[ Tue Sep 13 13:05:03 2022 ] Batch(106/123) done. Loss: 0.0304 lr:0.001000 +[ Tue Sep 13 13:05:12 2022 ] Eval epoch: 92 +[ Tue Sep 13 13:06:03 2022 ] Mean test loss of 258 batches: 2.3265533447265625. +[ Tue Sep 13 13:06:03 2022 ] Top1: 62.99% +[ Tue Sep 13 13:06:03 2022 ] Top5: 90.57% +[ Tue Sep 13 13:06:03 2022 ] Training epoch: 93 +[ Tue Sep 13 13:06:50 2022 ] Batch(83/123) done. Loss: 0.0153 lr:0.001000 +[ Tue Sep 13 13:07:11 2022 ] Eval epoch: 93 +[ Tue Sep 13 13:08:02 2022 ] Mean test loss of 258 batches: 2.3389744758605957. +[ Tue Sep 13 13:08:02 2022 ] Top1: 62.42% +[ Tue Sep 13 13:08:02 2022 ] Top5: 90.62% +[ Tue Sep 13 13:08:02 2022 ] Training epoch: 94 +[ Tue Sep 13 13:08:37 2022 ] Batch(60/123) done. Loss: 0.0240 lr:0.001000 +[ Tue Sep 13 13:09:10 2022 ] Eval epoch: 94 +[ Tue Sep 13 13:10:00 2022 ] Mean test loss of 258 batches: 2.357041835784912. +[ Tue Sep 13 13:10:01 2022 ] Top1: 62.46% +[ Tue Sep 13 13:10:01 2022 ] Top5: 90.50% +[ Tue Sep 13 13:10:01 2022 ] Training epoch: 95 +[ Tue Sep 13 13:10:23 2022 ] Batch(37/123) done. Loss: 0.0098 lr:0.001000 +[ Tue Sep 13 13:11:09 2022 ] Eval epoch: 95 +[ Tue Sep 13 13:11:59 2022 ] Mean test loss of 258 batches: 2.3830180168151855. +[ Tue Sep 13 13:11:59 2022 ] Top1: 62.68% +[ Tue Sep 13 13:11:59 2022 ] Top5: 90.45% +[ Tue Sep 13 13:11:59 2022 ] Training epoch: 96 +[ Tue Sep 13 13:12:10 2022 ] Batch(14/123) done. 
Loss: 0.0331 lr:0.001000 +[ Tue Sep 13 13:13:03 2022 ] Batch(114/123) done. Loss: 0.0135 lr:0.001000 +[ Tue Sep 13 13:13:08 2022 ] Eval epoch: 96 +[ Tue Sep 13 13:13:58 2022 ] Mean test loss of 258 batches: 2.356888771057129. +[ Tue Sep 13 13:13:58 2022 ] Top1: 62.37% +[ Tue Sep 13 13:13:58 2022 ] Top5: 90.57% +[ Tue Sep 13 13:13:58 2022 ] Training epoch: 97 +[ Tue Sep 13 13:14:49 2022 ] Batch(91/123) done. Loss: 0.0269 lr:0.001000 +[ Tue Sep 13 13:15:06 2022 ] Eval epoch: 97 +[ Tue Sep 13 13:15:56 2022 ] Mean test loss of 258 batches: 2.3394312858581543. +[ Tue Sep 13 13:15:56 2022 ] Top1: 62.86% +[ Tue Sep 13 13:15:57 2022 ] Top5: 90.85% +[ Tue Sep 13 13:15:57 2022 ] Training epoch: 98 +[ Tue Sep 13 13:16:36 2022 ] Batch(68/123) done. Loss: 0.0099 lr:0.001000 +[ Tue Sep 13 13:17:05 2022 ] Eval epoch: 98 +[ Tue Sep 13 13:17:55 2022 ] Mean test loss of 258 batches: 2.516493082046509. +[ Tue Sep 13 13:17:55 2022 ] Top1: 60.17% +[ Tue Sep 13 13:17:55 2022 ] Top5: 89.34% +[ Tue Sep 13 13:17:55 2022 ] Training epoch: 99 +[ Tue Sep 13 13:18:23 2022 ] Batch(45/123) done. Loss: 0.0344 lr:0.001000 +[ Tue Sep 13 13:19:04 2022 ] Eval epoch: 99 +[ Tue Sep 13 13:19:54 2022 ] Mean test loss of 258 batches: 2.386345863342285. +[ Tue Sep 13 13:19:54 2022 ] Top1: 61.84% +[ Tue Sep 13 13:19:54 2022 ] Top5: 90.10% +[ Tue Sep 13 13:19:54 2022 ] Training epoch: 100 +[ Tue Sep 13 13:20:09 2022 ] Batch(22/123) done. Loss: 0.0235 lr:0.001000 +[ Tue Sep 13 13:21:02 2022 ] Batch(122/123) done. Loss: 0.2032 lr:0.001000 +[ Tue Sep 13 13:21:03 2022 ] Eval epoch: 100 +[ Tue Sep 13 13:21:53 2022 ] Mean test loss of 258 batches: 2.4683544635772705. 
+[ Tue Sep 13 13:21:53 2022 ] Top1: 60.51% +[ Tue Sep 13 13:21:53 2022 ] Top5: 89.61% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8fb007142a56b7b758ef71ae14b44a20b8a39340 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_bone_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-subject/train_bone.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_bone_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_bone_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/decouple_gcn.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = 
nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * 
D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph 
= import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f37dfc7dc748027b165573dcdff507af4ed7c0ba --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:ea3dc32c58240f32971f5233f9f3164840ae3822b8c8bad7232fdf9ff4d227dd +size 4979902 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..ec1c85cdf2a155108b8501125d50bab7eeaae5ad --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_bone_xsub/log.txt @@ -0,0 +1,626 @@ +[ Tue Sep 13 10:03:48 2022 ] Parameters: +{'work_dir': './work_dir/ntu_bone_xsub', 'model_saved_name': './save_models/ntu_bone_xsub', 'Experiment_name': 'ntu_bone_xsub', 'config': './config/nturgbd-cross-subject/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 
10:03:48 2022 ] Training epoch: 1 +[ Tue Sep 13 10:04:38 2022 ] Batch(99/123) done. Loss: 3.4120 lr:0.100000 +[ Tue Sep 13 10:04:48 2022 ] Eval epoch: 1 +[ Tue Sep 13 10:05:38 2022 ] Mean test loss of 258 batches: 4.742095947265625. +[ Tue Sep 13 10:05:38 2022 ] Top1: 4.19% +[ Tue Sep 13 10:05:38 2022 ] Top5: 14.60% +[ Tue Sep 13 10:05:38 2022 ] Training epoch: 2 +[ Tue Sep 13 10:06:22 2022 ] Batch(76/123) done. Loss: 2.7536 lr:0.100000 +[ Tue Sep 13 10:06:46 2022 ] Eval epoch: 2 +[ Tue Sep 13 10:07:36 2022 ] Mean test loss of 258 batches: 4.383267402648926. +[ Tue Sep 13 10:07:36 2022 ] Top1: 7.83% +[ Tue Sep 13 10:07:36 2022 ] Top5: 27.60% +[ Tue Sep 13 10:07:36 2022 ] Training epoch: 3 +[ Tue Sep 13 10:08:08 2022 ] Batch(53/123) done. Loss: 2.8688 lr:0.100000 +[ Tue Sep 13 10:08:44 2022 ] Eval epoch: 3 +[ Tue Sep 13 10:09:34 2022 ] Mean test loss of 258 batches: 3.981781244277954. +[ Tue Sep 13 10:09:34 2022 ] Top1: 11.83% +[ Tue Sep 13 10:09:34 2022 ] Top5: 36.82% +[ Tue Sep 13 10:09:34 2022 ] Training epoch: 4 +[ Tue Sep 13 10:09:54 2022 ] Batch(30/123) done. Loss: 2.2916 lr:0.100000 +[ Tue Sep 13 10:10:42 2022 ] Eval epoch: 4 +[ Tue Sep 13 10:11:32 2022 ] Mean test loss of 258 batches: 2.9862473011016846. +[ Tue Sep 13 10:11:33 2022 ] Top1: 18.34% +[ Tue Sep 13 10:11:33 2022 ] Top5: 58.25% +[ Tue Sep 13 10:11:33 2022 ] Training epoch: 5 +[ Tue Sep 13 10:11:40 2022 ] Batch(7/123) done. Loss: 2.0996 lr:0.100000 +[ Tue Sep 13 10:12:33 2022 ] Batch(107/123) done. Loss: 1.7637 lr:0.100000 +[ Tue Sep 13 10:12:41 2022 ] Eval epoch: 5 +[ Tue Sep 13 10:13:31 2022 ] Mean test loss of 258 batches: 3.5700297355651855. +[ Tue Sep 13 10:13:31 2022 ] Top1: 19.28% +[ Tue Sep 13 10:13:31 2022 ] Top5: 57.46% +[ Tue Sep 13 10:13:31 2022 ] Training epoch: 6 +[ Tue Sep 13 10:14:19 2022 ] Batch(84/123) done. Loss: 1.4333 lr:0.100000 +[ Tue Sep 13 10:14:39 2022 ] Eval epoch: 6 +[ Tue Sep 13 10:15:29 2022 ] Mean test loss of 258 batches: 2.9239397048950195. 
+[ Tue Sep 13 10:15:29 2022 ] Top1: 24.19% +[ Tue Sep 13 10:15:29 2022 ] Top5: 66.11% +[ Tue Sep 13 10:15:29 2022 ] Training epoch: 7 +[ Tue Sep 13 10:16:05 2022 ] Batch(61/123) done. Loss: 1.5305 lr:0.100000 +[ Tue Sep 13 10:16:37 2022 ] Eval epoch: 7 +[ Tue Sep 13 10:17:27 2022 ] Mean test loss of 258 batches: 2.789067268371582. +[ Tue Sep 13 10:17:27 2022 ] Top1: 24.83% +[ Tue Sep 13 10:17:27 2022 ] Top5: 64.15% +[ Tue Sep 13 10:17:27 2022 ] Training epoch: 8 +[ Tue Sep 13 10:17:51 2022 ] Batch(38/123) done. Loss: 1.2631 lr:0.100000 +[ Tue Sep 13 10:18:35 2022 ] Eval epoch: 8 +[ Tue Sep 13 10:19:25 2022 ] Mean test loss of 258 batches: 2.5744893550872803. +[ Tue Sep 13 10:19:25 2022 ] Top1: 32.38% +[ Tue Sep 13 10:19:25 2022 ] Top5: 71.55% +[ Tue Sep 13 10:19:25 2022 ] Training epoch: 9 +[ Tue Sep 13 10:19:37 2022 ] Batch(15/123) done. Loss: 1.2294 lr:0.100000 +[ Tue Sep 13 10:20:29 2022 ] Batch(115/123) done. Loss: 1.0048 lr:0.100000 +[ Tue Sep 13 10:20:33 2022 ] Eval epoch: 9 +[ Tue Sep 13 10:21:24 2022 ] Mean test loss of 258 batches: 2.6254217624664307. +[ Tue Sep 13 10:21:24 2022 ] Top1: 30.32% +[ Tue Sep 13 10:21:24 2022 ] Top5: 70.99% +[ Tue Sep 13 10:21:24 2022 ] Training epoch: 10 +[ Tue Sep 13 10:22:15 2022 ] Batch(92/123) done. Loss: 0.9860 lr:0.100000 +[ Tue Sep 13 10:22:31 2022 ] Eval epoch: 10 +[ Tue Sep 13 10:23:22 2022 ] Mean test loss of 258 batches: 3.1184659004211426. +[ Tue Sep 13 10:23:22 2022 ] Top1: 29.32% +[ Tue Sep 13 10:23:22 2022 ] Top5: 67.40% +[ Tue Sep 13 10:23:22 2022 ] Training epoch: 11 +[ Tue Sep 13 10:24:02 2022 ] Batch(69/123) done. Loss: 1.1361 lr:0.100000 +[ Tue Sep 13 10:24:30 2022 ] Eval epoch: 11 +[ Tue Sep 13 10:25:20 2022 ] Mean test loss of 258 batches: 2.622861385345459. +[ Tue Sep 13 10:25:20 2022 ] Top1: 35.36% +[ Tue Sep 13 10:25:20 2022 ] Top5: 74.38% +[ Tue Sep 13 10:25:20 2022 ] Training epoch: 12 +[ Tue Sep 13 10:25:48 2022 ] Batch(46/123) done. 
Loss: 1.0729 lr:0.100000 +[ Tue Sep 13 10:26:28 2022 ] Eval epoch: 12 +[ Tue Sep 13 10:27:19 2022 ] Mean test loss of 258 batches: 2.334763765335083. +[ Tue Sep 13 10:27:19 2022 ] Top1: 38.00% +[ Tue Sep 13 10:27:19 2022 ] Top5: 78.23% +[ Tue Sep 13 10:27:19 2022 ] Training epoch: 13 +[ Tue Sep 13 10:27:34 2022 ] Batch(23/123) done. Loss: 0.7770 lr:0.100000 +[ Tue Sep 13 10:28:26 2022 ] Eval epoch: 13 +[ Tue Sep 13 10:29:17 2022 ] Mean test loss of 258 batches: 2.3467118740081787. +[ Tue Sep 13 10:29:17 2022 ] Top1: 36.44% +[ Tue Sep 13 10:29:17 2022 ] Top5: 73.37% +[ Tue Sep 13 10:29:17 2022 ] Training epoch: 14 +[ Tue Sep 13 10:29:21 2022 ] Batch(0/123) done. Loss: 0.7911 lr:0.100000 +[ Tue Sep 13 10:30:13 2022 ] Batch(100/123) done. Loss: 1.0416 lr:0.100000 +[ Tue Sep 13 10:30:25 2022 ] Eval epoch: 14 +[ Tue Sep 13 10:31:15 2022 ] Mean test loss of 258 batches: 2.2429208755493164. +[ Tue Sep 13 10:31:15 2022 ] Top1: 37.54% +[ Tue Sep 13 10:31:15 2022 ] Top5: 77.70% +[ Tue Sep 13 10:31:15 2022 ] Training epoch: 15 +[ Tue Sep 13 10:31:59 2022 ] Batch(77/123) done. Loss: 0.8666 lr:0.100000 +[ Tue Sep 13 10:32:23 2022 ] Eval epoch: 15 +[ Tue Sep 13 10:33:13 2022 ] Mean test loss of 258 batches: 2.169420003890991. +[ Tue Sep 13 10:33:13 2022 ] Top1: 44.99% +[ Tue Sep 13 10:33:13 2022 ] Top5: 82.46% +[ Tue Sep 13 10:33:13 2022 ] Training epoch: 16 +[ Tue Sep 13 10:33:45 2022 ] Batch(54/123) done. Loss: 0.5842 lr:0.100000 +[ Tue Sep 13 10:34:21 2022 ] Eval epoch: 16 +[ Tue Sep 13 10:35:12 2022 ] Mean test loss of 258 batches: 1.9139314889907837. +[ Tue Sep 13 10:35:12 2022 ] Top1: 46.06% +[ Tue Sep 13 10:35:12 2022 ] Top5: 84.39% +[ Tue Sep 13 10:35:12 2022 ] Training epoch: 17 +[ Tue Sep 13 10:35:32 2022 ] Batch(31/123) done. Loss: 0.6978 lr:0.100000 +[ Tue Sep 13 10:36:20 2022 ] Eval epoch: 17 +[ Tue Sep 13 10:37:10 2022 ] Mean test loss of 258 batches: 2.0210671424865723. 
+[ Tue Sep 13 10:37:10 2022 ] Top1: 46.59% +[ Tue Sep 13 10:37:10 2022 ] Top5: 81.80% +[ Tue Sep 13 10:37:10 2022 ] Training epoch: 18 +[ Tue Sep 13 10:37:18 2022 ] Batch(8/123) done. Loss: 0.9801 lr:0.100000 +[ Tue Sep 13 10:38:11 2022 ] Batch(108/123) done. Loss: 0.8500 lr:0.100000 +[ Tue Sep 13 10:38:18 2022 ] Eval epoch: 18 +[ Tue Sep 13 10:39:09 2022 ] Mean test loss of 258 batches: 2.0550177097320557. +[ Tue Sep 13 10:39:09 2022 ] Top1: 45.24% +[ Tue Sep 13 10:39:09 2022 ] Top5: 82.14% +[ Tue Sep 13 10:39:09 2022 ] Training epoch: 19 +[ Tue Sep 13 10:39:57 2022 ] Batch(85/123) done. Loss: 0.9634 lr:0.100000 +[ Tue Sep 13 10:40:17 2022 ] Eval epoch: 19 +[ Tue Sep 13 10:41:07 2022 ] Mean test loss of 258 batches: 2.3090696334838867. +[ Tue Sep 13 10:41:07 2022 ] Top1: 44.53% +[ Tue Sep 13 10:41:07 2022 ] Top5: 82.95% +[ Tue Sep 13 10:41:07 2022 ] Training epoch: 20 +[ Tue Sep 13 10:41:43 2022 ] Batch(62/123) done. Loss: 1.0622 lr:0.100000 +[ Tue Sep 13 10:42:15 2022 ] Eval epoch: 20 +[ Tue Sep 13 10:43:05 2022 ] Mean test loss of 258 batches: 2.014781951904297. +[ Tue Sep 13 10:43:05 2022 ] Top1: 47.81% +[ Tue Sep 13 10:43:05 2022 ] Top5: 82.90% +[ Tue Sep 13 10:43:05 2022 ] Training epoch: 21 +[ Tue Sep 13 10:43:29 2022 ] Batch(39/123) done. Loss: 0.7243 lr:0.100000 +[ Tue Sep 13 10:44:13 2022 ] Eval epoch: 21 +[ Tue Sep 13 10:45:03 2022 ] Mean test loss of 258 batches: 2.071990728378296. +[ Tue Sep 13 10:45:03 2022 ] Top1: 47.46% +[ Tue Sep 13 10:45:03 2022 ] Top5: 83.40% +[ Tue Sep 13 10:45:03 2022 ] Training epoch: 22 +[ Tue Sep 13 10:45:15 2022 ] Batch(16/123) done. Loss: 0.6351 lr:0.100000 +[ Tue Sep 13 10:46:08 2022 ] Batch(116/123) done. Loss: 0.5568 lr:0.100000 +[ Tue Sep 13 10:46:11 2022 ] Eval epoch: 22 +[ Tue Sep 13 10:47:01 2022 ] Mean test loss of 258 batches: 1.7614084482192993. 
+[ Tue Sep 13 10:47:01 2022 ] Top1: 52.73% +[ Tue Sep 13 10:47:01 2022 ] Top5: 86.97% +[ Tue Sep 13 10:47:01 2022 ] Training epoch: 23 +[ Tue Sep 13 10:47:54 2022 ] Batch(93/123) done. Loss: 0.7639 lr:0.100000 +[ Tue Sep 13 10:48:09 2022 ] Eval epoch: 23 +[ Tue Sep 13 10:48:59 2022 ] Mean test loss of 258 batches: 9.128730773925781. +[ Tue Sep 13 10:48:59 2022 ] Top1: 20.48% +[ Tue Sep 13 10:48:59 2022 ] Top5: 53.67% +[ Tue Sep 13 10:48:59 2022 ] Training epoch: 24 +[ Tue Sep 13 10:49:40 2022 ] Batch(70/123) done. Loss: 0.5800 lr:0.100000 +[ Tue Sep 13 10:50:08 2022 ] Eval epoch: 24 +[ Tue Sep 13 10:50:58 2022 ] Mean test loss of 258 batches: 1.7863599061965942. +[ Tue Sep 13 10:50:58 2022 ] Top1: 53.04% +[ Tue Sep 13 10:50:58 2022 ] Top5: 86.94% +[ Tue Sep 13 10:50:58 2022 ] Training epoch: 25 +[ Tue Sep 13 10:51:26 2022 ] Batch(47/123) done. Loss: 0.4499 lr:0.100000 +[ Tue Sep 13 10:52:06 2022 ] Eval epoch: 25 +[ Tue Sep 13 10:52:56 2022 ] Mean test loss of 258 batches: 2.5577802658081055. +[ Tue Sep 13 10:52:56 2022 ] Top1: 44.30% +[ Tue Sep 13 10:52:56 2022 ] Top5: 82.32% +[ Tue Sep 13 10:52:56 2022 ] Training epoch: 26 +[ Tue Sep 13 10:53:12 2022 ] Batch(24/123) done. Loss: 0.5527 lr:0.100000 +[ Tue Sep 13 10:54:04 2022 ] Eval epoch: 26 +[ Tue Sep 13 10:54:55 2022 ] Mean test loss of 258 batches: 1.9369274377822876. +[ Tue Sep 13 10:54:55 2022 ] Top1: 52.55% +[ Tue Sep 13 10:54:55 2022 ] Top5: 86.72% +[ Tue Sep 13 10:54:55 2022 ] Training epoch: 27 +[ Tue Sep 13 10:54:59 2022 ] Batch(1/123) done. Loss: 0.4778 lr:0.100000 +[ Tue Sep 13 10:55:51 2022 ] Batch(101/123) done. Loss: 0.3616 lr:0.100000 +[ Tue Sep 13 10:56:03 2022 ] Eval epoch: 27 +[ Tue Sep 13 10:56:53 2022 ] Mean test loss of 258 batches: 2.151625633239746. +[ Tue Sep 13 10:56:53 2022 ] Top1: 51.28% +[ Tue Sep 13 10:56:53 2022 ] Top5: 85.77% +[ Tue Sep 13 10:56:53 2022 ] Training epoch: 28 +[ Tue Sep 13 10:57:38 2022 ] Batch(78/123) done. 
Loss: 0.4822 lr:0.100000 +[ Tue Sep 13 10:58:01 2022 ] Eval epoch: 28 +[ Tue Sep 13 10:58:51 2022 ] Mean test loss of 258 batches: 2.2599592208862305. +[ Tue Sep 13 10:58:51 2022 ] Top1: 52.19% +[ Tue Sep 13 10:58:51 2022 ] Top5: 86.54% +[ Tue Sep 13 10:58:52 2022 ] Training epoch: 29 +[ Tue Sep 13 10:59:24 2022 ] Batch(55/123) done. Loss: 0.5920 lr:0.100000 +[ Tue Sep 13 11:00:00 2022 ] Eval epoch: 29 +[ Tue Sep 13 11:00:50 2022 ] Mean test loss of 258 batches: 3.8784642219543457. +[ Tue Sep 13 11:00:50 2022 ] Top1: 40.59% +[ Tue Sep 13 11:00:50 2022 ] Top5: 78.65% +[ Tue Sep 13 11:00:50 2022 ] Training epoch: 30 +[ Tue Sep 13 11:01:10 2022 ] Batch(32/123) done. Loss: 0.5519 lr:0.100000 +[ Tue Sep 13 11:01:58 2022 ] Eval epoch: 30 +[ Tue Sep 13 11:02:48 2022 ] Mean test loss of 258 batches: 1.8356668949127197. +[ Tue Sep 13 11:02:48 2022 ] Top1: 53.78% +[ Tue Sep 13 11:02:48 2022 ] Top5: 87.10% +[ Tue Sep 13 11:02:48 2022 ] Training epoch: 31 +[ Tue Sep 13 11:02:57 2022 ] Batch(9/123) done. Loss: 0.4373 lr:0.100000 +[ Tue Sep 13 11:03:49 2022 ] Batch(109/123) done. Loss: 0.4378 lr:0.100000 +[ Tue Sep 13 11:03:56 2022 ] Eval epoch: 31 +[ Tue Sep 13 11:04:46 2022 ] Mean test loss of 258 batches: 3.0565996170043945. +[ Tue Sep 13 11:04:47 2022 ] Top1: 44.87% +[ Tue Sep 13 11:04:47 2022 ] Top5: 79.94% +[ Tue Sep 13 11:04:47 2022 ] Training epoch: 32 +[ Tue Sep 13 11:05:36 2022 ] Batch(86/123) done. Loss: 0.3620 lr:0.100000 +[ Tue Sep 13 11:05:55 2022 ] Eval epoch: 32 +[ Tue Sep 13 11:06:45 2022 ] Mean test loss of 258 batches: 1.9578102827072144. +[ Tue Sep 13 11:06:45 2022 ] Top1: 54.48% +[ Tue Sep 13 11:06:45 2022 ] Top5: 87.03% +[ Tue Sep 13 11:06:45 2022 ] Training epoch: 33 +[ Tue Sep 13 11:07:22 2022 ] Batch(63/123) done. Loss: 0.3515 lr:0.100000 +[ Tue Sep 13 11:07:53 2022 ] Eval epoch: 33 +[ Tue Sep 13 11:08:43 2022 ] Mean test loss of 258 batches: 1.8989536762237549. 
+[ Tue Sep 13 11:08:43 2022 ] Top1: 56.63% +[ Tue Sep 13 11:08:43 2022 ] Top5: 88.63% +[ Tue Sep 13 11:08:44 2022 ] Training epoch: 34 +[ Tue Sep 13 11:09:08 2022 ] Batch(40/123) done. Loss: 0.2738 lr:0.100000 +[ Tue Sep 13 11:09:51 2022 ] Eval epoch: 34 +[ Tue Sep 13 11:10:42 2022 ] Mean test loss of 258 batches: 1.9924609661102295. +[ Tue Sep 13 11:10:42 2022 ] Top1: 54.73% +[ Tue Sep 13 11:10:42 2022 ] Top5: 87.83% +[ Tue Sep 13 11:10:42 2022 ] Training epoch: 35 +[ Tue Sep 13 11:10:54 2022 ] Batch(17/123) done. Loss: 0.3679 lr:0.100000 +[ Tue Sep 13 11:11:47 2022 ] Batch(117/123) done. Loss: 0.4535 lr:0.100000 +[ Tue Sep 13 11:11:50 2022 ] Eval epoch: 35 +[ Tue Sep 13 11:12:40 2022 ] Mean test loss of 258 batches: 2.53005051612854. +[ Tue Sep 13 11:12:40 2022 ] Top1: 50.18% +[ Tue Sep 13 11:12:40 2022 ] Top5: 85.02% +[ Tue Sep 13 11:12:41 2022 ] Training epoch: 36 +[ Tue Sep 13 11:13:34 2022 ] Batch(94/123) done. Loss: 0.5516 lr:0.100000 +[ Tue Sep 13 11:13:48 2022 ] Eval epoch: 36 +[ Tue Sep 13 11:14:38 2022 ] Mean test loss of 258 batches: 2.025127649307251. +[ Tue Sep 13 11:14:39 2022 ] Top1: 54.23% +[ Tue Sep 13 11:14:39 2022 ] Top5: 87.36% +[ Tue Sep 13 11:14:39 2022 ] Training epoch: 37 +[ Tue Sep 13 11:15:20 2022 ] Batch(71/123) done. Loss: 0.3084 lr:0.100000 +[ Tue Sep 13 11:15:47 2022 ] Eval epoch: 37 +[ Tue Sep 13 11:16:37 2022 ] Mean test loss of 258 batches: 2.328878879547119. +[ Tue Sep 13 11:16:37 2022 ] Top1: 51.76% +[ Tue Sep 13 11:16:37 2022 ] Top5: 86.99% +[ Tue Sep 13 11:16:37 2022 ] Training epoch: 38 +[ Tue Sep 13 11:17:05 2022 ] Batch(48/123) done. Loss: 0.4784 lr:0.100000 +[ Tue Sep 13 11:17:45 2022 ] Eval epoch: 38 +[ Tue Sep 13 11:18:35 2022 ] Mean test loss of 258 batches: 1.6894583702087402. +[ Tue Sep 13 11:18:35 2022 ] Top1: 58.65% +[ Tue Sep 13 11:18:35 2022 ] Top5: 89.92% +[ Tue Sep 13 11:18:35 2022 ] Training epoch: 39 +[ Tue Sep 13 11:18:52 2022 ] Batch(25/123) done. 
Loss: 0.2738 lr:0.100000 +[ Tue Sep 13 11:19:43 2022 ] Eval epoch: 39 +[ Tue Sep 13 11:20:33 2022 ] Mean test loss of 258 batches: 2.2575957775115967. +[ Tue Sep 13 11:20:33 2022 ] Top1: 54.05% +[ Tue Sep 13 11:20:33 2022 ] Top5: 87.58% +[ Tue Sep 13 11:20:33 2022 ] Training epoch: 40 +[ Tue Sep 13 11:20:38 2022 ] Batch(2/123) done. Loss: 0.4132 lr:0.100000 +[ Tue Sep 13 11:21:31 2022 ] Batch(102/123) done. Loss: 0.3322 lr:0.100000 +[ Tue Sep 13 11:21:41 2022 ] Eval epoch: 40 +[ Tue Sep 13 11:22:31 2022 ] Mean test loss of 258 batches: 2.2194809913635254. +[ Tue Sep 13 11:22:31 2022 ] Top1: 51.83% +[ Tue Sep 13 11:22:31 2022 ] Top5: 85.48% +[ Tue Sep 13 11:22:31 2022 ] Training epoch: 41 +[ Tue Sep 13 11:23:16 2022 ] Batch(79/123) done. Loss: 0.2911 lr:0.100000 +[ Tue Sep 13 11:23:39 2022 ] Eval epoch: 41 +[ Tue Sep 13 11:24:29 2022 ] Mean test loss of 258 batches: 2.0743963718414307. +[ Tue Sep 13 11:24:29 2022 ] Top1: 56.13% +[ Tue Sep 13 11:24:29 2022 ] Top5: 86.22% +[ Tue Sep 13 11:24:29 2022 ] Training epoch: 42 +[ Tue Sep 13 11:25:02 2022 ] Batch(56/123) done. Loss: 0.1734 lr:0.100000 +[ Tue Sep 13 11:25:37 2022 ] Eval epoch: 42 +[ Tue Sep 13 11:26:27 2022 ] Mean test loss of 258 batches: 2.168731927871704. +[ Tue Sep 13 11:26:27 2022 ] Top1: 55.61% +[ Tue Sep 13 11:26:27 2022 ] Top5: 88.45% +[ Tue Sep 13 11:26:27 2022 ] Training epoch: 43 +[ Tue Sep 13 11:26:49 2022 ] Batch(33/123) done. Loss: 0.1579 lr:0.100000 +[ Tue Sep 13 11:27:36 2022 ] Eval epoch: 43 +[ Tue Sep 13 11:28:26 2022 ] Mean test loss of 258 batches: 2.3242321014404297. +[ Tue Sep 13 11:28:26 2022 ] Top1: 53.47% +[ Tue Sep 13 11:28:26 2022 ] Top5: 85.38% +[ Tue Sep 13 11:28:26 2022 ] Training epoch: 44 +[ Tue Sep 13 11:28:35 2022 ] Batch(10/123) done. Loss: 0.2867 lr:0.100000 +[ Tue Sep 13 11:29:27 2022 ] Batch(110/123) done. Loss: 0.3666 lr:0.100000 +[ Tue Sep 13 11:29:34 2022 ] Eval epoch: 44 +[ Tue Sep 13 11:30:24 2022 ] Mean test loss of 258 batches: 1.895141839981079. 
+[ Tue Sep 13 11:30:24 2022 ] Top1: 57.17% +[ Tue Sep 13 11:30:24 2022 ] Top5: 88.19% +[ Tue Sep 13 11:30:24 2022 ] Training epoch: 45 +[ Tue Sep 13 11:31:14 2022 ] Batch(87/123) done. Loss: 0.3557 lr:0.100000 +[ Tue Sep 13 11:31:32 2022 ] Eval epoch: 45 +[ Tue Sep 13 11:32:23 2022 ] Mean test loss of 258 batches: 2.182115077972412. +[ Tue Sep 13 11:32:23 2022 ] Top1: 56.97% +[ Tue Sep 13 11:32:23 2022 ] Top5: 87.57% +[ Tue Sep 13 11:32:23 2022 ] Training epoch: 46 +[ Tue Sep 13 11:33:00 2022 ] Batch(64/123) done. Loss: 0.2151 lr:0.100000 +[ Tue Sep 13 11:33:31 2022 ] Eval epoch: 46 +[ Tue Sep 13 11:34:21 2022 ] Mean test loss of 258 batches: 2.016373634338379. +[ Tue Sep 13 11:34:21 2022 ] Top1: 56.32% +[ Tue Sep 13 11:34:21 2022 ] Top5: 88.48% +[ Tue Sep 13 11:34:21 2022 ] Training epoch: 47 +[ Tue Sep 13 11:34:46 2022 ] Batch(41/123) done. Loss: 0.1683 lr:0.100000 +[ Tue Sep 13 11:35:29 2022 ] Eval epoch: 47 +[ Tue Sep 13 11:36:19 2022 ] Mean test loss of 258 batches: 2.290623426437378. +[ Tue Sep 13 11:36:20 2022 ] Top1: 55.28% +[ Tue Sep 13 11:36:20 2022 ] Top5: 88.23% +[ Tue Sep 13 11:36:20 2022 ] Training epoch: 48 +[ Tue Sep 13 11:36:33 2022 ] Batch(18/123) done. Loss: 0.1629 lr:0.100000 +[ Tue Sep 13 11:37:25 2022 ] Batch(118/123) done. Loss: 0.2851 lr:0.100000 +[ Tue Sep 13 11:37:28 2022 ] Eval epoch: 48 +[ Tue Sep 13 11:38:18 2022 ] Mean test loss of 258 batches: 2.250434398651123. +[ Tue Sep 13 11:38:18 2022 ] Top1: 55.78% +[ Tue Sep 13 11:38:18 2022 ] Top5: 87.46% +[ Tue Sep 13 11:38:18 2022 ] Training epoch: 49 +[ Tue Sep 13 11:39:12 2022 ] Batch(95/123) done. Loss: 0.2600 lr:0.100000 +[ Tue Sep 13 11:39:26 2022 ] Eval epoch: 49 +[ Tue Sep 13 11:40:16 2022 ] Mean test loss of 258 batches: 2.6489593982696533. +[ Tue Sep 13 11:40:16 2022 ] Top1: 53.65% +[ Tue Sep 13 11:40:16 2022 ] Top5: 85.38% +[ Tue Sep 13 11:40:17 2022 ] Training epoch: 50 +[ Tue Sep 13 11:40:58 2022 ] Batch(72/123) done. 
Loss: 0.1840 lr:0.100000 +[ Tue Sep 13 11:41:24 2022 ] Eval epoch: 50 +[ Tue Sep 13 11:42:14 2022 ] Mean test loss of 258 batches: 2.3897714614868164. +[ Tue Sep 13 11:42:15 2022 ] Top1: 53.97% +[ Tue Sep 13 11:42:15 2022 ] Top5: 87.15% +[ Tue Sep 13 11:42:15 2022 ] Training epoch: 51 +[ Tue Sep 13 11:42:44 2022 ] Batch(49/123) done. Loss: 0.1052 lr:0.100000 +[ Tue Sep 13 11:43:23 2022 ] Eval epoch: 51 +[ Tue Sep 13 11:44:13 2022 ] Mean test loss of 258 batches: 2.0851056575775146. +[ Tue Sep 13 11:44:13 2022 ] Top1: 56.27% +[ Tue Sep 13 11:44:13 2022 ] Top5: 88.23% +[ Tue Sep 13 11:44:13 2022 ] Training epoch: 52 +[ Tue Sep 13 11:44:31 2022 ] Batch(26/123) done. Loss: 0.1345 lr:0.100000 +[ Tue Sep 13 11:45:21 2022 ] Eval epoch: 52 +[ Tue Sep 13 11:46:11 2022 ] Mean test loss of 258 batches: 2.1101233959198. +[ Tue Sep 13 11:46:12 2022 ] Top1: 55.91% +[ Tue Sep 13 11:46:12 2022 ] Top5: 87.54% +[ Tue Sep 13 11:46:12 2022 ] Training epoch: 53 +[ Tue Sep 13 11:46:17 2022 ] Batch(3/123) done. Loss: 0.2526 lr:0.100000 +[ Tue Sep 13 11:47:09 2022 ] Batch(103/123) done. Loss: 0.0950 lr:0.100000 +[ Tue Sep 13 11:47:20 2022 ] Eval epoch: 53 +[ Tue Sep 13 11:48:10 2022 ] Mean test loss of 258 batches: 2.9930808544158936. +[ Tue Sep 13 11:48:10 2022 ] Top1: 51.38% +[ Tue Sep 13 11:48:10 2022 ] Top5: 84.08% +[ Tue Sep 13 11:48:10 2022 ] Training epoch: 54 +[ Tue Sep 13 11:48:56 2022 ] Batch(80/123) done. Loss: 0.2940 lr:0.100000 +[ Tue Sep 13 11:49:18 2022 ] Eval epoch: 54 +[ Tue Sep 13 11:50:09 2022 ] Mean test loss of 258 batches: 2.608795404434204. +[ Tue Sep 13 11:50:09 2022 ] Top1: 53.64% +[ Tue Sep 13 11:50:09 2022 ] Top5: 86.75% +[ Tue Sep 13 11:50:09 2022 ] Training epoch: 55 +[ Tue Sep 13 11:50:43 2022 ] Batch(57/123) done. Loss: 0.2380 lr:0.100000 +[ Tue Sep 13 11:51:17 2022 ] Eval epoch: 55 +[ Tue Sep 13 11:52:07 2022 ] Mean test loss of 258 batches: 2.0771994590759277. 
+[ Tue Sep 13 11:52:07 2022 ] Top1: 58.88% +[ Tue Sep 13 11:52:07 2022 ] Top5: 89.08% +[ Tue Sep 13 11:52:07 2022 ] Training epoch: 56 +[ Tue Sep 13 11:52:29 2022 ] Batch(34/123) done. Loss: 0.1672 lr:0.100000 +[ Tue Sep 13 11:53:15 2022 ] Eval epoch: 56 +[ Tue Sep 13 11:54:05 2022 ] Mean test loss of 258 batches: 2.453749179840088. +[ Tue Sep 13 11:54:05 2022 ] Top1: 54.90% +[ Tue Sep 13 11:54:06 2022 ] Top5: 86.35% +[ Tue Sep 13 11:54:06 2022 ] Training epoch: 57 +[ Tue Sep 13 11:54:15 2022 ] Batch(11/123) done. Loss: 0.1159 lr:0.100000 +[ Tue Sep 13 11:55:07 2022 ] Batch(111/123) done. Loss: 0.2282 lr:0.100000 +[ Tue Sep 13 11:55:13 2022 ] Eval epoch: 57 +[ Tue Sep 13 11:56:03 2022 ] Mean test loss of 258 batches: 2.2608907222747803. +[ Tue Sep 13 11:56:04 2022 ] Top1: 57.20% +[ Tue Sep 13 11:56:04 2022 ] Top5: 88.04% +[ Tue Sep 13 11:56:04 2022 ] Training epoch: 58 +[ Tue Sep 13 11:56:54 2022 ] Batch(88/123) done. Loss: 0.3523 lr:0.100000 +[ Tue Sep 13 11:57:12 2022 ] Eval epoch: 58 +[ Tue Sep 13 11:58:02 2022 ] Mean test loss of 258 batches: 4.464827060699463. +[ Tue Sep 13 11:58:02 2022 ] Top1: 41.90% +[ Tue Sep 13 11:58:03 2022 ] Top5: 78.26% +[ Tue Sep 13 11:58:03 2022 ] Training epoch: 59 +[ Tue Sep 13 11:58:40 2022 ] Batch(65/123) done. Loss: 0.2308 lr:0.100000 +[ Tue Sep 13 11:59:11 2022 ] Eval epoch: 59 +[ Tue Sep 13 12:00:00 2022 ] Mean test loss of 258 batches: 2.351600408554077. +[ Tue Sep 13 12:00:01 2022 ] Top1: 56.53% +[ Tue Sep 13 12:00:01 2022 ] Top5: 87.81% +[ Tue Sep 13 12:00:01 2022 ] Training epoch: 60 +[ Tue Sep 13 12:00:26 2022 ] Batch(42/123) done. Loss: 0.2450 lr:0.100000 +[ Tue Sep 13 12:01:09 2022 ] Eval epoch: 60 +[ Tue Sep 13 12:01:59 2022 ] Mean test loss of 258 batches: 2.6445326805114746. +[ Tue Sep 13 12:01:59 2022 ] Top1: 53.40% +[ Tue Sep 13 12:01:59 2022 ] Top5: 86.48% +[ Tue Sep 13 12:01:59 2022 ] Training epoch: 61 +[ Tue Sep 13 12:02:12 2022 ] Batch(19/123) done. 
Loss: 0.1235 lr:0.010000 +[ Tue Sep 13 12:03:05 2022 ] Batch(119/123) done. Loss: 0.0991 lr:0.010000 +[ Tue Sep 13 12:03:07 2022 ] Eval epoch: 61 +[ Tue Sep 13 12:03:57 2022 ] Mean test loss of 258 batches: 1.8522855043411255. +[ Tue Sep 13 12:03:57 2022 ] Top1: 63.30% +[ Tue Sep 13 12:03:57 2022 ] Top5: 90.65% +[ Tue Sep 13 12:03:57 2022 ] Training epoch: 62 +[ Tue Sep 13 12:04:52 2022 ] Batch(96/123) done. Loss: 0.1267 lr:0.010000 +[ Tue Sep 13 12:05:06 2022 ] Eval epoch: 62 +[ Tue Sep 13 12:05:56 2022 ] Mean test loss of 258 batches: 1.808545708656311. +[ Tue Sep 13 12:05:56 2022 ] Top1: 64.12% +[ Tue Sep 13 12:05:56 2022 ] Top5: 91.04% +[ Tue Sep 13 12:05:56 2022 ] Training epoch: 63 +[ Tue Sep 13 12:06:38 2022 ] Batch(73/123) done. Loss: 0.0333 lr:0.010000 +[ Tue Sep 13 12:07:04 2022 ] Eval epoch: 63 +[ Tue Sep 13 12:07:54 2022 ] Mean test loss of 258 batches: 1.7879505157470703. +[ Tue Sep 13 12:07:54 2022 ] Top1: 65.01% +[ Tue Sep 13 12:07:54 2022 ] Top5: 91.40% +[ Tue Sep 13 12:07:55 2022 ] Training epoch: 64 +[ Tue Sep 13 12:08:24 2022 ] Batch(50/123) done. Loss: 0.0799 lr:0.010000 +[ Tue Sep 13 12:09:02 2022 ] Eval epoch: 64 +[ Tue Sep 13 12:09:53 2022 ] Mean test loss of 258 batches: 1.8374004364013672. +[ Tue Sep 13 12:09:53 2022 ] Top1: 65.17% +[ Tue Sep 13 12:09:53 2022 ] Top5: 91.44% +[ Tue Sep 13 12:09:53 2022 ] Training epoch: 65 +[ Tue Sep 13 12:10:10 2022 ] Batch(27/123) done. Loss: 0.0559 lr:0.010000 +[ Tue Sep 13 12:11:00 2022 ] Eval epoch: 65 +[ Tue Sep 13 12:11:51 2022 ] Mean test loss of 258 batches: 1.817768931388855. +[ Tue Sep 13 12:11:51 2022 ] Top1: 65.39% +[ Tue Sep 13 12:11:51 2022 ] Top5: 91.55% +[ Tue Sep 13 12:11:51 2022 ] Training epoch: 66 +[ Tue Sep 13 12:11:56 2022 ] Batch(4/123) done. Loss: 0.0192 lr:0.010000 +[ Tue Sep 13 12:12:49 2022 ] Batch(104/123) done. Loss: 0.0499 lr:0.010000 +[ Tue Sep 13 12:12:59 2022 ] Eval epoch: 66 +[ Tue Sep 13 12:13:49 2022 ] Mean test loss of 258 batches: 1.8281463384628296. 
+[ Tue Sep 13 12:13:49 2022 ] Top1: 65.28% +[ Tue Sep 13 12:13:49 2022 ] Top5: 91.49% +[ Tue Sep 13 12:13:49 2022 ] Training epoch: 67 +[ Tue Sep 13 12:14:35 2022 ] Batch(81/123) done. Loss: 0.0402 lr:0.010000 +[ Tue Sep 13 12:14:57 2022 ] Eval epoch: 67 +[ Tue Sep 13 12:15:47 2022 ] Mean test loss of 258 batches: 1.8663133382797241. +[ Tue Sep 13 12:15:47 2022 ] Top1: 65.43% +[ Tue Sep 13 12:15:47 2022 ] Top5: 91.58% +[ Tue Sep 13 12:15:47 2022 ] Training epoch: 68 +[ Tue Sep 13 12:16:22 2022 ] Batch(58/123) done. Loss: 0.0263 lr:0.010000 +[ Tue Sep 13 12:16:55 2022 ] Eval epoch: 68 +[ Tue Sep 13 12:17:46 2022 ] Mean test loss of 258 batches: 1.8553483486175537. +[ Tue Sep 13 12:17:46 2022 ] Top1: 64.97% +[ Tue Sep 13 12:17:46 2022 ] Top5: 91.37% +[ Tue Sep 13 12:17:46 2022 ] Training epoch: 69 +[ Tue Sep 13 12:18:08 2022 ] Batch(35/123) done. Loss: 0.0370 lr:0.010000 +[ Tue Sep 13 12:18:54 2022 ] Eval epoch: 69 +[ Tue Sep 13 12:19:44 2022 ] Mean test loss of 258 batches: 1.8636685609817505. +[ Tue Sep 13 12:19:44 2022 ] Top1: 65.08% +[ Tue Sep 13 12:19:44 2022 ] Top5: 91.50% +[ Tue Sep 13 12:19:44 2022 ] Training epoch: 70 +[ Tue Sep 13 12:19:54 2022 ] Batch(12/123) done. Loss: 0.0232 lr:0.010000 +[ Tue Sep 13 12:20:47 2022 ] Batch(112/123) done. Loss: 0.0477 lr:0.010000 +[ Tue Sep 13 12:20:52 2022 ] Eval epoch: 70 +[ Tue Sep 13 12:21:42 2022 ] Mean test loss of 258 batches: 1.8849055767059326. +[ Tue Sep 13 12:21:42 2022 ] Top1: 65.17% +[ Tue Sep 13 12:21:42 2022 ] Top5: 91.31% +[ Tue Sep 13 12:21:42 2022 ] Training epoch: 71 +[ Tue Sep 13 12:22:33 2022 ] Batch(89/123) done. Loss: 0.1645 lr:0.010000 +[ Tue Sep 13 12:22:50 2022 ] Eval epoch: 71 +[ Tue Sep 13 12:23:40 2022 ] Mean test loss of 258 batches: 1.9196712970733643. +[ Tue Sep 13 12:23:40 2022 ] Top1: 64.89% +[ Tue Sep 13 12:23:40 2022 ] Top5: 91.48% +[ Tue Sep 13 12:23:40 2022 ] Training epoch: 72 +[ Tue Sep 13 12:24:19 2022 ] Batch(66/123) done. 
Loss: 0.0282 lr:0.010000 +[ Tue Sep 13 12:24:48 2022 ] Eval epoch: 72 +[ Tue Sep 13 12:25:38 2022 ] Mean test loss of 258 batches: 1.9522173404693604. +[ Tue Sep 13 12:25:38 2022 ] Top1: 64.70% +[ Tue Sep 13 12:25:38 2022 ] Top5: 91.11% +[ Tue Sep 13 12:25:38 2022 ] Training epoch: 73 +[ Tue Sep 13 12:26:05 2022 ] Batch(43/123) done. Loss: 0.0220 lr:0.010000 +[ Tue Sep 13 12:26:46 2022 ] Eval epoch: 73 +[ Tue Sep 13 12:27:37 2022 ] Mean test loss of 258 batches: 1.919583797454834. +[ Tue Sep 13 12:27:37 2022 ] Top1: 65.20% +[ Tue Sep 13 12:27:37 2022 ] Top5: 91.27% +[ Tue Sep 13 12:27:37 2022 ] Training epoch: 74 +[ Tue Sep 13 12:27:51 2022 ] Batch(20/123) done. Loss: 0.0347 lr:0.010000 +[ Tue Sep 13 12:28:43 2022 ] Batch(120/123) done. Loss: 0.0459 lr:0.010000 +[ Tue Sep 13 12:28:45 2022 ] Eval epoch: 74 +[ Tue Sep 13 12:29:35 2022 ] Mean test loss of 258 batches: 1.946401596069336. +[ Tue Sep 13 12:29:35 2022 ] Top1: 64.86% +[ Tue Sep 13 12:29:35 2022 ] Top5: 91.20% +[ Tue Sep 13 12:29:35 2022 ] Training epoch: 75 +[ Tue Sep 13 12:30:30 2022 ] Batch(97/123) done. Loss: 0.0223 lr:0.010000 +[ Tue Sep 13 12:30:43 2022 ] Eval epoch: 75 +[ Tue Sep 13 12:31:33 2022 ] Mean test loss of 258 batches: 1.9171396493911743. +[ Tue Sep 13 12:31:33 2022 ] Top1: 65.28% +[ Tue Sep 13 12:31:33 2022 ] Top5: 91.25% +[ Tue Sep 13 12:31:33 2022 ] Training epoch: 76 +[ Tue Sep 13 12:32:16 2022 ] Batch(74/123) done. Loss: 0.0539 lr:0.010000 +[ Tue Sep 13 12:32:41 2022 ] Eval epoch: 76 +[ Tue Sep 13 12:33:31 2022 ] Mean test loss of 258 batches: 1.967031717300415. +[ Tue Sep 13 12:33:32 2022 ] Top1: 64.91% +[ Tue Sep 13 12:33:32 2022 ] Top5: 91.35% +[ Tue Sep 13 12:33:32 2022 ] Training epoch: 77 +[ Tue Sep 13 12:34:02 2022 ] Batch(51/123) done. Loss: 0.0534 lr:0.010000 +[ Tue Sep 13 12:34:40 2022 ] Eval epoch: 77 +[ Tue Sep 13 12:35:29 2022 ] Mean test loss of 258 batches: 1.9728081226348877. 
+[ Tue Sep 13 12:35:29 2022 ] Top1: 65.31% +[ Tue Sep 13 12:35:30 2022 ] Top5: 91.17% +[ Tue Sep 13 12:35:30 2022 ] Training epoch: 78 +[ Tue Sep 13 12:35:48 2022 ] Batch(28/123) done. Loss: 0.0317 lr:0.010000 +[ Tue Sep 13 12:36:38 2022 ] Eval epoch: 78 +[ Tue Sep 13 12:37:28 2022 ] Mean test loss of 258 batches: 1.949669361114502. +[ Tue Sep 13 12:37:28 2022 ] Top1: 65.11% +[ Tue Sep 13 12:37:28 2022 ] Top5: 91.25% +[ Tue Sep 13 12:37:28 2022 ] Training epoch: 79 +[ Tue Sep 13 12:37:35 2022 ] Batch(5/123) done. Loss: 0.0147 lr:0.010000 +[ Tue Sep 13 12:38:27 2022 ] Batch(105/123) done. Loss: 0.0136 lr:0.010000 +[ Tue Sep 13 12:38:36 2022 ] Eval epoch: 79 +[ Tue Sep 13 12:39:27 2022 ] Mean test loss of 258 batches: 1.9937751293182373. +[ Tue Sep 13 12:39:27 2022 ] Top1: 65.29% +[ Tue Sep 13 12:39:27 2022 ] Top5: 91.31% +[ Tue Sep 13 12:39:27 2022 ] Training epoch: 80 +[ Tue Sep 13 12:40:14 2022 ] Batch(82/123) done. Loss: 0.0356 lr:0.010000 +[ Tue Sep 13 12:40:35 2022 ] Eval epoch: 80 +[ Tue Sep 13 12:41:25 2022 ] Mean test loss of 258 batches: 2.0242815017700195. +[ Tue Sep 13 12:41:25 2022 ] Top1: 64.96% +[ Tue Sep 13 12:41:25 2022 ] Top5: 91.13% +[ Tue Sep 13 12:41:25 2022 ] Training epoch: 81 +[ Tue Sep 13 12:42:00 2022 ] Batch(59/123) done. Loss: 0.0186 lr:0.001000 +[ Tue Sep 13 12:42:33 2022 ] Eval epoch: 81 +[ Tue Sep 13 12:43:23 2022 ] Mean test loss of 258 batches: 1.977448582649231. +[ Tue Sep 13 12:43:24 2022 ] Top1: 65.55% +[ Tue Sep 13 12:43:24 2022 ] Top5: 91.38% +[ Tue Sep 13 12:43:24 2022 ] Training epoch: 82 +[ Tue Sep 13 12:43:46 2022 ] Batch(36/123) done. Loss: 0.0176 lr:0.001000 +[ Tue Sep 13 12:44:32 2022 ] Eval epoch: 82 +[ Tue Sep 13 12:45:22 2022 ] Mean test loss of 258 batches: 2.012622356414795. +[ Tue Sep 13 12:45:22 2022 ] Top1: 65.12% +[ Tue Sep 13 12:45:22 2022 ] Top5: 91.13% +[ Tue Sep 13 12:45:22 2022 ] Training epoch: 83 +[ Tue Sep 13 12:45:33 2022 ] Batch(13/123) done. 
Loss: 0.1106 lr:0.001000 +[ Tue Sep 13 12:46:25 2022 ] Batch(113/123) done. Loss: 0.1164 lr:0.001000 +[ Tue Sep 13 12:46:30 2022 ] Eval epoch: 83 +[ Tue Sep 13 12:47:21 2022 ] Mean test loss of 258 batches: 2.000906467437744. +[ Tue Sep 13 12:47:21 2022 ] Top1: 65.42% +[ Tue Sep 13 12:47:21 2022 ] Top5: 91.39% +[ Tue Sep 13 12:47:21 2022 ] Training epoch: 84 +[ Tue Sep 13 12:48:12 2022 ] Batch(90/123) done. Loss: 0.0565 lr:0.001000 +[ Tue Sep 13 12:48:29 2022 ] Eval epoch: 84 +[ Tue Sep 13 12:49:19 2022 ] Mean test loss of 258 batches: 2.0127618312835693. +[ Tue Sep 13 12:49:19 2022 ] Top1: 65.14% +[ Tue Sep 13 12:49:19 2022 ] Top5: 91.21% +[ Tue Sep 13 12:49:20 2022 ] Training epoch: 85 +[ Tue Sep 13 12:49:59 2022 ] Batch(67/123) done. Loss: 0.0713 lr:0.001000 +[ Tue Sep 13 12:50:28 2022 ] Eval epoch: 85 +[ Tue Sep 13 12:51:18 2022 ] Mean test loss of 258 batches: 2.0610320568084717. +[ Tue Sep 13 12:51:18 2022 ] Top1: 64.55% +[ Tue Sep 13 12:51:18 2022 ] Top5: 91.02% +[ Tue Sep 13 12:51:19 2022 ] Training epoch: 86 +[ Tue Sep 13 12:51:45 2022 ] Batch(44/123) done. Loss: 0.0331 lr:0.001000 +[ Tue Sep 13 12:52:26 2022 ] Eval epoch: 86 +[ Tue Sep 13 12:53:17 2022 ] Mean test loss of 258 batches: 1.9696850776672363. +[ Tue Sep 13 12:53:17 2022 ] Top1: 65.51% +[ Tue Sep 13 12:53:17 2022 ] Top5: 91.43% +[ Tue Sep 13 12:53:17 2022 ] Training epoch: 87 +[ Tue Sep 13 12:53:32 2022 ] Batch(21/123) done. Loss: 0.1194 lr:0.001000 +[ Tue Sep 13 12:54:24 2022 ] Batch(121/123) done. Loss: 0.0801 lr:0.001000 +[ Tue Sep 13 12:54:25 2022 ] Eval epoch: 87 +[ Tue Sep 13 12:55:15 2022 ] Mean test loss of 258 batches: 2.010471820831299. +[ Tue Sep 13 12:55:15 2022 ] Top1: 65.19% +[ Tue Sep 13 12:55:15 2022 ] Top5: 91.33% +[ Tue Sep 13 12:55:16 2022 ] Training epoch: 88 +[ Tue Sep 13 12:56:11 2022 ] Batch(98/123) done. Loss: 0.0768 lr:0.001000 +[ Tue Sep 13 12:56:24 2022 ] Eval epoch: 88 +[ Tue Sep 13 12:57:15 2022 ] Mean test loss of 258 batches: 2.0580830574035645. 
+[ Tue Sep 13 12:57:15 2022 ] Top1: 64.70% +[ Tue Sep 13 12:57:15 2022 ] Top5: 91.13% +[ Tue Sep 13 12:57:15 2022 ] Training epoch: 89 +[ Tue Sep 13 12:57:58 2022 ] Batch(75/123) done. Loss: 0.0448 lr:0.001000 +[ Tue Sep 13 12:58:23 2022 ] Eval epoch: 89 +[ Tue Sep 13 12:59:13 2022 ] Mean test loss of 258 batches: 2.0660269260406494. +[ Tue Sep 13 12:59:13 2022 ] Top1: 64.53% +[ Tue Sep 13 12:59:13 2022 ] Top5: 91.05% +[ Tue Sep 13 12:59:13 2022 ] Training epoch: 90 +[ Tue Sep 13 12:59:44 2022 ] Batch(52/123) done. Loss: 0.0275 lr:0.001000 +[ Tue Sep 13 13:00:21 2022 ] Eval epoch: 90 +[ Tue Sep 13 13:01:11 2022 ] Mean test loss of 258 batches: 1.99518883228302. +[ Tue Sep 13 13:01:11 2022 ] Top1: 65.26% +[ Tue Sep 13 13:01:11 2022 ] Top5: 91.37% +[ Tue Sep 13 13:01:12 2022 ] Training epoch: 91 +[ Tue Sep 13 13:01:30 2022 ] Batch(29/123) done. Loss: 0.0468 lr:0.001000 +[ Tue Sep 13 13:02:20 2022 ] Eval epoch: 91 +[ Tue Sep 13 13:03:10 2022 ] Mean test loss of 258 batches: 2.032771587371826. +[ Tue Sep 13 13:03:10 2022 ] Top1: 64.77% +[ Tue Sep 13 13:03:10 2022 ] Top5: 91.21% +[ Tue Sep 13 13:03:10 2022 ] Training epoch: 92 +[ Tue Sep 13 13:03:17 2022 ] Batch(6/123) done. Loss: 0.0512 lr:0.001000 +[ Tue Sep 13 13:04:10 2022 ] Batch(106/123) done. Loss: 0.0190 lr:0.001000 +[ Tue Sep 13 13:04:18 2022 ] Eval epoch: 92 +[ Tue Sep 13 13:05:08 2022 ] Mean test loss of 258 batches: 1.9861313104629517. +[ Tue Sep 13 13:05:08 2022 ] Top1: 65.41% +[ Tue Sep 13 13:05:09 2022 ] Top5: 91.44% +[ Tue Sep 13 13:05:09 2022 ] Training epoch: 93 +[ Tue Sep 13 13:05:56 2022 ] Batch(83/123) done. Loss: 0.0261 lr:0.001000 +[ Tue Sep 13 13:06:17 2022 ] Eval epoch: 93 +[ Tue Sep 13 13:07:07 2022 ] Mean test loss of 258 batches: 2.015868663787842. +[ Tue Sep 13 13:07:07 2022 ] Top1: 65.24% +[ Tue Sep 13 13:07:07 2022 ] Top5: 91.29% +[ Tue Sep 13 13:07:07 2022 ] Training epoch: 94 +[ Tue Sep 13 13:07:43 2022 ] Batch(60/123) done. 
Loss: 0.0185 lr:0.001000 +[ Tue Sep 13 13:08:15 2022 ] Eval epoch: 94 +[ Tue Sep 13 13:09:05 2022 ] Mean test loss of 258 batches: 2.033536195755005. +[ Tue Sep 13 13:09:05 2022 ] Top1: 65.03% +[ Tue Sep 13 13:09:05 2022 ] Top5: 91.31% +[ Tue Sep 13 13:09:06 2022 ] Training epoch: 95 +[ Tue Sep 13 13:09:29 2022 ] Batch(37/123) done. Loss: 0.0353 lr:0.001000 +[ Tue Sep 13 13:10:14 2022 ] Eval epoch: 95 +[ Tue Sep 13 13:11:04 2022 ] Mean test loss of 258 batches: 2.0294594764709473. +[ Tue Sep 13 13:11:04 2022 ] Top1: 65.16% +[ Tue Sep 13 13:11:04 2022 ] Top5: 91.18% +[ Tue Sep 13 13:11:04 2022 ] Training epoch: 96 +[ Tue Sep 13 13:11:15 2022 ] Batch(14/123) done. Loss: 0.0521 lr:0.001000 +[ Tue Sep 13 13:12:07 2022 ] Batch(114/123) done. Loss: 0.0375 lr:0.001000 +[ Tue Sep 13 13:12:12 2022 ] Eval epoch: 96 +[ Tue Sep 13 13:13:02 2022 ] Mean test loss of 258 batches: 2.0271966457366943. +[ Tue Sep 13 13:13:02 2022 ] Top1: 65.34% +[ Tue Sep 13 13:13:02 2022 ] Top5: 91.39% +[ Tue Sep 13 13:13:02 2022 ] Training epoch: 97 +[ Tue Sep 13 13:13:53 2022 ] Batch(91/123) done. Loss: 0.0273 lr:0.001000 +[ Tue Sep 13 13:14:10 2022 ] Eval epoch: 97 +[ Tue Sep 13 13:15:00 2022 ] Mean test loss of 258 batches: 2.0177931785583496. +[ Tue Sep 13 13:15:00 2022 ] Top1: 65.08% +[ Tue Sep 13 13:15:00 2022 ] Top5: 91.16% +[ Tue Sep 13 13:15:00 2022 ] Training epoch: 98 +[ Tue Sep 13 13:15:39 2022 ] Batch(68/123) done. Loss: 0.0194 lr:0.001000 +[ Tue Sep 13 13:16:08 2022 ] Eval epoch: 98 +[ Tue Sep 13 13:16:58 2022 ] Mean test loss of 258 batches: 2.0503251552581787. +[ Tue Sep 13 13:16:58 2022 ] Top1: 64.79% +[ Tue Sep 13 13:16:58 2022 ] Top5: 91.08% +[ Tue Sep 13 13:16:58 2022 ] Training epoch: 99 +[ Tue Sep 13 13:17:25 2022 ] Batch(45/123) done. Loss: 0.0339 lr:0.001000 +[ Tue Sep 13 13:18:06 2022 ] Eval epoch: 99 +[ Tue Sep 13 13:18:56 2022 ] Mean test loss of 258 batches: 2.0308101177215576. 
+[ Tue Sep 13 13:18:57 2022 ] Top1: 65.18% +[ Tue Sep 13 13:18:57 2022 ] Top5: 91.13% +[ Tue Sep 13 13:18:57 2022 ] Training epoch: 100 +[ Tue Sep 13 13:19:12 2022 ] Batch(22/123) done. Loss: 0.0428 lr:0.001000 +[ Tue Sep 13 13:20:05 2022 ] Batch(122/123) done. Loss: 0.0780 lr:0.001000 +[ Tue Sep 13 13:20:05 2022 ] Eval epoch: 100 +[ Tue Sep 13 13:20:55 2022 ] Mean test loss of 258 batches: 2.0031473636627197. +[ Tue Sep 13 13:20:55 2022 ] Top1: 65.13% +[ Tue Sep 13 13:20:55 2022 ] Top5: 91.29% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9143d1417d0bf9d0f0e68547d6e9674330d32ede --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_joint_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-subject/train_joint_motion.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_joint_motion_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint_motion.npy + debug: false + label_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_joint_motion_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = 
nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, 
num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, 
stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7f254322580315bf3638de472a81397ad2ce2e93 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e80ca63a7e7eaab88a1f1a22672471a1ed95998eb32386b95211268824ea1ba +size 4979902 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..dfc2462505d569de278a854377207c96e14ba26d --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_motion_xsub/log.txt @@ -0,0 +1,626 @@ +[ Wed Sep 14 08:59:06 2022 ] Parameters: +{'work_dir': './work_dir/ntu_joint_motion_xsub', 'model_saved_name': './save_models/ntu_joint_motion_xsub', 'Experiment_name': 'ntu_joint_motion_xsub', 'config': 
'./config/nturgbd-cross-subject/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 08:59:06 2022 ] Training epoch: 1 +[ Wed Sep 14 08:59:56 2022 ] Batch(99/123) done. Loss: 2.9676 lr:0.100000 +[ Wed Sep 14 09:00:06 2022 ] Eval epoch: 1 +[ Wed Sep 14 09:00:56 2022 ] Mean test loss of 258 batches: 7.117305278778076. +[ Wed Sep 14 09:00:56 2022 ] Top1: 4.80% +[ Wed Sep 14 09:00:56 2022 ] Top5: 19.06% +[ Wed Sep 14 09:00:56 2022 ] Training epoch: 2 +[ Wed Sep 14 09:01:40 2022 ] Batch(76/123) done. Loss: 2.6146 lr:0.100000 +[ Wed Sep 14 09:02:04 2022 ] Eval epoch: 2 +[ Wed Sep 14 09:02:55 2022 ] Mean test loss of 258 batches: 5.275425434112549. 
+[ Wed Sep 14 09:02:55 2022 ] Top1: 10.10% +[ Wed Sep 14 09:02:55 2022 ] Top5: 28.08% +[ Wed Sep 14 09:02:55 2022 ] Training epoch: 3 +[ Wed Sep 14 09:03:26 2022 ] Batch(53/123) done. Loss: 2.8425 lr:0.100000 +[ Wed Sep 14 09:04:03 2022 ] Eval epoch: 3 +[ Wed Sep 14 09:04:53 2022 ] Mean test loss of 258 batches: 4.473618507385254. +[ Wed Sep 14 09:04:53 2022 ] Top1: 10.51% +[ Wed Sep 14 09:04:53 2022 ] Top5: 34.15% +[ Wed Sep 14 09:04:54 2022 ] Training epoch: 4 +[ Wed Sep 14 09:05:13 2022 ] Batch(30/123) done. Loss: 2.3812 lr:0.100000 +[ Wed Sep 14 09:06:01 2022 ] Eval epoch: 4 +[ Wed Sep 14 09:06:51 2022 ] Mean test loss of 258 batches: 4.136472702026367. +[ Wed Sep 14 09:06:52 2022 ] Top1: 15.39% +[ Wed Sep 14 09:06:52 2022 ] Top5: 41.83% +[ Wed Sep 14 09:06:52 2022 ] Training epoch: 5 +[ Wed Sep 14 09:06:59 2022 ] Batch(7/123) done. Loss: 2.0284 lr:0.100000 +[ Wed Sep 14 09:07:51 2022 ] Batch(107/123) done. Loss: 1.6414 lr:0.100000 +[ Wed Sep 14 09:08:00 2022 ] Eval epoch: 5 +[ Wed Sep 14 09:08:50 2022 ] Mean test loss of 258 batches: 3.7556955814361572. +[ Wed Sep 14 09:08:50 2022 ] Top1: 20.32% +[ Wed Sep 14 09:08:50 2022 ] Top5: 48.43% +[ Wed Sep 14 09:08:50 2022 ] Training epoch: 6 +[ Wed Sep 14 09:09:38 2022 ] Batch(84/123) done. Loss: 1.3047 lr:0.100000 +[ Wed Sep 14 09:09:58 2022 ] Eval epoch: 6 +[ Wed Sep 14 09:10:48 2022 ] Mean test loss of 258 batches: 3.519871473312378. +[ Wed Sep 14 09:10:48 2022 ] Top1: 21.75% +[ Wed Sep 14 09:10:49 2022 ] Top5: 53.95% +[ Wed Sep 14 09:10:49 2022 ] Training epoch: 7 +[ Wed Sep 14 09:11:24 2022 ] Batch(61/123) done. Loss: 1.5018 lr:0.100000 +[ Wed Sep 14 09:11:57 2022 ] Eval epoch: 7 +[ Wed Sep 14 09:12:47 2022 ] Mean test loss of 258 batches: 3.1029582023620605. +[ Wed Sep 14 09:12:47 2022 ] Top1: 28.23% +[ Wed Sep 14 09:12:47 2022 ] Top5: 59.74% +[ Wed Sep 14 09:12:47 2022 ] Training epoch: 8 +[ Wed Sep 14 09:13:11 2022 ] Batch(38/123) done. 
Loss: 1.2094 lr:0.100000 +[ Wed Sep 14 09:13:55 2022 ] Eval epoch: 8 +[ Wed Sep 14 09:14:46 2022 ] Mean test loss of 258 batches: 3.524228572845459. +[ Wed Sep 14 09:14:46 2022 ] Top1: 26.87% +[ Wed Sep 14 09:14:46 2022 ] Top5: 59.79% +[ Wed Sep 14 09:14:46 2022 ] Training epoch: 9 +[ Wed Sep 14 09:14:58 2022 ] Batch(15/123) done. Loss: 1.1210 lr:0.100000 +[ Wed Sep 14 09:15:50 2022 ] Batch(115/123) done. Loss: 1.0431 lr:0.100000 +[ Wed Sep 14 09:15:54 2022 ] Eval epoch: 9 +[ Wed Sep 14 09:16:44 2022 ] Mean test loss of 258 batches: 3.543813705444336. +[ Wed Sep 14 09:16:44 2022 ] Top1: 31.44% +[ Wed Sep 14 09:16:44 2022 ] Top5: 68.33% +[ Wed Sep 14 09:16:45 2022 ] Training epoch: 10 +[ Wed Sep 14 09:17:37 2022 ] Batch(92/123) done. Loss: 0.9419 lr:0.100000 +[ Wed Sep 14 09:17:53 2022 ] Eval epoch: 10 +[ Wed Sep 14 09:18:43 2022 ] Mean test loss of 258 batches: 2.7530345916748047. +[ Wed Sep 14 09:18:43 2022 ] Top1: 36.08% +[ Wed Sep 14 09:18:44 2022 ] Top5: 70.69% +[ Wed Sep 14 09:18:44 2022 ] Training epoch: 11 +[ Wed Sep 14 09:19:23 2022 ] Batch(69/123) done. Loss: 1.0906 lr:0.100000 +[ Wed Sep 14 09:19:52 2022 ] Eval epoch: 11 +[ Wed Sep 14 09:20:42 2022 ] Mean test loss of 258 batches: 3.238889217376709. +[ Wed Sep 14 09:20:42 2022 ] Top1: 33.14% +[ Wed Sep 14 09:20:42 2022 ] Top5: 67.65% +[ Wed Sep 14 09:20:42 2022 ] Training epoch: 12 +[ Wed Sep 14 09:21:10 2022 ] Batch(46/123) done. Loss: 0.8957 lr:0.100000 +[ Wed Sep 14 09:21:50 2022 ] Eval epoch: 12 +[ Wed Sep 14 09:22:41 2022 ] Mean test loss of 258 batches: 2.5152828693389893. +[ Wed Sep 14 09:22:41 2022 ] Top1: 42.08% +[ Wed Sep 14 09:22:41 2022 ] Top5: 78.97% +[ Wed Sep 14 09:22:41 2022 ] Training epoch: 13 +[ Wed Sep 14 09:22:57 2022 ] Batch(23/123) done. Loss: 1.0458 lr:0.100000 +[ Wed Sep 14 09:23:49 2022 ] Eval epoch: 13 +[ Wed Sep 14 09:24:39 2022 ] Mean test loss of 258 batches: 2.6714367866516113. 
+[ Wed Sep 14 09:24:39 2022 ] Top1: 37.28% +[ Wed Sep 14 09:24:39 2022 ] Top5: 71.97% +[ Wed Sep 14 09:24:39 2022 ] Training epoch: 14 +[ Wed Sep 14 09:24:43 2022 ] Batch(0/123) done. Loss: 0.9178 lr:0.100000 +[ Wed Sep 14 09:25:35 2022 ] Batch(100/123) done. Loss: 1.0160 lr:0.100000 +[ Wed Sep 14 09:25:47 2022 ] Eval epoch: 14 +[ Wed Sep 14 09:26:38 2022 ] Mean test loss of 258 batches: 2.4809772968292236. +[ Wed Sep 14 09:26:38 2022 ] Top1: 39.05% +[ Wed Sep 14 09:26:38 2022 ] Top5: 77.75% +[ Wed Sep 14 09:26:38 2022 ] Training epoch: 15 +[ Wed Sep 14 09:27:22 2022 ] Batch(77/123) done. Loss: 0.9060 lr:0.100000 +[ Wed Sep 14 09:27:46 2022 ] Eval epoch: 15 +[ Wed Sep 14 09:28:36 2022 ] Mean test loss of 258 batches: 2.2054946422576904. +[ Wed Sep 14 09:28:36 2022 ] Top1: 43.96% +[ Wed Sep 14 09:28:37 2022 ] Top5: 81.79% +[ Wed Sep 14 09:28:37 2022 ] Training epoch: 16 +[ Wed Sep 14 09:29:08 2022 ] Batch(54/123) done. Loss: 0.6082 lr:0.100000 +[ Wed Sep 14 09:29:44 2022 ] Eval epoch: 16 +[ Wed Sep 14 09:30:35 2022 ] Mean test loss of 258 batches: 2.413510322570801. +[ Wed Sep 14 09:30:35 2022 ] Top1: 41.93% +[ Wed Sep 14 09:30:35 2022 ] Top5: 79.76% +[ Wed Sep 14 09:30:35 2022 ] Training epoch: 17 +[ Wed Sep 14 09:30:55 2022 ] Batch(31/123) done. Loss: 0.8388 lr:0.100000 +[ Wed Sep 14 09:31:43 2022 ] Eval epoch: 17 +[ Wed Sep 14 09:32:34 2022 ] Mean test loss of 258 batches: 2.462135076522827. +[ Wed Sep 14 09:32:34 2022 ] Top1: 44.52% +[ Wed Sep 14 09:32:34 2022 ] Top5: 80.72% +[ Wed Sep 14 09:32:34 2022 ] Training epoch: 18 +[ Wed Sep 14 09:32:42 2022 ] Batch(8/123) done. Loss: 0.9092 lr:0.100000 +[ Wed Sep 14 09:33:34 2022 ] Batch(108/123) done. Loss: 0.7070 lr:0.100000 +[ Wed Sep 14 09:33:42 2022 ] Eval epoch: 18 +[ Wed Sep 14 09:34:32 2022 ] Mean test loss of 258 batches: 2.302579164505005. 
+[ Wed Sep 14 09:34:32 2022 ] Top1: 48.50% +[ Wed Sep 14 09:34:32 2022 ] Top5: 84.71% +[ Wed Sep 14 09:34:32 2022 ] Training epoch: 19 +[ Wed Sep 14 09:35:21 2022 ] Batch(85/123) done. Loss: 0.7225 lr:0.100000 +[ Wed Sep 14 09:35:40 2022 ] Eval epoch: 19 +[ Wed Sep 14 09:36:31 2022 ] Mean test loss of 258 batches: 2.287689208984375. +[ Wed Sep 14 09:36:31 2022 ] Top1: 45.81% +[ Wed Sep 14 09:36:31 2022 ] Top5: 83.19% +[ Wed Sep 14 09:36:31 2022 ] Training epoch: 20 +[ Wed Sep 14 09:37:07 2022 ] Batch(62/123) done. Loss: 0.8596 lr:0.100000 +[ Wed Sep 14 09:37:39 2022 ] Eval epoch: 20 +[ Wed Sep 14 09:38:30 2022 ] Mean test loss of 258 batches: 6.094878196716309. +[ Wed Sep 14 09:38:30 2022 ] Top1: 24.93% +[ Wed Sep 14 09:38:30 2022 ] Top5: 63.79% +[ Wed Sep 14 09:38:30 2022 ] Training epoch: 21 +[ Wed Sep 14 09:38:54 2022 ] Batch(39/123) done. Loss: 0.5370 lr:0.100000 +[ Wed Sep 14 09:39:38 2022 ] Eval epoch: 21 +[ Wed Sep 14 09:40:28 2022 ] Mean test loss of 258 batches: 2.607907772064209. +[ Wed Sep 14 09:40:28 2022 ] Top1: 46.86% +[ Wed Sep 14 09:40:28 2022 ] Top5: 83.54% +[ Wed Sep 14 09:40:29 2022 ] Training epoch: 22 +[ Wed Sep 14 09:40:41 2022 ] Batch(16/123) done. Loss: 0.5816 lr:0.100000 +[ Wed Sep 14 09:41:33 2022 ] Batch(116/123) done. Loss: 0.5359 lr:0.100000 +[ Wed Sep 14 09:41:37 2022 ] Eval epoch: 22 +[ Wed Sep 14 09:42:27 2022 ] Mean test loss of 258 batches: 2.0424587726593018. +[ Wed Sep 14 09:42:27 2022 ] Top1: 52.03% +[ Wed Sep 14 09:42:27 2022 ] Top5: 84.37% +[ Wed Sep 14 09:42:27 2022 ] Training epoch: 23 +[ Wed Sep 14 09:43:20 2022 ] Batch(93/123) done. Loss: 0.5305 lr:0.100000 +[ Wed Sep 14 09:43:35 2022 ] Eval epoch: 23 +[ Wed Sep 14 09:44:25 2022 ] Mean test loss of 258 batches: 2.3085105419158936. +[ Wed Sep 14 09:44:25 2022 ] Top1: 47.98% +[ Wed Sep 14 09:44:25 2022 ] Top5: 82.33% +[ Wed Sep 14 09:44:26 2022 ] Training epoch: 24 +[ Wed Sep 14 09:45:06 2022 ] Batch(70/123) done. 
Loss: 0.6515 lr:0.100000 +[ Wed Sep 14 09:45:33 2022 ] Eval epoch: 24 +[ Wed Sep 14 09:46:24 2022 ] Mean test loss of 258 batches: 3.1666781902313232. +[ Wed Sep 14 09:46:24 2022 ] Top1: 38.96% +[ Wed Sep 14 09:46:24 2022 ] Top5: 73.57% +[ Wed Sep 14 09:46:24 2022 ] Training epoch: 25 +[ Wed Sep 14 09:46:52 2022 ] Batch(47/123) done. Loss: 0.5286 lr:0.100000 +[ Wed Sep 14 09:47:32 2022 ] Eval epoch: 25 +[ Wed Sep 14 09:48:22 2022 ] Mean test loss of 258 batches: 2.881316900253296. +[ Wed Sep 14 09:48:22 2022 ] Top1: 41.57% +[ Wed Sep 14 09:48:22 2022 ] Top5: 77.86% +[ Wed Sep 14 09:48:22 2022 ] Training epoch: 26 +[ Wed Sep 14 09:48:39 2022 ] Batch(24/123) done. Loss: 0.3656 lr:0.100000 +[ Wed Sep 14 09:49:30 2022 ] Eval epoch: 26 +[ Wed Sep 14 09:50:21 2022 ] Mean test loss of 258 batches: 2.0794057846069336. +[ Wed Sep 14 09:50:21 2022 ] Top1: 53.21% +[ Wed Sep 14 09:50:21 2022 ] Top5: 86.81% +[ Wed Sep 14 09:50:21 2022 ] Training epoch: 27 +[ Wed Sep 14 09:50:25 2022 ] Batch(1/123) done. Loss: 0.5163 lr:0.100000 +[ Wed Sep 14 09:51:18 2022 ] Batch(101/123) done. Loss: 0.3626 lr:0.100000 +[ Wed Sep 14 09:51:29 2022 ] Eval epoch: 27 +[ Wed Sep 14 09:52:20 2022 ] Mean test loss of 258 batches: 8.668159484863281. +[ Wed Sep 14 09:52:20 2022 ] Top1: 25.24% +[ Wed Sep 14 09:52:20 2022 ] Top5: 53.81% +[ Wed Sep 14 09:52:20 2022 ] Training epoch: 28 +[ Wed Sep 14 09:53:05 2022 ] Batch(78/123) done. Loss: 0.5547 lr:0.100000 +[ Wed Sep 14 09:53:28 2022 ] Eval epoch: 28 +[ Wed Sep 14 09:54:19 2022 ] Mean test loss of 258 batches: 2.150930643081665. +[ Wed Sep 14 09:54:19 2022 ] Top1: 49.72% +[ Wed Sep 14 09:54:19 2022 ] Top5: 81.52% +[ Wed Sep 14 09:54:19 2022 ] Training epoch: 29 +[ Wed Sep 14 09:54:52 2022 ] Batch(55/123) done. Loss: 0.4950 lr:0.100000 +[ Wed Sep 14 09:55:27 2022 ] Eval epoch: 29 +[ Wed Sep 14 09:56:18 2022 ] Mean test loss of 258 batches: 2.4052159786224365. 
+[ Wed Sep 14 09:56:18 2022 ] Top1: 50.33% +[ Wed Sep 14 09:56:18 2022 ] Top5: 83.42% +[ Wed Sep 14 09:56:18 2022 ] Training epoch: 30 +[ Wed Sep 14 09:56:38 2022 ] Batch(32/123) done. Loss: 0.3554 lr:0.100000 +[ Wed Sep 14 09:57:26 2022 ] Eval epoch: 30 +[ Wed Sep 14 09:58:17 2022 ] Mean test loss of 258 batches: 1.9445937871932983. +[ Wed Sep 14 09:58:17 2022 ] Top1: 55.87% +[ Wed Sep 14 09:58:17 2022 ] Top5: 88.66% +[ Wed Sep 14 09:58:17 2022 ] Training epoch: 31 +[ Wed Sep 14 09:58:25 2022 ] Batch(9/123) done. Loss: 0.2407 lr:0.100000 +[ Wed Sep 14 09:59:18 2022 ] Batch(109/123) done. Loss: 0.6598 lr:0.100000 +[ Wed Sep 14 09:59:25 2022 ] Eval epoch: 31 +[ Wed Sep 14 10:00:15 2022 ] Mean test loss of 258 batches: 4.852235317230225. +[ Wed Sep 14 10:00:16 2022 ] Top1: 29.85% +[ Wed Sep 14 10:00:16 2022 ] Top5: 64.09% +[ Wed Sep 14 10:00:16 2022 ] Training epoch: 32 +[ Wed Sep 14 10:01:05 2022 ] Batch(86/123) done. Loss: 0.3530 lr:0.100000 +[ Wed Sep 14 10:01:24 2022 ] Eval epoch: 32 +[ Wed Sep 14 10:02:14 2022 ] Mean test loss of 258 batches: 2.033247470855713. +[ Wed Sep 14 10:02:14 2022 ] Top1: 53.42% +[ Wed Sep 14 10:02:14 2022 ] Top5: 87.76% +[ Wed Sep 14 10:02:15 2022 ] Training epoch: 33 +[ Wed Sep 14 10:02:51 2022 ] Batch(63/123) done. Loss: 0.3615 lr:0.100000 +[ Wed Sep 14 10:03:23 2022 ] Eval epoch: 33 +[ Wed Sep 14 10:04:13 2022 ] Mean test loss of 258 batches: 1.970260500907898. +[ Wed Sep 14 10:04:13 2022 ] Top1: 50.72% +[ Wed Sep 14 10:04:13 2022 ] Top5: 82.19% +[ Wed Sep 14 10:04:13 2022 ] Training epoch: 34 +[ Wed Sep 14 10:04:38 2022 ] Batch(40/123) done. Loss: 0.3227 lr:0.100000 +[ Wed Sep 14 10:05:21 2022 ] Eval epoch: 34 +[ Wed Sep 14 10:06:11 2022 ] Mean test loss of 258 batches: 3.3851633071899414. +[ Wed Sep 14 10:06:11 2022 ] Top1: 42.80% +[ Wed Sep 14 10:06:11 2022 ] Top5: 73.22% +[ Wed Sep 14 10:06:12 2022 ] Training epoch: 35 +[ Wed Sep 14 10:06:25 2022 ] Batch(17/123) done. 
Loss: 1.0034 lr:0.100000 +[ Wed Sep 14 10:07:17 2022 ] Batch(117/123) done. Loss: 0.3550 lr:0.100000 +[ Wed Sep 14 10:07:20 2022 ] Eval epoch: 35 +[ Wed Sep 14 10:08:11 2022 ] Mean test loss of 258 batches: 2.9214372634887695. +[ Wed Sep 14 10:08:11 2022 ] Top1: 44.47% +[ Wed Sep 14 10:08:11 2022 ] Top5: 80.00% +[ Wed Sep 14 10:08:11 2022 ] Training epoch: 36 +[ Wed Sep 14 10:09:04 2022 ] Batch(94/123) done. Loss: 0.5148 lr:0.100000 +[ Wed Sep 14 10:09:19 2022 ] Eval epoch: 36 +[ Wed Sep 14 10:10:10 2022 ] Mean test loss of 258 batches: 1.9721895456314087. +[ Wed Sep 14 10:10:10 2022 ] Top1: 55.58% +[ Wed Sep 14 10:10:10 2022 ] Top5: 87.91% +[ Wed Sep 14 10:10:10 2022 ] Training epoch: 37 +[ Wed Sep 14 10:10:51 2022 ] Batch(71/123) done. Loss: 0.2195 lr:0.100000 +[ Wed Sep 14 10:11:19 2022 ] Eval epoch: 37 +[ Wed Sep 14 10:12:09 2022 ] Mean test loss of 258 batches: 2.8524513244628906. +[ Wed Sep 14 10:12:09 2022 ] Top1: 40.61% +[ Wed Sep 14 10:12:10 2022 ] Top5: 77.65% +[ Wed Sep 14 10:12:10 2022 ] Training epoch: 38 +[ Wed Sep 14 10:12:38 2022 ] Batch(48/123) done. Loss: 0.2998 lr:0.100000 +[ Wed Sep 14 10:13:18 2022 ] Eval epoch: 38 +[ Wed Sep 14 10:14:08 2022 ] Mean test loss of 258 batches: 2.7291102409362793. +[ Wed Sep 14 10:14:08 2022 ] Top1: 45.87% +[ Wed Sep 14 10:14:08 2022 ] Top5: 79.30% +[ Wed Sep 14 10:14:08 2022 ] Training epoch: 39 +[ Wed Sep 14 10:14:24 2022 ] Batch(25/123) done. Loss: 0.3353 lr:0.100000 +[ Wed Sep 14 10:15:16 2022 ] Eval epoch: 39 +[ Wed Sep 14 10:16:06 2022 ] Mean test loss of 258 batches: 2.7411115169525146. +[ Wed Sep 14 10:16:06 2022 ] Top1: 46.88% +[ Wed Sep 14 10:16:06 2022 ] Top5: 77.58% +[ Wed Sep 14 10:16:06 2022 ] Training epoch: 40 +[ Wed Sep 14 10:16:10 2022 ] Batch(2/123) done. Loss: 0.3090 lr:0.100000 +[ Wed Sep 14 10:17:03 2022 ] Batch(102/123) done. Loss: 0.4225 lr:0.100000 +[ Wed Sep 14 10:17:14 2022 ] Eval epoch: 40 +[ Wed Sep 14 10:18:04 2022 ] Mean test loss of 258 batches: 2.272958993911743. 
+[ Wed Sep 14 10:18:04 2022 ] Top1: 55.48% +[ Wed Sep 14 10:18:04 2022 ] Top5: 84.42% +[ Wed Sep 14 10:18:04 2022 ] Training epoch: 41 +[ Wed Sep 14 10:18:49 2022 ] Batch(79/123) done. Loss: 0.2367 lr:0.100000 +[ Wed Sep 14 10:19:12 2022 ] Eval epoch: 41 +[ Wed Sep 14 10:20:02 2022 ] Mean test loss of 258 batches: 2.334578275680542. +[ Wed Sep 14 10:20:02 2022 ] Top1: 50.41% +[ Wed Sep 14 10:20:02 2022 ] Top5: 85.12% +[ Wed Sep 14 10:20:02 2022 ] Training epoch: 42 +[ Wed Sep 14 10:20:36 2022 ] Batch(56/123) done. Loss: 0.2050 lr:0.100000 +[ Wed Sep 14 10:21:11 2022 ] Eval epoch: 42 +[ Wed Sep 14 10:22:01 2022 ] Mean test loss of 258 batches: 2.800611734390259. +[ Wed Sep 14 10:22:01 2022 ] Top1: 48.37% +[ Wed Sep 14 10:22:01 2022 ] Top5: 81.51% +[ Wed Sep 14 10:22:01 2022 ] Training epoch: 43 +[ Wed Sep 14 10:22:22 2022 ] Batch(33/123) done. Loss: 0.2342 lr:0.100000 +[ Wed Sep 14 10:23:09 2022 ] Eval epoch: 43 +[ Wed Sep 14 10:24:00 2022 ] Mean test loss of 258 batches: 2.851212739944458. +[ Wed Sep 14 10:24:00 2022 ] Top1: 48.91% +[ Wed Sep 14 10:24:00 2022 ] Top5: 82.86% +[ Wed Sep 14 10:24:00 2022 ] Training epoch: 44 +[ Wed Sep 14 10:24:09 2022 ] Batch(10/123) done. Loss: 0.1688 lr:0.100000 +[ Wed Sep 14 10:25:01 2022 ] Batch(110/123) done. Loss: 0.1909 lr:0.100000 +[ Wed Sep 14 10:25:08 2022 ] Eval epoch: 44 +[ Wed Sep 14 10:25:58 2022 ] Mean test loss of 258 batches: 2.710599184036255. +[ Wed Sep 14 10:25:59 2022 ] Top1: 49.56% +[ Wed Sep 14 10:25:59 2022 ] Top5: 80.95% +[ Wed Sep 14 10:25:59 2022 ] Training epoch: 45 +[ Wed Sep 14 10:26:48 2022 ] Batch(87/123) done. Loss: 0.2594 lr:0.100000 +[ Wed Sep 14 10:27:07 2022 ] Eval epoch: 45 +[ Wed Sep 14 10:27:57 2022 ] Mean test loss of 258 batches: 2.7569780349731445. +[ Wed Sep 14 10:27:57 2022 ] Top1: 51.54% +[ Wed Sep 14 10:27:57 2022 ] Top5: 84.37% +[ Wed Sep 14 10:27:57 2022 ] Training epoch: 46 +[ Wed Sep 14 10:28:34 2022 ] Batch(64/123) done. 
Loss: 0.1981 lr:0.100000 +[ Wed Sep 14 10:29:05 2022 ] Eval epoch: 46 +[ Wed Sep 14 10:29:55 2022 ] Mean test loss of 258 batches: 2.514928102493286. +[ Wed Sep 14 10:29:55 2022 ] Top1: 52.87% +[ Wed Sep 14 10:29:56 2022 ] Top5: 84.62% +[ Wed Sep 14 10:29:56 2022 ] Training epoch: 47 +[ Wed Sep 14 10:30:21 2022 ] Batch(41/123) done. Loss: 0.2048 lr:0.100000 +[ Wed Sep 14 10:31:04 2022 ] Eval epoch: 47 +[ Wed Sep 14 10:31:54 2022 ] Mean test loss of 258 batches: 2.7253973484039307. +[ Wed Sep 14 10:31:54 2022 ] Top1: 47.93% +[ Wed Sep 14 10:31:54 2022 ] Top5: 83.28% +[ Wed Sep 14 10:31:54 2022 ] Training epoch: 48 +[ Wed Sep 14 10:32:07 2022 ] Batch(18/123) done. Loss: 0.1297 lr:0.100000 +[ Wed Sep 14 10:33:00 2022 ] Batch(118/123) done. Loss: 0.2993 lr:0.100000 +[ Wed Sep 14 10:33:03 2022 ] Eval epoch: 48 +[ Wed Sep 14 10:33:53 2022 ] Mean test loss of 258 batches: 2.5282280445098877. +[ Wed Sep 14 10:33:53 2022 ] Top1: 53.72% +[ Wed Sep 14 10:33:53 2022 ] Top5: 85.70% +[ Wed Sep 14 10:33:53 2022 ] Training epoch: 49 +[ Wed Sep 14 10:34:46 2022 ] Batch(95/123) done. Loss: 0.2121 lr:0.100000 +[ Wed Sep 14 10:35:01 2022 ] Eval epoch: 49 +[ Wed Sep 14 10:35:51 2022 ] Mean test loss of 258 batches: 2.4360342025756836. +[ Wed Sep 14 10:35:51 2022 ] Top1: 56.73% +[ Wed Sep 14 10:35:52 2022 ] Top5: 85.95% +[ Wed Sep 14 10:35:52 2022 ] Training epoch: 50 +[ Wed Sep 14 10:36:33 2022 ] Batch(72/123) done. Loss: 0.1184 lr:0.100000 +[ Wed Sep 14 10:37:00 2022 ] Eval epoch: 50 +[ Wed Sep 14 10:37:50 2022 ] Mean test loss of 258 batches: 6.697039604187012. +[ Wed Sep 14 10:37:50 2022 ] Top1: 30.44% +[ Wed Sep 14 10:37:50 2022 ] Top5: 64.86% +[ Wed Sep 14 10:37:50 2022 ] Training epoch: 51 +[ Wed Sep 14 10:38:19 2022 ] Batch(49/123) done. Loss: 0.1056 lr:0.100000 +[ Wed Sep 14 10:38:58 2022 ] Eval epoch: 51 +[ Wed Sep 14 10:39:48 2022 ] Mean test loss of 258 batches: 2.337132453918457. 
+[ Wed Sep 14 10:39:48 2022 ] Top1: 54.66% +[ Wed Sep 14 10:39:49 2022 ] Top5: 85.56% +[ Wed Sep 14 10:39:49 2022 ] Training epoch: 52 +[ Wed Sep 14 10:40:06 2022 ] Batch(26/123) done. Loss: 0.1039 lr:0.100000 +[ Wed Sep 14 10:40:57 2022 ] Eval epoch: 52 +[ Wed Sep 14 10:41:47 2022 ] Mean test loss of 258 batches: 2.2631611824035645. +[ Wed Sep 14 10:41:47 2022 ] Top1: 57.53% +[ Wed Sep 14 10:41:47 2022 ] Top5: 87.88% +[ Wed Sep 14 10:41:47 2022 ] Training epoch: 53 +[ Wed Sep 14 10:41:52 2022 ] Batch(3/123) done. Loss: 0.1295 lr:0.100000 +[ Wed Sep 14 10:42:45 2022 ] Batch(103/123) done. Loss: 0.2321 lr:0.100000 +[ Wed Sep 14 10:42:55 2022 ] Eval epoch: 53 +[ Wed Sep 14 10:43:45 2022 ] Mean test loss of 258 batches: 3.587123155593872. +[ Wed Sep 14 10:43:45 2022 ] Top1: 48.50% +[ Wed Sep 14 10:43:45 2022 ] Top5: 80.49% +[ Wed Sep 14 10:43:45 2022 ] Training epoch: 54 +[ Wed Sep 14 10:44:31 2022 ] Batch(80/123) done. Loss: 0.3542 lr:0.100000 +[ Wed Sep 14 10:44:54 2022 ] Eval epoch: 54 +[ Wed Sep 14 10:45:44 2022 ] Mean test loss of 258 batches: 2.8351242542266846. +[ Wed Sep 14 10:45:44 2022 ] Top1: 51.29% +[ Wed Sep 14 10:45:44 2022 ] Top5: 84.38% +[ Wed Sep 14 10:45:44 2022 ] Training epoch: 55 +[ Wed Sep 14 10:46:18 2022 ] Batch(57/123) done. Loss: 0.3892 lr:0.100000 +[ Wed Sep 14 10:46:52 2022 ] Eval epoch: 55 +[ Wed Sep 14 10:47:43 2022 ] Mean test loss of 258 batches: 3.4230153560638428. +[ Wed Sep 14 10:47:43 2022 ] Top1: 46.41% +[ Wed Sep 14 10:47:43 2022 ] Top5: 74.94% +[ Wed Sep 14 10:47:43 2022 ] Training epoch: 56 +[ Wed Sep 14 10:48:05 2022 ] Batch(34/123) done. Loss: 0.2057 lr:0.100000 +[ Wed Sep 14 10:48:51 2022 ] Eval epoch: 56 +[ Wed Sep 14 10:49:41 2022 ] Mean test loss of 258 batches: 3.243438482284546. +[ Wed Sep 14 10:49:41 2022 ] Top1: 48.62% +[ Wed Sep 14 10:49:41 2022 ] Top5: 81.65% +[ Wed Sep 14 10:49:41 2022 ] Training epoch: 57 +[ Wed Sep 14 10:49:51 2022 ] Batch(11/123) done. 
Loss: 0.1141 lr:0.100000 +[ Wed Sep 14 10:50:44 2022 ] Batch(111/123) done. Loss: 0.2407 lr:0.100000 +[ Wed Sep 14 10:50:50 2022 ] Eval epoch: 57 +[ Wed Sep 14 10:51:40 2022 ] Mean test loss of 258 batches: 2.709395170211792. +[ Wed Sep 14 10:51:40 2022 ] Top1: 54.94% +[ Wed Sep 14 10:51:40 2022 ] Top5: 86.71% +[ Wed Sep 14 10:51:41 2022 ] Training epoch: 58 +[ Wed Sep 14 10:52:31 2022 ] Batch(88/123) done. Loss: 0.3724 lr:0.100000 +[ Wed Sep 14 10:52:49 2022 ] Eval epoch: 58 +[ Wed Sep 14 10:53:39 2022 ] Mean test loss of 258 batches: 2.4786291122436523. +[ Wed Sep 14 10:53:39 2022 ] Top1: 55.64% +[ Wed Sep 14 10:53:39 2022 ] Top5: 86.99% +[ Wed Sep 14 10:53:39 2022 ] Training epoch: 59 +[ Wed Sep 14 10:54:17 2022 ] Batch(65/123) done. Loss: 0.2332 lr:0.100000 +[ Wed Sep 14 10:54:47 2022 ] Eval epoch: 59 +[ Wed Sep 14 10:55:38 2022 ] Mean test loss of 258 batches: 2.4485208988189697. +[ Wed Sep 14 10:55:38 2022 ] Top1: 58.89% +[ Wed Sep 14 10:55:38 2022 ] Top5: 87.28% +[ Wed Sep 14 10:55:38 2022 ] Training epoch: 60 +[ Wed Sep 14 10:56:04 2022 ] Batch(42/123) done. Loss: 0.3334 lr:0.100000 +[ Wed Sep 14 10:56:46 2022 ] Eval epoch: 60 +[ Wed Sep 14 10:57:36 2022 ] Mean test loss of 258 batches: 3.0010201930999756. +[ Wed Sep 14 10:57:37 2022 ] Top1: 53.13% +[ Wed Sep 14 10:57:37 2022 ] Top5: 81.94% +[ Wed Sep 14 10:57:37 2022 ] Training epoch: 61 +[ Wed Sep 14 10:57:50 2022 ] Batch(19/123) done. Loss: 0.1282 lr:0.010000 +[ Wed Sep 14 10:58:43 2022 ] Batch(119/123) done. Loss: 0.0618 lr:0.010000 +[ Wed Sep 14 10:58:45 2022 ] Eval epoch: 61 +[ Wed Sep 14 10:59:35 2022 ] Mean test loss of 258 batches: 1.9603235721588135. +[ Wed Sep 14 10:59:35 2022 ] Top1: 62.39% +[ Wed Sep 14 10:59:35 2022 ] Top5: 90.20% +[ Wed Sep 14 10:59:35 2022 ] Training epoch: 62 +[ Wed Sep 14 11:00:29 2022 ] Batch(96/123) done. Loss: 0.0595 lr:0.010000 +[ Wed Sep 14 11:00:43 2022 ] Eval epoch: 62 +[ Wed Sep 14 11:01:34 2022 ] Mean test loss of 258 batches: 1.8955367803573608. 
+[ Wed Sep 14 11:01:34 2022 ] Top1: 63.66% +[ Wed Sep 14 11:01:34 2022 ] Top5: 90.93% +[ Wed Sep 14 11:01:34 2022 ] Training epoch: 63 +[ Wed Sep 14 11:02:16 2022 ] Batch(73/123) done. Loss: 0.0512 lr:0.010000 +[ Wed Sep 14 11:02:42 2022 ] Eval epoch: 63 +[ Wed Sep 14 11:03:33 2022 ] Mean test loss of 258 batches: 1.8982775211334229. +[ Wed Sep 14 11:03:33 2022 ] Top1: 64.49% +[ Wed Sep 14 11:03:33 2022 ] Top5: 91.25% +[ Wed Sep 14 11:03:33 2022 ] Training epoch: 64 +[ Wed Sep 14 11:04:03 2022 ] Batch(50/123) done. Loss: 0.0530 lr:0.010000 +[ Wed Sep 14 11:04:41 2022 ] Eval epoch: 64 +[ Wed Sep 14 11:05:31 2022 ] Mean test loss of 258 batches: 1.9569075107574463. +[ Wed Sep 14 11:05:31 2022 ] Top1: 64.34% +[ Wed Sep 14 11:05:31 2022 ] Top5: 91.17% +[ Wed Sep 14 11:05:31 2022 ] Training epoch: 65 +[ Wed Sep 14 11:05:49 2022 ] Batch(27/123) done. Loss: 0.0265 lr:0.010000 +[ Wed Sep 14 11:06:39 2022 ] Eval epoch: 65 +[ Wed Sep 14 11:07:30 2022 ] Mean test loss of 258 batches: 1.949947714805603. +[ Wed Sep 14 11:07:30 2022 ] Top1: 64.49% +[ Wed Sep 14 11:07:30 2022 ] Top5: 91.01% +[ Wed Sep 14 11:07:30 2022 ] Training epoch: 66 +[ Wed Sep 14 11:07:36 2022 ] Batch(4/123) done. Loss: 0.0709 lr:0.010000 +[ Wed Sep 14 11:08:28 2022 ] Batch(104/123) done. Loss: 0.0198 lr:0.010000 +[ Wed Sep 14 11:08:38 2022 ] Eval epoch: 66 +[ Wed Sep 14 11:09:29 2022 ] Mean test loss of 258 batches: 2.0349206924438477. +[ Wed Sep 14 11:09:29 2022 ] Top1: 62.64% +[ Wed Sep 14 11:09:29 2022 ] Top5: 90.13% +[ Wed Sep 14 11:09:29 2022 ] Training epoch: 67 +[ Wed Sep 14 11:10:15 2022 ] Batch(81/123) done. Loss: 0.0433 lr:0.010000 +[ Wed Sep 14 11:10:37 2022 ] Eval epoch: 67 +[ Wed Sep 14 11:11:28 2022 ] Mean test loss of 258 batches: 1.9164913892745972. +[ Wed Sep 14 11:11:28 2022 ] Top1: 64.89% +[ Wed Sep 14 11:11:28 2022 ] Top5: 90.84% +[ Wed Sep 14 11:11:28 2022 ] Training epoch: 68 +[ Wed Sep 14 11:12:02 2022 ] Batch(58/123) done. 
Loss: 0.0351 lr:0.010000 +[ Wed Sep 14 11:12:36 2022 ] Eval epoch: 68 +[ Wed Sep 14 11:13:27 2022 ] Mean test loss of 258 batches: 2.004775047302246. +[ Wed Sep 14 11:13:27 2022 ] Top1: 64.33% +[ Wed Sep 14 11:13:27 2022 ] Top5: 91.14% +[ Wed Sep 14 11:13:27 2022 ] Training epoch: 69 +[ Wed Sep 14 11:13:49 2022 ] Batch(35/123) done. Loss: 0.0100 lr:0.010000 +[ Wed Sep 14 11:14:35 2022 ] Eval epoch: 69 +[ Wed Sep 14 11:15:25 2022 ] Mean test loss of 258 batches: 1.8820568323135376. +[ Wed Sep 14 11:15:25 2022 ] Top1: 65.26% +[ Wed Sep 14 11:15:25 2022 ] Top5: 91.45% +[ Wed Sep 14 11:15:25 2022 ] Training epoch: 70 +[ Wed Sep 14 11:15:35 2022 ] Batch(12/123) done. Loss: 0.0176 lr:0.010000 +[ Wed Sep 14 11:16:28 2022 ] Batch(112/123) done. Loss: 0.0529 lr:0.010000 +[ Wed Sep 14 11:16:34 2022 ] Eval epoch: 70 +[ Wed Sep 14 11:17:24 2022 ] Mean test loss of 258 batches: 1.9909242391586304. +[ Wed Sep 14 11:17:24 2022 ] Top1: 64.18% +[ Wed Sep 14 11:17:24 2022 ] Top5: 91.01% +[ Wed Sep 14 11:17:24 2022 ] Training epoch: 71 +[ Wed Sep 14 11:18:15 2022 ] Batch(89/123) done. Loss: 0.0750 lr:0.010000 +[ Wed Sep 14 11:18:32 2022 ] Eval epoch: 71 +[ Wed Sep 14 11:19:23 2022 ] Mean test loss of 258 batches: 2.0400452613830566. +[ Wed Sep 14 11:19:23 2022 ] Top1: 64.12% +[ Wed Sep 14 11:19:23 2022 ] Top5: 91.12% +[ Wed Sep 14 11:19:23 2022 ] Training epoch: 72 +[ Wed Sep 14 11:20:02 2022 ] Batch(66/123) done. Loss: 0.0289 lr:0.010000 +[ Wed Sep 14 11:20:31 2022 ] Eval epoch: 72 +[ Wed Sep 14 11:21:22 2022 ] Mean test loss of 258 batches: 1.9922386407852173. +[ Wed Sep 14 11:21:22 2022 ] Top1: 65.43% +[ Wed Sep 14 11:21:22 2022 ] Top5: 91.14% +[ Wed Sep 14 11:21:22 2022 ] Training epoch: 73 +[ Wed Sep 14 11:21:49 2022 ] Batch(43/123) done. Loss: 0.0306 lr:0.010000 +[ Wed Sep 14 11:22:30 2022 ] Eval epoch: 73 +[ Wed Sep 14 11:23:21 2022 ] Mean test loss of 258 batches: 1.9337546825408936. 
+[ Wed Sep 14 11:23:21 2022 ] Top1: 65.52% +[ Wed Sep 14 11:23:21 2022 ] Top5: 91.03% +[ Wed Sep 14 11:23:21 2022 ] Training epoch: 74 +[ Wed Sep 14 11:23:35 2022 ] Batch(20/123) done. Loss: 0.0443 lr:0.010000 +[ Wed Sep 14 11:24:28 2022 ] Batch(120/123) done. Loss: 0.0212 lr:0.010000 +[ Wed Sep 14 11:24:29 2022 ] Eval epoch: 74 +[ Wed Sep 14 11:25:20 2022 ] Mean test loss of 258 batches: 1.9641762971878052. +[ Wed Sep 14 11:25:20 2022 ] Top1: 64.78% +[ Wed Sep 14 11:25:20 2022 ] Top5: 91.12% +[ Wed Sep 14 11:25:20 2022 ] Training epoch: 75 +[ Wed Sep 14 11:26:15 2022 ] Batch(97/123) done. Loss: 0.1005 lr:0.010000 +[ Wed Sep 14 11:26:28 2022 ] Eval epoch: 75 +[ Wed Sep 14 11:27:18 2022 ] Mean test loss of 258 batches: 2.0250115394592285. +[ Wed Sep 14 11:27:18 2022 ] Top1: 65.42% +[ Wed Sep 14 11:27:19 2022 ] Top5: 91.19% +[ Wed Sep 14 11:27:19 2022 ] Training epoch: 76 +[ Wed Sep 14 11:28:01 2022 ] Batch(74/123) done. Loss: 0.0445 lr:0.010000 +[ Wed Sep 14 11:28:27 2022 ] Eval epoch: 76 +[ Wed Sep 14 11:29:18 2022 ] Mean test loss of 258 batches: 2.028303861618042. +[ Wed Sep 14 11:29:18 2022 ] Top1: 64.91% +[ Wed Sep 14 11:29:18 2022 ] Top5: 91.10% +[ Wed Sep 14 11:29:18 2022 ] Training epoch: 77 +[ Wed Sep 14 11:29:49 2022 ] Batch(51/123) done. Loss: 0.0450 lr:0.010000 +[ Wed Sep 14 11:30:26 2022 ] Eval epoch: 77 +[ Wed Sep 14 11:31:17 2022 ] Mean test loss of 258 batches: 2.0982468128204346. +[ Wed Sep 14 11:31:17 2022 ] Top1: 63.76% +[ Wed Sep 14 11:31:17 2022 ] Top5: 90.35% +[ Wed Sep 14 11:31:17 2022 ] Training epoch: 78 +[ Wed Sep 14 11:31:35 2022 ] Batch(28/123) done. Loss: 0.0178 lr:0.010000 +[ Wed Sep 14 11:32:25 2022 ] Eval epoch: 78 +[ Wed Sep 14 11:33:15 2022 ] Mean test loss of 258 batches: 2.0096275806427. +[ Wed Sep 14 11:33:15 2022 ] Top1: 65.12% +[ Wed Sep 14 11:33:15 2022 ] Top5: 91.05% +[ Wed Sep 14 11:33:15 2022 ] Training epoch: 79 +[ Wed Sep 14 11:33:22 2022 ] Batch(5/123) done. 
Loss: 0.0282 lr:0.010000 +[ Wed Sep 14 11:34:15 2022 ] Batch(105/123) done. Loss: 0.0274 lr:0.010000 +[ Wed Sep 14 11:34:24 2022 ] Eval epoch: 79 +[ Wed Sep 14 11:35:14 2022 ] Mean test loss of 258 batches: 2.0404365062713623. +[ Wed Sep 14 11:35:14 2022 ] Top1: 64.80% +[ Wed Sep 14 11:35:14 2022 ] Top5: 90.87% +[ Wed Sep 14 11:35:14 2022 ] Training epoch: 80 +[ Wed Sep 14 11:36:01 2022 ] Batch(82/123) done. Loss: 0.0332 lr:0.010000 +[ Wed Sep 14 11:36:22 2022 ] Eval epoch: 80 +[ Wed Sep 14 11:37:13 2022 ] Mean test loss of 258 batches: 2.062880754470825. +[ Wed Sep 14 11:37:13 2022 ] Top1: 64.70% +[ Wed Sep 14 11:37:13 2022 ] Top5: 90.80% +[ Wed Sep 14 11:37:13 2022 ] Training epoch: 81 +[ Wed Sep 14 11:37:48 2022 ] Batch(59/123) done. Loss: 0.0180 lr:0.001000 +[ Wed Sep 14 11:38:21 2022 ] Eval epoch: 81 +[ Wed Sep 14 11:39:12 2022 ] Mean test loss of 258 batches: 2.0670080184936523. +[ Wed Sep 14 11:39:12 2022 ] Top1: 64.45% +[ Wed Sep 14 11:39:12 2022 ] Top5: 90.50% +[ Wed Sep 14 11:39:12 2022 ] Training epoch: 82 +[ Wed Sep 14 11:39:34 2022 ] Batch(36/123) done. Loss: 0.0049 lr:0.001000 +[ Wed Sep 14 11:40:20 2022 ] Eval epoch: 82 +[ Wed Sep 14 11:41:10 2022 ] Mean test loss of 258 batches: 2.0475635528564453. +[ Wed Sep 14 11:41:10 2022 ] Top1: 64.64% +[ Wed Sep 14 11:41:11 2022 ] Top5: 90.76% +[ Wed Sep 14 11:41:11 2022 ] Training epoch: 83 +[ Wed Sep 14 11:41:21 2022 ] Batch(13/123) done. Loss: 0.0343 lr:0.001000 +[ Wed Sep 14 11:42:14 2022 ] Batch(113/123) done. Loss: 0.0986 lr:0.001000 +[ Wed Sep 14 11:42:19 2022 ] Eval epoch: 83 +[ Wed Sep 14 11:43:09 2022 ] Mean test loss of 258 batches: 2.1397147178649902. +[ Wed Sep 14 11:43:09 2022 ] Top1: 63.35% +[ Wed Sep 14 11:43:09 2022 ] Top5: 90.08% +[ Wed Sep 14 11:43:09 2022 ] Training epoch: 84 +[ Wed Sep 14 11:44:00 2022 ] Batch(90/123) done. Loss: 0.0335 lr:0.001000 +[ Wed Sep 14 11:44:17 2022 ] Eval epoch: 84 +[ Wed Sep 14 11:45:07 2022 ] Mean test loss of 258 batches: 2.033123731613159. 
+[ Wed Sep 14 11:45:08 2022 ] Top1: 65.30% +[ Wed Sep 14 11:45:08 2022 ] Top5: 91.11% +[ Wed Sep 14 11:45:08 2022 ] Training epoch: 85 +[ Wed Sep 14 11:45:47 2022 ] Batch(67/123) done. Loss: 0.0603 lr:0.001000 +[ Wed Sep 14 11:46:16 2022 ] Eval epoch: 85 +[ Wed Sep 14 11:47:06 2022 ] Mean test loss of 258 batches: 2.0243470668792725. +[ Wed Sep 14 11:47:06 2022 ] Top1: 65.12% +[ Wed Sep 14 11:47:06 2022 ] Top5: 91.20% +[ Wed Sep 14 11:47:06 2022 ] Training epoch: 86 +[ Wed Sep 14 11:47:33 2022 ] Batch(44/123) done. Loss: 0.0162 lr:0.001000 +[ Wed Sep 14 11:48:14 2022 ] Eval epoch: 86 +[ Wed Sep 14 11:49:05 2022 ] Mean test loss of 258 batches: 2.013662338256836. +[ Wed Sep 14 11:49:05 2022 ] Top1: 65.12% +[ Wed Sep 14 11:49:05 2022 ] Top5: 91.19% +[ Wed Sep 14 11:49:05 2022 ] Training epoch: 87 +[ Wed Sep 14 11:49:20 2022 ] Batch(21/123) done. Loss: 0.0551 lr:0.001000 +[ Wed Sep 14 11:50:13 2022 ] Batch(121/123) done. Loss: 0.0238 lr:0.001000 +[ Wed Sep 14 11:50:13 2022 ] Eval epoch: 87 +[ Wed Sep 14 11:51:04 2022 ] Mean test loss of 258 batches: 2.044766426086426. +[ Wed Sep 14 11:51:04 2022 ] Top1: 64.66% +[ Wed Sep 14 11:51:04 2022 ] Top5: 90.71% +[ Wed Sep 14 11:51:04 2022 ] Training epoch: 88 +[ Wed Sep 14 11:51:59 2022 ] Batch(98/123) done. Loss: 0.0383 lr:0.001000 +[ Wed Sep 14 11:52:12 2022 ] Eval epoch: 88 +[ Wed Sep 14 11:53:03 2022 ] Mean test loss of 258 batches: 2.2156059741973877. +[ Wed Sep 14 11:53:03 2022 ] Top1: 61.84% +[ Wed Sep 14 11:53:03 2022 ] Top5: 89.54% +[ Wed Sep 14 11:53:03 2022 ] Training epoch: 89 +[ Wed Sep 14 11:53:46 2022 ] Batch(75/123) done. Loss: 0.0369 lr:0.001000 +[ Wed Sep 14 11:54:11 2022 ] Eval epoch: 89 +[ Wed Sep 14 11:55:02 2022 ] Mean test loss of 258 batches: 2.0188987255096436. +[ Wed Sep 14 11:55:02 2022 ] Top1: 65.18% +[ Wed Sep 14 11:55:02 2022 ] Top5: 91.04% +[ Wed Sep 14 11:55:02 2022 ] Training epoch: 90 +[ Wed Sep 14 11:55:33 2022 ] Batch(52/123) done. 
Loss: 0.0521 lr:0.001000 +[ Wed Sep 14 11:56:10 2022 ] Eval epoch: 90 +[ Wed Sep 14 11:57:00 2022 ] Mean test loss of 258 batches: 2.026618480682373. +[ Wed Sep 14 11:57:00 2022 ] Top1: 65.04% +[ Wed Sep 14 11:57:01 2022 ] Top5: 91.18% +[ Wed Sep 14 11:57:01 2022 ] Training epoch: 91 +[ Wed Sep 14 11:57:19 2022 ] Batch(29/123) done. Loss: 0.0974 lr:0.001000 +[ Wed Sep 14 11:58:09 2022 ] Eval epoch: 91 +[ Wed Sep 14 11:58:59 2022 ] Mean test loss of 258 batches: 2.0772697925567627. +[ Wed Sep 14 11:58:59 2022 ] Top1: 64.63% +[ Wed Sep 14 11:58:59 2022 ] Top5: 90.98% +[ Wed Sep 14 11:58:59 2022 ] Training epoch: 92 +[ Wed Sep 14 11:59:06 2022 ] Batch(6/123) done. Loss: 0.0350 lr:0.001000 +[ Wed Sep 14 11:59:59 2022 ] Batch(106/123) done. Loss: 0.0306 lr:0.001000 +[ Wed Sep 14 12:00:07 2022 ] Eval epoch: 92 +[ Wed Sep 14 12:00:57 2022 ] Mean test loss of 258 batches: 2.028740882873535. +[ Wed Sep 14 12:00:57 2022 ] Top1: 64.95% +[ Wed Sep 14 12:00:58 2022 ] Top5: 90.94% +[ Wed Sep 14 12:00:58 2022 ] Training epoch: 93 +[ Wed Sep 14 12:01:45 2022 ] Batch(83/123) done. Loss: 0.0421 lr:0.001000 +[ Wed Sep 14 12:02:06 2022 ] Eval epoch: 93 +[ Wed Sep 14 12:02:57 2022 ] Mean test loss of 258 batches: 2.0420658588409424. +[ Wed Sep 14 12:02:57 2022 ] Top1: 65.03% +[ Wed Sep 14 12:02:57 2022 ] Top5: 91.18% +[ Wed Sep 14 12:02:57 2022 ] Training epoch: 94 +[ Wed Sep 14 12:03:32 2022 ] Batch(60/123) done. Loss: 0.0374 lr:0.001000 +[ Wed Sep 14 12:04:05 2022 ] Eval epoch: 94 +[ Wed Sep 14 12:04:55 2022 ] Mean test loss of 258 batches: 2.0979018211364746. +[ Wed Sep 14 12:04:55 2022 ] Top1: 64.39% +[ Wed Sep 14 12:04:55 2022 ] Top5: 90.70% +[ Wed Sep 14 12:04:55 2022 ] Training epoch: 95 +[ Wed Sep 14 12:05:19 2022 ] Batch(37/123) done. Loss: 0.0155 lr:0.001000 +[ Wed Sep 14 12:06:04 2022 ] Eval epoch: 95 +[ Wed Sep 14 12:06:54 2022 ] Mean test loss of 258 batches: 2.1317577362060547. 
+[ Wed Sep 14 12:06:54 2022 ] Top1: 64.32% +[ Wed Sep 14 12:06:54 2022 ] Top5: 90.88% +[ Wed Sep 14 12:06:54 2022 ] Training epoch: 96 +[ Wed Sep 14 12:07:05 2022 ] Batch(14/123) done. Loss: 0.0402 lr:0.001000 +[ Wed Sep 14 12:07:58 2022 ] Batch(114/123) done. Loss: 0.0214 lr:0.001000 +[ Wed Sep 14 12:08:02 2022 ] Eval epoch: 96 +[ Wed Sep 14 12:08:53 2022 ] Mean test loss of 258 batches: 2.0478529930114746. +[ Wed Sep 14 12:08:53 2022 ] Top1: 64.88% +[ Wed Sep 14 12:08:53 2022 ] Top5: 90.83% +[ Wed Sep 14 12:08:53 2022 ] Training epoch: 97 +[ Wed Sep 14 12:09:45 2022 ] Batch(91/123) done. Loss: 0.0384 lr:0.001000 +[ Wed Sep 14 12:10:01 2022 ] Eval epoch: 97 +[ Wed Sep 14 12:10:52 2022 ] Mean test loss of 258 batches: 2.002946615219116. +[ Wed Sep 14 12:10:52 2022 ] Top1: 65.23% +[ Wed Sep 14 12:10:52 2022 ] Top5: 91.10% +[ Wed Sep 14 12:10:52 2022 ] Training epoch: 98 +[ Wed Sep 14 12:11:31 2022 ] Batch(68/123) done. Loss: 0.0264 lr:0.001000 +[ Wed Sep 14 12:12:00 2022 ] Eval epoch: 98 +[ Wed Sep 14 12:12:50 2022 ] Mean test loss of 258 batches: 2.1867194175720215. +[ Wed Sep 14 12:12:50 2022 ] Top1: 62.01% +[ Wed Sep 14 12:12:50 2022 ] Top5: 89.53% +[ Wed Sep 14 12:12:50 2022 ] Training epoch: 99 +[ Wed Sep 14 12:13:18 2022 ] Batch(45/123) done. Loss: 0.0695 lr:0.001000 +[ Wed Sep 14 12:13:59 2022 ] Eval epoch: 99 +[ Wed Sep 14 12:14:49 2022 ] Mean test loss of 258 batches: 2.1064836978912354. +[ Wed Sep 14 12:14:49 2022 ] Top1: 64.20% +[ Wed Sep 14 12:14:49 2022 ] Top5: 90.53% +[ Wed Sep 14 12:14:49 2022 ] Training epoch: 100 +[ Wed Sep 14 12:15:04 2022 ] Batch(22/123) done. Loss: 0.0574 lr:0.001000 +[ Wed Sep 14 12:15:57 2022 ] Batch(122/123) done. Loss: 0.0998 lr:0.001000 +[ Wed Sep 14 12:15:58 2022 ] Eval epoch: 100 +[ Wed Sep 14 12:16:48 2022 ] Mean test loss of 258 batches: 2.2521514892578125. 
+[ Wed Sep 14 12:16:48 2022 ] Top1: 61.13% +[ Wed Sep 14 12:16:48 2022 ] Top5: 89.39% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed3cf12cf7d00845eb46ee4034a1053bdf981163 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_joint_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-subject/train_joint.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 16 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_joint_xsub +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_joint_xsub diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/decouple_gcn.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = 
nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * 
D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph 
= import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9348c676322240c7cbc8fa9def3f2dbd132e3d98 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:2236ff2f1107d378f01f43fcea2695b74d41cdb119a970aaf352e77acb9fd636 +size 4979902 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..2b0bc62d0e8330b81cd2d27bcda3000bd38d83a5 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xsub/ntu_joint_xsub/log.txt @@ -0,0 +1,626 @@ +[ Wed Sep 14 08:59:00 2022 ] Parameters: +{'work_dir': './work_dir/ntu_joint_xsub', 'model_saved_name': './save_models/ntu_joint_xsub', 'Experiment_name': 'ntu_joint_xsub', 'config': './config/nturgbd-cross-subject/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 16, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed 
Sep 14 08:59:00 2022 ] Training epoch: 1 +[ Wed Sep 14 08:59:50 2022 ] Batch(99/123) done. Loss: 2.9147 lr:0.100000 +[ Wed Sep 14 09:00:00 2022 ] Eval epoch: 1 +[ Wed Sep 14 09:00:50 2022 ] Mean test loss of 258 batches: 4.591220378875732. +[ Wed Sep 14 09:00:50 2022 ] Top1: 6.49% +[ Wed Sep 14 09:00:50 2022 ] Top5: 23.94% +[ Wed Sep 14 09:00:50 2022 ] Training epoch: 2 +[ Wed Sep 14 09:01:34 2022 ] Batch(76/123) done. Loss: 2.3112 lr:0.100000 +[ Wed Sep 14 09:01:58 2022 ] Eval epoch: 2 +[ Wed Sep 14 09:02:48 2022 ] Mean test loss of 258 batches: 4.086060523986816. +[ Wed Sep 14 09:02:48 2022 ] Top1: 10.88% +[ Wed Sep 14 09:02:48 2022 ] Top5: 34.88% +[ Wed Sep 14 09:02:48 2022 ] Training epoch: 3 +[ Wed Sep 14 09:03:20 2022 ] Batch(53/123) done. Loss: 2.6104 lr:0.100000 +[ Wed Sep 14 09:03:56 2022 ] Eval epoch: 3 +[ Wed Sep 14 09:04:46 2022 ] Mean test loss of 258 batches: 3.353515148162842. +[ Wed Sep 14 09:04:47 2022 ] Top1: 15.84% +[ Wed Sep 14 09:04:47 2022 ] Top5: 45.24% +[ Wed Sep 14 09:04:47 2022 ] Training epoch: 4 +[ Wed Sep 14 09:05:06 2022 ] Batch(30/123) done. Loss: 2.2817 lr:0.100000 +[ Wed Sep 14 09:05:55 2022 ] Eval epoch: 4 +[ Wed Sep 14 09:06:45 2022 ] Mean test loss of 258 batches: 3.0355422496795654. +[ Wed Sep 14 09:06:45 2022 ] Top1: 20.13% +[ Wed Sep 14 09:06:45 2022 ] Top5: 58.54% +[ Wed Sep 14 09:06:45 2022 ] Training epoch: 5 +[ Wed Sep 14 09:06:52 2022 ] Batch(7/123) done. Loss: 1.8547 lr:0.100000 +[ Wed Sep 14 09:07:45 2022 ] Batch(107/123) done. Loss: 1.6255 lr:0.100000 +[ Wed Sep 14 09:07:53 2022 ] Eval epoch: 5 +[ Wed Sep 14 09:08:43 2022 ] Mean test loss of 258 batches: 2.7248806953430176. +[ Wed Sep 14 09:08:43 2022 ] Top1: 24.51% +[ Wed Sep 14 09:08:43 2022 ] Top5: 59.93% +[ Wed Sep 14 09:08:43 2022 ] Training epoch: 6 +[ Wed Sep 14 09:09:31 2022 ] Batch(84/123) done. Loss: 1.4542 lr:0.100000 +[ Wed Sep 14 09:09:51 2022 ] Eval epoch: 6 +[ Wed Sep 14 09:10:41 2022 ] Mean test loss of 258 batches: 2.7830545902252197. 
+[ Wed Sep 14 09:10:41 2022 ] Top1: 25.60% +[ Wed Sep 14 09:10:41 2022 ] Top5: 66.76% +[ Wed Sep 14 09:10:41 2022 ] Training epoch: 7 +[ Wed Sep 14 09:11:17 2022 ] Batch(61/123) done. Loss: 1.4214 lr:0.100000 +[ Wed Sep 14 09:11:49 2022 ] Eval epoch: 7 +[ Wed Sep 14 09:12:40 2022 ] Mean test loss of 258 batches: 2.7432169914245605. +[ Wed Sep 14 09:12:40 2022 ] Top1: 26.01% +[ Wed Sep 14 09:12:40 2022 ] Top5: 66.35% +[ Wed Sep 14 09:12:40 2022 ] Training epoch: 8 +[ Wed Sep 14 09:13:03 2022 ] Batch(38/123) done. Loss: 1.4682 lr:0.100000 +[ Wed Sep 14 09:13:48 2022 ] Eval epoch: 8 +[ Wed Sep 14 09:14:38 2022 ] Mean test loss of 258 batches: 2.611168622970581. +[ Wed Sep 14 09:14:38 2022 ] Top1: 32.43% +[ Wed Sep 14 09:14:38 2022 ] Top5: 68.30% +[ Wed Sep 14 09:14:38 2022 ] Training epoch: 9 +[ Wed Sep 14 09:14:49 2022 ] Batch(15/123) done. Loss: 1.3381 lr:0.100000 +[ Wed Sep 14 09:15:42 2022 ] Batch(115/123) done. Loss: 0.9783 lr:0.100000 +[ Wed Sep 14 09:15:46 2022 ] Eval epoch: 9 +[ Wed Sep 14 09:16:36 2022 ] Mean test loss of 258 batches: 2.5825960636138916. +[ Wed Sep 14 09:16:36 2022 ] Top1: 31.28% +[ Wed Sep 14 09:16:37 2022 ] Top5: 70.43% +[ Wed Sep 14 09:16:37 2022 ] Training epoch: 10 +[ Wed Sep 14 09:17:29 2022 ] Batch(92/123) done. Loss: 0.8202 lr:0.100000 +[ Wed Sep 14 09:17:45 2022 ] Eval epoch: 10 +[ Wed Sep 14 09:18:35 2022 ] Mean test loss of 258 batches: 2.5349206924438477. +[ Wed Sep 14 09:18:35 2022 ] Top1: 38.55% +[ Wed Sep 14 09:18:35 2022 ] Top5: 73.22% +[ Wed Sep 14 09:18:35 2022 ] Training epoch: 11 +[ Wed Sep 14 09:19:15 2022 ] Batch(69/123) done. Loss: 1.0928 lr:0.100000 +[ Wed Sep 14 09:19:43 2022 ] Eval epoch: 11 +[ Wed Sep 14 09:20:33 2022 ] Mean test loss of 258 batches: 2.245619773864746. +[ Wed Sep 14 09:20:33 2022 ] Top1: 39.81% +[ Wed Sep 14 09:20:33 2022 ] Top5: 78.21% +[ Wed Sep 14 09:20:33 2022 ] Training epoch: 12 +[ Wed Sep 14 09:21:01 2022 ] Batch(46/123) done. 
Loss: 1.0304 lr:0.100000 +[ Wed Sep 14 09:21:41 2022 ] Eval epoch: 12 +[ Wed Sep 14 09:22:31 2022 ] Mean test loss of 258 batches: 2.614882469177246. +[ Wed Sep 14 09:22:31 2022 ] Top1: 38.28% +[ Wed Sep 14 09:22:31 2022 ] Top5: 76.38% +[ Wed Sep 14 09:22:32 2022 ] Training epoch: 13 +[ Wed Sep 14 09:22:47 2022 ] Batch(23/123) done. Loss: 0.8045 lr:0.100000 +[ Wed Sep 14 09:23:39 2022 ] Eval epoch: 13 +[ Wed Sep 14 09:24:30 2022 ] Mean test loss of 258 batches: 2.154670238494873. +[ Wed Sep 14 09:24:30 2022 ] Top1: 43.33% +[ Wed Sep 14 09:24:30 2022 ] Top5: 79.09% +[ Wed Sep 14 09:24:30 2022 ] Training epoch: 14 +[ Wed Sep 14 09:24:33 2022 ] Batch(0/123) done. Loss: 0.8124 lr:0.100000 +[ Wed Sep 14 09:25:26 2022 ] Batch(100/123) done. Loss: 1.0300 lr:0.100000 +[ Wed Sep 14 09:25:38 2022 ] Eval epoch: 14 +[ Wed Sep 14 09:26:28 2022 ] Mean test loss of 258 batches: 2.1980268955230713. +[ Wed Sep 14 09:26:28 2022 ] Top1: 40.46% +[ Wed Sep 14 09:26:28 2022 ] Top5: 79.35% +[ Wed Sep 14 09:26:28 2022 ] Training epoch: 15 +[ Wed Sep 14 09:27:12 2022 ] Batch(77/123) done. Loss: 1.0310 lr:0.100000 +[ Wed Sep 14 09:27:36 2022 ] Eval epoch: 15 +[ Wed Sep 14 09:28:26 2022 ] Mean test loss of 258 batches: 2.291968584060669. +[ Wed Sep 14 09:28:26 2022 ] Top1: 42.55% +[ Wed Sep 14 09:28:26 2022 ] Top5: 79.31% +[ Wed Sep 14 09:28:26 2022 ] Training epoch: 16 +[ Wed Sep 14 09:28:58 2022 ] Batch(54/123) done. Loss: 0.7916 lr:0.100000 +[ Wed Sep 14 09:29:34 2022 ] Eval epoch: 16 +[ Wed Sep 14 09:30:24 2022 ] Mean test loss of 258 batches: 2.040419578552246. +[ Wed Sep 14 09:30:24 2022 ] Top1: 44.35% +[ Wed Sep 14 09:30:24 2022 ] Top5: 83.03% +[ Wed Sep 14 09:30:24 2022 ] Training epoch: 17 +[ Wed Sep 14 09:30:44 2022 ] Batch(31/123) done. Loss: 0.6790 lr:0.100000 +[ Wed Sep 14 09:31:32 2022 ] Eval epoch: 17 +[ Wed Sep 14 09:32:22 2022 ] Mean test loss of 258 batches: 2.0747108459472656. 
+[ Wed Sep 14 09:32:22 2022 ] Top1: 43.29% +[ Wed Sep 14 09:32:23 2022 ] Top5: 80.58% +[ Wed Sep 14 09:32:23 2022 ] Training epoch: 18 +[ Wed Sep 14 09:32:30 2022 ] Batch(8/123) done. Loss: 0.9900 lr:0.100000 +[ Wed Sep 14 09:33:23 2022 ] Batch(108/123) done. Loss: 0.9786 lr:0.100000 +[ Wed Sep 14 09:33:31 2022 ] Eval epoch: 18 +[ Wed Sep 14 09:34:21 2022 ] Mean test loss of 258 batches: 2.5247063636779785. +[ Wed Sep 14 09:34:21 2022 ] Top1: 40.60% +[ Wed Sep 14 09:34:21 2022 ] Top5: 78.68% +[ Wed Sep 14 09:34:21 2022 ] Training epoch: 19 +[ Wed Sep 14 09:35:09 2022 ] Batch(85/123) done. Loss: 1.0015 lr:0.100000 +[ Wed Sep 14 09:35:29 2022 ] Eval epoch: 19 +[ Wed Sep 14 09:36:19 2022 ] Mean test loss of 258 batches: 2.1536636352539062. +[ Wed Sep 14 09:36:19 2022 ] Top1: 44.80% +[ Wed Sep 14 09:36:19 2022 ] Top5: 82.30% +[ Wed Sep 14 09:36:19 2022 ] Training epoch: 20 +[ Wed Sep 14 09:36:55 2022 ] Batch(62/123) done. Loss: 1.1036 lr:0.100000 +[ Wed Sep 14 09:37:27 2022 ] Eval epoch: 20 +[ Wed Sep 14 09:38:18 2022 ] Mean test loss of 258 batches: 1.6829923391342163. +[ Wed Sep 14 09:38:18 2022 ] Top1: 55.40% +[ Wed Sep 14 09:38:18 2022 ] Top5: 88.28% +[ Wed Sep 14 09:38:18 2022 ] Training epoch: 21 +[ Wed Sep 14 09:38:42 2022 ] Batch(39/123) done. Loss: 0.7845 lr:0.100000 +[ Wed Sep 14 09:39:26 2022 ] Eval epoch: 21 +[ Wed Sep 14 09:40:16 2022 ] Mean test loss of 258 batches: 2.2478978633880615. +[ Wed Sep 14 09:40:16 2022 ] Top1: 43.64% +[ Wed Sep 14 09:40:16 2022 ] Top5: 82.79% +[ Wed Sep 14 09:40:16 2022 ] Training epoch: 22 +[ Wed Sep 14 09:40:28 2022 ] Batch(16/123) done. Loss: 0.7186 lr:0.100000 +[ Wed Sep 14 09:41:21 2022 ] Batch(116/123) done. Loss: 0.6766 lr:0.100000 +[ Wed Sep 14 09:41:24 2022 ] Eval epoch: 22 +[ Wed Sep 14 09:42:14 2022 ] Mean test loss of 258 batches: 2.2252414226531982. 
+[ Wed Sep 14 09:42:14 2022 ] Top1: 48.07% +[ Wed Sep 14 09:42:14 2022 ] Top5: 84.36% +[ Wed Sep 14 09:42:14 2022 ] Training epoch: 23 +[ Wed Sep 14 09:43:07 2022 ] Batch(93/123) done. Loss: 0.9349 lr:0.100000 +[ Wed Sep 14 09:43:22 2022 ] Eval epoch: 23 +[ Wed Sep 14 09:44:12 2022 ] Mean test loss of 258 batches: 1.8568469285964966. +[ Wed Sep 14 09:44:12 2022 ] Top1: 53.35% +[ Wed Sep 14 09:44:13 2022 ] Top5: 86.43% +[ Wed Sep 14 09:44:13 2022 ] Training epoch: 24 +[ Wed Sep 14 09:44:53 2022 ] Batch(70/123) done. Loss: 0.6512 lr:0.100000 +[ Wed Sep 14 09:45:21 2022 ] Eval epoch: 24 +[ Wed Sep 14 09:46:11 2022 ] Mean test loss of 258 batches: 1.7404369115829468. +[ Wed Sep 14 09:46:11 2022 ] Top1: 54.24% +[ Wed Sep 14 09:46:11 2022 ] Top5: 87.77% +[ Wed Sep 14 09:46:11 2022 ] Training epoch: 25 +[ Wed Sep 14 09:46:39 2022 ] Batch(47/123) done. Loss: 0.4268 lr:0.100000 +[ Wed Sep 14 09:47:19 2022 ] Eval epoch: 25 +[ Wed Sep 14 09:48:09 2022 ] Mean test loss of 258 batches: 1.952316403388977. +[ Wed Sep 14 09:48:09 2022 ] Top1: 53.28% +[ Wed Sep 14 09:48:09 2022 ] Top5: 87.27% +[ Wed Sep 14 09:48:09 2022 ] Training epoch: 26 +[ Wed Sep 14 09:48:25 2022 ] Batch(24/123) done. Loss: 0.4414 lr:0.100000 +[ Wed Sep 14 09:49:17 2022 ] Eval epoch: 26 +[ Wed Sep 14 09:50:07 2022 ] Mean test loss of 258 batches: 1.8949825763702393. +[ Wed Sep 14 09:50:07 2022 ] Top1: 53.51% +[ Wed Sep 14 09:50:07 2022 ] Top5: 86.95% +[ Wed Sep 14 09:50:07 2022 ] Training epoch: 27 +[ Wed Sep 14 09:50:11 2022 ] Batch(1/123) done. Loss: 0.5545 lr:0.100000 +[ Wed Sep 14 09:51:04 2022 ] Batch(101/123) done. Loss: 0.3903 lr:0.100000 +[ Wed Sep 14 09:51:15 2022 ] Eval epoch: 27 +[ Wed Sep 14 09:52:05 2022 ] Mean test loss of 258 batches: 1.7793006896972656. +[ Wed Sep 14 09:52:05 2022 ] Top1: 55.94% +[ Wed Sep 14 09:52:05 2022 ] Top5: 87.00% +[ Wed Sep 14 09:52:05 2022 ] Training epoch: 28 +[ Wed Sep 14 09:52:50 2022 ] Batch(78/123) done. 
Loss: 0.8556 lr:0.100000 +[ Wed Sep 14 09:53:13 2022 ] Eval epoch: 28 +[ Wed Sep 14 09:54:03 2022 ] Mean test loss of 258 batches: 2.0563740730285645. +[ Wed Sep 14 09:54:03 2022 ] Top1: 52.83% +[ Wed Sep 14 09:54:04 2022 ] Top5: 87.34% +[ Wed Sep 14 09:54:04 2022 ] Training epoch: 29 +[ Wed Sep 14 09:54:36 2022 ] Batch(55/123) done. Loss: 0.7766 lr:0.100000 +[ Wed Sep 14 09:55:12 2022 ] Eval epoch: 29 +[ Wed Sep 14 09:56:01 2022 ] Mean test loss of 258 batches: 2.5048208236694336. +[ Wed Sep 14 09:56:01 2022 ] Top1: 46.99% +[ Wed Sep 14 09:56:01 2022 ] Top5: 83.80% +[ Wed Sep 14 09:56:02 2022 ] Training epoch: 30 +[ Wed Sep 14 09:56:22 2022 ] Batch(32/123) done. Loss: 0.4454 lr:0.100000 +[ Wed Sep 14 09:57:10 2022 ] Eval epoch: 30 +[ Wed Sep 14 09:58:00 2022 ] Mean test loss of 258 batches: 2.186023473739624. +[ Wed Sep 14 09:58:00 2022 ] Top1: 50.51% +[ Wed Sep 14 09:58:00 2022 ] Top5: 85.93% +[ Wed Sep 14 09:58:00 2022 ] Training epoch: 31 +[ Wed Sep 14 09:58:08 2022 ] Batch(9/123) done. Loss: 0.4499 lr:0.100000 +[ Wed Sep 14 09:59:01 2022 ] Batch(109/123) done. Loss: 0.4952 lr:0.100000 +[ Wed Sep 14 09:59:08 2022 ] Eval epoch: 31 +[ Wed Sep 14 09:59:59 2022 ] Mean test loss of 258 batches: 1.4914491176605225. +[ Wed Sep 14 09:59:59 2022 ] Top1: 60.33% +[ Wed Sep 14 09:59:59 2022 ] Top5: 90.76% +[ Wed Sep 14 09:59:59 2022 ] Training epoch: 32 +[ Wed Sep 14 10:00:48 2022 ] Batch(86/123) done. Loss: 0.5064 lr:0.100000 +[ Wed Sep 14 10:01:07 2022 ] Eval epoch: 32 +[ Wed Sep 14 10:01:57 2022 ] Mean test loss of 258 batches: 1.968911051750183. +[ Wed Sep 14 10:01:57 2022 ] Top1: 53.23% +[ Wed Sep 14 10:01:58 2022 ] Top5: 87.66% +[ Wed Sep 14 10:01:58 2022 ] Training epoch: 33 +[ Wed Sep 14 10:02:34 2022 ] Batch(63/123) done. Loss: 0.4501 lr:0.100000 +[ Wed Sep 14 10:03:06 2022 ] Eval epoch: 33 +[ Wed Sep 14 10:03:56 2022 ] Mean test loss of 258 batches: 1.7173289060592651. 
+[ Wed Sep 14 10:03:56 2022 ] Top1: 55.98% +[ Wed Sep 14 10:03:56 2022 ] Top5: 87.16% +[ Wed Sep 14 10:03:56 2022 ] Training epoch: 34 +[ Wed Sep 14 10:04:21 2022 ] Batch(40/123) done. Loss: 0.4133 lr:0.100000 +[ Wed Sep 14 10:05:04 2022 ] Eval epoch: 34 +[ Wed Sep 14 10:05:54 2022 ] Mean test loss of 258 batches: 1.647443413734436. +[ Wed Sep 14 10:05:54 2022 ] Top1: 59.80% +[ Wed Sep 14 10:05:55 2022 ] Top5: 89.44% +[ Wed Sep 14 10:05:55 2022 ] Training epoch: 35 +[ Wed Sep 14 10:06:07 2022 ] Batch(17/123) done. Loss: 0.4764 lr:0.100000 +[ Wed Sep 14 10:06:59 2022 ] Batch(117/123) done. Loss: 0.5666 lr:0.100000 +[ Wed Sep 14 10:07:02 2022 ] Eval epoch: 35 +[ Wed Sep 14 10:07:52 2022 ] Mean test loss of 258 batches: 1.9978245496749878. +[ Wed Sep 14 10:07:52 2022 ] Top1: 54.96% +[ Wed Sep 14 10:07:52 2022 ] Top5: 86.92% +[ Wed Sep 14 10:07:52 2022 ] Training epoch: 36 +[ Wed Sep 14 10:08:45 2022 ] Batch(94/123) done. Loss: 0.7286 lr:0.100000 +[ Wed Sep 14 10:09:00 2022 ] Eval epoch: 36 +[ Wed Sep 14 10:09:51 2022 ] Mean test loss of 258 batches: 1.6905821561813354. +[ Wed Sep 14 10:09:51 2022 ] Top1: 59.84% +[ Wed Sep 14 10:09:51 2022 ] Top5: 90.23% +[ Wed Sep 14 10:09:51 2022 ] Training epoch: 37 +[ Wed Sep 14 10:10:32 2022 ] Batch(71/123) done. Loss: 0.5124 lr:0.100000 +[ Wed Sep 14 10:10:59 2022 ] Eval epoch: 37 +[ Wed Sep 14 10:11:49 2022 ] Mean test loss of 258 batches: 2.075540065765381. +[ Wed Sep 14 10:11:49 2022 ] Top1: 55.08% +[ Wed Sep 14 10:11:49 2022 ] Top5: 88.20% +[ Wed Sep 14 10:11:49 2022 ] Training epoch: 38 +[ Wed Sep 14 10:12:18 2022 ] Batch(48/123) done. Loss: 0.3995 lr:0.100000 +[ Wed Sep 14 10:12:57 2022 ] Eval epoch: 38 +[ Wed Sep 14 10:13:47 2022 ] Mean test loss of 258 batches: 1.8056854009628296. +[ Wed Sep 14 10:13:47 2022 ] Top1: 56.98% +[ Wed Sep 14 10:13:47 2022 ] Top5: 88.25% +[ Wed Sep 14 10:13:48 2022 ] Training epoch: 39 +[ Wed Sep 14 10:14:04 2022 ] Batch(25/123) done. 
Loss: 0.2991 lr:0.100000 +[ Wed Sep 14 10:14:55 2022 ] Eval epoch: 39 +[ Wed Sep 14 10:15:45 2022 ] Mean test loss of 258 batches: 1.9381791353225708. +[ Wed Sep 14 10:15:46 2022 ] Top1: 56.47% +[ Wed Sep 14 10:15:46 2022 ] Top5: 88.34% +[ Wed Sep 14 10:15:46 2022 ] Training epoch: 40 +[ Wed Sep 14 10:15:50 2022 ] Batch(2/123) done. Loss: 0.4283 lr:0.100000 +[ Wed Sep 14 10:16:43 2022 ] Batch(102/123) done. Loss: 0.5978 lr:0.100000 +[ Wed Sep 14 10:16:54 2022 ] Eval epoch: 40 +[ Wed Sep 14 10:17:44 2022 ] Mean test loss of 258 batches: 2.035595417022705. +[ Wed Sep 14 10:17:44 2022 ] Top1: 57.83% +[ Wed Sep 14 10:17:44 2022 ] Top5: 88.69% +[ Wed Sep 14 10:17:44 2022 ] Training epoch: 41 +[ Wed Sep 14 10:18:29 2022 ] Batch(79/123) done. Loss: 0.2991 lr:0.100000 +[ Wed Sep 14 10:18:52 2022 ] Eval epoch: 41 +[ Wed Sep 14 10:19:42 2022 ] Mean test loss of 258 batches: 1.82920241355896. +[ Wed Sep 14 10:19:42 2022 ] Top1: 57.34% +[ Wed Sep 14 10:19:42 2022 ] Top5: 88.00% +[ Wed Sep 14 10:19:42 2022 ] Training epoch: 42 +[ Wed Sep 14 10:20:15 2022 ] Batch(56/123) done. Loss: 0.2271 lr:0.100000 +[ Wed Sep 14 10:20:50 2022 ] Eval epoch: 42 +[ Wed Sep 14 10:21:41 2022 ] Mean test loss of 258 batches: 1.6885477304458618. +[ Wed Sep 14 10:21:41 2022 ] Top1: 59.30% +[ Wed Sep 14 10:21:41 2022 ] Top5: 89.62% +[ Wed Sep 14 10:21:41 2022 ] Training epoch: 43 +[ Wed Sep 14 10:22:02 2022 ] Batch(33/123) done. Loss: 0.3189 lr:0.100000 +[ Wed Sep 14 10:22:49 2022 ] Eval epoch: 43 +[ Wed Sep 14 10:23:39 2022 ] Mean test loss of 258 batches: 2.392216205596924. +[ Wed Sep 14 10:23:39 2022 ] Top1: 55.12% +[ Wed Sep 14 10:23:39 2022 ] Top5: 87.38% +[ Wed Sep 14 10:23:39 2022 ] Training epoch: 44 +[ Wed Sep 14 10:23:48 2022 ] Batch(10/123) done. Loss: 0.2906 lr:0.100000 +[ Wed Sep 14 10:24:41 2022 ] Batch(110/123) done. Loss: 0.2826 lr:0.100000 +[ Wed Sep 14 10:24:47 2022 ] Eval epoch: 44 +[ Wed Sep 14 10:25:37 2022 ] Mean test loss of 258 batches: 1.8153728246688843. 
+[ Wed Sep 14 10:25:37 2022 ] Top1: 57.09% +[ Wed Sep 14 10:25:37 2022 ] Top5: 88.91% +[ Wed Sep 14 10:25:37 2022 ] Training epoch: 45 +[ Wed Sep 14 10:26:27 2022 ] Batch(87/123) done. Loss: 0.3844 lr:0.100000 +[ Wed Sep 14 10:26:46 2022 ] Eval epoch: 45 +[ Wed Sep 14 10:27:36 2022 ] Mean test loss of 258 batches: 1.9515217542648315. +[ Wed Sep 14 10:27:36 2022 ] Top1: 54.25% +[ Wed Sep 14 10:27:36 2022 ] Top5: 87.61% +[ Wed Sep 14 10:27:36 2022 ] Training epoch: 46 +[ Wed Sep 14 10:28:13 2022 ] Batch(64/123) done. Loss: 0.3033 lr:0.100000 +[ Wed Sep 14 10:28:44 2022 ] Eval epoch: 46 +[ Wed Sep 14 10:29:34 2022 ] Mean test loss of 258 batches: 1.7243938446044922. +[ Wed Sep 14 10:29:34 2022 ] Top1: 59.54% +[ Wed Sep 14 10:29:34 2022 ] Top5: 89.23% +[ Wed Sep 14 10:29:34 2022 ] Training epoch: 47 +[ Wed Sep 14 10:29:59 2022 ] Batch(41/123) done. Loss: 0.2655 lr:0.100000 +[ Wed Sep 14 10:30:42 2022 ] Eval epoch: 47 +[ Wed Sep 14 10:31:33 2022 ] Mean test loss of 258 batches: 2.1055798530578613. +[ Wed Sep 14 10:31:33 2022 ] Top1: 55.53% +[ Wed Sep 14 10:31:33 2022 ] Top5: 87.32% +[ Wed Sep 14 10:31:33 2022 ] Training epoch: 48 +[ Wed Sep 14 10:31:46 2022 ] Batch(18/123) done. Loss: 0.2439 lr:0.100000 +[ Wed Sep 14 10:32:38 2022 ] Batch(118/123) done. Loss: 0.4248 lr:0.100000 +[ Wed Sep 14 10:32:41 2022 ] Eval epoch: 48 +[ Wed Sep 14 10:33:30 2022 ] Mean test loss of 258 batches: 1.7812795639038086. +[ Wed Sep 14 10:33:31 2022 ] Top1: 60.28% +[ Wed Sep 14 10:33:31 2022 ] Top5: 89.31% +[ Wed Sep 14 10:33:31 2022 ] Training epoch: 49 +[ Wed Sep 14 10:34:24 2022 ] Batch(95/123) done. Loss: 0.3316 lr:0.100000 +[ Wed Sep 14 10:34:39 2022 ] Eval epoch: 49 +[ Wed Sep 14 10:35:29 2022 ] Mean test loss of 258 batches: 1.7424616813659668. +[ Wed Sep 14 10:35:29 2022 ] Top1: 62.07% +[ Wed Sep 14 10:35:29 2022 ] Top5: 90.17% +[ Wed Sep 14 10:35:29 2022 ] Training epoch: 50 +[ Wed Sep 14 10:36:10 2022 ] Batch(72/123) done. 
Loss: 0.2719 lr:0.100000 +[ Wed Sep 14 10:36:37 2022 ] Eval epoch: 50 +[ Wed Sep 14 10:37:27 2022 ] Mean test loss of 258 batches: 2.0730063915252686. +[ Wed Sep 14 10:37:27 2022 ] Top1: 57.09% +[ Wed Sep 14 10:37:27 2022 ] Top5: 88.33% +[ Wed Sep 14 10:37:27 2022 ] Training epoch: 51 +[ Wed Sep 14 10:37:57 2022 ] Batch(49/123) done. Loss: 0.1891 lr:0.100000 +[ Wed Sep 14 10:38:35 2022 ] Eval epoch: 51 +[ Wed Sep 14 10:39:25 2022 ] Mean test loss of 258 batches: 2.068854331970215. +[ Wed Sep 14 10:39:25 2022 ] Top1: 56.49% +[ Wed Sep 14 10:39:25 2022 ] Top5: 87.82% +[ Wed Sep 14 10:39:25 2022 ] Training epoch: 52 +[ Wed Sep 14 10:39:43 2022 ] Batch(26/123) done. Loss: 0.2361 lr:0.100000 +[ Wed Sep 14 10:40:34 2022 ] Eval epoch: 52 +[ Wed Sep 14 10:41:24 2022 ] Mean test loss of 258 batches: 1.7761597633361816. +[ Wed Sep 14 10:41:24 2022 ] Top1: 60.53% +[ Wed Sep 14 10:41:24 2022 ] Top5: 89.23% +[ Wed Sep 14 10:41:24 2022 ] Training epoch: 53 +[ Wed Sep 14 10:41:29 2022 ] Batch(3/123) done. Loss: 0.2845 lr:0.100000 +[ Wed Sep 14 10:42:22 2022 ] Batch(103/123) done. Loss: 0.3599 lr:0.100000 +[ Wed Sep 14 10:42:32 2022 ] Eval epoch: 53 +[ Wed Sep 14 10:43:22 2022 ] Mean test loss of 258 batches: 1.8274670839309692. +[ Wed Sep 14 10:43:22 2022 ] Top1: 59.37% +[ Wed Sep 14 10:43:22 2022 ] Top5: 89.29% +[ Wed Sep 14 10:43:22 2022 ] Training epoch: 54 +[ Wed Sep 14 10:44:07 2022 ] Batch(80/123) done. Loss: 0.3570 lr:0.100000 +[ Wed Sep 14 10:44:30 2022 ] Eval epoch: 54 +[ Wed Sep 14 10:45:20 2022 ] Mean test loss of 258 batches: 2.603060007095337. +[ Wed Sep 14 10:45:20 2022 ] Top1: 52.80% +[ Wed Sep 14 10:45:20 2022 ] Top5: 85.49% +[ Wed Sep 14 10:45:20 2022 ] Training epoch: 55 +[ Wed Sep 14 10:45:54 2022 ] Batch(57/123) done. Loss: 0.3911 lr:0.100000 +[ Wed Sep 14 10:46:28 2022 ] Eval epoch: 55 +[ Wed Sep 14 10:47:19 2022 ] Mean test loss of 258 batches: 2.296555995941162. 
+[ Wed Sep 14 10:47:19 2022 ] Top1: 55.17% +[ Wed Sep 14 10:47:19 2022 ] Top5: 88.06% +[ Wed Sep 14 10:47:19 2022 ] Training epoch: 56 +[ Wed Sep 14 10:47:40 2022 ] Batch(34/123) done. Loss: 0.3296 lr:0.100000 +[ Wed Sep 14 10:48:26 2022 ] Eval epoch: 56 +[ Wed Sep 14 10:49:17 2022 ] Mean test loss of 258 batches: 2.9580914974212646. +[ Wed Sep 14 10:49:17 2022 ] Top1: 50.03% +[ Wed Sep 14 10:49:17 2022 ] Top5: 84.24% +[ Wed Sep 14 10:49:17 2022 ] Training epoch: 57 +[ Wed Sep 14 10:49:26 2022 ] Batch(11/123) done. Loss: 0.3893 lr:0.100000 +[ Wed Sep 14 10:50:19 2022 ] Batch(111/123) done. Loss: 0.2722 lr:0.100000 +[ Wed Sep 14 10:50:25 2022 ] Eval epoch: 57 +[ Wed Sep 14 10:51:15 2022 ] Mean test loss of 258 batches: 2.132084608078003. +[ Wed Sep 14 10:51:15 2022 ] Top1: 56.30% +[ Wed Sep 14 10:51:15 2022 ] Top5: 87.72% +[ Wed Sep 14 10:51:15 2022 ] Training epoch: 58 +[ Wed Sep 14 10:52:05 2022 ] Batch(88/123) done. Loss: 0.2534 lr:0.100000 +[ Wed Sep 14 10:52:23 2022 ] Eval epoch: 58 +[ Wed Sep 14 10:53:14 2022 ] Mean test loss of 258 batches: 65.98489379882812. +[ Wed Sep 14 10:53:14 2022 ] Top1: 3.87% +[ Wed Sep 14 10:53:14 2022 ] Top5: 15.19% +[ Wed Sep 14 10:53:14 2022 ] Training epoch: 59 +[ Wed Sep 14 10:53:51 2022 ] Batch(65/123) done. Loss: 0.2358 lr:0.100000 +[ Wed Sep 14 10:54:22 2022 ] Eval epoch: 59 +[ Wed Sep 14 10:55:12 2022 ] Mean test loss of 258 batches: 2.2219696044921875. +[ Wed Sep 14 10:55:12 2022 ] Top1: 58.53% +[ Wed Sep 14 10:55:12 2022 ] Top5: 87.46% +[ Wed Sep 14 10:55:12 2022 ] Training epoch: 60 +[ Wed Sep 14 10:55:38 2022 ] Batch(42/123) done. Loss: 0.2937 lr:0.100000 +[ Wed Sep 14 10:56:20 2022 ] Eval epoch: 60 +[ Wed Sep 14 10:57:11 2022 ] Mean test loss of 258 batches: 1.9551914930343628. +[ Wed Sep 14 10:57:11 2022 ] Top1: 58.57% +[ Wed Sep 14 10:57:11 2022 ] Top5: 87.32% +[ Wed Sep 14 10:57:11 2022 ] Training epoch: 61 +[ Wed Sep 14 10:57:24 2022 ] Batch(19/123) done. 
Loss: 0.1494 lr:0.010000 +[ Wed Sep 14 10:58:17 2022 ] Batch(119/123) done. Loss: 0.1193 lr:0.010000 +[ Wed Sep 14 10:58:19 2022 ] Eval epoch: 61 +[ Wed Sep 14 10:59:09 2022 ] Mean test loss of 258 batches: 1.575683832168579. +[ Wed Sep 14 10:59:09 2022 ] Top1: 66.74% +[ Wed Sep 14 10:59:09 2022 ] Top5: 91.60% +[ Wed Sep 14 10:59:09 2022 ] Training epoch: 62 +[ Wed Sep 14 11:00:03 2022 ] Batch(96/123) done. Loss: 0.0999 lr:0.010000 +[ Wed Sep 14 11:00:17 2022 ] Eval epoch: 62 +[ Wed Sep 14 11:01:07 2022 ] Mean test loss of 258 batches: 1.536460280418396. +[ Wed Sep 14 11:01:07 2022 ] Top1: 67.30% +[ Wed Sep 14 11:01:07 2022 ] Top5: 91.82% +[ Wed Sep 14 11:01:07 2022 ] Training epoch: 63 +[ Wed Sep 14 11:01:49 2022 ] Batch(73/123) done. Loss: 0.0839 lr:0.010000 +[ Wed Sep 14 11:02:15 2022 ] Eval epoch: 63 +[ Wed Sep 14 11:03:05 2022 ] Mean test loss of 258 batches: 1.558850646018982. +[ Wed Sep 14 11:03:05 2022 ] Top1: 67.97% +[ Wed Sep 14 11:03:05 2022 ] Top5: 91.76% +[ Wed Sep 14 11:03:05 2022 ] Training epoch: 64 +[ Wed Sep 14 11:03:35 2022 ] Batch(50/123) done. Loss: 0.0853 lr:0.010000 +[ Wed Sep 14 11:04:13 2022 ] Eval epoch: 64 +[ Wed Sep 14 11:05:03 2022 ] Mean test loss of 258 batches: 1.6455943584442139. +[ Wed Sep 14 11:05:03 2022 ] Top1: 67.59% +[ Wed Sep 14 11:05:03 2022 ] Top5: 91.65% +[ Wed Sep 14 11:05:03 2022 ] Training epoch: 65 +[ Wed Sep 14 11:05:21 2022 ] Batch(27/123) done. Loss: 0.0390 lr:0.010000 +[ Wed Sep 14 11:06:11 2022 ] Eval epoch: 65 +[ Wed Sep 14 11:07:01 2022 ] Mean test loss of 258 batches: 1.562364935874939. +[ Wed Sep 14 11:07:01 2022 ] Top1: 68.21% +[ Wed Sep 14 11:07:01 2022 ] Top5: 92.05% +[ Wed Sep 14 11:07:01 2022 ] Training epoch: 66 +[ Wed Sep 14 11:07:06 2022 ] Batch(4/123) done. Loss: 0.0553 lr:0.010000 +[ Wed Sep 14 11:07:59 2022 ] Batch(104/123) done. Loss: 0.0542 lr:0.010000 +[ Wed Sep 14 11:08:09 2022 ] Eval epoch: 66 +[ Wed Sep 14 11:08:59 2022 ] Mean test loss of 258 batches: 1.6343063116073608. 
+[ Wed Sep 14 11:08:59 2022 ] Top1: 67.62% +[ Wed Sep 14 11:08:59 2022 ] Top5: 91.82% +[ Wed Sep 14 11:08:59 2022 ] Training epoch: 67 +[ Wed Sep 14 11:09:46 2022 ] Batch(81/123) done. Loss: 0.0217 lr:0.010000 +[ Wed Sep 14 11:10:08 2022 ] Eval epoch: 67 +[ Wed Sep 14 11:10:58 2022 ] Mean test loss of 258 batches: 1.6032357215881348. +[ Wed Sep 14 11:10:58 2022 ] Top1: 68.16% +[ Wed Sep 14 11:10:58 2022 ] Top5: 92.14% +[ Wed Sep 14 11:10:58 2022 ] Training epoch: 68 +[ Wed Sep 14 11:11:32 2022 ] Batch(58/123) done. Loss: 0.0329 lr:0.010000 +[ Wed Sep 14 11:12:06 2022 ] Eval epoch: 68 +[ Wed Sep 14 11:12:56 2022 ] Mean test loss of 258 batches: 1.5968663692474365. +[ Wed Sep 14 11:12:56 2022 ] Top1: 67.98% +[ Wed Sep 14 11:12:56 2022 ] Top5: 91.98% +[ Wed Sep 14 11:12:56 2022 ] Training epoch: 69 +[ Wed Sep 14 11:13:18 2022 ] Batch(35/123) done. Loss: 0.0294 lr:0.010000 +[ Wed Sep 14 11:14:05 2022 ] Eval epoch: 69 +[ Wed Sep 14 11:14:54 2022 ] Mean test loss of 258 batches: 1.6394085884094238. +[ Wed Sep 14 11:14:55 2022 ] Top1: 68.04% +[ Wed Sep 14 11:14:55 2022 ] Top5: 92.00% +[ Wed Sep 14 11:14:55 2022 ] Training epoch: 70 +[ Wed Sep 14 11:15:05 2022 ] Batch(12/123) done. Loss: 0.0688 lr:0.010000 +[ Wed Sep 14 11:15:57 2022 ] Batch(112/123) done. Loss: 0.0644 lr:0.010000 +[ Wed Sep 14 11:16:03 2022 ] Eval epoch: 70 +[ Wed Sep 14 11:16:53 2022 ] Mean test loss of 258 batches: 1.662743330001831. +[ Wed Sep 14 11:16:53 2022 ] Top1: 68.26% +[ Wed Sep 14 11:16:53 2022 ] Top5: 91.95% +[ Wed Sep 14 11:16:53 2022 ] Training epoch: 71 +[ Wed Sep 14 11:17:44 2022 ] Batch(89/123) done. Loss: 0.1076 lr:0.010000 +[ Wed Sep 14 11:18:01 2022 ] Eval epoch: 71 +[ Wed Sep 14 11:18:52 2022 ] Mean test loss of 258 batches: 1.7534395456314087. +[ Wed Sep 14 11:18:52 2022 ] Top1: 67.43% +[ Wed Sep 14 11:18:52 2022 ] Top5: 91.75% +[ Wed Sep 14 11:18:52 2022 ] Training epoch: 72 +[ Wed Sep 14 11:19:30 2022 ] Batch(66/123) done. 
Loss: 0.0462 lr:0.010000 +[ Wed Sep 14 11:20:00 2022 ] Eval epoch: 72 +[ Wed Sep 14 11:20:50 2022 ] Mean test loss of 258 batches: 1.6716774702072144. +[ Wed Sep 14 11:20:50 2022 ] Top1: 68.33% +[ Wed Sep 14 11:20:50 2022 ] Top5: 91.84% +[ Wed Sep 14 11:20:50 2022 ] Training epoch: 73 +[ Wed Sep 14 11:21:16 2022 ] Batch(43/123) done. Loss: 0.0303 lr:0.010000 +[ Wed Sep 14 11:21:58 2022 ] Eval epoch: 73 +[ Wed Sep 14 11:22:48 2022 ] Mean test loss of 258 batches: 1.625872254371643. +[ Wed Sep 14 11:22:48 2022 ] Top1: 68.91% +[ Wed Sep 14 11:22:48 2022 ] Top5: 92.24% +[ Wed Sep 14 11:22:49 2022 ] Training epoch: 74 +[ Wed Sep 14 11:23:02 2022 ] Batch(20/123) done. Loss: 0.0452 lr:0.010000 +[ Wed Sep 14 11:23:55 2022 ] Batch(120/123) done. Loss: 0.0478 lr:0.010000 +[ Wed Sep 14 11:23:57 2022 ] Eval epoch: 74 +[ Wed Sep 14 11:24:47 2022 ] Mean test loss of 258 batches: 1.6459804773330688. +[ Wed Sep 14 11:24:47 2022 ] Top1: 68.80% +[ Wed Sep 14 11:24:47 2022 ] Top5: 92.11% +[ Wed Sep 14 11:24:47 2022 ] Training epoch: 75 +[ Wed Sep 14 11:25:42 2022 ] Batch(97/123) done. Loss: 0.0224 lr:0.010000 +[ Wed Sep 14 11:25:55 2022 ] Eval epoch: 75 +[ Wed Sep 14 11:26:45 2022 ] Mean test loss of 258 batches: 1.661178469657898. +[ Wed Sep 14 11:26:45 2022 ] Top1: 68.80% +[ Wed Sep 14 11:26:45 2022 ] Top5: 92.26% +[ Wed Sep 14 11:26:45 2022 ] Training epoch: 76 +[ Wed Sep 14 11:27:28 2022 ] Batch(74/123) done. Loss: 0.1159 lr:0.010000 +[ Wed Sep 14 11:27:53 2022 ] Eval epoch: 76 +[ Wed Sep 14 11:28:43 2022 ] Mean test loss of 258 batches: 1.709254503250122. +[ Wed Sep 14 11:28:43 2022 ] Top1: 68.46% +[ Wed Sep 14 11:28:43 2022 ] Top5: 91.94% +[ Wed Sep 14 11:28:44 2022 ] Training epoch: 77 +[ Wed Sep 14 11:29:14 2022 ] Batch(51/123) done. Loss: 0.0600 lr:0.010000 +[ Wed Sep 14 11:29:52 2022 ] Eval epoch: 77 +[ Wed Sep 14 11:30:42 2022 ] Mean test loss of 258 batches: 1.7816194295883179. 
+[ Wed Sep 14 11:30:42 2022 ] Top1: 68.09% +[ Wed Sep 14 11:30:42 2022 ] Top5: 91.47% +[ Wed Sep 14 11:30:42 2022 ] Training epoch: 78 +[ Wed Sep 14 11:31:00 2022 ] Batch(28/123) done. Loss: 0.0498 lr:0.010000 +[ Wed Sep 14 11:31:50 2022 ] Eval epoch: 78 +[ Wed Sep 14 11:32:40 2022 ] Mean test loss of 258 batches: 1.7139428853988647. +[ Wed Sep 14 11:32:40 2022 ] Top1: 68.27% +[ Wed Sep 14 11:32:40 2022 ] Top5: 91.90% +[ Wed Sep 14 11:32:41 2022 ] Training epoch: 79 +[ Wed Sep 14 11:32:46 2022 ] Batch(5/123) done. Loss: 0.0601 lr:0.010000 +[ Wed Sep 14 11:33:39 2022 ] Batch(105/123) done. Loss: 0.0715 lr:0.010000 +[ Wed Sep 14 11:33:48 2022 ] Eval epoch: 79 +[ Wed Sep 14 11:34:38 2022 ] Mean test loss of 258 batches: 1.718898057937622. +[ Wed Sep 14 11:34:39 2022 ] Top1: 68.59% +[ Wed Sep 14 11:34:39 2022 ] Top5: 92.04% +[ Wed Sep 14 11:34:39 2022 ] Training epoch: 80 +[ Wed Sep 14 11:35:25 2022 ] Batch(82/123) done. Loss: 0.0313 lr:0.010000 +[ Wed Sep 14 11:35:46 2022 ] Eval epoch: 80 +[ Wed Sep 14 11:36:36 2022 ] Mean test loss of 258 batches: 1.7329745292663574. +[ Wed Sep 14 11:36:36 2022 ] Top1: 68.54% +[ Wed Sep 14 11:36:36 2022 ] Top5: 92.01% +[ Wed Sep 14 11:36:36 2022 ] Training epoch: 81 +[ Wed Sep 14 11:37:10 2022 ] Batch(59/123) done. Loss: 0.0474 lr:0.001000 +[ Wed Sep 14 11:37:44 2022 ] Eval epoch: 81 +[ Wed Sep 14 11:38:34 2022 ] Mean test loss of 258 batches: 1.7133427858352661. +[ Wed Sep 14 11:38:34 2022 ] Top1: 68.72% +[ Wed Sep 14 11:38:34 2022 ] Top5: 92.05% +[ Wed Sep 14 11:38:34 2022 ] Training epoch: 82 +[ Wed Sep 14 11:38:57 2022 ] Batch(36/123) done. Loss: 0.0127 lr:0.001000 +[ Wed Sep 14 11:39:43 2022 ] Eval epoch: 82 +[ Wed Sep 14 11:40:33 2022 ] Mean test loss of 258 batches: 1.688591718673706. +[ Wed Sep 14 11:40:33 2022 ] Top1: 68.88% +[ Wed Sep 14 11:40:33 2022 ] Top5: 92.21% +[ Wed Sep 14 11:40:33 2022 ] Training epoch: 83 +[ Wed Sep 14 11:40:43 2022 ] Batch(13/123) done. 
Loss: 0.1167 lr:0.001000 +[ Wed Sep 14 11:41:36 2022 ] Batch(113/123) done. Loss: 0.0909 lr:0.001000 +[ Wed Sep 14 11:41:41 2022 ] Eval epoch: 83 +[ Wed Sep 14 11:42:31 2022 ] Mean test loss of 258 batches: 1.7332514524459839. +[ Wed Sep 14 11:42:31 2022 ] Top1: 68.36% +[ Wed Sep 14 11:42:31 2022 ] Top5: 92.05% +[ Wed Sep 14 11:42:31 2022 ] Training epoch: 84 +[ Wed Sep 14 11:43:22 2022 ] Batch(90/123) done. Loss: 0.0397 lr:0.001000 +[ Wed Sep 14 11:43:39 2022 ] Eval epoch: 84 +[ Wed Sep 14 11:44:29 2022 ] Mean test loss of 258 batches: 1.7026885747909546. +[ Wed Sep 14 11:44:29 2022 ] Top1: 68.63% +[ Wed Sep 14 11:44:29 2022 ] Top5: 92.21% +[ Wed Sep 14 11:44:29 2022 ] Training epoch: 85 +[ Wed Sep 14 11:45:08 2022 ] Batch(67/123) done. Loss: 0.0991 lr:0.001000 +[ Wed Sep 14 11:45:37 2022 ] Eval epoch: 85 +[ Wed Sep 14 11:46:27 2022 ] Mean test loss of 258 batches: 1.7128080129623413. +[ Wed Sep 14 11:46:27 2022 ] Top1: 68.54% +[ Wed Sep 14 11:46:28 2022 ] Top5: 92.25% +[ Wed Sep 14 11:46:28 2022 ] Training epoch: 86 +[ Wed Sep 14 11:46:54 2022 ] Batch(44/123) done. Loss: 0.0424 lr:0.001000 +[ Wed Sep 14 11:47:36 2022 ] Eval epoch: 86 +[ Wed Sep 14 11:48:26 2022 ] Mean test loss of 258 batches: 1.675726294517517. +[ Wed Sep 14 11:48:26 2022 ] Top1: 68.90% +[ Wed Sep 14 11:48:26 2022 ] Top5: 92.18% +[ Wed Sep 14 11:48:26 2022 ] Training epoch: 87 +[ Wed Sep 14 11:48:40 2022 ] Batch(21/123) done. Loss: 0.0922 lr:0.001000 +[ Wed Sep 14 11:49:33 2022 ] Batch(121/123) done. Loss: 0.0637 lr:0.001000 +[ Wed Sep 14 11:49:34 2022 ] Eval epoch: 87 +[ Wed Sep 14 11:50:24 2022 ] Mean test loss of 258 batches: 1.7134418487548828. +[ Wed Sep 14 11:50:24 2022 ] Top1: 68.70% +[ Wed Sep 14 11:50:24 2022 ] Top5: 92.15% +[ Wed Sep 14 11:50:24 2022 ] Training epoch: 88 +[ Wed Sep 14 11:51:20 2022 ] Batch(98/123) done. Loss: 0.0572 lr:0.001000 +[ Wed Sep 14 11:51:33 2022 ] Eval epoch: 88 +[ Wed Sep 14 11:52:23 2022 ] Mean test loss of 258 batches: 1.7478185892105103. 
+[ Wed Sep 14 11:52:23 2022 ] Top1: 68.37% +[ Wed Sep 14 11:52:23 2022 ] Top5: 92.06% +[ Wed Sep 14 11:52:23 2022 ] Training epoch: 89 +[ Wed Sep 14 11:53:06 2022 ] Batch(75/123) done. Loss: 0.0702 lr:0.001000 +[ Wed Sep 14 11:53:31 2022 ] Eval epoch: 89 +[ Wed Sep 14 11:54:21 2022 ] Mean test loss of 258 batches: 1.714134931564331. +[ Wed Sep 14 11:54:21 2022 ] Top1: 68.56% +[ Wed Sep 14 11:54:21 2022 ] Top5: 92.05% +[ Wed Sep 14 11:54:21 2022 ] Training epoch: 90 +[ Wed Sep 14 11:54:52 2022 ] Batch(52/123) done. Loss: 0.0289 lr:0.001000 +[ Wed Sep 14 11:55:29 2022 ] Eval epoch: 90 +[ Wed Sep 14 11:56:20 2022 ] Mean test loss of 258 batches: 1.685438871383667. +[ Wed Sep 14 11:56:20 2022 ] Top1: 68.59% +[ Wed Sep 14 11:56:20 2022 ] Top5: 92.16% +[ Wed Sep 14 11:56:20 2022 ] Training epoch: 91 +[ Wed Sep 14 11:56:39 2022 ] Batch(29/123) done. Loss: 0.1004 lr:0.001000 +[ Wed Sep 14 11:57:28 2022 ] Eval epoch: 91 +[ Wed Sep 14 11:58:18 2022 ] Mean test loss of 258 batches: 1.7459733486175537. +[ Wed Sep 14 11:58:18 2022 ] Top1: 68.23% +[ Wed Sep 14 11:58:18 2022 ] Top5: 92.06% +[ Wed Sep 14 11:58:18 2022 ] Training epoch: 92 +[ Wed Sep 14 11:58:25 2022 ] Batch(6/123) done. Loss: 0.0909 lr:0.001000 +[ Wed Sep 14 11:59:18 2022 ] Batch(106/123) done. Loss: 0.0383 lr:0.001000 +[ Wed Sep 14 11:59:26 2022 ] Eval epoch: 92 +[ Wed Sep 14 12:00:16 2022 ] Mean test loss of 258 batches: 1.7359580993652344. +[ Wed Sep 14 12:00:16 2022 ] Top1: 68.49% +[ Wed Sep 14 12:00:16 2022 ] Top5: 92.02% +[ Wed Sep 14 12:00:16 2022 ] Training epoch: 93 +[ Wed Sep 14 12:01:03 2022 ] Batch(83/123) done. Loss: 0.0526 lr:0.001000 +[ Wed Sep 14 12:01:24 2022 ] Eval epoch: 93 +[ Wed Sep 14 12:02:14 2022 ] Mean test loss of 258 batches: 1.6857571601867676. +[ Wed Sep 14 12:02:14 2022 ] Top1: 68.85% +[ Wed Sep 14 12:02:14 2022 ] Top5: 92.33% +[ Wed Sep 14 12:02:14 2022 ] Training epoch: 94 +[ Wed Sep 14 12:02:49 2022 ] Batch(60/123) done. 
Loss: 0.0618 lr:0.001000 +[ Wed Sep 14 12:03:22 2022 ] Eval epoch: 94 +[ Wed Sep 14 12:04:12 2022 ] Mean test loss of 258 batches: 1.7157808542251587. +[ Wed Sep 14 12:04:12 2022 ] Top1: 68.72% +[ Wed Sep 14 12:04:12 2022 ] Top5: 92.14% +[ Wed Sep 14 12:04:12 2022 ] Training epoch: 95 +[ Wed Sep 14 12:04:35 2022 ] Batch(37/123) done. Loss: 0.0258 lr:0.001000 +[ Wed Sep 14 12:05:20 2022 ] Eval epoch: 95 +[ Wed Sep 14 12:06:10 2022 ] Mean test loss of 258 batches: 1.7361732721328735. +[ Wed Sep 14 12:06:10 2022 ] Top1: 68.57% +[ Wed Sep 14 12:06:10 2022 ] Top5: 92.00% +[ Wed Sep 14 12:06:10 2022 ] Training epoch: 96 +[ Wed Sep 14 12:06:21 2022 ] Batch(14/123) done. Loss: 0.0354 lr:0.001000 +[ Wed Sep 14 12:07:14 2022 ] Batch(114/123) done. Loss: 0.0514 lr:0.001000 +[ Wed Sep 14 12:07:19 2022 ] Eval epoch: 96 +[ Wed Sep 14 12:08:09 2022 ] Mean test loss of 258 batches: 1.7357374429702759. +[ Wed Sep 14 12:08:09 2022 ] Top1: 68.52% +[ Wed Sep 14 12:08:09 2022 ] Top5: 92.08% +[ Wed Sep 14 12:08:09 2022 ] Training epoch: 97 +[ Wed Sep 14 12:09:01 2022 ] Batch(91/123) done. Loss: 0.0434 lr:0.001000 +[ Wed Sep 14 12:09:17 2022 ] Eval epoch: 97 +[ Wed Sep 14 12:10:07 2022 ] Mean test loss of 258 batches: 1.7196378707885742. +[ Wed Sep 14 12:10:07 2022 ] Top1: 68.52% +[ Wed Sep 14 12:10:08 2022 ] Top5: 92.22% +[ Wed Sep 14 12:10:08 2022 ] Training epoch: 98 +[ Wed Sep 14 12:10:47 2022 ] Batch(68/123) done. Loss: 0.0164 lr:0.001000 +[ Wed Sep 14 12:11:16 2022 ] Eval epoch: 98 +[ Wed Sep 14 12:12:06 2022 ] Mean test loss of 258 batches: 1.734071969985962. +[ Wed Sep 14 12:12:06 2022 ] Top1: 68.67% +[ Wed Sep 14 12:12:06 2022 ] Top5: 91.99% +[ Wed Sep 14 12:12:06 2022 ] Training epoch: 99 +[ Wed Sep 14 12:12:33 2022 ] Batch(45/123) done. Loss: 0.0544 lr:0.001000 +[ Wed Sep 14 12:13:14 2022 ] Eval epoch: 99 +[ Wed Sep 14 12:14:04 2022 ] Mean test loss of 258 batches: 1.715172529220581. 
+[ Wed Sep 14 12:14:04 2022 ] Top1: 68.75% +[ Wed Sep 14 12:14:04 2022 ] Top5: 91.98% +[ Wed Sep 14 12:14:04 2022 ] Training epoch: 100 +[ Wed Sep 14 12:14:19 2022 ] Batch(22/123) done. Loss: 0.0687 lr:0.001000 +[ Wed Sep 14 12:15:12 2022 ] Batch(122/123) done. Loss: 0.1128 lr:0.001000 +[ Wed Sep 14 12:15:12 2022 ] Eval epoch: 100 +[ Wed Sep 14 12:16:02 2022 ] Mean test loss of 258 batches: 1.6926435232162476. +[ Wed Sep 14 12:16:02 2022 ] Top1: 68.85% +[ Wed Sep 14 12:16:02 2022 ] Top5: 92.01% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d7283f1f4fd2d2c1b8561422c8dfb194b7223c3 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_bone_motion_xview +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-view/train_bone_motion.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 8 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_bone_motion_xview +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone_motion.npy + debug: false + label_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_bone_motion_xview diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = 
nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, 
num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, 
stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d79a12a4cdef385b8ec0b8306bda4a493f89e673 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4644975361b545f619107b2b6e4353687caad06eb5ecd47aba2519c656431a7b +size 5718404 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..de0f6b72c98a671513e75d1b2b8baab832f47bcc --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_motion_xview/log.txt @@ -0,0 +1,626 @@ +[ Tue Sep 13 14:49:49 2022 ] Parameters: +{'work_dir': './work_dir/ntu_bone_motion_xview', 'model_saved_name': './save_models/ntu_bone_motion_xview', 'Experiment_name': 'ntu_bone_motion_xview', 'config': 
'./config/nturgbd-cross-view/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 8, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 14:49:49 2022 ] Training epoch: 1 +[ Tue Sep 13 14:50:39 2022 ] Batch(99/123) done. Loss: 3.1202 lr:0.100000 +[ Tue Sep 13 14:50:49 2022 ] Eval epoch: 1 +[ Tue Sep 13 14:51:48 2022 ] Mean test loss of 296 batches: 4.840941429138184. +[ Tue Sep 13 14:51:48 2022 ] Top1: 4.59% +[ Tue Sep 13 14:51:48 2022 ] Top5: 16.73% +[ Tue Sep 13 14:51:48 2022 ] Training epoch: 2 +[ Tue Sep 13 14:52:32 2022 ] Batch(76/123) done. Loss: 2.7026 lr:0.100000 +[ Tue Sep 13 14:52:56 2022 ] Eval epoch: 2 +[ Tue Sep 13 14:53:54 2022 ] Mean test loss of 296 batches: 4.536585807800293. 
+[ Tue Sep 13 14:53:54 2022 ] Top1: 6.54% +[ Tue Sep 13 14:53:54 2022 ] Top5: 26.52% +[ Tue Sep 13 14:53:55 2022 ] Training epoch: 3 +[ Tue Sep 13 14:54:26 2022 ] Batch(53/123) done. Loss: 2.7912 lr:0.100000 +[ Tue Sep 13 14:55:03 2022 ] Eval epoch: 3 +[ Tue Sep 13 14:56:02 2022 ] Mean test loss of 296 batches: 3.8523175716400146. +[ Tue Sep 13 14:56:02 2022 ] Top1: 8.36% +[ Tue Sep 13 14:56:02 2022 ] Top5: 30.67% +[ Tue Sep 13 14:56:02 2022 ] Training epoch: 4 +[ Tue Sep 13 14:56:22 2022 ] Batch(30/123) done. Loss: 2.3204 lr:0.100000 +[ Tue Sep 13 14:57:11 2022 ] Eval epoch: 4 +[ Tue Sep 13 14:58:09 2022 ] Mean test loss of 296 batches: 3.4134764671325684. +[ Tue Sep 13 14:58:09 2022 ] Top1: 14.94% +[ Tue Sep 13 14:58:09 2022 ] Top5: 38.93% +[ Tue Sep 13 14:58:09 2022 ] Training epoch: 5 +[ Tue Sep 13 14:58:17 2022 ] Batch(7/123) done. Loss: 2.1312 lr:0.100000 +[ Tue Sep 13 14:59:10 2022 ] Batch(107/123) done. Loss: 2.1073 lr:0.100000 +[ Tue Sep 13 14:59:18 2022 ] Eval epoch: 5 +[ Tue Sep 13 15:00:16 2022 ] Mean test loss of 296 batches: 3.36333966255188. +[ Tue Sep 13 15:00:16 2022 ] Top1: 16.70% +[ Tue Sep 13 15:00:16 2022 ] Top5: 45.77% +[ Tue Sep 13 15:00:16 2022 ] Training epoch: 6 +[ Tue Sep 13 15:01:04 2022 ] Batch(84/123) done. Loss: 1.5339 lr:0.100000 +[ Tue Sep 13 15:01:25 2022 ] Eval epoch: 6 +[ Tue Sep 13 15:02:23 2022 ] Mean test loss of 296 batches: 2.8109207153320312. +[ Tue Sep 13 15:02:23 2022 ] Top1: 26.76% +[ Tue Sep 13 15:02:24 2022 ] Top5: 61.30% +[ Tue Sep 13 15:02:24 2022 ] Training epoch: 7 +[ Tue Sep 13 15:03:00 2022 ] Batch(61/123) done. Loss: 1.9341 lr:0.100000 +[ Tue Sep 13 15:03:32 2022 ] Eval epoch: 7 +[ Tue Sep 13 15:04:31 2022 ] Mean test loss of 296 batches: 2.886589765548706. +[ Tue Sep 13 15:04:31 2022 ] Top1: 28.90% +[ Tue Sep 13 15:04:31 2022 ] Top5: 65.04% +[ Tue Sep 13 15:04:31 2022 ] Training epoch: 8 +[ Tue Sep 13 15:04:55 2022 ] Batch(38/123) done. 
Loss: 1.6441 lr:0.100000 +[ Tue Sep 13 15:05:40 2022 ] Eval epoch: 8 +[ Tue Sep 13 15:06:38 2022 ] Mean test loss of 296 batches: 2.8227291107177734. +[ Tue Sep 13 15:06:38 2022 ] Top1: 27.65% +[ Tue Sep 13 15:06:38 2022 ] Top5: 66.53% +[ Tue Sep 13 15:06:38 2022 ] Training epoch: 9 +[ Tue Sep 13 15:06:50 2022 ] Batch(15/123) done. Loss: 1.0334 lr:0.100000 +[ Tue Sep 13 15:07:43 2022 ] Batch(115/123) done. Loss: 1.3961 lr:0.100000 +[ Tue Sep 13 15:07:47 2022 ] Eval epoch: 9 +[ Tue Sep 13 15:08:45 2022 ] Mean test loss of 296 batches: 2.284026622772217. +[ Tue Sep 13 15:08:45 2022 ] Top1: 35.81% +[ Tue Sep 13 15:08:45 2022 ] Top5: 74.89% +[ Tue Sep 13 15:08:45 2022 ] Training epoch: 10 +[ Tue Sep 13 15:09:37 2022 ] Batch(92/123) done. Loss: 1.4670 lr:0.100000 +[ Tue Sep 13 15:09:53 2022 ] Eval epoch: 10 +[ Tue Sep 13 15:10:52 2022 ] Mean test loss of 296 batches: 2.639165163040161. +[ Tue Sep 13 15:10:52 2022 ] Top1: 31.54% +[ Tue Sep 13 15:10:52 2022 ] Top5: 69.62% +[ Tue Sep 13 15:10:52 2022 ] Training epoch: 11 +[ Tue Sep 13 15:11:32 2022 ] Batch(69/123) done. Loss: 1.3732 lr:0.100000 +[ Tue Sep 13 15:12:01 2022 ] Eval epoch: 11 +[ Tue Sep 13 15:12:59 2022 ] Mean test loss of 296 batches: 2.07755970954895. +[ Tue Sep 13 15:12:59 2022 ] Top1: 41.10% +[ Tue Sep 13 15:12:59 2022 ] Top5: 81.31% +[ Tue Sep 13 15:12:59 2022 ] Training epoch: 12 +[ Tue Sep 13 15:13:27 2022 ] Batch(46/123) done. Loss: 1.6203 lr:0.100000 +[ Tue Sep 13 15:14:08 2022 ] Eval epoch: 12 +[ Tue Sep 13 15:15:06 2022 ] Mean test loss of 296 batches: 2.7575278282165527. +[ Tue Sep 13 15:15:06 2022 ] Top1: 33.60% +[ Tue Sep 13 15:15:06 2022 ] Top5: 72.88% +[ Tue Sep 13 15:15:06 2022 ] Training epoch: 13 +[ Tue Sep 13 15:15:22 2022 ] Batch(23/123) done. Loss: 1.0448 lr:0.100000 +[ Tue Sep 13 15:16:14 2022 ] Eval epoch: 13 +[ Tue Sep 13 15:17:13 2022 ] Mean test loss of 296 batches: 2.646923065185547. 
+[ Tue Sep 13 15:17:13 2022 ] Top1: 40.92% +[ Tue Sep 13 15:17:13 2022 ] Top5: 79.23% +[ Tue Sep 13 15:17:13 2022 ] Training epoch: 14 +[ Tue Sep 13 15:17:17 2022 ] Batch(0/123) done. Loss: 1.2924 lr:0.100000 +[ Tue Sep 13 15:18:10 2022 ] Batch(100/123) done. Loss: 0.8623 lr:0.100000 +[ Tue Sep 13 15:18:22 2022 ] Eval epoch: 14 +[ Tue Sep 13 15:19:20 2022 ] Mean test loss of 296 batches: 1.9788624048233032. +[ Tue Sep 13 15:19:20 2022 ] Top1: 44.42% +[ Tue Sep 13 15:19:21 2022 ] Top5: 83.32% +[ Tue Sep 13 15:19:21 2022 ] Training epoch: 15 +[ Tue Sep 13 15:20:05 2022 ] Batch(77/123) done. Loss: 1.0465 lr:0.100000 +[ Tue Sep 13 15:20:29 2022 ] Eval epoch: 15 +[ Tue Sep 13 15:21:28 2022 ] Mean test loss of 296 batches: 1.917767882347107. +[ Tue Sep 13 15:21:28 2022 ] Top1: 48.38% +[ Tue Sep 13 15:21:28 2022 ] Top5: 85.17% +[ Tue Sep 13 15:21:28 2022 ] Training epoch: 16 +[ Tue Sep 13 15:22:00 2022 ] Batch(54/123) done. Loss: 0.5756 lr:0.100000 +[ Tue Sep 13 15:22:36 2022 ] Eval epoch: 16 +[ Tue Sep 13 15:23:35 2022 ] Mean test loss of 296 batches: 2.672187328338623. +[ Tue Sep 13 15:23:35 2022 ] Top1: 42.49% +[ Tue Sep 13 15:23:35 2022 ] Top5: 79.57% +[ Tue Sep 13 15:23:35 2022 ] Training epoch: 17 +[ Tue Sep 13 15:23:55 2022 ] Batch(31/123) done. Loss: 0.9218 lr:0.100000 +[ Tue Sep 13 15:24:44 2022 ] Eval epoch: 17 +[ Tue Sep 13 15:25:42 2022 ] Mean test loss of 296 batches: 2.423353433609009. +[ Tue Sep 13 15:25:42 2022 ] Top1: 44.77% +[ Tue Sep 13 15:25:42 2022 ] Top5: 82.56% +[ Tue Sep 13 15:25:42 2022 ] Training epoch: 18 +[ Tue Sep 13 15:25:50 2022 ] Batch(8/123) done. Loss: 0.6231 lr:0.100000 +[ Tue Sep 13 15:26:43 2022 ] Batch(108/123) done. Loss: 1.0274 lr:0.100000 +[ Tue Sep 13 15:26:51 2022 ] Eval epoch: 18 +[ Tue Sep 13 15:27:49 2022 ] Mean test loss of 296 batches: 2.269897222518921. 
+[ Tue Sep 13 15:27:49 2022 ] Top1: 41.53% +[ Tue Sep 13 15:27:49 2022 ] Top5: 77.52% +[ Tue Sep 13 15:27:49 2022 ] Training epoch: 19 +[ Tue Sep 13 15:28:38 2022 ] Batch(85/123) done. Loss: 0.6694 lr:0.100000 +[ Tue Sep 13 15:28:58 2022 ] Eval epoch: 19 +[ Tue Sep 13 15:29:57 2022 ] Mean test loss of 296 batches: 1.9410808086395264. +[ Tue Sep 13 15:29:57 2022 ] Top1: 49.68% +[ Tue Sep 13 15:29:57 2022 ] Top5: 85.26% +[ Tue Sep 13 15:29:57 2022 ] Training epoch: 20 +[ Tue Sep 13 15:30:33 2022 ] Batch(62/123) done. Loss: 0.6519 lr:0.100000 +[ Tue Sep 13 15:31:06 2022 ] Eval epoch: 20 +[ Tue Sep 13 15:32:05 2022 ] Mean test loss of 296 batches: 3.0234177112579346. +[ Tue Sep 13 15:32:05 2022 ] Top1: 37.13% +[ Tue Sep 13 15:32:05 2022 ] Top5: 74.79% +[ Tue Sep 13 15:32:05 2022 ] Training epoch: 21 +[ Tue Sep 13 15:32:29 2022 ] Batch(39/123) done. Loss: 0.8881 lr:0.100000 +[ Tue Sep 13 15:33:13 2022 ] Eval epoch: 21 +[ Tue Sep 13 15:34:12 2022 ] Mean test loss of 296 batches: 2.9165000915527344. +[ Tue Sep 13 15:34:12 2022 ] Top1: 45.04% +[ Tue Sep 13 15:34:12 2022 ] Top5: 79.92% +[ Tue Sep 13 15:34:12 2022 ] Training epoch: 22 +[ Tue Sep 13 15:34:24 2022 ] Batch(16/123) done. Loss: 0.4737 lr:0.100000 +[ Tue Sep 13 15:35:18 2022 ] Batch(116/123) done. Loss: 0.6908 lr:0.100000 +[ Tue Sep 13 15:35:21 2022 ] Eval epoch: 22 +[ Tue Sep 13 15:36:19 2022 ] Mean test loss of 296 batches: 1.941760540008545. +[ Tue Sep 13 15:36:19 2022 ] Top1: 49.68% +[ Tue Sep 13 15:36:20 2022 ] Top5: 85.50% +[ Tue Sep 13 15:36:20 2022 ] Training epoch: 23 +[ Tue Sep 13 15:37:13 2022 ] Batch(93/123) done. Loss: 0.7538 lr:0.100000 +[ Tue Sep 13 15:37:28 2022 ] Eval epoch: 23 +[ Tue Sep 13 15:38:26 2022 ] Mean test loss of 296 batches: 2.4189651012420654. +[ Tue Sep 13 15:38:27 2022 ] Top1: 46.00% +[ Tue Sep 13 15:38:27 2022 ] Top5: 83.87% +[ Tue Sep 13 15:38:27 2022 ] Training epoch: 24 +[ Tue Sep 13 15:39:08 2022 ] Batch(70/123) done. 
Loss: 0.5502 lr:0.100000 +[ Tue Sep 13 15:39:36 2022 ] Eval epoch: 24 +[ Tue Sep 13 15:40:34 2022 ] Mean test loss of 296 batches: 4.8874897956848145. +[ Tue Sep 13 15:40:34 2022 ] Top1: 35.69% +[ Tue Sep 13 15:40:34 2022 ] Top5: 70.55% +[ Tue Sep 13 15:40:34 2022 ] Training epoch: 25 +[ Tue Sep 13 15:41:03 2022 ] Batch(47/123) done. Loss: 0.5227 lr:0.100000 +[ Tue Sep 13 15:41:43 2022 ] Eval epoch: 25 +[ Tue Sep 13 15:42:42 2022 ] Mean test loss of 296 batches: 2.7462456226348877. +[ Tue Sep 13 15:42:42 2022 ] Top1: 38.72% +[ Tue Sep 13 15:42:42 2022 ] Top5: 71.31% +[ Tue Sep 13 15:42:42 2022 ] Training epoch: 26 +[ Tue Sep 13 15:42:58 2022 ] Batch(24/123) done. Loss: 1.2088 lr:0.100000 +[ Tue Sep 13 15:43:50 2022 ] Eval epoch: 26 +[ Tue Sep 13 15:44:49 2022 ] Mean test loss of 296 batches: 3.007772207260132. +[ Tue Sep 13 15:44:49 2022 ] Top1: 39.06% +[ Tue Sep 13 15:44:49 2022 ] Top5: 76.15% +[ Tue Sep 13 15:44:49 2022 ] Training epoch: 27 +[ Tue Sep 13 15:44:53 2022 ] Batch(1/123) done. Loss: 0.2511 lr:0.100000 +[ Tue Sep 13 15:45:46 2022 ] Batch(101/123) done. Loss: 0.5912 lr:0.100000 +[ Tue Sep 13 15:45:58 2022 ] Eval epoch: 27 +[ Tue Sep 13 15:46:56 2022 ] Mean test loss of 296 batches: 11.681885719299316. +[ Tue Sep 13 15:46:56 2022 ] Top1: 12.93% +[ Tue Sep 13 15:46:56 2022 ] Top5: 41.14% +[ Tue Sep 13 15:46:56 2022 ] Training epoch: 28 +[ Tue Sep 13 15:47:41 2022 ] Batch(78/123) done. Loss: 0.4458 lr:0.100000 +[ Tue Sep 13 15:48:05 2022 ] Eval epoch: 28 +[ Tue Sep 13 15:49:03 2022 ] Mean test loss of 296 batches: 5.569553852081299. +[ Tue Sep 13 15:49:03 2022 ] Top1: 31.64% +[ Tue Sep 13 15:49:03 2022 ] Top5: 67.73% +[ Tue Sep 13 15:49:03 2022 ] Training epoch: 29 +[ Tue Sep 13 15:49:36 2022 ] Batch(55/123) done. Loss: 0.4035 lr:0.100000 +[ Tue Sep 13 15:50:12 2022 ] Eval epoch: 29 +[ Tue Sep 13 15:51:10 2022 ] Mean test loss of 296 batches: 2.3737335205078125. 
+[ Tue Sep 13 15:51:10 2022 ] Top1: 49.85% +[ Tue Sep 13 15:51:11 2022 ] Top5: 82.92% +[ Tue Sep 13 15:51:11 2022 ] Training epoch: 30 +[ Tue Sep 13 15:51:31 2022 ] Batch(32/123) done. Loss: 0.6966 lr:0.100000 +[ Tue Sep 13 15:52:19 2022 ] Eval epoch: 30 +[ Tue Sep 13 15:53:17 2022 ] Mean test loss of 296 batches: 4.07661247253418. +[ Tue Sep 13 15:53:17 2022 ] Top1: 41.06% +[ Tue Sep 13 15:53:18 2022 ] Top5: 75.22% +[ Tue Sep 13 15:53:18 2022 ] Training epoch: 31 +[ Tue Sep 13 15:53:26 2022 ] Batch(9/123) done. Loss: 0.2052 lr:0.100000 +[ Tue Sep 13 15:54:19 2022 ] Batch(109/123) done. Loss: 0.3980 lr:0.100000 +[ Tue Sep 13 15:54:26 2022 ] Eval epoch: 31 +[ Tue Sep 13 15:55:24 2022 ] Mean test loss of 296 batches: 1.540056586265564. +[ Tue Sep 13 15:55:24 2022 ] Top1: 58.08% +[ Tue Sep 13 15:55:24 2022 ] Top5: 90.97% +[ Tue Sep 13 15:55:25 2022 ] Training epoch: 32 +[ Tue Sep 13 15:56:14 2022 ] Batch(86/123) done. Loss: 0.4751 lr:0.100000 +[ Tue Sep 13 15:56:33 2022 ] Eval epoch: 32 +[ Tue Sep 13 15:57:32 2022 ] Mean test loss of 296 batches: 1.9202641248703003. +[ Tue Sep 13 15:57:32 2022 ] Top1: 57.23% +[ Tue Sep 13 15:57:32 2022 ] Top5: 89.87% +[ Tue Sep 13 15:57:32 2022 ] Training epoch: 33 +[ Tue Sep 13 15:58:09 2022 ] Batch(63/123) done. Loss: 0.3523 lr:0.100000 +[ Tue Sep 13 15:58:41 2022 ] Eval epoch: 33 +[ Tue Sep 13 15:59:39 2022 ] Mean test loss of 296 batches: 10.33612060546875. +[ Tue Sep 13 15:59:39 2022 ] Top1: 19.24% +[ Tue Sep 13 15:59:39 2022 ] Top5: 51.09% +[ Tue Sep 13 15:59:39 2022 ] Training epoch: 34 +[ Tue Sep 13 16:00:04 2022 ] Batch(40/123) done. Loss: 0.1381 lr:0.100000 +[ Tue Sep 13 16:00:48 2022 ] Eval epoch: 34 +[ Tue Sep 13 16:01:47 2022 ] Mean test loss of 296 batches: 2.9916625022888184. +[ Tue Sep 13 16:01:47 2022 ] Top1: 43.66% +[ Tue Sep 13 16:01:47 2022 ] Top5: 74.56% +[ Tue Sep 13 16:01:47 2022 ] Training epoch: 35 +[ Tue Sep 13 16:02:00 2022 ] Batch(17/123) done. 
Loss: 0.2303 lr:0.100000 +[ Tue Sep 13 16:02:53 2022 ] Batch(117/123) done. Loss: 0.3284 lr:0.100000 +[ Tue Sep 13 16:02:56 2022 ] Eval epoch: 35 +[ Tue Sep 13 16:03:54 2022 ] Mean test loss of 296 batches: 4.387256622314453. +[ Tue Sep 13 16:03:54 2022 ] Top1: 39.56% +[ Tue Sep 13 16:03:55 2022 ] Top5: 79.22% +[ Tue Sep 13 16:03:55 2022 ] Training epoch: 36 +[ Tue Sep 13 16:04:48 2022 ] Batch(94/123) done. Loss: 0.3162 lr:0.100000 +[ Tue Sep 13 16:05:03 2022 ] Eval epoch: 36 +[ Tue Sep 13 16:06:01 2022 ] Mean test loss of 296 batches: 2.636432647705078. +[ Tue Sep 13 16:06:01 2022 ] Top1: 49.47% +[ Tue Sep 13 16:06:01 2022 ] Top5: 84.82% +[ Tue Sep 13 16:06:01 2022 ] Training epoch: 37 +[ Tue Sep 13 16:06:42 2022 ] Batch(71/123) done. Loss: 0.1953 lr:0.100000 +[ Tue Sep 13 16:07:10 2022 ] Eval epoch: 37 +[ Tue Sep 13 16:08:08 2022 ] Mean test loss of 296 batches: 3.678734064102173. +[ Tue Sep 13 16:08:08 2022 ] Top1: 43.55% +[ Tue Sep 13 16:08:08 2022 ] Top5: 82.04% +[ Tue Sep 13 16:08:09 2022 ] Training epoch: 38 +[ Tue Sep 13 16:08:37 2022 ] Batch(48/123) done. Loss: 0.2465 lr:0.100000 +[ Tue Sep 13 16:09:17 2022 ] Eval epoch: 38 +[ Tue Sep 13 16:10:15 2022 ] Mean test loss of 296 batches: 4.35245418548584. +[ Tue Sep 13 16:10:15 2022 ] Top1: 38.75% +[ Tue Sep 13 16:10:15 2022 ] Top5: 78.83% +[ Tue Sep 13 16:10:15 2022 ] Training epoch: 39 +[ Tue Sep 13 16:10:32 2022 ] Batch(25/123) done. Loss: 0.3984 lr:0.100000 +[ Tue Sep 13 16:11:24 2022 ] Eval epoch: 39 +[ Tue Sep 13 16:12:23 2022 ] Mean test loss of 296 batches: 5.21007776260376. +[ Tue Sep 13 16:12:23 2022 ] Top1: 37.24% +[ Tue Sep 13 16:12:23 2022 ] Top5: 72.60% +[ Tue Sep 13 16:12:23 2022 ] Training epoch: 40 +[ Tue Sep 13 16:12:28 2022 ] Batch(2/123) done. Loss: 0.1409 lr:0.100000 +[ Tue Sep 13 16:13:21 2022 ] Batch(102/123) done. Loss: 0.4954 lr:0.100000 +[ Tue Sep 13 16:13:32 2022 ] Eval epoch: 40 +[ Tue Sep 13 16:14:30 2022 ] Mean test loss of 296 batches: 37.40964889526367. 
+[ Tue Sep 13 16:14:30 2022 ] Top1: 12.61% +[ Tue Sep 13 16:14:30 2022 ] Top5: 32.74% +[ Tue Sep 13 16:14:30 2022 ] Training epoch: 41 +[ Tue Sep 13 16:15:16 2022 ] Batch(79/123) done. Loss: 0.0789 lr:0.100000 +[ Tue Sep 13 16:15:39 2022 ] Eval epoch: 41 +[ Tue Sep 13 16:16:38 2022 ] Mean test loss of 296 batches: 2.615860939025879. +[ Tue Sep 13 16:16:38 2022 ] Top1: 52.59% +[ Tue Sep 13 16:16:38 2022 ] Top5: 84.47% +[ Tue Sep 13 16:16:38 2022 ] Training epoch: 42 +[ Tue Sep 13 16:17:12 2022 ] Batch(56/123) done. Loss: 0.2034 lr:0.100000 +[ Tue Sep 13 16:17:47 2022 ] Eval epoch: 42 +[ Tue Sep 13 16:18:45 2022 ] Mean test loss of 296 batches: 2.6538703441619873. +[ Tue Sep 13 16:18:46 2022 ] Top1: 41.63% +[ Tue Sep 13 16:18:46 2022 ] Top5: 77.41% +[ Tue Sep 13 16:18:46 2022 ] Training epoch: 43 +[ Tue Sep 13 16:19:07 2022 ] Batch(33/123) done. Loss: 0.1044 lr:0.100000 +[ Tue Sep 13 16:19:54 2022 ] Eval epoch: 43 +[ Tue Sep 13 16:20:53 2022 ] Mean test loss of 296 batches: 2.6292827129364014. +[ Tue Sep 13 16:20:53 2022 ] Top1: 45.29% +[ Tue Sep 13 16:20:53 2022 ] Top5: 77.61% +[ Tue Sep 13 16:20:53 2022 ] Training epoch: 44 +[ Tue Sep 13 16:21:02 2022 ] Batch(10/123) done. Loss: 0.2095 lr:0.100000 +[ Tue Sep 13 16:21:55 2022 ] Batch(110/123) done. Loss: 0.3201 lr:0.100000 +[ Tue Sep 13 16:22:02 2022 ] Eval epoch: 44 +[ Tue Sep 13 16:23:00 2022 ] Mean test loss of 296 batches: 3.5712924003601074. +[ Tue Sep 13 16:23:00 2022 ] Top1: 46.43% +[ Tue Sep 13 16:23:00 2022 ] Top5: 79.34% +[ Tue Sep 13 16:23:00 2022 ] Training epoch: 45 +[ Tue Sep 13 16:23:49 2022 ] Batch(87/123) done. Loss: 0.5182 lr:0.100000 +[ Tue Sep 13 16:24:08 2022 ] Eval epoch: 45 +[ Tue Sep 13 16:25:07 2022 ] Mean test loss of 296 batches: 3.635678291320801. +[ Tue Sep 13 16:25:07 2022 ] Top1: 51.18% +[ Tue Sep 13 16:25:07 2022 ] Top5: 83.47% +[ Tue Sep 13 16:25:08 2022 ] Training epoch: 46 +[ Tue Sep 13 16:25:45 2022 ] Batch(64/123) done. 
Loss: 0.2040 lr:0.100000 +[ Tue Sep 13 16:26:17 2022 ] Eval epoch: 46 +[ Tue Sep 13 16:27:15 2022 ] Mean test loss of 296 batches: 3.3880717754364014. +[ Tue Sep 13 16:27:15 2022 ] Top1: 50.74% +[ Tue Sep 13 16:27:15 2022 ] Top5: 83.92% +[ Tue Sep 13 16:27:15 2022 ] Training epoch: 47 +[ Tue Sep 13 16:27:41 2022 ] Batch(41/123) done. Loss: 0.0575 lr:0.100000 +[ Tue Sep 13 16:28:24 2022 ] Eval epoch: 47 +[ Tue Sep 13 16:29:22 2022 ] Mean test loss of 296 batches: 7.22807502746582. +[ Tue Sep 13 16:29:22 2022 ] Top1: 30.86% +[ Tue Sep 13 16:29:22 2022 ] Top5: 61.22% +[ Tue Sep 13 16:29:23 2022 ] Training epoch: 48 +[ Tue Sep 13 16:29:36 2022 ] Batch(18/123) done. Loss: 0.1199 lr:0.100000 +[ Tue Sep 13 16:30:29 2022 ] Batch(118/123) done. Loss: 0.2213 lr:0.100000 +[ Tue Sep 13 16:30:31 2022 ] Eval epoch: 48 +[ Tue Sep 13 16:31:30 2022 ] Mean test loss of 296 batches: 3.9004690647125244. +[ Tue Sep 13 16:31:30 2022 ] Top1: 39.07% +[ Tue Sep 13 16:31:30 2022 ] Top5: 73.79% +[ Tue Sep 13 16:31:30 2022 ] Training epoch: 49 +[ Tue Sep 13 16:32:24 2022 ] Batch(95/123) done. Loss: 0.2507 lr:0.100000 +[ Tue Sep 13 16:32:39 2022 ] Eval epoch: 49 +[ Tue Sep 13 16:33:38 2022 ] Mean test loss of 296 batches: 3.2589619159698486. +[ Tue Sep 13 16:33:38 2022 ] Top1: 47.72% +[ Tue Sep 13 16:33:38 2022 ] Top5: 81.19% +[ Tue Sep 13 16:33:38 2022 ] Training epoch: 50 +[ Tue Sep 13 16:34:20 2022 ] Batch(72/123) done. Loss: 0.0821 lr:0.100000 +[ Tue Sep 13 16:34:46 2022 ] Eval epoch: 50 +[ Tue Sep 13 16:35:45 2022 ] Mean test loss of 296 batches: 8.795620918273926. +[ Tue Sep 13 16:35:45 2022 ] Top1: 22.00% +[ Tue Sep 13 16:35:45 2022 ] Top5: 49.46% +[ Tue Sep 13 16:35:45 2022 ] Training epoch: 51 +[ Tue Sep 13 16:36:15 2022 ] Batch(49/123) done. Loss: 0.1764 lr:0.100000 +[ Tue Sep 13 16:36:54 2022 ] Eval epoch: 51 +[ Tue Sep 13 16:37:52 2022 ] Mean test loss of 296 batches: 2.2129385471343994. 
+[ Tue Sep 13 16:37:52 2022 ] Top1: 54.58% +[ Tue Sep 13 16:37:52 2022 ] Top5: 86.85% +[ Tue Sep 13 16:37:52 2022 ] Training epoch: 52 +[ Tue Sep 13 16:38:10 2022 ] Batch(26/123) done. Loss: 0.2596 lr:0.100000 +[ Tue Sep 13 16:39:01 2022 ] Eval epoch: 52 +[ Tue Sep 13 16:39:59 2022 ] Mean test loss of 296 batches: 2.1799874305725098. +[ Tue Sep 13 16:39:59 2022 ] Top1: 57.74% +[ Tue Sep 13 16:39:59 2022 ] Top5: 88.09% +[ Tue Sep 13 16:39:59 2022 ] Training epoch: 53 +[ Tue Sep 13 16:40:05 2022 ] Batch(3/123) done. Loss: 0.0774 lr:0.100000 +[ Tue Sep 13 16:40:58 2022 ] Batch(103/123) done. Loss: 0.2229 lr:0.100000 +[ Tue Sep 13 16:41:08 2022 ] Eval epoch: 53 +[ Tue Sep 13 16:42:06 2022 ] Mean test loss of 296 batches: 2.6150705814361572. +[ Tue Sep 13 16:42:06 2022 ] Top1: 55.49% +[ Tue Sep 13 16:42:07 2022 ] Top5: 85.43% +[ Tue Sep 13 16:42:07 2022 ] Training epoch: 54 +[ Tue Sep 13 16:42:53 2022 ] Batch(80/123) done. Loss: 0.1808 lr:0.100000 +[ Tue Sep 13 16:43:16 2022 ] Eval epoch: 54 +[ Tue Sep 13 16:44:14 2022 ] Mean test loss of 296 batches: 5.603597164154053. +[ Tue Sep 13 16:44:14 2022 ] Top1: 36.41% +[ Tue Sep 13 16:44:14 2022 ] Top5: 68.51% +[ Tue Sep 13 16:44:14 2022 ] Training epoch: 55 +[ Tue Sep 13 16:44:48 2022 ] Batch(57/123) done. Loss: 0.3081 lr:0.100000 +[ Tue Sep 13 16:45:23 2022 ] Eval epoch: 55 +[ Tue Sep 13 16:46:22 2022 ] Mean test loss of 296 batches: 2.799612283706665. +[ Tue Sep 13 16:46:22 2022 ] Top1: 48.30% +[ Tue Sep 13 16:46:22 2022 ] Top5: 82.42% +[ Tue Sep 13 16:46:22 2022 ] Training epoch: 56 +[ Tue Sep 13 16:46:44 2022 ] Batch(34/123) done. Loss: 0.2197 lr:0.100000 +[ Tue Sep 13 16:47:31 2022 ] Eval epoch: 56 +[ Tue Sep 13 16:48:29 2022 ] Mean test loss of 296 batches: 2.979994297027588. +[ Tue Sep 13 16:48:29 2022 ] Top1: 52.58% +[ Tue Sep 13 16:48:30 2022 ] Top5: 87.83% +[ Tue Sep 13 16:48:30 2022 ] Training epoch: 57 +[ Tue Sep 13 16:48:39 2022 ] Batch(11/123) done. 
Loss: 0.2202 lr:0.100000 +[ Tue Sep 13 16:49:32 2022 ] Batch(111/123) done. Loss: 0.0774 lr:0.100000 +[ Tue Sep 13 16:49:38 2022 ] Eval epoch: 57 +[ Tue Sep 13 16:50:37 2022 ] Mean test loss of 296 batches: 2.7195138931274414. +[ Tue Sep 13 16:50:37 2022 ] Top1: 51.15% +[ Tue Sep 13 16:50:37 2022 ] Top5: 82.30% +[ Tue Sep 13 16:50:37 2022 ] Training epoch: 58 +[ Tue Sep 13 16:51:27 2022 ] Batch(88/123) done. Loss: 0.1158 lr:0.100000 +[ Tue Sep 13 16:51:46 2022 ] Eval epoch: 58 +[ Tue Sep 13 16:52:44 2022 ] Mean test loss of 296 batches: 2.153719186782837. +[ Tue Sep 13 16:52:44 2022 ] Top1: 53.82% +[ Tue Sep 13 16:52:45 2022 ] Top5: 86.04% +[ Tue Sep 13 16:52:45 2022 ] Training epoch: 59 +[ Tue Sep 13 16:53:23 2022 ] Batch(65/123) done. Loss: 0.2057 lr:0.100000 +[ Tue Sep 13 16:53:53 2022 ] Eval epoch: 59 +[ Tue Sep 13 16:54:52 2022 ] Mean test loss of 296 batches: 4.032756328582764. +[ Tue Sep 13 16:54:52 2022 ] Top1: 44.69% +[ Tue Sep 13 16:54:52 2022 ] Top5: 80.61% +[ Tue Sep 13 16:54:52 2022 ] Training epoch: 60 +[ Tue Sep 13 16:55:18 2022 ] Batch(42/123) done. Loss: 0.2568 lr:0.100000 +[ Tue Sep 13 16:56:01 2022 ] Eval epoch: 60 +[ Tue Sep 13 16:57:00 2022 ] Mean test loss of 296 batches: 2.3625600337982178. +[ Tue Sep 13 16:57:00 2022 ] Top1: 59.93% +[ Tue Sep 13 16:57:00 2022 ] Top5: 89.68% +[ Tue Sep 13 16:57:00 2022 ] Training epoch: 61 +[ Tue Sep 13 16:57:14 2022 ] Batch(19/123) done. Loss: 0.1772 lr:0.010000 +[ Tue Sep 13 16:58:07 2022 ] Batch(119/123) done. Loss: 0.0429 lr:0.010000 +[ Tue Sep 13 16:58:09 2022 ] Eval epoch: 61 +[ Tue Sep 13 16:59:08 2022 ] Mean test loss of 296 batches: 1.7393772602081299. +[ Tue Sep 13 16:59:08 2022 ] Top1: 66.50% +[ Tue Sep 13 16:59:08 2022 ] Top5: 93.04% +[ Tue Sep 13 16:59:08 2022 ] Training epoch: 62 +[ Tue Sep 13 17:00:02 2022 ] Batch(96/123) done. Loss: 0.0733 lr:0.010000 +[ Tue Sep 13 17:00:17 2022 ] Eval epoch: 62 +[ Tue Sep 13 17:01:15 2022 ] Mean test loss of 296 batches: 1.7127116918563843. 
+[ Tue Sep 13 17:01:15 2022 ] Top1: 67.18% +[ Tue Sep 13 17:01:15 2022 ] Top5: 93.18% +[ Tue Sep 13 17:01:15 2022 ] Training epoch: 63 +[ Tue Sep 13 17:01:58 2022 ] Batch(73/123) done. Loss: 0.0496 lr:0.010000 +[ Tue Sep 13 17:02:24 2022 ] Eval epoch: 63 +[ Tue Sep 13 17:03:23 2022 ] Mean test loss of 296 batches: 1.7963802814483643. +[ Tue Sep 13 17:03:23 2022 ] Top1: 66.07% +[ Tue Sep 13 17:03:23 2022 ] Top5: 92.14% +[ Tue Sep 13 17:03:23 2022 ] Training epoch: 64 +[ Tue Sep 13 17:03:54 2022 ] Batch(50/123) done. Loss: 0.0717 lr:0.010000 +[ Tue Sep 13 17:04:32 2022 ] Eval epoch: 64 +[ Tue Sep 13 17:05:31 2022 ] Mean test loss of 296 batches: 1.9539475440979004. +[ Tue Sep 13 17:05:31 2022 ] Top1: 64.48% +[ Tue Sep 13 17:05:31 2022 ] Top5: 91.60% +[ Tue Sep 13 17:05:32 2022 ] Training epoch: 65 +[ Tue Sep 13 17:05:50 2022 ] Batch(27/123) done. Loss: 0.0222 lr:0.010000 +[ Tue Sep 13 17:06:41 2022 ] Eval epoch: 65 +[ Tue Sep 13 17:07:39 2022 ] Mean test loss of 296 batches: 1.960593581199646. +[ Tue Sep 13 17:07:39 2022 ] Top1: 65.09% +[ Tue Sep 13 17:07:39 2022 ] Top5: 92.06% +[ Tue Sep 13 17:07:39 2022 ] Training epoch: 66 +[ Tue Sep 13 17:07:45 2022 ] Batch(4/123) done. Loss: 0.0322 lr:0.010000 +[ Tue Sep 13 17:08:39 2022 ] Batch(104/123) done. Loss: 0.0227 lr:0.010000 +[ Tue Sep 13 17:08:48 2022 ] Eval epoch: 66 +[ Tue Sep 13 17:09:47 2022 ] Mean test loss of 296 batches: 1.9859294891357422. +[ Tue Sep 13 17:09:47 2022 ] Top1: 64.48% +[ Tue Sep 13 17:09:47 2022 ] Top5: 91.58% +[ Tue Sep 13 17:09:47 2022 ] Training epoch: 67 +[ Tue Sep 13 17:10:34 2022 ] Batch(81/123) done. Loss: 0.0098 lr:0.010000 +[ Tue Sep 13 17:10:56 2022 ] Eval epoch: 67 +[ Tue Sep 13 17:11:54 2022 ] Mean test loss of 296 batches: 1.7582426071166992. +[ Tue Sep 13 17:11:54 2022 ] Top1: 67.54% +[ Tue Sep 13 17:11:54 2022 ] Top5: 93.09% +[ Tue Sep 13 17:11:54 2022 ] Training epoch: 68 +[ Tue Sep 13 17:12:29 2022 ] Batch(58/123) done. 
Loss: 0.0118 lr:0.010000 +[ Tue Sep 13 17:13:03 2022 ] Eval epoch: 68 +[ Tue Sep 13 17:14:02 2022 ] Mean test loss of 296 batches: 1.7911266088485718. +[ Tue Sep 13 17:14:02 2022 ] Top1: 67.18% +[ Tue Sep 13 17:14:02 2022 ] Top5: 92.89% +[ Tue Sep 13 17:14:02 2022 ] Training epoch: 69 +[ Tue Sep 13 17:14:25 2022 ] Batch(35/123) done. Loss: 0.0214 lr:0.010000 +[ Tue Sep 13 17:15:11 2022 ] Eval epoch: 69 +[ Tue Sep 13 17:16:10 2022 ] Mean test loss of 296 batches: 1.7951366901397705. +[ Tue Sep 13 17:16:10 2022 ] Top1: 67.27% +[ Tue Sep 13 17:16:10 2022 ] Top5: 93.10% +[ Tue Sep 13 17:16:10 2022 ] Training epoch: 70 +[ Tue Sep 13 17:16:21 2022 ] Batch(12/123) done. Loss: 0.0354 lr:0.010000 +[ Tue Sep 13 17:17:14 2022 ] Batch(112/123) done. Loss: 0.0311 lr:0.010000 +[ Tue Sep 13 17:17:19 2022 ] Eval epoch: 70 +[ Tue Sep 13 17:18:18 2022 ] Mean test loss of 296 batches: 1.748536229133606. +[ Tue Sep 13 17:18:18 2022 ] Top1: 67.49% +[ Tue Sep 13 17:18:18 2022 ] Top5: 93.09% +[ Tue Sep 13 17:18:18 2022 ] Training epoch: 71 +[ Tue Sep 13 17:19:09 2022 ] Batch(89/123) done. Loss: 0.0223 lr:0.010000 +[ Tue Sep 13 17:19:27 2022 ] Eval epoch: 71 +[ Tue Sep 13 17:20:25 2022 ] Mean test loss of 296 batches: 1.9174449443817139. +[ Tue Sep 13 17:20:25 2022 ] Top1: 66.17% +[ Tue Sep 13 17:20:26 2022 ] Top5: 92.54% +[ Tue Sep 13 17:20:26 2022 ] Training epoch: 72 +[ Tue Sep 13 17:21:05 2022 ] Batch(66/123) done. Loss: 0.0342 lr:0.010000 +[ Tue Sep 13 17:21:35 2022 ] Eval epoch: 72 +[ Tue Sep 13 17:22:34 2022 ] Mean test loss of 296 batches: 1.8064578771591187. +[ Tue Sep 13 17:22:34 2022 ] Top1: 67.85% +[ Tue Sep 13 17:22:34 2022 ] Top5: 93.03% +[ Tue Sep 13 17:22:34 2022 ] Training epoch: 73 +[ Tue Sep 13 17:23:00 2022 ] Batch(43/123) done. Loss: 0.1337 lr:0.010000 +[ Tue Sep 13 17:23:43 2022 ] Eval epoch: 73 +[ Tue Sep 13 17:24:42 2022 ] Mean test loss of 296 batches: 1.8427186012268066. 
+[ Tue Sep 13 17:24:42 2022 ] Top1: 67.62% +[ Tue Sep 13 17:24:42 2022 ] Top5: 92.85% +[ Tue Sep 13 17:24:42 2022 ] Training epoch: 74 +[ Tue Sep 13 17:24:57 2022 ] Batch(20/123) done. Loss: 0.0491 lr:0.010000 +[ Tue Sep 13 17:25:50 2022 ] Batch(120/123) done. Loss: 0.0063 lr:0.010000 +[ Tue Sep 13 17:25:51 2022 ] Eval epoch: 74 +[ Tue Sep 13 17:26:49 2022 ] Mean test loss of 296 batches: 1.822741985321045. +[ Tue Sep 13 17:26:49 2022 ] Top1: 67.83% +[ Tue Sep 13 17:26:50 2022 ] Top5: 93.02% +[ Tue Sep 13 17:26:50 2022 ] Training epoch: 75 +[ Tue Sep 13 17:27:45 2022 ] Batch(97/123) done. Loss: 0.0283 lr:0.010000 +[ Tue Sep 13 17:27:58 2022 ] Eval epoch: 75 +[ Tue Sep 13 17:28:57 2022 ] Mean test loss of 296 batches: 2.1541099548339844. +[ Tue Sep 13 17:28:57 2022 ] Top1: 63.78% +[ Tue Sep 13 17:28:57 2022 ] Top5: 90.90% +[ Tue Sep 13 17:28:57 2022 ] Training epoch: 76 +[ Tue Sep 13 17:29:40 2022 ] Batch(74/123) done. Loss: 0.0257 lr:0.010000 +[ Tue Sep 13 17:30:06 2022 ] Eval epoch: 76 +[ Tue Sep 13 17:31:04 2022 ] Mean test loss of 296 batches: 1.8884302377700806. +[ Tue Sep 13 17:31:04 2022 ] Top1: 67.28% +[ Tue Sep 13 17:31:04 2022 ] Top5: 92.74% +[ Tue Sep 13 17:31:04 2022 ] Training epoch: 77 +[ Tue Sep 13 17:31:35 2022 ] Batch(51/123) done. Loss: 0.1267 lr:0.010000 +[ Tue Sep 13 17:32:13 2022 ] Eval epoch: 77 +[ Tue Sep 13 17:33:12 2022 ] Mean test loss of 296 batches: 1.857561707496643. +[ Tue Sep 13 17:33:12 2022 ] Top1: 67.79% +[ Tue Sep 13 17:33:12 2022 ] Top5: 93.00% +[ Tue Sep 13 17:33:12 2022 ] Training epoch: 78 +[ Tue Sep 13 17:33:31 2022 ] Batch(28/123) done. Loss: 0.0125 lr:0.010000 +[ Tue Sep 13 17:34:21 2022 ] Eval epoch: 78 +[ Tue Sep 13 17:35:20 2022 ] Mean test loss of 296 batches: 1.9330501556396484. +[ Tue Sep 13 17:35:20 2022 ] Top1: 67.03% +[ Tue Sep 13 17:35:20 2022 ] Top5: 92.92% +[ Tue Sep 13 17:35:20 2022 ] Training epoch: 79 +[ Tue Sep 13 17:35:26 2022 ] Batch(5/123) done. 
Loss: 0.0252 lr:0.010000 +[ Tue Sep 13 17:36:20 2022 ] Batch(105/123) done. Loss: 0.0428 lr:0.010000 +[ Tue Sep 13 17:36:29 2022 ] Eval epoch: 79 +[ Tue Sep 13 17:37:27 2022 ] Mean test loss of 296 batches: 1.858245849609375. +[ Tue Sep 13 17:37:27 2022 ] Top1: 67.81% +[ Tue Sep 13 17:37:28 2022 ] Top5: 93.01% +[ Tue Sep 13 17:37:28 2022 ] Training epoch: 80 +[ Tue Sep 13 17:38:15 2022 ] Batch(82/123) done. Loss: 0.0193 lr:0.010000 +[ Tue Sep 13 17:38:36 2022 ] Eval epoch: 80 +[ Tue Sep 13 17:39:35 2022 ] Mean test loss of 296 batches: 1.9193637371063232. +[ Tue Sep 13 17:39:35 2022 ] Top1: 67.07% +[ Tue Sep 13 17:39:35 2022 ] Top5: 92.78% +[ Tue Sep 13 17:39:35 2022 ] Training epoch: 81 +[ Tue Sep 13 17:40:11 2022 ] Batch(59/123) done. Loss: 0.0167 lr:0.001000 +[ Tue Sep 13 17:40:44 2022 ] Eval epoch: 81 +[ Tue Sep 13 17:41:43 2022 ] Mean test loss of 296 batches: 1.8838932514190674. +[ Tue Sep 13 17:41:43 2022 ] Top1: 67.84% +[ Tue Sep 13 17:41:43 2022 ] Top5: 93.13% +[ Tue Sep 13 17:41:43 2022 ] Training epoch: 82 +[ Tue Sep 13 17:42:06 2022 ] Batch(36/123) done. Loss: 0.0039 lr:0.001000 +[ Tue Sep 13 17:42:52 2022 ] Eval epoch: 82 +[ Tue Sep 13 17:43:51 2022 ] Mean test loss of 296 batches: 1.841922640800476. +[ Tue Sep 13 17:43:51 2022 ] Top1: 67.83% +[ Tue Sep 13 17:43:51 2022 ] Top5: 93.10% +[ Tue Sep 13 17:43:51 2022 ] Training epoch: 83 +[ Tue Sep 13 17:44:02 2022 ] Batch(13/123) done. Loss: 0.0685 lr:0.001000 +[ Tue Sep 13 17:44:55 2022 ] Batch(113/123) done. Loss: 0.0296 lr:0.001000 +[ Tue Sep 13 17:45:01 2022 ] Eval epoch: 83 +[ Tue Sep 13 17:45:59 2022 ] Mean test loss of 296 batches: 2.0887017250061035. +[ Tue Sep 13 17:45:59 2022 ] Top1: 65.03% +[ Tue Sep 13 17:45:59 2022 ] Top5: 91.66% +[ Tue Sep 13 17:45:59 2022 ] Training epoch: 84 +[ Tue Sep 13 17:46:51 2022 ] Batch(90/123) done. Loss: 0.0603 lr:0.001000 +[ Tue Sep 13 17:47:08 2022 ] Eval epoch: 84 +[ Tue Sep 13 17:48:07 2022 ] Mean test loss of 296 batches: 1.8506443500518799. 
+[ Tue Sep 13 17:48:07 2022 ] Top1: 68.36% +[ Tue Sep 13 17:48:07 2022 ] Top5: 93.14% +[ Tue Sep 13 17:48:07 2022 ] Training epoch: 85 +[ Tue Sep 13 17:48:47 2022 ] Batch(67/123) done. Loss: 0.0216 lr:0.001000 +[ Tue Sep 13 17:49:16 2022 ] Eval epoch: 85 +[ Tue Sep 13 17:50:15 2022 ] Mean test loss of 296 batches: 2.0320518016815186. +[ Tue Sep 13 17:50:15 2022 ] Top1: 65.72% +[ Tue Sep 13 17:50:15 2022 ] Top5: 92.19% +[ Tue Sep 13 17:50:15 2022 ] Training epoch: 86 +[ Tue Sep 13 17:50:42 2022 ] Batch(44/123) done. Loss: 0.0191 lr:0.001000 +[ Tue Sep 13 17:51:24 2022 ] Eval epoch: 86 +[ Tue Sep 13 17:52:22 2022 ] Mean test loss of 296 batches: 1.8204432725906372. +[ Tue Sep 13 17:52:22 2022 ] Top1: 68.36% +[ Tue Sep 13 17:52:23 2022 ] Top5: 93.28% +[ Tue Sep 13 17:52:23 2022 ] Training epoch: 87 +[ Tue Sep 13 17:52:38 2022 ] Batch(21/123) done. Loss: 0.0592 lr:0.001000 +[ Tue Sep 13 17:53:31 2022 ] Batch(121/123) done. Loss: 0.0278 lr:0.001000 +[ Tue Sep 13 17:53:31 2022 ] Eval epoch: 87 +[ Tue Sep 13 17:54:30 2022 ] Mean test loss of 296 batches: 1.8613992929458618. +[ Tue Sep 13 17:54:30 2022 ] Top1: 67.96% +[ Tue Sep 13 17:54:30 2022 ] Top5: 93.14% +[ Tue Sep 13 17:54:30 2022 ] Training epoch: 88 +[ Tue Sep 13 17:55:26 2022 ] Batch(98/123) done. Loss: 0.0548 lr:0.001000 +[ Tue Sep 13 17:55:39 2022 ] Eval epoch: 88 +[ Tue Sep 13 17:56:38 2022 ] Mean test loss of 296 batches: 1.885483980178833. +[ Tue Sep 13 17:56:38 2022 ] Top1: 67.94% +[ Tue Sep 13 17:56:38 2022 ] Top5: 93.06% +[ Tue Sep 13 17:56:38 2022 ] Training epoch: 89 +[ Tue Sep 13 17:57:22 2022 ] Batch(75/123) done. Loss: 0.0265 lr:0.001000 +[ Tue Sep 13 17:57:47 2022 ] Eval epoch: 89 +[ Tue Sep 13 17:58:45 2022 ] Mean test loss of 296 batches: 2.010345935821533. +[ Tue Sep 13 17:58:46 2022 ] Top1: 66.26% +[ Tue Sep 13 17:58:46 2022 ] Top5: 92.27% +[ Tue Sep 13 17:58:46 2022 ] Training epoch: 90 +[ Tue Sep 13 17:59:17 2022 ] Batch(52/123) done. 
Loss: 0.0065 lr:0.001000 +[ Tue Sep 13 17:59:55 2022 ] Eval epoch: 90 +[ Tue Sep 13 18:00:53 2022 ] Mean test loss of 296 batches: 1.9749809503555298. +[ Tue Sep 13 18:00:54 2022 ] Top1: 66.33% +[ Tue Sep 13 18:00:54 2022 ] Top5: 92.48% +[ Tue Sep 13 18:00:54 2022 ] Training epoch: 91 +[ Tue Sep 13 18:01:13 2022 ] Batch(29/123) done. Loss: 0.0292 lr:0.001000 +[ Tue Sep 13 18:02:02 2022 ] Eval epoch: 91 +[ Tue Sep 13 18:03:00 2022 ] Mean test loss of 296 batches: 1.86484956741333. +[ Tue Sep 13 18:03:00 2022 ] Top1: 67.87% +[ Tue Sep 13 18:03:01 2022 ] Top5: 93.08% +[ Tue Sep 13 18:03:01 2022 ] Training epoch: 92 +[ Tue Sep 13 18:03:08 2022 ] Batch(6/123) done. Loss: 0.0244 lr:0.001000 +[ Tue Sep 13 18:04:01 2022 ] Batch(106/123) done. Loss: 0.0240 lr:0.001000 +[ Tue Sep 13 18:04:10 2022 ] Eval epoch: 92 +[ Tue Sep 13 18:05:08 2022 ] Mean test loss of 296 batches: 1.8629224300384521. +[ Tue Sep 13 18:05:08 2022 ] Top1: 67.98% +[ Tue Sep 13 18:05:08 2022 ] Top5: 93.14% +[ Tue Sep 13 18:05:08 2022 ] Training epoch: 93 +[ Tue Sep 13 18:05:56 2022 ] Batch(83/123) done. Loss: 0.0472 lr:0.001000 +[ Tue Sep 13 18:06:17 2022 ] Eval epoch: 93 +[ Tue Sep 13 18:07:15 2022 ] Mean test loss of 296 batches: 1.8931270837783813. +[ Tue Sep 13 18:07:15 2022 ] Top1: 67.43% +[ Tue Sep 13 18:07:15 2022 ] Top5: 93.03% +[ Tue Sep 13 18:07:15 2022 ] Training epoch: 94 +[ Tue Sep 13 18:07:51 2022 ] Batch(60/123) done. Loss: 0.0221 lr:0.001000 +[ Tue Sep 13 18:08:24 2022 ] Eval epoch: 94 +[ Tue Sep 13 18:09:22 2022 ] Mean test loss of 296 batches: 1.9236911535263062. +[ Tue Sep 13 18:09:22 2022 ] Top1: 67.26% +[ Tue Sep 13 18:09:23 2022 ] Top5: 92.94% +[ Tue Sep 13 18:09:23 2022 ] Training epoch: 95 +[ Tue Sep 13 18:09:46 2022 ] Batch(37/123) done. Loss: 0.0320 lr:0.001000 +[ Tue Sep 13 18:10:31 2022 ] Eval epoch: 95 +[ Tue Sep 13 18:11:30 2022 ] Mean test loss of 296 batches: 1.8599170446395874. 
+[ Tue Sep 13 18:11:30 2022 ] Top1: 67.89% +[ Tue Sep 13 18:11:30 2022 ] Top5: 93.11% +[ Tue Sep 13 18:11:30 2022 ] Training epoch: 96 +[ Tue Sep 13 18:11:41 2022 ] Batch(14/123) done. Loss: 0.0230 lr:0.001000 +[ Tue Sep 13 18:12:34 2022 ] Batch(114/123) done. Loss: 0.0107 lr:0.001000 +[ Tue Sep 13 18:12:39 2022 ] Eval epoch: 96 +[ Tue Sep 13 18:13:37 2022 ] Mean test loss of 296 batches: 1.8916453123092651. +[ Tue Sep 13 18:13:37 2022 ] Top1: 67.83% +[ Tue Sep 13 18:13:37 2022 ] Top5: 92.92% +[ Tue Sep 13 18:13:37 2022 ] Training epoch: 97 +[ Tue Sep 13 18:14:29 2022 ] Batch(91/123) done. Loss: 0.0206 lr:0.001000 +[ Tue Sep 13 18:14:46 2022 ] Eval epoch: 97 +[ Tue Sep 13 18:15:44 2022 ] Mean test loss of 296 batches: 1.8666728734970093. +[ Tue Sep 13 18:15:44 2022 ] Top1: 68.06% +[ Tue Sep 13 18:15:44 2022 ] Top5: 93.02% +[ Tue Sep 13 18:15:44 2022 ] Training epoch: 98 +[ Tue Sep 13 18:16:24 2022 ] Batch(68/123) done. Loss: 0.0343 lr:0.001000 +[ Tue Sep 13 18:16:53 2022 ] Eval epoch: 98 +[ Tue Sep 13 18:17:52 2022 ] Mean test loss of 296 batches: 1.8837909698486328. +[ Tue Sep 13 18:17:52 2022 ] Top1: 67.81% +[ Tue Sep 13 18:17:52 2022 ] Top5: 93.02% +[ Tue Sep 13 18:17:52 2022 ] Training epoch: 99 +[ Tue Sep 13 18:18:19 2022 ] Batch(45/123) done. Loss: 0.0092 lr:0.001000 +[ Tue Sep 13 18:19:00 2022 ] Eval epoch: 99 +[ Tue Sep 13 18:19:59 2022 ] Mean test loss of 296 batches: 1.8697354793548584. +[ Tue Sep 13 18:19:59 2022 ] Top1: 67.54% +[ Tue Sep 13 18:19:59 2022 ] Top5: 93.00% +[ Tue Sep 13 18:19:59 2022 ] Training epoch: 100 +[ Tue Sep 13 18:20:15 2022 ] Batch(22/123) done. Loss: 0.0806 lr:0.001000 +[ Tue Sep 13 18:21:08 2022 ] Batch(122/123) done. Loss: 0.0680 lr:0.001000 +[ Tue Sep 13 18:21:09 2022 ] Eval epoch: 100 +[ Tue Sep 13 18:22:07 2022 ] Mean test loss of 296 batches: 1.956996202468872. 
+[ Tue Sep 13 18:22:07 2022 ] Top1: 67.08% +[ Tue Sep 13 18:22:07 2022 ] Top5: 92.84% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..186da1c00dbee5925cc8c9a5ee01cf940891cad5 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_bone_xview +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-view/train_bone.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 8 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_bone_xview +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_bone_xview diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/decouple_gcn.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = 
nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * 
D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph 
= import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c08d1cf1d526b5f68643ed1064853eec3dddc533 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9297027362eb7c6b5159af56c1f3728b1c6957b0e1da4ad5ff558dc1a51f556 +size 5718404 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..b0736e729494c91d96104b646d50ee19510d7618 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_bone_xview/log.txt @@ -0,0 +1,636 @@ +[ Tue Sep 13 14:49:45 2022 ] Parameters: +{'work_dir': './work_dir/ntu_bone_xview', 'model_saved_name': './save_models/ntu_bone_xview', 'Experiment_name': 'ntu_bone_xview', 'config': './config/nturgbd-cross-view/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 8, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [0, 1], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue 
Sep 13 14:49:45 2022 ] Training epoch: 1 +[ Tue Sep 13 14:50:35 2022 ] Batch(99/123) done. Loss: 3.4291 lr:0.100000 +[ Tue Sep 13 14:50:45 2022 ] Eval epoch: 1 +[ Tue Sep 13 14:51:44 2022 ] Mean test loss of 296 batches: 4.48023796081543. +[ Tue Sep 13 14:51:44 2022 ] Top1: 3.86% +[ Tue Sep 13 14:51:44 2022 ] Top5: 15.11% +[ Tue Sep 13 14:51:44 2022 ] Training epoch: 2 +[ Wed Sep 14 08:59:18 2022 ] Parameters: +{'work_dir': './work_dir/ntu_bone_xview', 'model_saved_name': './save_models/ntu_bone_xview', 'Experiment_name': 'ntu_bone_xview', 'config': './config/nturgbd-cross-view/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 8, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 08:59:18 2022 ] Training epoch: 1 +[ Wed Sep 14 09:00:08 2022 ] Batch(99/123) done. 
Loss: 3.4291 lr:0.100000 +[ Wed Sep 14 09:00:19 2022 ] Eval epoch: 1 +[ Wed Sep 14 09:01:17 2022 ] Mean test loss of 296 batches: 4.48023796081543. +[ Wed Sep 14 09:01:17 2022 ] Top1: 3.86% +[ Wed Sep 14 09:01:18 2022 ] Top5: 15.11% +[ Wed Sep 14 09:01:18 2022 ] Training epoch: 2 +[ Wed Sep 14 09:02:02 2022 ] Batch(76/123) done. Loss: 2.7826 lr:0.100000 +[ Wed Sep 14 09:02:27 2022 ] Eval epoch: 2 +[ Wed Sep 14 09:03:26 2022 ] Mean test loss of 296 batches: 3.8383889198303223. +[ Wed Sep 14 09:03:26 2022 ] Top1: 9.47% +[ Wed Sep 14 09:03:26 2022 ] Top5: 36.38% +[ Wed Sep 14 09:03:27 2022 ] Training epoch: 3 +[ Wed Sep 14 09:03:58 2022 ] Batch(53/123) done. Loss: 2.6515 lr:0.100000 +[ Wed Sep 14 09:04:36 2022 ] Eval epoch: 3 +[ Wed Sep 14 09:05:35 2022 ] Mean test loss of 296 batches: 3.2273526191711426. +[ Wed Sep 14 09:05:35 2022 ] Top1: 16.74% +[ Wed Sep 14 09:05:35 2022 ] Top5: 48.25% +[ Wed Sep 14 09:05:35 2022 ] Training epoch: 4 +[ Wed Sep 14 09:05:54 2022 ] Batch(30/123) done. Loss: 2.1078 lr:0.100000 +[ Wed Sep 14 09:06:44 2022 ] Eval epoch: 4 +[ Wed Sep 14 09:07:43 2022 ] Mean test loss of 296 batches: 3.1734416484832764. +[ Wed Sep 14 09:07:43 2022 ] Top1: 17.18% +[ Wed Sep 14 09:07:43 2022 ] Top5: 50.73% +[ Wed Sep 14 09:07:43 2022 ] Training epoch: 5 +[ Wed Sep 14 09:07:49 2022 ] Batch(7/123) done. Loss: 1.9000 lr:0.100000 +[ Wed Sep 14 09:08:43 2022 ] Batch(107/123) done. Loss: 1.8776 lr:0.100000 +[ Wed Sep 14 09:08:52 2022 ] Eval epoch: 5 +[ Wed Sep 14 09:09:50 2022 ] Mean test loss of 296 batches: 3.06025767326355. +[ Wed Sep 14 09:09:50 2022 ] Top1: 23.21% +[ Wed Sep 14 09:09:50 2022 ] Top5: 57.04% +[ Wed Sep 14 09:09:50 2022 ] Training epoch: 6 +[ Wed Sep 14 09:10:38 2022 ] Batch(84/123) done. Loss: 1.6594 lr:0.100000 +[ Wed Sep 14 09:10:59 2022 ] Eval epoch: 6 +[ Wed Sep 14 09:11:57 2022 ] Mean test loss of 296 batches: 2.8030574321746826. 
+[ Wed Sep 14 09:11:57 2022 ] Top1: 23.06% +[ Wed Sep 14 09:11:57 2022 ] Top5: 64.32% +[ Wed Sep 14 09:11:57 2022 ] Training epoch: 7 +[ Wed Sep 14 09:12:34 2022 ] Batch(61/123) done. Loss: 1.9769 lr:0.100000 +[ Wed Sep 14 09:13:07 2022 ] Eval epoch: 7 +[ Wed Sep 14 09:14:05 2022 ] Mean test loss of 296 batches: 2.516324520111084. +[ Wed Sep 14 09:14:05 2022 ] Top1: 30.32% +[ Wed Sep 14 09:14:05 2022 ] Top5: 67.55% +[ Wed Sep 14 09:14:05 2022 ] Training epoch: 8 +[ Wed Sep 14 09:14:29 2022 ] Batch(38/123) done. Loss: 1.7745 lr:0.100000 +[ Wed Sep 14 09:15:14 2022 ] Eval epoch: 8 +[ Wed Sep 14 09:16:13 2022 ] Mean test loss of 296 batches: 2.558635711669922. +[ Wed Sep 14 09:16:13 2022 ] Top1: 28.32% +[ Wed Sep 14 09:16:13 2022 ] Top5: 68.61% +[ Wed Sep 14 09:16:13 2022 ] Training epoch: 9 +[ Wed Sep 14 09:16:24 2022 ] Batch(15/123) done. Loss: 1.1170 lr:0.100000 +[ Wed Sep 14 09:17:18 2022 ] Batch(115/123) done. Loss: 1.5282 lr:0.100000 +[ Wed Sep 14 09:17:22 2022 ] Eval epoch: 9 +[ Wed Sep 14 09:18:20 2022 ] Mean test loss of 296 batches: 2.2108256816864014. +[ Wed Sep 14 09:18:20 2022 ] Top1: 34.77% +[ Wed Sep 14 09:18:20 2022 ] Top5: 76.16% +[ Wed Sep 14 09:18:20 2022 ] Training epoch: 10 +[ Wed Sep 14 09:19:13 2022 ] Batch(92/123) done. Loss: 1.6132 lr:0.100000 +[ Wed Sep 14 09:19:29 2022 ] Eval epoch: 10 +[ Wed Sep 14 09:20:28 2022 ] Mean test loss of 296 batches: 2.420668840408325. +[ Wed Sep 14 09:20:28 2022 ] Top1: 31.43% +[ Wed Sep 14 09:20:28 2022 ] Top5: 73.73% +[ Wed Sep 14 09:20:28 2022 ] Training epoch: 11 +[ Wed Sep 14 09:21:08 2022 ] Batch(69/123) done. Loss: 1.3780 lr:0.100000 +[ Wed Sep 14 09:21:37 2022 ] Eval epoch: 11 +[ Wed Sep 14 09:22:35 2022 ] Mean test loss of 296 batches: 2.272085428237915. +[ Wed Sep 14 09:22:35 2022 ] Top1: 33.38% +[ Wed Sep 14 09:22:35 2022 ] Top5: 74.65% +[ Wed Sep 14 09:22:35 2022 ] Training epoch: 12 +[ Wed Sep 14 09:23:03 2022 ] Batch(46/123) done. 
Loss: 1.7173 lr:0.100000 +[ Wed Sep 14 09:23:44 2022 ] Eval epoch: 12 +[ Wed Sep 14 09:24:43 2022 ] Mean test loss of 296 batches: 1.959482192993164. +[ Wed Sep 14 09:24:43 2022 ] Top1: 42.17% +[ Wed Sep 14 09:24:43 2022 ] Top5: 81.90% +[ Wed Sep 14 09:24:43 2022 ] Training epoch: 13 +[ Wed Sep 14 09:24:59 2022 ] Batch(23/123) done. Loss: 1.2152 lr:0.100000 +[ Wed Sep 14 09:25:52 2022 ] Eval epoch: 13 +[ Wed Sep 14 09:26:50 2022 ] Mean test loss of 296 batches: 2.579192876815796. +[ Wed Sep 14 09:26:50 2022 ] Top1: 37.60% +[ Wed Sep 14 09:26:50 2022 ] Top5: 74.38% +[ Wed Sep 14 09:26:51 2022 ] Training epoch: 14 +[ Wed Sep 14 09:26:54 2022 ] Batch(0/123) done. Loss: 1.8506 lr:0.100000 +[ Wed Sep 14 09:27:47 2022 ] Batch(100/123) done. Loss: 0.9230 lr:0.100000 +[ Wed Sep 14 09:27:59 2022 ] Eval epoch: 14 +[ Wed Sep 14 09:28:57 2022 ] Mean test loss of 296 batches: 1.9522663354873657. +[ Wed Sep 14 09:28:58 2022 ] Top1: 45.78% +[ Wed Sep 14 09:28:58 2022 ] Top5: 83.79% +[ Wed Sep 14 09:28:58 2022 ] Training epoch: 15 +[ Wed Sep 14 09:29:42 2022 ] Batch(77/123) done. Loss: 1.1117 lr:0.100000 +[ Wed Sep 14 09:30:07 2022 ] Eval epoch: 15 +[ Wed Sep 14 09:31:05 2022 ] Mean test loss of 296 batches: 1.7078609466552734. +[ Wed Sep 14 09:31:05 2022 ] Top1: 47.88% +[ Wed Sep 14 09:31:05 2022 ] Top5: 85.58% +[ Wed Sep 14 09:31:05 2022 ] Training epoch: 16 +[ Wed Sep 14 09:31:37 2022 ] Batch(54/123) done. Loss: 0.6015 lr:0.100000 +[ Wed Sep 14 09:32:14 2022 ] Eval epoch: 16 +[ Wed Sep 14 09:33:12 2022 ] Mean test loss of 296 batches: 2.0181772708892822. +[ Wed Sep 14 09:33:12 2022 ] Top1: 43.57% +[ Wed Sep 14 09:33:12 2022 ] Top5: 80.20% +[ Wed Sep 14 09:33:12 2022 ] Training epoch: 17 +[ Wed Sep 14 09:33:32 2022 ] Batch(31/123) done. Loss: 0.9454 lr:0.100000 +[ Wed Sep 14 09:34:21 2022 ] Eval epoch: 17 +[ Wed Sep 14 09:35:20 2022 ] Mean test loss of 296 batches: 1.7662732601165771. 
+[ Wed Sep 14 09:35:20 2022 ] Top1: 49.10% +[ Wed Sep 14 09:35:20 2022 ] Top5: 85.68% +[ Wed Sep 14 09:35:20 2022 ] Training epoch: 18 +[ Wed Sep 14 09:35:27 2022 ] Batch(8/123) done. Loss: 0.8737 lr:0.100000 +[ Wed Sep 14 09:36:21 2022 ] Batch(108/123) done. Loss: 0.9912 lr:0.100000 +[ Wed Sep 14 09:36:29 2022 ] Eval epoch: 18 +[ Wed Sep 14 09:37:27 2022 ] Mean test loss of 296 batches: 1.6528425216674805. +[ Wed Sep 14 09:37:27 2022 ] Top1: 52.66% +[ Wed Sep 14 09:37:28 2022 ] Top5: 87.01% +[ Wed Sep 14 09:37:28 2022 ] Training epoch: 19 +[ Wed Sep 14 09:38:16 2022 ] Batch(85/123) done. Loss: 0.7678 lr:0.100000 +[ Wed Sep 14 09:38:36 2022 ] Eval epoch: 19 +[ Wed Sep 14 09:39:35 2022 ] Mean test loss of 296 batches: 1.8586660623550415. +[ Wed Sep 14 09:39:35 2022 ] Top1: 51.48% +[ Wed Sep 14 09:39:35 2022 ] Top5: 86.76% +[ Wed Sep 14 09:39:35 2022 ] Training epoch: 20 +[ Wed Sep 14 09:40:11 2022 ] Batch(62/123) done. Loss: 0.9393 lr:0.100000 +[ Wed Sep 14 09:40:44 2022 ] Eval epoch: 20 +[ Wed Sep 14 09:41:42 2022 ] Mean test loss of 296 batches: 1.9394832849502563. +[ Wed Sep 14 09:41:42 2022 ] Top1: 48.98% +[ Wed Sep 14 09:41:42 2022 ] Top5: 83.51% +[ Wed Sep 14 09:41:42 2022 ] Training epoch: 21 +[ Wed Sep 14 09:42:06 2022 ] Batch(39/123) done. Loss: 1.1384 lr:0.100000 +[ Wed Sep 14 09:42:51 2022 ] Eval epoch: 21 +[ Wed Sep 14 09:43:49 2022 ] Mean test loss of 296 batches: 1.9221111536026. +[ Wed Sep 14 09:43:50 2022 ] Top1: 47.67% +[ Wed Sep 14 09:43:50 2022 ] Top5: 84.38% +[ Wed Sep 14 09:43:50 2022 ] Training epoch: 22 +[ Wed Sep 14 09:44:01 2022 ] Batch(16/123) done. Loss: 0.6597 lr:0.100000 +[ Wed Sep 14 09:44:55 2022 ] Batch(116/123) done. Loss: 1.1187 lr:0.100000 +[ Wed Sep 14 09:44:59 2022 ] Eval epoch: 22 +[ Wed Sep 14 09:45:57 2022 ] Mean test loss of 296 batches: 1.9002504348754883. 
+[ Wed Sep 14 09:45:57 2022 ] Top1: 49.13% +[ Wed Sep 14 09:45:57 2022 ] Top5: 86.94% +[ Wed Sep 14 09:45:57 2022 ] Training epoch: 23 +[ Wed Sep 14 09:46:50 2022 ] Batch(93/123) done. Loss: 0.8922 lr:0.100000 +[ Wed Sep 14 09:47:06 2022 ] Eval epoch: 23 +[ Wed Sep 14 09:48:04 2022 ] Mean test loss of 296 batches: 1.6847193241119385. +[ Wed Sep 14 09:48:05 2022 ] Top1: 54.31% +[ Wed Sep 14 09:48:05 2022 ] Top5: 89.81% +[ Wed Sep 14 09:48:05 2022 ] Training epoch: 24 +[ Wed Sep 14 09:48:45 2022 ] Batch(70/123) done. Loss: 0.6105 lr:0.100000 +[ Wed Sep 14 09:49:13 2022 ] Eval epoch: 24 +[ Wed Sep 14 09:50:12 2022 ] Mean test loss of 296 batches: 2.477752447128296. +[ Wed Sep 14 09:50:12 2022 ] Top1: 46.31% +[ Wed Sep 14 09:50:12 2022 ] Top5: 84.00% +[ Wed Sep 14 09:50:12 2022 ] Training epoch: 25 +[ Wed Sep 14 09:50:40 2022 ] Batch(47/123) done. Loss: 0.7480 lr:0.100000 +[ Wed Sep 14 09:51:21 2022 ] Eval epoch: 25 +[ Wed Sep 14 09:52:19 2022 ] Mean test loss of 296 batches: 1.8951337337493896. +[ Wed Sep 14 09:52:19 2022 ] Top1: 49.93% +[ Wed Sep 14 09:52:19 2022 ] Top5: 87.57% +[ Wed Sep 14 09:52:20 2022 ] Training epoch: 26 +[ Wed Sep 14 09:52:35 2022 ] Batch(24/123) done. Loss: 0.8814 lr:0.100000 +[ Wed Sep 14 09:53:28 2022 ] Eval epoch: 26 +[ Wed Sep 14 09:54:27 2022 ] Mean test loss of 296 batches: 1.8247181177139282. +[ Wed Sep 14 09:54:27 2022 ] Top1: 51.67% +[ Wed Sep 14 09:54:27 2022 ] Top5: 87.41% +[ Wed Sep 14 09:54:27 2022 ] Training epoch: 27 +[ Wed Sep 14 09:54:31 2022 ] Batch(1/123) done. Loss: 0.6602 lr:0.100000 +[ Wed Sep 14 09:55:24 2022 ] Batch(101/123) done. Loss: 0.8150 lr:0.100000 +[ Wed Sep 14 09:55:36 2022 ] Eval epoch: 27 +[ Wed Sep 14 09:56:34 2022 ] Mean test loss of 296 batches: 1.9878681898117065. +[ Wed Sep 14 09:56:34 2022 ] Top1: 50.78% +[ Wed Sep 14 09:56:34 2022 ] Top5: 85.99% +[ Wed Sep 14 09:56:35 2022 ] Training epoch: 28 +[ Wed Sep 14 09:57:19 2022 ] Batch(78/123) done. 
Loss: 0.5868 lr:0.100000 +[ Wed Sep 14 09:57:43 2022 ] Eval epoch: 28 +[ Wed Sep 14 09:58:42 2022 ] Mean test loss of 296 batches: 2.044982433319092. +[ Wed Sep 14 09:58:42 2022 ] Top1: 52.68% +[ Wed Sep 14 09:58:42 2022 ] Top5: 86.56% +[ Wed Sep 14 09:58:42 2022 ] Training epoch: 29 +[ Wed Sep 14 09:59:14 2022 ] Batch(55/123) done. Loss: 0.5705 lr:0.100000 +[ Wed Sep 14 09:59:51 2022 ] Eval epoch: 29 +[ Wed Sep 14 10:00:49 2022 ] Mean test loss of 296 batches: 1.8594117164611816. +[ Wed Sep 14 10:00:49 2022 ] Top1: 54.13% +[ Wed Sep 14 10:00:49 2022 ] Top5: 89.45% +[ Wed Sep 14 10:00:49 2022 ] Training epoch: 30 +[ Wed Sep 14 10:01:10 2022 ] Batch(32/123) done. Loss: 0.6097 lr:0.100000 +[ Wed Sep 14 10:01:58 2022 ] Eval epoch: 30 +[ Wed Sep 14 10:02:57 2022 ] Mean test loss of 296 batches: 1.7159819602966309. +[ Wed Sep 14 10:02:57 2022 ] Top1: 55.59% +[ Wed Sep 14 10:02:57 2022 ] Top5: 88.75% +[ Wed Sep 14 10:02:57 2022 ] Training epoch: 31 +[ Wed Sep 14 10:03:05 2022 ] Batch(9/123) done. Loss: 0.4817 lr:0.100000 +[ Wed Sep 14 10:03:59 2022 ] Batch(109/123) done. Loss: 0.4735 lr:0.100000 +[ Wed Sep 14 10:04:06 2022 ] Eval epoch: 31 +[ Wed Sep 14 10:05:04 2022 ] Mean test loss of 296 batches: 1.5892300605773926. +[ Wed Sep 14 10:05:04 2022 ] Top1: 58.15% +[ Wed Sep 14 10:05:05 2022 ] Top5: 90.15% +[ Wed Sep 14 10:05:05 2022 ] Training epoch: 32 +[ Wed Sep 14 10:05:54 2022 ] Batch(86/123) done. Loss: 0.6351 lr:0.100000 +[ Wed Sep 14 10:06:13 2022 ] Eval epoch: 32 +[ Wed Sep 14 10:07:12 2022 ] Mean test loss of 296 batches: 2.4491147994995117. +[ Wed Sep 14 10:07:12 2022 ] Top1: 50.15% +[ Wed Sep 14 10:07:12 2022 ] Top5: 87.95% +[ Wed Sep 14 10:07:12 2022 ] Training epoch: 33 +[ Wed Sep 14 10:07:49 2022 ] Batch(63/123) done. Loss: 0.4192 lr:0.100000 +[ Wed Sep 14 10:08:21 2022 ] Eval epoch: 33 +[ Wed Sep 14 10:09:19 2022 ] Mean test loss of 296 batches: 1.6166101694107056. 
+[ Wed Sep 14 10:09:19 2022 ] Top1: 57.56% +[ Wed Sep 14 10:09:19 2022 ] Top5: 89.58% +[ Wed Sep 14 10:09:19 2022 ] Training epoch: 34 +[ Wed Sep 14 10:09:44 2022 ] Batch(40/123) done. Loss: 0.2878 lr:0.100000 +[ Wed Sep 14 10:10:28 2022 ] Eval epoch: 34 +[ Wed Sep 14 10:11:27 2022 ] Mean test loss of 296 batches: 1.85651433467865. +[ Wed Sep 14 10:11:27 2022 ] Top1: 53.89% +[ Wed Sep 14 10:11:27 2022 ] Top5: 88.78% +[ Wed Sep 14 10:11:27 2022 ] Training epoch: 35 +[ Wed Sep 14 10:11:39 2022 ] Batch(17/123) done. Loss: 0.3453 lr:0.100000 +[ Wed Sep 14 10:12:33 2022 ] Batch(117/123) done. Loss: 0.4045 lr:0.100000 +[ Wed Sep 14 10:12:36 2022 ] Eval epoch: 35 +[ Wed Sep 14 10:13:34 2022 ] Mean test loss of 296 batches: 2.469621181488037. +[ Wed Sep 14 10:13:34 2022 ] Top1: 51.27% +[ Wed Sep 14 10:13:34 2022 ] Top5: 85.19% +[ Wed Sep 14 10:13:34 2022 ] Training epoch: 36 +[ Wed Sep 14 10:14:28 2022 ] Batch(94/123) done. Loss: 0.5856 lr:0.100000 +[ Wed Sep 14 10:14:43 2022 ] Eval epoch: 36 +[ Wed Sep 14 10:15:41 2022 ] Mean test loss of 296 batches: 1.4329378604888916. +[ Wed Sep 14 10:15:42 2022 ] Top1: 62.79% +[ Wed Sep 14 10:15:42 2022 ] Top5: 92.86% +[ Wed Sep 14 10:15:42 2022 ] Training epoch: 37 +[ Wed Sep 14 10:16:23 2022 ] Batch(71/123) done. Loss: 0.3018 lr:0.100000 +[ Wed Sep 14 10:16:50 2022 ] Eval epoch: 37 +[ Wed Sep 14 10:17:49 2022 ] Mean test loss of 296 batches: 1.8889617919921875. +[ Wed Sep 14 10:17:49 2022 ] Top1: 56.22% +[ Wed Sep 14 10:17:49 2022 ] Top5: 89.21% +[ Wed Sep 14 10:17:49 2022 ] Training epoch: 38 +[ Wed Sep 14 10:18:18 2022 ] Batch(48/123) done. Loss: 0.4272 lr:0.100000 +[ Wed Sep 14 10:18:57 2022 ] Eval epoch: 38 +[ Wed Sep 14 10:19:56 2022 ] Mean test loss of 296 batches: 1.590606689453125. +[ Wed Sep 14 10:19:56 2022 ] Top1: 59.65% +[ Wed Sep 14 10:19:56 2022 ] Top5: 91.31% +[ Wed Sep 14 10:19:56 2022 ] Training epoch: 39 +[ Wed Sep 14 10:20:12 2022 ] Batch(25/123) done. 
Loss: 0.4518 lr:0.100000 +[ Wed Sep 14 10:21:05 2022 ] Eval epoch: 39 +[ Wed Sep 14 10:22:03 2022 ] Mean test loss of 296 batches: 2.2368476390838623. +[ Wed Sep 14 10:22:03 2022 ] Top1: 53.85% +[ Wed Sep 14 10:22:03 2022 ] Top5: 88.07% +[ Wed Sep 14 10:22:03 2022 ] Training epoch: 40 +[ Wed Sep 14 10:22:07 2022 ] Batch(2/123) done. Loss: 0.2404 lr:0.100000 +[ Wed Sep 14 10:23:01 2022 ] Batch(102/123) done. Loss: 0.6913 lr:0.100000 +[ Wed Sep 14 10:23:12 2022 ] Eval epoch: 40 +[ Wed Sep 14 10:24:10 2022 ] Mean test loss of 296 batches: 2.0816197395324707. +[ Wed Sep 14 10:24:10 2022 ] Top1: 52.56% +[ Wed Sep 14 10:24:10 2022 ] Top5: 86.38% +[ Wed Sep 14 10:24:10 2022 ] Training epoch: 41 +[ Wed Sep 14 10:24:56 2022 ] Batch(79/123) done. Loss: 0.3203 lr:0.100000 +[ Wed Sep 14 10:25:19 2022 ] Eval epoch: 41 +[ Wed Sep 14 10:26:17 2022 ] Mean test loss of 296 batches: 1.7928508520126343. +[ Wed Sep 14 10:26:17 2022 ] Top1: 59.50% +[ Wed Sep 14 10:26:17 2022 ] Top5: 90.77% +[ Wed Sep 14 10:26:18 2022 ] Training epoch: 42 +[ Wed Sep 14 10:26:51 2022 ] Batch(56/123) done. Loss: 0.3724 lr:0.100000 +[ Wed Sep 14 10:27:26 2022 ] Eval epoch: 42 +[ Wed Sep 14 10:28:25 2022 ] Mean test loss of 296 batches: 2.006517171859741. +[ Wed Sep 14 10:28:25 2022 ] Top1: 58.25% +[ Wed Sep 14 10:28:25 2022 ] Top5: 89.90% +[ Wed Sep 14 10:28:25 2022 ] Training epoch: 43 +[ Wed Sep 14 10:28:46 2022 ] Batch(33/123) done. Loss: 0.2090 lr:0.100000 +[ Wed Sep 14 10:29:34 2022 ] Eval epoch: 43 +[ Wed Sep 14 10:30:32 2022 ] Mean test loss of 296 batches: 2.046574831008911. +[ Wed Sep 14 10:30:32 2022 ] Top1: 55.62% +[ Wed Sep 14 10:30:32 2022 ] Top5: 89.08% +[ Wed Sep 14 10:30:32 2022 ] Training epoch: 44 +[ Wed Sep 14 10:30:40 2022 ] Batch(10/123) done. Loss: 0.3393 lr:0.100000 +[ Wed Sep 14 10:31:34 2022 ] Batch(110/123) done. Loss: 0.3782 lr:0.100000 +[ Wed Sep 14 10:31:41 2022 ] Eval epoch: 44 +[ Wed Sep 14 10:32:39 2022 ] Mean test loss of 296 batches: 2.2019667625427246. 
+[ Wed Sep 14 10:32:39 2022 ] Top1: 54.97% +[ Wed Sep 14 10:32:39 2022 ] Top5: 87.62% +[ Wed Sep 14 10:32:40 2022 ] Training epoch: 45 +[ Wed Sep 14 10:33:29 2022 ] Batch(87/123) done. Loss: 0.4853 lr:0.100000 +[ Wed Sep 14 10:33:48 2022 ] Eval epoch: 45 +[ Wed Sep 14 10:34:46 2022 ] Mean test loss of 296 batches: 2.1224262714385986. +[ Wed Sep 14 10:34:47 2022 ] Top1: 55.39% +[ Wed Sep 14 10:34:47 2022 ] Top5: 87.98% +[ Wed Sep 14 10:34:47 2022 ] Training epoch: 46 +[ Wed Sep 14 10:35:24 2022 ] Batch(64/123) done. Loss: 0.2578 lr:0.100000 +[ Wed Sep 14 10:35:55 2022 ] Eval epoch: 46 +[ Wed Sep 14 10:36:54 2022 ] Mean test loss of 296 batches: 1.9722990989685059. +[ Wed Sep 14 10:36:54 2022 ] Top1: 59.18% +[ Wed Sep 14 10:36:54 2022 ] Top5: 90.88% +[ Wed Sep 14 10:36:54 2022 ] Training epoch: 47 +[ Wed Sep 14 10:37:19 2022 ] Batch(41/123) done. Loss: 0.5311 lr:0.100000 +[ Wed Sep 14 10:38:03 2022 ] Eval epoch: 47 +[ Wed Sep 14 10:39:01 2022 ] Mean test loss of 296 batches: 2.271484375. +[ Wed Sep 14 10:39:01 2022 ] Top1: 55.84% +[ Wed Sep 14 10:39:01 2022 ] Top5: 86.58% +[ Wed Sep 14 10:39:01 2022 ] Training epoch: 48 +[ Wed Sep 14 10:39:14 2022 ] Batch(18/123) done. Loss: 0.2038 lr:0.100000 +[ Wed Sep 14 10:40:07 2022 ] Batch(118/123) done. Loss: 0.4612 lr:0.100000 +[ Wed Sep 14 10:40:10 2022 ] Eval epoch: 48 +[ Wed Sep 14 10:41:08 2022 ] Mean test loss of 296 batches: 1.7720586061477661. +[ Wed Sep 14 10:41:08 2022 ] Top1: 59.83% +[ Wed Sep 14 10:41:08 2022 ] Top5: 90.05% +[ Wed Sep 14 10:41:08 2022 ] Training epoch: 49 +[ Wed Sep 14 10:42:02 2022 ] Batch(95/123) done. Loss: 0.2150 lr:0.100000 +[ Wed Sep 14 10:42:17 2022 ] Eval epoch: 49 +[ Wed Sep 14 10:43:15 2022 ] Mean test loss of 296 batches: 2.0885446071624756. +[ Wed Sep 14 10:43:15 2022 ] Top1: 58.22% +[ Wed Sep 14 10:43:15 2022 ] Top5: 89.32% +[ Wed Sep 14 10:43:15 2022 ] Training epoch: 50 +[ Wed Sep 14 10:43:57 2022 ] Batch(72/123) done. 
Loss: 0.3571 lr:0.100000 +[ Wed Sep 14 10:44:24 2022 ] Eval epoch: 50 +[ Wed Sep 14 10:45:23 2022 ] Mean test loss of 296 batches: 1.9763500690460205. +[ Wed Sep 14 10:45:23 2022 ] Top1: 59.96% +[ Wed Sep 14 10:45:23 2022 ] Top5: 90.60% +[ Wed Sep 14 10:45:23 2022 ] Training epoch: 51 +[ Wed Sep 14 10:45:52 2022 ] Batch(49/123) done. Loss: 0.2263 lr:0.100000 +[ Wed Sep 14 10:46:32 2022 ] Eval epoch: 51 +[ Wed Sep 14 10:47:30 2022 ] Mean test loss of 296 batches: 1.659812092781067. +[ Wed Sep 14 10:47:30 2022 ] Top1: 62.62% +[ Wed Sep 14 10:47:30 2022 ] Top5: 91.66% +[ Wed Sep 14 10:47:30 2022 ] Training epoch: 52 +[ Wed Sep 14 10:47:47 2022 ] Batch(26/123) done. Loss: 0.1888 lr:0.100000 +[ Wed Sep 14 10:48:39 2022 ] Eval epoch: 52 +[ Wed Sep 14 10:49:37 2022 ] Mean test loss of 296 batches: 1.8305270671844482. +[ Wed Sep 14 10:49:37 2022 ] Top1: 59.66% +[ Wed Sep 14 10:49:37 2022 ] Top5: 91.04% +[ Wed Sep 14 10:49:37 2022 ] Training epoch: 53 +[ Wed Sep 14 10:49:42 2022 ] Batch(3/123) done. Loss: 0.1646 lr:0.100000 +[ Wed Sep 14 10:50:36 2022 ] Batch(103/123) done. Loss: 0.2960 lr:0.100000 +[ Wed Sep 14 10:50:46 2022 ] Eval epoch: 53 +[ Wed Sep 14 10:51:44 2022 ] Mean test loss of 296 batches: 11.188461303710938. +[ Wed Sep 14 10:51:44 2022 ] Top1: 22.83% +[ Wed Sep 14 10:51:45 2022 ] Top5: 57.49% +[ Wed Sep 14 10:51:45 2022 ] Training epoch: 54 +[ Wed Sep 14 10:52:30 2022 ] Batch(80/123) done. Loss: 0.3274 lr:0.100000 +[ Wed Sep 14 10:52:53 2022 ] Eval epoch: 54 +[ Wed Sep 14 10:53:52 2022 ] Mean test loss of 296 batches: 2.448310613632202. +[ Wed Sep 14 10:53:52 2022 ] Top1: 55.38% +[ Wed Sep 14 10:53:52 2022 ] Top5: 87.52% +[ Wed Sep 14 10:53:52 2022 ] Training epoch: 55 +[ Wed Sep 14 10:54:26 2022 ] Batch(57/123) done. Loss: 0.4760 lr:0.100000 +[ Wed Sep 14 10:55:01 2022 ] Eval epoch: 55 +[ Wed Sep 14 10:55:59 2022 ] Mean test loss of 296 batches: 1.733485460281372. 
+[ Wed Sep 14 10:55:59 2022 ] Top1: 63.78% +[ Wed Sep 14 10:55:59 2022 ] Top5: 91.88% +[ Wed Sep 14 10:55:59 2022 ] Training epoch: 56 +[ Wed Sep 14 10:56:21 2022 ] Batch(34/123) done. Loss: 0.2010 lr:0.100000 +[ Wed Sep 14 10:57:08 2022 ] Eval epoch: 56 +[ Wed Sep 14 10:58:06 2022 ] Mean test loss of 296 batches: 2.0535242557525635. +[ Wed Sep 14 10:58:06 2022 ] Top1: 59.05% +[ Wed Sep 14 10:58:07 2022 ] Top5: 90.51% +[ Wed Sep 14 10:58:07 2022 ] Training epoch: 57 +[ Wed Sep 14 10:58:15 2022 ] Batch(11/123) done. Loss: 0.3022 lr:0.100000 +[ Wed Sep 14 10:59:09 2022 ] Batch(111/123) done. Loss: 0.2314 lr:0.100000 +[ Wed Sep 14 10:59:15 2022 ] Eval epoch: 57 +[ Wed Sep 14 11:00:14 2022 ] Mean test loss of 296 batches: 2.1349549293518066. +[ Wed Sep 14 11:00:14 2022 ] Top1: 58.82% +[ Wed Sep 14 11:00:14 2022 ] Top5: 90.05% +[ Wed Sep 14 11:00:14 2022 ] Training epoch: 58 +[ Wed Sep 14 11:01:04 2022 ] Batch(88/123) done. Loss: 0.2947 lr:0.100000 +[ Wed Sep 14 11:01:23 2022 ] Eval epoch: 58 +[ Wed Sep 14 11:02:21 2022 ] Mean test loss of 296 batches: 2.0795400142669678. +[ Wed Sep 14 11:02:21 2022 ] Top1: 61.10% +[ Wed Sep 14 11:02:21 2022 ] Top5: 91.16% +[ Wed Sep 14 11:02:21 2022 ] Training epoch: 59 +[ Wed Sep 14 11:02:59 2022 ] Batch(65/123) done. Loss: 0.1852 lr:0.100000 +[ Wed Sep 14 11:03:30 2022 ] Eval epoch: 59 +[ Wed Sep 14 11:04:28 2022 ] Mean test loss of 296 batches: 1.623721718788147. +[ Wed Sep 14 11:04:28 2022 ] Top1: 64.27% +[ Wed Sep 14 11:04:29 2022 ] Top5: 93.18% +[ Wed Sep 14 11:04:29 2022 ] Training epoch: 60 +[ Wed Sep 14 11:04:54 2022 ] Batch(42/123) done. Loss: 0.2725 lr:0.100000 +[ Wed Sep 14 11:05:37 2022 ] Eval epoch: 60 +[ Wed Sep 14 11:06:36 2022 ] Mean test loss of 296 batches: 1.9293628931045532. +[ Wed Sep 14 11:06:36 2022 ] Top1: 60.85% +[ Wed Sep 14 11:06:36 2022 ] Top5: 91.02% +[ Wed Sep 14 11:06:36 2022 ] Training epoch: 61 +[ Wed Sep 14 11:06:49 2022 ] Batch(19/123) done. 
Loss: 0.2341 lr:0.010000 +[ Wed Sep 14 11:07:43 2022 ] Batch(119/123) done. Loss: 0.0932 lr:0.010000 +[ Wed Sep 14 11:07:45 2022 ] Eval epoch: 61 +[ Wed Sep 14 11:08:43 2022 ] Mean test loss of 296 batches: 1.4320499897003174. +[ Wed Sep 14 11:08:43 2022 ] Top1: 68.15% +[ Wed Sep 14 11:08:43 2022 ] Top5: 93.89% +[ Wed Sep 14 11:08:43 2022 ] Training epoch: 62 +[ Wed Sep 14 11:09:38 2022 ] Batch(96/123) done. Loss: 0.1164 lr:0.010000 +[ Wed Sep 14 11:09:52 2022 ] Eval epoch: 62 +[ Wed Sep 14 11:10:51 2022 ] Mean test loss of 296 batches: 1.448936939239502. +[ Wed Sep 14 11:10:51 2022 ] Top1: 69.05% +[ Wed Sep 14 11:10:51 2022 ] Top5: 94.01% +[ Wed Sep 14 11:10:51 2022 ] Training epoch: 63 +[ Wed Sep 14 11:11:33 2022 ] Batch(73/123) done. Loss: 0.0806 lr:0.010000 +[ Wed Sep 14 11:12:00 2022 ] Eval epoch: 63 +[ Wed Sep 14 11:12:58 2022 ] Mean test loss of 296 batches: 1.4893498420715332. +[ Wed Sep 14 11:12:59 2022 ] Top1: 69.16% +[ Wed Sep 14 11:12:59 2022 ] Top5: 93.80% +[ Wed Sep 14 11:12:59 2022 ] Training epoch: 64 +[ Wed Sep 14 11:13:29 2022 ] Batch(50/123) done. Loss: 0.0948 lr:0.010000 +[ Wed Sep 14 11:14:08 2022 ] Eval epoch: 64 +[ Wed Sep 14 11:15:06 2022 ] Mean test loss of 296 batches: 1.4758028984069824. +[ Wed Sep 14 11:15:06 2022 ] Top1: 69.32% +[ Wed Sep 14 11:15:06 2022 ] Top5: 94.04% +[ Wed Sep 14 11:15:06 2022 ] Training epoch: 65 +[ Wed Sep 14 11:15:24 2022 ] Batch(27/123) done. Loss: 0.0569 lr:0.010000 +[ Wed Sep 14 11:16:15 2022 ] Eval epoch: 65 +[ Wed Sep 14 11:17:14 2022 ] Mean test loss of 296 batches: 1.5391931533813477. +[ Wed Sep 14 11:17:14 2022 ] Top1: 68.90% +[ Wed Sep 14 11:17:14 2022 ] Top5: 93.83% +[ Wed Sep 14 11:17:14 2022 ] Training epoch: 66 +[ Wed Sep 14 11:17:19 2022 ] Batch(4/123) done. Loss: 0.0228 lr:0.010000 +[ Wed Sep 14 11:18:13 2022 ] Batch(104/123) done. Loss: 0.0802 lr:0.010000 +[ Wed Sep 14 11:18:23 2022 ] Eval epoch: 66 +[ Wed Sep 14 11:19:21 2022 ] Mean test loss of 296 batches: 1.5404032468795776. 
+[ Wed Sep 14 11:19:21 2022 ] Top1: 69.34% +[ Wed Sep 14 11:19:21 2022 ] Top5: 93.88% +[ Wed Sep 14 11:19:22 2022 ] Training epoch: 67 +[ Wed Sep 14 11:20:08 2022 ] Batch(81/123) done. Loss: 0.0190 lr:0.010000 +[ Wed Sep 14 11:20:30 2022 ] Eval epoch: 67 +[ Wed Sep 14 11:21:29 2022 ] Mean test loss of 296 batches: 1.5250171422958374. +[ Wed Sep 14 11:21:29 2022 ] Top1: 69.18% +[ Wed Sep 14 11:21:29 2022 ] Top5: 94.02% +[ Wed Sep 14 11:21:29 2022 ] Training epoch: 68 +[ Wed Sep 14 11:22:03 2022 ] Batch(58/123) done. Loss: 0.0656 lr:0.010000 +[ Wed Sep 14 11:22:38 2022 ] Eval epoch: 68 +[ Wed Sep 14 11:23:37 2022 ] Mean test loss of 296 batches: 1.5099595785140991. +[ Wed Sep 14 11:23:37 2022 ] Top1: 69.89% +[ Wed Sep 14 11:23:37 2022 ] Top5: 94.11% +[ Wed Sep 14 11:23:37 2022 ] Training epoch: 69 +[ Wed Sep 14 11:23:59 2022 ] Batch(35/123) done. Loss: 0.0828 lr:0.010000 +[ Wed Sep 14 11:24:46 2022 ] Eval epoch: 69 +[ Wed Sep 14 11:25:44 2022 ] Mean test loss of 296 batches: 1.5004703998565674. +[ Wed Sep 14 11:25:45 2022 ] Top1: 70.00% +[ Wed Sep 14 11:25:45 2022 ] Top5: 94.35% +[ Wed Sep 14 11:25:45 2022 ] Training epoch: 70 +[ Wed Sep 14 11:25:54 2022 ] Batch(12/123) done. Loss: 0.0786 lr:0.010000 +[ Wed Sep 14 11:26:48 2022 ] Batch(112/123) done. Loss: 0.1137 lr:0.010000 +[ Wed Sep 14 11:26:53 2022 ] Eval epoch: 70 +[ Wed Sep 14 11:27:52 2022 ] Mean test loss of 296 batches: 1.4758144617080688. +[ Wed Sep 14 11:27:52 2022 ] Top1: 70.19% +[ Wed Sep 14 11:27:52 2022 ] Top5: 94.24% +[ Wed Sep 14 11:27:52 2022 ] Training epoch: 71 +[ Wed Sep 14 11:28:43 2022 ] Batch(89/123) done. Loss: 0.0343 lr:0.010000 +[ Wed Sep 14 11:29:01 2022 ] Eval epoch: 71 +[ Wed Sep 14 11:29:59 2022 ] Mean test loss of 296 batches: 1.5576332807540894. +[ Wed Sep 14 11:29:59 2022 ] Top1: 69.72% +[ Wed Sep 14 11:29:59 2022 ] Top5: 94.22% +[ Wed Sep 14 11:29:59 2022 ] Training epoch: 72 +[ Wed Sep 14 11:30:38 2022 ] Batch(66/123) done. 
Loss: 0.0272 lr:0.010000 +[ Wed Sep 14 11:31:08 2022 ] Eval epoch: 72 +[ Wed Sep 14 11:32:06 2022 ] Mean test loss of 296 batches: 1.5004475116729736. +[ Wed Sep 14 11:32:06 2022 ] Top1: 70.24% +[ Wed Sep 14 11:32:07 2022 ] Top5: 94.17% +[ Wed Sep 14 11:32:07 2022 ] Training epoch: 73 +[ Wed Sep 14 11:32:33 2022 ] Batch(43/123) done. Loss: 0.0664 lr:0.010000 +[ Wed Sep 14 11:33:15 2022 ] Eval epoch: 73 +[ Wed Sep 14 11:34:14 2022 ] Mean test loss of 296 batches: 1.6259011030197144. +[ Wed Sep 14 11:34:14 2022 ] Top1: 69.09% +[ Wed Sep 14 11:34:14 2022 ] Top5: 93.95% +[ Wed Sep 14 11:34:14 2022 ] Training epoch: 74 +[ Wed Sep 14 11:34:28 2022 ] Batch(20/123) done. Loss: 0.1103 lr:0.010000 +[ Wed Sep 14 11:35:22 2022 ] Batch(120/123) done. Loss: 0.0328 lr:0.010000 +[ Wed Sep 14 11:35:23 2022 ] Eval epoch: 74 +[ Wed Sep 14 11:36:21 2022 ] Mean test loss of 296 batches: 1.5728907585144043. +[ Wed Sep 14 11:36:21 2022 ] Top1: 69.80% +[ Wed Sep 14 11:36:21 2022 ] Top5: 94.12% +[ Wed Sep 14 11:36:21 2022 ] Training epoch: 75 +[ Wed Sep 14 11:37:17 2022 ] Batch(97/123) done. Loss: 0.0260 lr:0.010000 +[ Wed Sep 14 11:37:30 2022 ] Eval epoch: 75 +[ Wed Sep 14 11:38:29 2022 ] Mean test loss of 296 batches: 1.6013764142990112. +[ Wed Sep 14 11:38:29 2022 ] Top1: 69.63% +[ Wed Sep 14 11:38:29 2022 ] Top5: 93.94% +[ Wed Sep 14 11:38:29 2022 ] Training epoch: 76 +[ Wed Sep 14 11:39:12 2022 ] Batch(74/123) done. Loss: 0.0218 lr:0.010000 +[ Wed Sep 14 11:39:38 2022 ] Eval epoch: 76 +[ Wed Sep 14 11:40:36 2022 ] Mean test loss of 296 batches: 1.6293004751205444. +[ Wed Sep 14 11:40:36 2022 ] Top1: 69.45% +[ Wed Sep 14 11:40:36 2022 ] Top5: 93.93% +[ Wed Sep 14 11:40:36 2022 ] Training epoch: 77 +[ Wed Sep 14 11:41:07 2022 ] Batch(51/123) done. Loss: 0.1398 lr:0.010000 +[ Wed Sep 14 11:41:45 2022 ] Eval epoch: 77 +[ Wed Sep 14 11:42:44 2022 ] Mean test loss of 296 batches: 1.5975271463394165. 
+[ Wed Sep 14 11:42:44 2022 ] Top1: 69.78% +[ Wed Sep 14 11:42:44 2022 ] Top5: 93.95% +[ Wed Sep 14 11:42:44 2022 ] Training epoch: 78 +[ Wed Sep 14 11:43:02 2022 ] Batch(28/123) done. Loss: 0.0311 lr:0.010000 +[ Wed Sep 14 11:43:53 2022 ] Eval epoch: 78 +[ Wed Sep 14 11:44:51 2022 ] Mean test loss of 296 batches: 1.6020327806472778. +[ Wed Sep 14 11:44:52 2022 ] Top1: 69.81% +[ Wed Sep 14 11:44:52 2022 ] Top5: 94.25% +[ Wed Sep 14 11:44:52 2022 ] Training epoch: 79 +[ Wed Sep 14 11:44:57 2022 ] Batch(5/123) done. Loss: 0.0418 lr:0.010000 +[ Wed Sep 14 11:45:51 2022 ] Batch(105/123) done. Loss: 0.0253 lr:0.010000 +[ Wed Sep 14 11:46:00 2022 ] Eval epoch: 79 +[ Wed Sep 14 11:46:59 2022 ] Mean test loss of 296 batches: 1.649924397468567. +[ Wed Sep 14 11:46:59 2022 ] Top1: 69.21% +[ Wed Sep 14 11:46:59 2022 ] Top5: 93.94% +[ Wed Sep 14 11:46:59 2022 ] Training epoch: 80 +[ Wed Sep 14 11:47:46 2022 ] Batch(82/123) done. Loss: 0.0469 lr:0.010000 +[ Wed Sep 14 11:48:07 2022 ] Eval epoch: 80 +[ Wed Sep 14 11:49:06 2022 ] Mean test loss of 296 batches: 1.6569197177886963. +[ Wed Sep 14 11:49:06 2022 ] Top1: 69.47% +[ Wed Sep 14 11:49:06 2022 ] Top5: 94.04% +[ Wed Sep 14 11:49:06 2022 ] Training epoch: 81 +[ Wed Sep 14 11:49:41 2022 ] Batch(59/123) done. Loss: 0.0240 lr:0.001000 +[ Wed Sep 14 11:50:15 2022 ] Eval epoch: 81 +[ Wed Sep 14 11:51:14 2022 ] Mean test loss of 296 batches: 1.6147605180740356. +[ Wed Sep 14 11:51:14 2022 ] Top1: 70.06% +[ Wed Sep 14 11:51:14 2022 ] Top5: 94.21% +[ Wed Sep 14 11:51:14 2022 ] Training epoch: 82 +[ Wed Sep 14 11:51:37 2022 ] Batch(36/123) done. Loss: 0.0702 lr:0.001000 +[ Wed Sep 14 11:52:23 2022 ] Eval epoch: 82 +[ Wed Sep 14 11:53:21 2022 ] Mean test loss of 296 batches: 1.653167963027954. +[ Wed Sep 14 11:53:22 2022 ] Top1: 69.62% +[ Wed Sep 14 11:53:22 2022 ] Top5: 94.07% +[ Wed Sep 14 11:53:22 2022 ] Training epoch: 83 +[ Wed Sep 14 11:53:32 2022 ] Batch(13/123) done. 
Loss: 0.0709 lr:0.001000 +[ Wed Sep 14 11:54:25 2022 ] Batch(113/123) done. Loss: 0.0215 lr:0.001000 +[ Wed Sep 14 11:54:30 2022 ] Eval epoch: 83 +[ Wed Sep 14 11:55:29 2022 ] Mean test loss of 296 batches: 1.6503124237060547. +[ Wed Sep 14 11:55:29 2022 ] Top1: 69.66% +[ Wed Sep 14 11:55:29 2022 ] Top5: 94.24% +[ Wed Sep 14 11:55:29 2022 ] Training epoch: 84 +[ Wed Sep 14 11:56:21 2022 ] Batch(90/123) done. Loss: 0.0189 lr:0.001000 +[ Wed Sep 14 11:56:38 2022 ] Eval epoch: 84 +[ Wed Sep 14 11:57:36 2022 ] Mean test loss of 296 batches: 1.6427948474884033. +[ Wed Sep 14 11:57:36 2022 ] Top1: 69.59% +[ Wed Sep 14 11:57:36 2022 ] Top5: 94.12% +[ Wed Sep 14 11:57:37 2022 ] Training epoch: 85 +[ Wed Sep 14 11:58:16 2022 ] Batch(67/123) done. Loss: 0.0208 lr:0.001000 +[ Wed Sep 14 11:58:45 2022 ] Eval epoch: 85 +[ Wed Sep 14 11:59:44 2022 ] Mean test loss of 296 batches: 1.6593759059906006. +[ Wed Sep 14 11:59:44 2022 ] Top1: 69.32% +[ Wed Sep 14 11:59:44 2022 ] Top5: 94.04% +[ Wed Sep 14 11:59:44 2022 ] Training epoch: 86 +[ Wed Sep 14 12:00:11 2022 ] Batch(44/123) done. Loss: 0.0135 lr:0.001000 +[ Wed Sep 14 12:00:53 2022 ] Eval epoch: 86 +[ Wed Sep 14 12:01:51 2022 ] Mean test loss of 296 batches: 1.6387076377868652. +[ Wed Sep 14 12:01:51 2022 ] Top1: 69.65% +[ Wed Sep 14 12:01:51 2022 ] Top5: 94.14% +[ Wed Sep 14 12:01:51 2022 ] Training epoch: 87 +[ Wed Sep 14 12:02:06 2022 ] Batch(21/123) done. Loss: 0.0856 lr:0.001000 +[ Wed Sep 14 12:02:59 2022 ] Batch(121/123) done. Loss: 0.0542 lr:0.001000 +[ Wed Sep 14 12:03:00 2022 ] Eval epoch: 87 +[ Wed Sep 14 12:03:58 2022 ] Mean test loss of 296 batches: 1.639967441558838. +[ Wed Sep 14 12:03:58 2022 ] Top1: 69.95% +[ Wed Sep 14 12:03:58 2022 ] Top5: 94.06% +[ Wed Sep 14 12:03:58 2022 ] Training epoch: 88 +[ Wed Sep 14 12:04:54 2022 ] Batch(98/123) done. Loss: 0.0321 lr:0.001000 +[ Wed Sep 14 12:05:07 2022 ] Eval epoch: 88 +[ Wed Sep 14 12:06:05 2022 ] Mean test loss of 296 batches: 1.6449613571166992. 
+[ Wed Sep 14 12:06:05 2022 ] Top1: 69.75% +[ Wed Sep 14 12:06:05 2022 ] Top5: 94.15% +[ Wed Sep 14 12:06:06 2022 ] Training epoch: 89 +[ Wed Sep 14 12:06:49 2022 ] Batch(75/123) done. Loss: 0.0499 lr:0.001000 +[ Wed Sep 14 12:07:15 2022 ] Eval epoch: 89 +[ Wed Sep 14 12:08:13 2022 ] Mean test loss of 296 batches: 1.6413934230804443. +[ Wed Sep 14 12:08:13 2022 ] Top1: 69.78% +[ Wed Sep 14 12:08:13 2022 ] Top5: 94.07% +[ Wed Sep 14 12:08:14 2022 ] Training epoch: 90 +[ Wed Sep 14 12:08:44 2022 ] Batch(52/123) done. Loss: 0.0114 lr:0.001000 +[ Wed Sep 14 12:09:22 2022 ] Eval epoch: 90 +[ Wed Sep 14 12:10:21 2022 ] Mean test loss of 296 batches: 1.6614224910736084. +[ Wed Sep 14 12:10:21 2022 ] Top1: 69.58% +[ Wed Sep 14 12:10:21 2022 ] Top5: 94.02% +[ Wed Sep 14 12:10:21 2022 ] Training epoch: 91 +[ Wed Sep 14 12:10:39 2022 ] Batch(29/123) done. Loss: 0.0326 lr:0.001000 +[ Wed Sep 14 12:11:29 2022 ] Eval epoch: 91 +[ Wed Sep 14 12:12:28 2022 ] Mean test loss of 296 batches: 1.629740595817566. +[ Wed Sep 14 12:12:28 2022 ] Top1: 69.78% +[ Wed Sep 14 12:12:28 2022 ] Top5: 94.12% +[ Wed Sep 14 12:12:28 2022 ] Training epoch: 92 +[ Wed Sep 14 12:12:34 2022 ] Batch(6/123) done. Loss: 0.0278 lr:0.001000 +[ Wed Sep 14 12:13:28 2022 ] Batch(106/123) done. Loss: 0.0485 lr:0.001000 +[ Wed Sep 14 12:13:36 2022 ] Eval epoch: 92 +[ Wed Sep 14 12:14:35 2022 ] Mean test loss of 296 batches: 1.619106650352478. +[ Wed Sep 14 12:14:35 2022 ] Top1: 69.98% +[ Wed Sep 14 12:14:35 2022 ] Top5: 94.28% +[ Wed Sep 14 12:14:35 2022 ] Training epoch: 93 +[ Wed Sep 14 12:15:23 2022 ] Batch(83/123) done. Loss: 0.0251 lr:0.001000 +[ Wed Sep 14 12:15:44 2022 ] Eval epoch: 93 +[ Wed Sep 14 12:16:42 2022 ] Mean test loss of 296 batches: 1.6200447082519531. +[ Wed Sep 14 12:16:42 2022 ] Top1: 69.98% +[ Wed Sep 14 12:16:42 2022 ] Top5: 94.12% +[ Wed Sep 14 12:16:43 2022 ] Training epoch: 94 +[ Wed Sep 14 12:17:18 2022 ] Batch(60/123) done. 
Loss: 0.0279 lr:0.001000 +[ Wed Sep 14 12:17:51 2022 ] Eval epoch: 94 +[ Wed Sep 14 12:18:50 2022 ] Mean test loss of 296 batches: 1.6436985731124878. +[ Wed Sep 14 12:18:50 2022 ] Top1: 69.79% +[ Wed Sep 14 12:18:50 2022 ] Top5: 94.18% +[ Wed Sep 14 12:18:50 2022 ] Training epoch: 95 +[ Wed Sep 14 12:19:13 2022 ] Batch(37/123) done. Loss: 0.0240 lr:0.001000 +[ Wed Sep 14 12:19:59 2022 ] Eval epoch: 95 +[ Wed Sep 14 12:20:57 2022 ] Mean test loss of 296 batches: 1.6862620115280151. +[ Wed Sep 14 12:20:57 2022 ] Top1: 69.26% +[ Wed Sep 14 12:20:57 2022 ] Top5: 93.85% +[ Wed Sep 14 12:20:57 2022 ] Training epoch: 96 +[ Wed Sep 14 12:21:08 2022 ] Batch(14/123) done. Loss: 0.0306 lr:0.001000 +[ Wed Sep 14 12:22:01 2022 ] Batch(114/123) done. Loss: 0.0734 lr:0.001000 +[ Wed Sep 14 12:22:06 2022 ] Eval epoch: 96 +[ Wed Sep 14 12:23:04 2022 ] Mean test loss of 296 batches: 1.6525567770004272. +[ Wed Sep 14 12:23:05 2022 ] Top1: 69.90% +[ Wed Sep 14 12:23:05 2022 ] Top5: 94.15% +[ Wed Sep 14 12:23:05 2022 ] Training epoch: 97 +[ Wed Sep 14 12:23:57 2022 ] Batch(91/123) done. Loss: 0.0853 lr:0.001000 +[ Wed Sep 14 12:24:13 2022 ] Eval epoch: 97 +[ Wed Sep 14 12:25:12 2022 ] Mean test loss of 296 batches: 1.6387194395065308. +[ Wed Sep 14 12:25:12 2022 ] Top1: 69.79% +[ Wed Sep 14 12:25:12 2022 ] Top5: 94.11% +[ Wed Sep 14 12:25:12 2022 ] Training epoch: 98 +[ Wed Sep 14 12:25:52 2022 ] Batch(68/123) done. Loss: 0.0489 lr:0.001000 +[ Wed Sep 14 12:26:21 2022 ] Eval epoch: 98 +[ Wed Sep 14 12:27:19 2022 ] Mean test loss of 296 batches: 1.6475166082382202. +[ Wed Sep 14 12:27:19 2022 ] Top1: 69.97% +[ Wed Sep 14 12:27:19 2022 ] Top5: 94.11% +[ Wed Sep 14 12:27:19 2022 ] Training epoch: 99 +[ Wed Sep 14 12:27:46 2022 ] Batch(45/123) done. Loss: 0.0910 lr:0.001000 +[ Wed Sep 14 12:28:28 2022 ] Eval epoch: 99 +[ Wed Sep 14 12:29:26 2022 ] Mean test loss of 296 batches: 1.6429861783981323. 
+[ Wed Sep 14 12:29:26 2022 ] Top1: 69.74% +[ Wed Sep 14 12:29:26 2022 ] Top5: 94.15% +[ Wed Sep 14 12:29:26 2022 ] Training epoch: 100 +[ Wed Sep 14 12:29:41 2022 ] Batch(22/123) done. Loss: 0.0639 lr:0.001000 +[ Wed Sep 14 12:30:35 2022 ] Batch(122/123) done. Loss: 0.0656 lr:0.001000 +[ Wed Sep 14 12:30:35 2022 ] Eval epoch: 100 +[ Wed Sep 14 12:31:33 2022 ] Mean test loss of 296 batches: 1.6571861505508423. +[ Wed Sep 14 12:31:33 2022 ] Top1: 69.92% +[ Wed Sep 14 12:31:33 2022 ] Top5: 94.05% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..055bafa80ec5404d46e44d1d1ba99ba2eaa5f18f --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_joint_motion_xview +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-view/train_joint_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 8 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_joint_motion_xview +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint_motion.npy + debug: false + label_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_joint_motion_xview diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/decouple_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = 
nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, 
num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, 
                               # (continuation of the self.l8 = TCN_GCN_unit(...)
                               # call from the previous chunk)
                               stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size)
        self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size)

        self.fc = nn.Linear(256, num_class)
        # NOTE(review): nn.init.normal is the deprecated alias of
        # nn.init.normal_ (removed in recent PyTorch releases); the std uses
        # 2/num_class rather than a fan-in/fan-out rule — confirm intended.
        nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x, keep_prob=0.9):
        # x: (N batch, C channels, T frames, V joints, M persons) — layout
        # implied by the permute/view sequence below.
        N, C, T, V, M = x.size()

        # Fold (person, joint, channel) into one axis for per-frame
        # BatchNorm1d, then unfold to (N*M, C, T, V) so each person is treated
        # as an independent sample.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(
            0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        # DropGraph is disabled (keep prob 1.0) for the early layers and only
        # applied from l7 onwards.
        x = self.l1(x, 1.0)
        x = self.l2(x, 1.0)
        x = self.l3(x, 1.0)
        x = self.l4(x, 1.0)
        x = self.l5(x, 1.0)
        x = self.l6(x, 1.0)
        x = self.l7(x, keep_prob)
        x = self.l8(x, keep_prob)
        x = self.l9(x, keep_prob)
        x = self.l10(x, keep_prob)

        # N*M,C,T,V
        c_new = x.size(1)
        x = x.reshape(N, M, c_new, -1)
        # Global average pool over time*joints, then average over persons.
        x = x.mean(3).mean(1)

        return self.fc(x)
diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..48c6d9d22ac5221411cbd5a3b8bdd9934c5eb9d0 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b78eacd26e9070418de8d94ed7584febe6b9f9d0067de4db850d428e7ce3009 +size 5718404 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..05f5cc4e548f2ee529b4b189af13b67c3872b7a9 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_motion_xview/log.txt @@ -0,0 +1,626 @@ +[ Tue Sep 13 14:50:04 2022 ] Parameters: +{'work_dir': './work_dir/ntu_joint_motion_xview', 'model_saved_name': './save_models/ntu_joint_motion_xview', 'Experiment_name': 'ntu_joint_motion_xview',
'config': './config/nturgbd-cross-view/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 8, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Tue Sep 13 14:50:04 2022 ] Training epoch: 1 +[ Tue Sep 13 14:50:55 2022 ] Batch(99/123) done. Loss: 3.1981 lr:0.100000 +[ Tue Sep 13 14:51:06 2022 ] Eval epoch: 1 +[ Tue Sep 13 14:52:04 2022 ] Mean test loss of 296 batches: 5.98690128326416. +[ Tue Sep 13 14:52:04 2022 ] Top1: 6.99% +[ Tue Sep 13 14:52:05 2022 ] Top5: 25.84% +[ Tue Sep 13 14:52:05 2022 ] Training epoch: 2 +[ Tue Sep 13 14:52:49 2022 ] Batch(76/123) done. Loss: 2.3035 lr:0.100000 +[ Tue Sep 13 14:53:14 2022 ] Eval epoch: 2 +[ Tue Sep 13 14:54:13 2022 ] Mean test loss of 296 batches: 3.864668607711792. 
+[ Tue Sep 13 14:54:13 2022 ] Top1: 15.07% +[ Tue Sep 13 14:54:13 2022 ] Top5: 37.16% +[ Tue Sep 13 14:54:14 2022 ] Training epoch: 3 +[ Tue Sep 13 14:54:45 2022 ] Batch(53/123) done. Loss: 2.5177 lr:0.100000 +[ Tue Sep 13 14:55:23 2022 ] Eval epoch: 3 +[ Tue Sep 13 14:56:22 2022 ] Mean test loss of 296 batches: 3.6503658294677734. +[ Tue Sep 13 14:56:22 2022 ] Top1: 17.18% +[ Tue Sep 13 14:56:22 2022 ] Top5: 46.08% +[ Tue Sep 13 14:56:22 2022 ] Training epoch: 4 +[ Tue Sep 13 14:56:41 2022 ] Batch(30/123) done. Loss: 1.6194 lr:0.100000 +[ Tue Sep 13 14:57:31 2022 ] Eval epoch: 4 +[ Tue Sep 13 14:58:30 2022 ] Mean test loss of 296 batches: 3.14631724357605. +[ Tue Sep 13 14:58:30 2022 ] Top1: 24.73% +[ Tue Sep 13 14:58:30 2022 ] Top5: 60.16% +[ Tue Sep 13 14:58:30 2022 ] Training epoch: 5 +[ Tue Sep 13 14:58:37 2022 ] Batch(7/123) done. Loss: 1.4701 lr:0.100000 +[ Tue Sep 13 14:59:31 2022 ] Batch(107/123) done. Loss: 1.8364 lr:0.100000 +[ Tue Sep 13 14:59:39 2022 ] Eval epoch: 5 +[ Tue Sep 13 15:00:38 2022 ] Mean test loss of 296 batches: 3.6047751903533936. +[ Tue Sep 13 15:00:38 2022 ] Top1: 21.53% +[ Tue Sep 13 15:00:38 2022 ] Top5: 57.64% +[ Tue Sep 13 15:00:38 2022 ] Training epoch: 6 +[ Tue Sep 13 15:01:26 2022 ] Batch(84/123) done. Loss: 1.1015 lr:0.100000 +[ Tue Sep 13 15:01:47 2022 ] Eval epoch: 6 +[ Tue Sep 13 15:02:45 2022 ] Mean test loss of 296 batches: 3.1395316123962402. +[ Tue Sep 13 15:02:46 2022 ] Top1: 26.32% +[ Tue Sep 13 15:02:46 2022 ] Top5: 63.53% +[ Tue Sep 13 15:02:46 2022 ] Training epoch: 7 +[ Tue Sep 13 15:03:22 2022 ] Batch(61/123) done. Loss: 1.4402 lr:0.100000 +[ Tue Sep 13 15:03:55 2022 ] Eval epoch: 7 +[ Tue Sep 13 15:04:54 2022 ] Mean test loss of 296 batches: 2.5502262115478516. +[ Tue Sep 13 15:04:54 2022 ] Top1: 37.52% +[ Tue Sep 13 15:04:54 2022 ] Top5: 76.79% +[ Tue Sep 13 15:04:54 2022 ] Training epoch: 8 +[ Tue Sep 13 15:05:18 2022 ] Batch(38/123) done. 
Loss: 1.6526 lr:0.100000 +[ Tue Sep 13 15:06:03 2022 ] Eval epoch: 8 +[ Tue Sep 13 15:07:02 2022 ] Mean test loss of 296 batches: 2.9086506366729736. +[ Tue Sep 13 15:07:02 2022 ] Top1: 34.59% +[ Tue Sep 13 15:07:02 2022 ] Top5: 70.76% +[ Tue Sep 13 15:07:02 2022 ] Training epoch: 9 +[ Tue Sep 13 15:07:13 2022 ] Batch(15/123) done. Loss: 0.9101 lr:0.100000 +[ Tue Sep 13 15:08:07 2022 ] Batch(115/123) done. Loss: 1.3284 lr:0.100000 +[ Tue Sep 13 15:08:11 2022 ] Eval epoch: 9 +[ Tue Sep 13 15:09:10 2022 ] Mean test loss of 296 batches: 1.9909123182296753. +[ Tue Sep 13 15:09:10 2022 ] Top1: 45.13% +[ Tue Sep 13 15:09:10 2022 ] Top5: 83.95% +[ Tue Sep 13 15:09:10 2022 ] Training epoch: 10 +[ Tue Sep 13 15:10:02 2022 ] Batch(92/123) done. Loss: 1.3865 lr:0.100000 +[ Tue Sep 13 15:10:19 2022 ] Eval epoch: 10 +[ Tue Sep 13 15:11:17 2022 ] Mean test loss of 296 batches: 6.401695251464844. +[ Tue Sep 13 15:11:17 2022 ] Top1: 22.78% +[ Tue Sep 13 15:11:17 2022 ] Top5: 57.37% +[ Tue Sep 13 15:11:17 2022 ] Training epoch: 11 +[ Tue Sep 13 15:11:57 2022 ] Batch(69/123) done. Loss: 1.3237 lr:0.100000 +[ Tue Sep 13 15:12:26 2022 ] Eval epoch: 11 +[ Tue Sep 13 15:13:25 2022 ] Mean test loss of 296 batches: 2.554856300354004. +[ Tue Sep 13 15:13:25 2022 ] Top1: 41.30% +[ Tue Sep 13 15:13:25 2022 ] Top5: 77.83% +[ Tue Sep 13 15:13:25 2022 ] Training epoch: 12 +[ Tue Sep 13 15:13:53 2022 ] Batch(46/123) done. Loss: 1.1524 lr:0.100000 +[ Tue Sep 13 15:14:34 2022 ] Eval epoch: 12 +[ Tue Sep 13 15:15:33 2022 ] Mean test loss of 296 batches: 1.9716130495071411. +[ Tue Sep 13 15:15:33 2022 ] Top1: 48.66% +[ Tue Sep 13 15:15:33 2022 ] Top5: 85.61% +[ Tue Sep 13 15:15:33 2022 ] Training epoch: 13 +[ Tue Sep 13 15:15:48 2022 ] Batch(23/123) done. Loss: 1.0352 lr:0.100000 +[ Tue Sep 13 15:16:42 2022 ] Eval epoch: 13 +[ Tue Sep 13 15:17:40 2022 ] Mean test loss of 296 batches: 2.638780355453491. 
+[ Tue Sep 13 15:17:41 2022 ] Top1: 40.16% +[ Tue Sep 13 15:17:41 2022 ] Top5: 75.08% +[ Tue Sep 13 15:17:41 2022 ] Training epoch: 14 +[ Tue Sep 13 15:17:44 2022 ] Batch(0/123) done. Loss: 1.3193 lr:0.100000 +[ Tue Sep 13 15:18:38 2022 ] Batch(100/123) done. Loss: 0.8039 lr:0.100000 +[ Tue Sep 13 15:18:50 2022 ] Eval epoch: 14 +[ Tue Sep 13 15:19:48 2022 ] Mean test loss of 296 batches: 1.9710336923599243. +[ Tue Sep 13 15:19:48 2022 ] Top1: 46.22% +[ Tue Sep 13 15:19:49 2022 ] Top5: 82.53% +[ Tue Sep 13 15:19:49 2022 ] Training epoch: 15 +[ Tue Sep 13 15:20:33 2022 ] Batch(77/123) done. Loss: 0.9186 lr:0.100000 +[ Tue Sep 13 15:20:58 2022 ] Eval epoch: 15 +[ Tue Sep 13 15:21:56 2022 ] Mean test loss of 296 batches: 2.1553592681884766. +[ Tue Sep 13 15:21:56 2022 ] Top1: 47.05% +[ Tue Sep 13 15:21:56 2022 ] Top5: 83.56% +[ Tue Sep 13 15:21:56 2022 ] Training epoch: 16 +[ Tue Sep 13 15:22:28 2022 ] Batch(54/123) done. Loss: 0.4487 lr:0.100000 +[ Tue Sep 13 15:23:05 2022 ] Eval epoch: 16 +[ Tue Sep 13 15:24:04 2022 ] Mean test loss of 296 batches: 2.1945183277130127. +[ Tue Sep 13 15:24:04 2022 ] Top1: 43.85% +[ Tue Sep 13 15:24:04 2022 ] Top5: 80.13% +[ Tue Sep 13 15:24:04 2022 ] Training epoch: 17 +[ Tue Sep 13 15:24:24 2022 ] Batch(31/123) done. Loss: 0.6905 lr:0.100000 +[ Tue Sep 13 15:25:13 2022 ] Eval epoch: 17 +[ Tue Sep 13 15:26:11 2022 ] Mean test loss of 296 batches: 1.8277156352996826. +[ Tue Sep 13 15:26:12 2022 ] Top1: 48.08% +[ Tue Sep 13 15:26:12 2022 ] Top5: 85.59% +[ Tue Sep 13 15:26:12 2022 ] Training epoch: 18 +[ Tue Sep 13 15:26:19 2022 ] Batch(8/123) done. Loss: 0.5670 lr:0.100000 +[ Tue Sep 13 15:27:13 2022 ] Batch(108/123) done. Loss: 0.8551 lr:0.100000 +[ Tue Sep 13 15:27:21 2022 ] Eval epoch: 18 +[ Tue Sep 13 15:28:19 2022 ] Mean test loss of 296 batches: 1.9576246738433838. 
+[ Tue Sep 13 15:28:19 2022 ] Top1: 48.32% +[ Tue Sep 13 15:28:19 2022 ] Top5: 84.72% +[ Tue Sep 13 15:28:19 2022 ] Training epoch: 19 +[ Tue Sep 13 15:29:08 2022 ] Batch(85/123) done. Loss: 0.4684 lr:0.100000 +[ Tue Sep 13 15:29:28 2022 ] Eval epoch: 19 +[ Tue Sep 13 15:30:27 2022 ] Mean test loss of 296 batches: 1.8127565383911133. +[ Tue Sep 13 15:30:27 2022 ] Top1: 52.63% +[ Tue Sep 13 15:30:27 2022 ] Top5: 86.99% +[ Tue Sep 13 15:30:27 2022 ] Training epoch: 20 +[ Tue Sep 13 15:31:04 2022 ] Batch(62/123) done. Loss: 0.7085 lr:0.100000 +[ Tue Sep 13 15:31:37 2022 ] Eval epoch: 20 +[ Tue Sep 13 15:32:35 2022 ] Mean test loss of 296 batches: 2.8792073726654053. +[ Tue Sep 13 15:32:35 2022 ] Top1: 40.09% +[ Tue Sep 13 15:32:35 2022 ] Top5: 77.76% +[ Tue Sep 13 15:32:35 2022 ] Training epoch: 21 +[ Tue Sep 13 15:32:59 2022 ] Batch(39/123) done. Loss: 0.9062 lr:0.100000 +[ Tue Sep 13 15:33:44 2022 ] Eval epoch: 21 +[ Tue Sep 13 15:34:43 2022 ] Mean test loss of 296 batches: 1.8906662464141846. +[ Tue Sep 13 15:34:43 2022 ] Top1: 52.71% +[ Tue Sep 13 15:34:43 2022 ] Top5: 86.26% +[ Tue Sep 13 15:34:43 2022 ] Training epoch: 22 +[ Tue Sep 13 15:34:55 2022 ] Batch(16/123) done. Loss: 0.5478 lr:0.100000 +[ Tue Sep 13 15:35:49 2022 ] Batch(116/123) done. Loss: 0.6721 lr:0.100000 +[ Tue Sep 13 15:35:52 2022 ] Eval epoch: 22 +[ Tue Sep 13 15:36:51 2022 ] Mean test loss of 296 batches: 1.8552395105361938. +[ Tue Sep 13 15:36:51 2022 ] Top1: 54.10% +[ Tue Sep 13 15:36:51 2022 ] Top5: 89.19% +[ Tue Sep 13 15:36:51 2022 ] Training epoch: 23 +[ Tue Sep 13 15:37:45 2022 ] Batch(93/123) done. Loss: 0.6752 lr:0.100000 +[ Tue Sep 13 15:38:00 2022 ] Eval epoch: 23 +[ Tue Sep 13 15:38:59 2022 ] Mean test loss of 296 batches: 1.7145204544067383. +[ Tue Sep 13 15:38:59 2022 ] Top1: 54.91% +[ Tue Sep 13 15:38:59 2022 ] Top5: 88.72% +[ Tue Sep 13 15:38:59 2022 ] Training epoch: 24 +[ Tue Sep 13 15:39:40 2022 ] Batch(70/123) done. 
Loss: 0.5688 lr:0.100000 +[ Tue Sep 13 15:40:08 2022 ] Eval epoch: 24 +[ Tue Sep 13 15:41:07 2022 ] Mean test loss of 296 batches: 2.103210687637329. +[ Tue Sep 13 15:41:07 2022 ] Top1: 51.21% +[ Tue Sep 13 15:41:07 2022 ] Top5: 84.88% +[ Tue Sep 13 15:41:07 2022 ] Training epoch: 25 +[ Tue Sep 13 15:41:36 2022 ] Batch(47/123) done. Loss: 0.5524 lr:0.100000 +[ Tue Sep 13 15:42:16 2022 ] Eval epoch: 25 +[ Tue Sep 13 15:43:15 2022 ] Mean test loss of 296 batches: 3.0271551609039307. +[ Tue Sep 13 15:43:15 2022 ] Top1: 40.86% +[ Tue Sep 13 15:43:15 2022 ] Top5: 77.11% +[ Tue Sep 13 15:43:15 2022 ] Training epoch: 26 +[ Tue Sep 13 15:43:32 2022 ] Batch(24/123) done. Loss: 0.7358 lr:0.100000 +[ Tue Sep 13 15:44:25 2022 ] Eval epoch: 26 +[ Tue Sep 13 15:45:23 2022 ] Mean test loss of 296 batches: 2.110452890396118. +[ Tue Sep 13 15:45:24 2022 ] Top1: 53.01% +[ Tue Sep 13 15:45:24 2022 ] Top5: 86.59% +[ Tue Sep 13 15:45:24 2022 ] Training epoch: 27 +[ Tue Sep 13 15:45:28 2022 ] Batch(1/123) done. Loss: 0.3321 lr:0.100000 +[ Tue Sep 13 15:46:21 2022 ] Batch(101/123) done. Loss: 0.6389 lr:0.100000 +[ Tue Sep 13 15:46:33 2022 ] Eval epoch: 27 +[ Tue Sep 13 15:47:32 2022 ] Mean test loss of 296 batches: 1.9221830368041992. +[ Tue Sep 13 15:47:32 2022 ] Top1: 53.83% +[ Tue Sep 13 15:47:32 2022 ] Top5: 87.16% +[ Tue Sep 13 15:47:32 2022 ] Training epoch: 28 +[ Tue Sep 13 15:48:17 2022 ] Batch(78/123) done. Loss: 0.5058 lr:0.100000 +[ Tue Sep 13 15:48:41 2022 ] Eval epoch: 28 +[ Tue Sep 13 15:49:40 2022 ] Mean test loss of 296 batches: 2.319114923477173. +[ Tue Sep 13 15:49:40 2022 ] Top1: 50.28% +[ Tue Sep 13 15:49:40 2022 ] Top5: 87.05% +[ Tue Sep 13 15:49:40 2022 ] Training epoch: 29 +[ Tue Sep 13 15:50:13 2022 ] Batch(55/123) done. Loss: 0.4575 lr:0.100000 +[ Tue Sep 13 15:50:50 2022 ] Eval epoch: 29 +[ Tue Sep 13 15:51:48 2022 ] Mean test loss of 296 batches: 1.5840641260147095. 
+[ Tue Sep 13 15:51:49 2022 ] Top1: 59.21% +[ Tue Sep 13 15:51:49 2022 ] Top5: 90.78% +[ Tue Sep 13 15:51:49 2022 ] Training epoch: 30 +[ Tue Sep 13 15:52:09 2022 ] Batch(32/123) done. Loss: 0.6421 lr:0.100000 +[ Tue Sep 13 15:52:58 2022 ] Eval epoch: 30 +[ Tue Sep 13 15:53:57 2022 ] Mean test loss of 296 batches: 2.6196701526641846. +[ Tue Sep 13 15:53:57 2022 ] Top1: 47.21% +[ Tue Sep 13 15:53:57 2022 ] Top5: 85.07% +[ Tue Sep 13 15:53:57 2022 ] Training epoch: 31 +[ Tue Sep 13 15:54:05 2022 ] Batch(9/123) done. Loss: 0.3436 lr:0.100000 +[ Tue Sep 13 15:54:59 2022 ] Batch(109/123) done. Loss: 0.3509 lr:0.100000 +[ Tue Sep 13 15:55:06 2022 ] Eval epoch: 31 +[ Tue Sep 13 15:56:04 2022 ] Mean test loss of 296 batches: 2.2234504222869873. +[ Tue Sep 13 15:56:05 2022 ] Top1: 51.54% +[ Tue Sep 13 15:56:05 2022 ] Top5: 86.95% +[ Tue Sep 13 15:56:05 2022 ] Training epoch: 32 +[ Tue Sep 13 15:56:54 2022 ] Batch(86/123) done. Loss: 0.4887 lr:0.100000 +[ Tue Sep 13 15:57:14 2022 ] Eval epoch: 32 +[ Tue Sep 13 15:58:13 2022 ] Mean test loss of 296 batches: 2.1757853031158447. +[ Tue Sep 13 15:58:13 2022 ] Top1: 52.68% +[ Tue Sep 13 15:58:13 2022 ] Top5: 88.81% +[ Tue Sep 13 15:58:13 2022 ] Training epoch: 33 +[ Tue Sep 13 15:58:50 2022 ] Batch(63/123) done. Loss: 0.3028 lr:0.100000 +[ Tue Sep 13 15:59:22 2022 ] Eval epoch: 33 +[ Tue Sep 13 16:00:21 2022 ] Mean test loss of 296 batches: 2.040881395339966. +[ Tue Sep 13 16:00:21 2022 ] Top1: 54.25% +[ Tue Sep 13 16:00:21 2022 ] Top5: 87.92% +[ Tue Sep 13 16:00:21 2022 ] Training epoch: 34 +[ Tue Sep 13 16:00:46 2022 ] Batch(40/123) done. Loss: 0.1983 lr:0.100000 +[ Tue Sep 13 16:01:30 2022 ] Eval epoch: 34 +[ Tue Sep 13 16:02:29 2022 ] Mean test loss of 296 batches: 1.6819934844970703. +[ Tue Sep 13 16:02:29 2022 ] Top1: 60.89% +[ Tue Sep 13 16:02:29 2022 ] Top5: 90.62% +[ Tue Sep 13 16:02:29 2022 ] Training epoch: 35 +[ Tue Sep 13 16:02:42 2022 ] Batch(17/123) done. 
Loss: 0.2226 lr:0.100000 +[ Tue Sep 13 16:03:35 2022 ] Batch(117/123) done. Loss: 0.3388 lr:0.100000 +[ Tue Sep 13 16:03:38 2022 ] Eval epoch: 35 +[ Tue Sep 13 16:04:37 2022 ] Mean test loss of 296 batches: 2.689155340194702. +[ Tue Sep 13 16:04:37 2022 ] Top1: 53.22% +[ Tue Sep 13 16:04:37 2022 ] Top5: 86.11% +[ Tue Sep 13 16:04:37 2022 ] Training epoch: 36 +[ Tue Sep 13 16:05:31 2022 ] Batch(94/123) done. Loss: 0.6228 lr:0.100000 +[ Tue Sep 13 16:05:46 2022 ] Eval epoch: 36 +[ Tue Sep 13 16:06:45 2022 ] Mean test loss of 296 batches: 2.2137339115142822. +[ Tue Sep 13 16:06:45 2022 ] Top1: 54.58% +[ Tue Sep 13 16:06:45 2022 ] Top5: 89.01% +[ Tue Sep 13 16:06:46 2022 ] Training epoch: 37 +[ Tue Sep 13 16:07:27 2022 ] Batch(71/123) done. Loss: 0.3312 lr:0.100000 +[ Tue Sep 13 16:07:54 2022 ] Eval epoch: 37 +[ Tue Sep 13 16:08:53 2022 ] Mean test loss of 296 batches: 3.062812566757202. +[ Tue Sep 13 16:08:53 2022 ] Top1: 50.47% +[ Tue Sep 13 16:08:53 2022 ] Top5: 83.37% +[ Tue Sep 13 16:08:53 2022 ] Training epoch: 38 +[ Tue Sep 13 16:09:22 2022 ] Batch(48/123) done. Loss: 0.2139 lr:0.100000 +[ Tue Sep 13 16:10:02 2022 ] Eval epoch: 38 +[ Tue Sep 13 16:11:01 2022 ] Mean test loss of 296 batches: 2.8946635723114014. +[ Tue Sep 13 16:11:01 2022 ] Top1: 51.64% +[ Tue Sep 13 16:11:01 2022 ] Top5: 85.41% +[ Tue Sep 13 16:11:01 2022 ] Training epoch: 39 +[ Tue Sep 13 16:11:18 2022 ] Batch(25/123) done. Loss: 0.3315 lr:0.100000 +[ Tue Sep 13 16:12:10 2022 ] Eval epoch: 39 +[ Tue Sep 13 16:13:09 2022 ] Mean test loss of 296 batches: 2.6840784549713135. +[ Tue Sep 13 16:13:09 2022 ] Top1: 50.48% +[ Tue Sep 13 16:13:09 2022 ] Top5: 80.29% +[ Tue Sep 13 16:13:09 2022 ] Training epoch: 40 +[ Tue Sep 13 16:13:13 2022 ] Batch(2/123) done. Loss: 0.4279 lr:0.100000 +[ Tue Sep 13 16:14:07 2022 ] Batch(102/123) done. Loss: 0.4364 lr:0.100000 +[ Tue Sep 13 16:14:18 2022 ] Eval epoch: 40 +[ Tue Sep 13 16:15:17 2022 ] Mean test loss of 296 batches: 4.057049751281738. 
+[ Tue Sep 13 16:15:17 2022 ] Top1: 34.06% +[ Tue Sep 13 16:15:17 2022 ] Top5: 69.04% +[ Tue Sep 13 16:15:17 2022 ] Training epoch: 41 +[ Tue Sep 13 16:16:03 2022 ] Batch(79/123) done. Loss: 0.4677 lr:0.100000 +[ Tue Sep 13 16:16:26 2022 ] Eval epoch: 41 +[ Tue Sep 13 16:17:25 2022 ] Mean test loss of 296 batches: 1.6231486797332764. +[ Tue Sep 13 16:17:25 2022 ] Top1: 60.54% +[ Tue Sep 13 16:17:25 2022 ] Top5: 91.22% +[ Tue Sep 13 16:17:25 2022 ] Training epoch: 42 +[ Tue Sep 13 16:17:59 2022 ] Batch(56/123) done. Loss: 0.3914 lr:0.100000 +[ Tue Sep 13 16:18:34 2022 ] Eval epoch: 42 +[ Tue Sep 13 16:19:33 2022 ] Mean test loss of 296 batches: 2.197028398513794. +[ Tue Sep 13 16:19:33 2022 ] Top1: 59.31% +[ Tue Sep 13 16:19:33 2022 ] Top5: 89.90% +[ Tue Sep 13 16:19:33 2022 ] Training epoch: 43 +[ Tue Sep 13 16:19:55 2022 ] Batch(33/123) done. Loss: 0.2951 lr:0.100000 +[ Tue Sep 13 16:20:43 2022 ] Eval epoch: 43 +[ Tue Sep 13 16:21:41 2022 ] Mean test loss of 296 batches: 2.805178165435791. +[ Tue Sep 13 16:21:41 2022 ] Top1: 50.85% +[ Tue Sep 13 16:21:41 2022 ] Top5: 85.55% +[ Tue Sep 13 16:21:42 2022 ] Training epoch: 44 +[ Tue Sep 13 16:21:50 2022 ] Batch(10/123) done. Loss: 0.2585 lr:0.100000 +[ Tue Sep 13 16:22:44 2022 ] Batch(110/123) done. Loss: 0.3660 lr:0.100000 +[ Tue Sep 13 16:22:51 2022 ] Eval epoch: 44 +[ Tue Sep 13 16:23:49 2022 ] Mean test loss of 296 batches: 2.149245500564575. +[ Tue Sep 13 16:23:49 2022 ] Top1: 58.36% +[ Tue Sep 13 16:23:49 2022 ] Top5: 90.34% +[ Tue Sep 13 16:23:49 2022 ] Training epoch: 45 +[ Tue Sep 13 16:24:39 2022 ] Batch(87/123) done. Loss: 0.2961 lr:0.100000 +[ Tue Sep 13 16:24:58 2022 ] Eval epoch: 45 +[ Tue Sep 13 16:25:57 2022 ] Mean test loss of 296 batches: 1.7084667682647705. +[ Tue Sep 13 16:25:57 2022 ] Top1: 62.39% +[ Tue Sep 13 16:25:57 2022 ] Top5: 91.90% +[ Tue Sep 13 16:25:57 2022 ] Training epoch: 46 +[ Tue Sep 13 16:26:35 2022 ] Batch(64/123) done. 
Loss: 0.2071 lr:0.100000 +[ Tue Sep 13 16:27:06 2022 ] Eval epoch: 46 +[ Tue Sep 13 16:28:05 2022 ] Mean test loss of 296 batches: 3.5392019748687744. +[ Tue Sep 13 16:28:05 2022 ] Top1: 46.83% +[ Tue Sep 13 16:28:05 2022 ] Top5: 83.25% +[ Tue Sep 13 16:28:05 2022 ] Training epoch: 47 +[ Tue Sep 13 16:28:31 2022 ] Batch(41/123) done. Loss: 0.1576 lr:0.100000 +[ Tue Sep 13 16:29:14 2022 ] Eval epoch: 47 +[ Tue Sep 13 16:30:13 2022 ] Mean test loss of 296 batches: 2.4092211723327637. +[ Tue Sep 13 16:30:13 2022 ] Top1: 56.02% +[ Tue Sep 13 16:30:13 2022 ] Top5: 86.79% +[ Tue Sep 13 16:30:13 2022 ] Training epoch: 48 +[ Tue Sep 13 16:30:26 2022 ] Batch(18/123) done. Loss: 0.1889 lr:0.100000 +[ Tue Sep 13 16:31:20 2022 ] Batch(118/123) done. Loss: 0.2551 lr:0.100000 +[ Tue Sep 13 16:31:22 2022 ] Eval epoch: 48 +[ Tue Sep 13 16:32:21 2022 ] Mean test loss of 296 batches: 3.3240580558776855. +[ Tue Sep 13 16:32:21 2022 ] Top1: 49.16% +[ Tue Sep 13 16:32:21 2022 ] Top5: 82.17% +[ Tue Sep 13 16:32:21 2022 ] Training epoch: 49 +[ Tue Sep 13 16:33:15 2022 ] Batch(95/123) done. Loss: 0.1095 lr:0.100000 +[ Tue Sep 13 16:33:30 2022 ] Eval epoch: 49 +[ Tue Sep 13 16:34:29 2022 ] Mean test loss of 296 batches: 1.8901573419570923. +[ Tue Sep 13 16:34:29 2022 ] Top1: 62.89% +[ Tue Sep 13 16:34:29 2022 ] Top5: 91.88% +[ Tue Sep 13 16:34:29 2022 ] Training epoch: 50 +[ Tue Sep 13 16:35:11 2022 ] Batch(72/123) done. Loss: 0.1346 lr:0.100000 +[ Tue Sep 13 16:35:38 2022 ] Eval epoch: 50 +[ Tue Sep 13 16:36:37 2022 ] Mean test loss of 296 batches: 2.3353521823883057. +[ Tue Sep 13 16:36:37 2022 ] Top1: 58.21% +[ Tue Sep 13 16:36:37 2022 ] Top5: 88.60% +[ Tue Sep 13 16:36:37 2022 ] Training epoch: 51 +[ Tue Sep 13 16:37:07 2022 ] Batch(49/123) done. Loss: 0.2752 lr:0.100000 +[ Tue Sep 13 16:37:47 2022 ] Eval epoch: 51 +[ Tue Sep 13 16:38:45 2022 ] Mean test loss of 296 batches: 3.1962854862213135. 
+[ Tue Sep 13 16:38:45 2022 ] Top1: 45.80% +[ Tue Sep 13 16:38:46 2022 ] Top5: 81.75% +[ Tue Sep 13 16:38:46 2022 ] Training epoch: 52 +[ Tue Sep 13 16:39:03 2022 ] Batch(26/123) done. Loss: 0.4199 lr:0.100000 +[ Tue Sep 13 16:39:55 2022 ] Eval epoch: 52 +[ Tue Sep 13 16:40:53 2022 ] Mean test loss of 296 batches: 3.1642954349517822. +[ Tue Sep 13 16:40:54 2022 ] Top1: 48.18% +[ Tue Sep 13 16:40:54 2022 ] Top5: 81.95% +[ Tue Sep 13 16:40:54 2022 ] Training epoch: 53 +[ Tue Sep 13 16:40:59 2022 ] Batch(3/123) done. Loss: 0.1184 lr:0.100000 +[ Tue Sep 13 16:41:52 2022 ] Batch(103/123) done. Loss: 0.4466 lr:0.100000 +[ Tue Sep 13 16:42:03 2022 ] Eval epoch: 53 +[ Tue Sep 13 16:43:01 2022 ] Mean test loss of 296 batches: 2.8556981086730957. +[ Tue Sep 13 16:43:02 2022 ] Top1: 53.37% +[ Tue Sep 13 16:43:02 2022 ] Top5: 87.20% +[ Tue Sep 13 16:43:02 2022 ] Training epoch: 54 +[ Tue Sep 13 16:43:48 2022 ] Batch(80/123) done. Loss: 0.1332 lr:0.100000 +[ Tue Sep 13 16:44:11 2022 ] Eval epoch: 54 +[ Tue Sep 13 16:45:09 2022 ] Mean test loss of 296 batches: 2.8173575401306152. +[ Tue Sep 13 16:45:09 2022 ] Top1: 53.95% +[ Tue Sep 13 16:45:09 2022 ] Top5: 85.52% +[ Tue Sep 13 16:45:10 2022 ] Training epoch: 55 +[ Tue Sep 13 16:45:44 2022 ] Batch(57/123) done. Loss: 0.3741 lr:0.100000 +[ Tue Sep 13 16:46:19 2022 ] Eval epoch: 55 +[ Tue Sep 13 16:47:17 2022 ] Mean test loss of 296 batches: 2.326626777648926. +[ Tue Sep 13 16:47:17 2022 ] Top1: 57.03% +[ Tue Sep 13 16:47:17 2022 ] Top5: 88.66% +[ Tue Sep 13 16:47:17 2022 ] Training epoch: 56 +[ Tue Sep 13 16:47:39 2022 ] Batch(34/123) done. Loss: 0.1752 lr:0.100000 +[ Tue Sep 13 16:48:27 2022 ] Eval epoch: 56 +[ Tue Sep 13 16:49:25 2022 ] Mean test loss of 296 batches: 2.2852604389190674. +[ Tue Sep 13 16:49:25 2022 ] Top1: 58.14% +[ Tue Sep 13 16:49:25 2022 ] Top5: 90.38% +[ Tue Sep 13 16:49:26 2022 ] Training epoch: 57 +[ Tue Sep 13 16:49:35 2022 ] Batch(11/123) done. 
Loss: 0.1770 lr:0.100000 +[ Tue Sep 13 16:50:29 2022 ] Batch(111/123) done. Loss: 0.1583 lr:0.100000 +[ Tue Sep 13 16:50:35 2022 ] Eval epoch: 57 +[ Tue Sep 13 16:51:34 2022 ] Mean test loss of 296 batches: 3.2864108085632324. +[ Tue Sep 13 16:51:34 2022 ] Top1: 50.66% +[ Tue Sep 13 16:51:34 2022 ] Top5: 84.37% +[ Tue Sep 13 16:51:34 2022 ] Training epoch: 58 +[ Tue Sep 13 16:52:25 2022 ] Batch(88/123) done. Loss: 0.3048 lr:0.100000 +[ Tue Sep 13 16:52:43 2022 ] Eval epoch: 58 +[ Tue Sep 13 16:53:42 2022 ] Mean test loss of 296 batches: 2.21237850189209. +[ Tue Sep 13 16:53:42 2022 ] Top1: 59.33% +[ Tue Sep 13 16:53:42 2022 ] Top5: 89.86% +[ Tue Sep 13 16:53:42 2022 ] Training epoch: 59 +[ Tue Sep 13 16:54:20 2022 ] Batch(65/123) done. Loss: 0.2601 lr:0.100000 +[ Tue Sep 13 16:54:51 2022 ] Eval epoch: 59 +[ Tue Sep 13 16:55:50 2022 ] Mean test loss of 296 batches: 2.5483250617980957. +[ Tue Sep 13 16:55:50 2022 ] Top1: 55.66% +[ Tue Sep 13 16:55:50 2022 ] Top5: 87.85% +[ Tue Sep 13 16:55:50 2022 ] Training epoch: 60 +[ Tue Sep 13 16:56:16 2022 ] Batch(42/123) done. Loss: 0.1448 lr:0.100000 +[ Tue Sep 13 16:56:59 2022 ] Eval epoch: 60 +[ Tue Sep 13 16:57:58 2022 ] Mean test loss of 296 batches: 2.267015218734741. +[ Tue Sep 13 16:57:58 2022 ] Top1: 57.59% +[ Tue Sep 13 16:57:58 2022 ] Top5: 87.88% +[ Tue Sep 13 16:57:58 2022 ] Training epoch: 61 +[ Tue Sep 13 16:58:11 2022 ] Batch(19/123) done. Loss: 0.3712 lr:0.010000 +[ Tue Sep 13 16:59:05 2022 ] Batch(119/123) done. Loss: 0.0610 lr:0.010000 +[ Tue Sep 13 16:59:07 2022 ] Eval epoch: 61 +[ Tue Sep 13 17:00:06 2022 ] Mean test loss of 296 batches: 1.666585087776184. +[ Tue Sep 13 17:00:06 2022 ] Top1: 67.50% +[ Tue Sep 13 17:00:06 2022 ] Top5: 93.86% +[ Tue Sep 13 17:00:06 2022 ] Training epoch: 62 +[ Tue Sep 13 17:01:01 2022 ] Batch(96/123) done. Loss: 0.0545 lr:0.010000 +[ Tue Sep 13 17:01:15 2022 ] Eval epoch: 62 +[ Tue Sep 13 17:02:14 2022 ] Mean test loss of 296 batches: 1.6533176898956299. 
+[ Tue Sep 13 17:02:14 2022 ] Top1: 68.49% +[ Tue Sep 13 17:02:14 2022 ] Top5: 94.01% +[ Tue Sep 13 17:02:15 2022 ] Training epoch: 63 +[ Tue Sep 13 17:02:57 2022 ] Batch(73/123) done. Loss: 0.0578 lr:0.010000 +[ Tue Sep 13 17:03:24 2022 ] Eval epoch: 63 +[ Tue Sep 13 17:04:23 2022 ] Mean test loss of 296 batches: 1.965466022491455. +[ Tue Sep 13 17:04:23 2022 ] Top1: 64.73% +[ Tue Sep 13 17:04:23 2022 ] Top5: 91.75% +[ Tue Sep 13 17:04:23 2022 ] Training epoch: 64 +[ Tue Sep 13 17:04:53 2022 ] Batch(50/123) done. Loss: 0.0747 lr:0.010000 +[ Tue Sep 13 17:05:32 2022 ] Eval epoch: 64 +[ Tue Sep 13 17:06:31 2022 ] Mean test loss of 296 batches: 2.0580642223358154. +[ Tue Sep 13 17:06:31 2022 ] Top1: 63.19% +[ Tue Sep 13 17:06:31 2022 ] Top5: 91.49% +[ Tue Sep 13 17:06:31 2022 ] Training epoch: 65 +[ Tue Sep 13 17:06:49 2022 ] Batch(27/123) done. Loss: 0.0357 lr:0.010000 +[ Tue Sep 13 17:07:40 2022 ] Eval epoch: 65 +[ Tue Sep 13 17:08:39 2022 ] Mean test loss of 296 batches: 2.047511339187622. +[ Tue Sep 13 17:08:39 2022 ] Top1: 63.32% +[ Tue Sep 13 17:08:39 2022 ] Top5: 91.88% +[ Tue Sep 13 17:08:40 2022 ] Training epoch: 66 +[ Tue Sep 13 17:08:45 2022 ] Batch(4/123) done. Loss: 0.0379 lr:0.010000 +[ Tue Sep 13 17:09:39 2022 ] Batch(104/123) done. Loss: 0.0641 lr:0.010000 +[ Tue Sep 13 17:09:49 2022 ] Eval epoch: 66 +[ Tue Sep 13 17:10:48 2022 ] Mean test loss of 296 batches: 2.043724298477173. +[ Tue Sep 13 17:10:48 2022 ] Top1: 63.98% +[ Tue Sep 13 17:10:48 2022 ] Top5: 92.03% +[ Tue Sep 13 17:10:48 2022 ] Training epoch: 67 +[ Tue Sep 13 17:11:35 2022 ] Batch(81/123) done. Loss: 0.0269 lr:0.010000 +[ Tue Sep 13 17:11:57 2022 ] Eval epoch: 67 +[ Tue Sep 13 17:12:56 2022 ] Mean test loss of 296 batches: 1.7641881704330444. +[ Tue Sep 13 17:12:56 2022 ] Top1: 68.41% +[ Tue Sep 13 17:12:56 2022 ] Top5: 93.86% +[ Tue Sep 13 17:12:56 2022 ] Training epoch: 68 +[ Tue Sep 13 17:13:31 2022 ] Batch(58/123) done. 
Loss: 0.0304 lr:0.010000 +[ Tue Sep 13 17:14:05 2022 ] Eval epoch: 68 +[ Tue Sep 13 17:15:04 2022 ] Mean test loss of 296 batches: 1.8612645864486694. +[ Tue Sep 13 17:15:04 2022 ] Top1: 66.75% +[ Tue Sep 13 17:15:04 2022 ] Top5: 93.31% +[ Tue Sep 13 17:15:04 2022 ] Training epoch: 69 +[ Tue Sep 13 17:15:26 2022 ] Batch(35/123) done. Loss: 0.0256 lr:0.010000 +[ Tue Sep 13 17:16:13 2022 ] Eval epoch: 69 +[ Tue Sep 13 17:17:12 2022 ] Mean test loss of 296 batches: 1.8184658288955688. +[ Tue Sep 13 17:17:12 2022 ] Top1: 67.85% +[ Tue Sep 13 17:17:12 2022 ] Top5: 93.72% +[ Tue Sep 13 17:17:12 2022 ] Training epoch: 70 +[ Tue Sep 13 17:17:22 2022 ] Batch(12/123) done. Loss: 0.0157 lr:0.010000 +[ Tue Sep 13 17:18:16 2022 ] Batch(112/123) done. Loss: 0.0409 lr:0.010000 +[ Tue Sep 13 17:18:21 2022 ] Eval epoch: 70 +[ Tue Sep 13 17:19:20 2022 ] Mean test loss of 296 batches: 1.7241337299346924. +[ Tue Sep 13 17:19:20 2022 ] Top1: 69.37% +[ Tue Sep 13 17:19:20 2022 ] Top5: 94.27% +[ Tue Sep 13 17:19:20 2022 ] Training epoch: 71 +[ Tue Sep 13 17:20:12 2022 ] Batch(89/123) done. Loss: 0.0158 lr:0.010000 +[ Tue Sep 13 17:20:30 2022 ] Eval epoch: 71 +[ Tue Sep 13 17:21:28 2022 ] Mean test loss of 296 batches: 1.8151830434799194. +[ Tue Sep 13 17:21:28 2022 ] Top1: 67.46% +[ Tue Sep 13 17:21:28 2022 ] Top5: 93.34% +[ Tue Sep 13 17:21:28 2022 ] Training epoch: 72 +[ Tue Sep 13 17:22:07 2022 ] Batch(66/123) done. Loss: 0.0179 lr:0.010000 +[ Tue Sep 13 17:22:37 2022 ] Eval epoch: 72 +[ Tue Sep 13 17:23:36 2022 ] Mean test loss of 296 batches: 1.7506524324417114. +[ Tue Sep 13 17:23:36 2022 ] Top1: 69.20% +[ Tue Sep 13 17:23:36 2022 ] Top5: 94.11% +[ Tue Sep 13 17:23:36 2022 ] Training epoch: 73 +[ Tue Sep 13 17:24:03 2022 ] Batch(43/123) done. Loss: 0.1236 lr:0.010000 +[ Tue Sep 13 17:24:45 2022 ] Eval epoch: 73 +[ Tue Sep 13 17:25:44 2022 ] Mean test loss of 296 batches: 1.7212629318237305. 
+[ Tue Sep 13 17:25:44 2022 ] Top1: 69.70% +[ Tue Sep 13 17:25:44 2022 ] Top5: 94.04% +[ Tue Sep 13 17:25:44 2022 ] Training epoch: 74 +[ Tue Sep 13 17:25:58 2022 ] Batch(20/123) done. Loss: 0.0304 lr:0.010000 +[ Tue Sep 13 17:26:52 2022 ] Batch(120/123) done. Loss: 0.0157 lr:0.010000 +[ Tue Sep 13 17:26:53 2022 ] Eval epoch: 74 +[ Tue Sep 13 17:27:52 2022 ] Mean test loss of 296 batches: 1.8251532316207886. +[ Tue Sep 13 17:27:52 2022 ] Top1: 68.33% +[ Tue Sep 13 17:27:52 2022 ] Top5: 93.77% +[ Tue Sep 13 17:27:52 2022 ] Training epoch: 75 +[ Tue Sep 13 17:28:48 2022 ] Batch(97/123) done. Loss: 0.0101 lr:0.010000 +[ Tue Sep 13 17:29:02 2022 ] Eval epoch: 75 +[ Tue Sep 13 17:30:01 2022 ] Mean test loss of 296 batches: 2.27018666267395. +[ Tue Sep 13 17:30:01 2022 ] Top1: 63.27% +[ Tue Sep 13 17:30:01 2022 ] Top5: 90.81% +[ Tue Sep 13 17:30:01 2022 ] Training epoch: 76 +[ Tue Sep 13 17:30:44 2022 ] Batch(74/123) done. Loss: 0.0240 lr:0.010000 +[ Tue Sep 13 17:31:10 2022 ] Eval epoch: 76 +[ Tue Sep 13 17:32:09 2022 ] Mean test loss of 296 batches: 1.898371934890747. +[ Tue Sep 13 17:32:09 2022 ] Top1: 67.44% +[ Tue Sep 13 17:32:09 2022 ] Top5: 93.29% +[ Tue Sep 13 17:32:09 2022 ] Training epoch: 77 +[ Tue Sep 13 17:32:40 2022 ] Batch(51/123) done. Loss: 0.2338 lr:0.010000 +[ Tue Sep 13 17:33:18 2022 ] Eval epoch: 77 +[ Tue Sep 13 17:34:17 2022 ] Mean test loss of 296 batches: 1.768467664718628. +[ Tue Sep 13 17:34:18 2022 ] Top1: 69.39% +[ Tue Sep 13 17:34:18 2022 ] Top5: 94.13% +[ Tue Sep 13 17:34:18 2022 ] Training epoch: 78 +[ Tue Sep 13 17:34:36 2022 ] Batch(28/123) done. Loss: 0.0140 lr:0.010000 +[ Tue Sep 13 17:35:27 2022 ] Eval epoch: 78 +[ Tue Sep 13 17:36:26 2022 ] Mean test loss of 296 batches: 1.8052388429641724. +[ Tue Sep 13 17:36:26 2022 ] Top1: 68.85% +[ Tue Sep 13 17:36:26 2022 ] Top5: 93.91% +[ Tue Sep 13 17:36:26 2022 ] Training epoch: 79 +[ Tue Sep 13 17:36:32 2022 ] Batch(5/123) done. 
Loss: 0.0368 lr:0.010000 +[ Tue Sep 13 17:37:26 2022 ] Batch(105/123) done. Loss: 0.0163 lr:0.010000 +[ Tue Sep 13 17:37:35 2022 ] Eval epoch: 79 +[ Tue Sep 13 17:38:34 2022 ] Mean test loss of 296 batches: 1.7866765260696411. +[ Tue Sep 13 17:38:34 2022 ] Top1: 69.24% +[ Tue Sep 13 17:38:34 2022 ] Top5: 93.98% +[ Tue Sep 13 17:38:34 2022 ] Training epoch: 80 +[ Tue Sep 13 17:39:21 2022 ] Batch(82/123) done. Loss: 0.0450 lr:0.010000 +[ Tue Sep 13 17:39:43 2022 ] Eval epoch: 80 +[ Tue Sep 13 17:40:42 2022 ] Mean test loss of 296 batches: 1.9279972314834595. +[ Tue Sep 13 17:40:42 2022 ] Top1: 67.52% +[ Tue Sep 13 17:40:42 2022 ] Top5: 93.20% +[ Tue Sep 13 17:40:42 2022 ] Training epoch: 81 +[ Tue Sep 13 17:41:17 2022 ] Batch(59/123) done. Loss: 0.0111 lr:0.001000 +[ Tue Sep 13 17:41:51 2022 ] Eval epoch: 81 +[ Tue Sep 13 17:42:50 2022 ] Mean test loss of 296 batches: 1.8086364269256592. +[ Tue Sep 13 17:42:50 2022 ] Top1: 69.45% +[ Tue Sep 13 17:42:50 2022 ] Top5: 94.11% +[ Tue Sep 13 17:42:50 2022 ] Training epoch: 82 +[ Tue Sep 13 17:43:13 2022 ] Batch(36/123) done. Loss: 0.0173 lr:0.001000 +[ Tue Sep 13 17:43:59 2022 ] Eval epoch: 82 +[ Tue Sep 13 17:44:58 2022 ] Mean test loss of 296 batches: 1.8303943872451782. +[ Tue Sep 13 17:44:58 2022 ] Top1: 69.04% +[ Tue Sep 13 17:44:58 2022 ] Top5: 94.02% +[ Tue Sep 13 17:44:58 2022 ] Training epoch: 83 +[ Tue Sep 13 17:45:09 2022 ] Batch(13/123) done. Loss: 0.0525 lr:0.001000 +[ Tue Sep 13 17:46:03 2022 ] Batch(113/123) done. Loss: 0.0160 lr:0.001000 +[ Tue Sep 13 17:46:08 2022 ] Eval epoch: 83 +[ Tue Sep 13 17:47:06 2022 ] Mean test loss of 296 batches: 2.1240596771240234. +[ Tue Sep 13 17:47:06 2022 ] Top1: 64.85% +[ Tue Sep 13 17:47:06 2022 ] Top5: 92.37% +[ Tue Sep 13 17:47:07 2022 ] Training epoch: 84 +[ Tue Sep 13 17:47:58 2022 ] Batch(90/123) done. Loss: 0.0380 lr:0.001000 +[ Tue Sep 13 17:48:16 2022 ] Eval epoch: 84 +[ Tue Sep 13 17:49:14 2022 ] Mean test loss of 296 batches: 1.836277961730957. 
+[ Tue Sep 13 17:49:14 2022 ] Top1: 69.21% +[ Tue Sep 13 17:49:14 2022 ] Top5: 94.08% +[ Tue Sep 13 17:49:14 2022 ] Training epoch: 85 +[ Tue Sep 13 17:49:54 2022 ] Batch(67/123) done. Loss: 0.0201 lr:0.001000 +[ Tue Sep 13 17:50:24 2022 ] Eval epoch: 85 +[ Tue Sep 13 17:51:22 2022 ] Mean test loss of 296 batches: 2.102092981338501. +[ Tue Sep 13 17:51:22 2022 ] Top1: 66.08% +[ Tue Sep 13 17:51:23 2022 ] Top5: 92.30% +[ Tue Sep 13 17:51:23 2022 ] Training epoch: 86 +[ Tue Sep 13 17:51:50 2022 ] Batch(44/123) done. Loss: 0.0107 lr:0.001000 +[ Tue Sep 13 17:52:32 2022 ] Eval epoch: 86 +[ Tue Sep 13 17:53:31 2022 ] Mean test loss of 296 batches: 1.7919987440109253. +[ Tue Sep 13 17:53:31 2022 ] Top1: 69.13% +[ Tue Sep 13 17:53:31 2022 ] Top5: 94.11% +[ Tue Sep 13 17:53:31 2022 ] Training epoch: 87 +[ Tue Sep 13 17:53:46 2022 ] Batch(21/123) done. Loss: 0.0281 lr:0.001000 +[ Tue Sep 13 17:54:40 2022 ] Batch(121/123) done. Loss: 0.0197 lr:0.001000 +[ Tue Sep 13 17:54:41 2022 ] Eval epoch: 87 +[ Tue Sep 13 17:55:39 2022 ] Mean test loss of 296 batches: 1.8275395631790161. +[ Tue Sep 13 17:55:39 2022 ] Top1: 68.97% +[ Tue Sep 13 17:55:39 2022 ] Top5: 93.95% +[ Tue Sep 13 17:55:40 2022 ] Training epoch: 88 +[ Tue Sep 13 17:56:36 2022 ] Batch(98/123) done. Loss: 0.0644 lr:0.001000 +[ Tue Sep 13 17:56:49 2022 ] Eval epoch: 88 +[ Tue Sep 13 17:57:48 2022 ] Mean test loss of 296 batches: 1.8480579853057861. +[ Tue Sep 13 17:57:48 2022 ] Top1: 69.17% +[ Tue Sep 13 17:57:48 2022 ] Top5: 93.95% +[ Tue Sep 13 17:57:48 2022 ] Training epoch: 89 +[ Tue Sep 13 17:58:32 2022 ] Batch(75/123) done. Loss: 0.0352 lr:0.001000 +[ Tue Sep 13 17:58:57 2022 ] Eval epoch: 89 +[ Tue Sep 13 17:59:56 2022 ] Mean test loss of 296 batches: 1.9831492900848389. +[ Tue Sep 13 17:59:56 2022 ] Top1: 66.81% +[ Tue Sep 13 17:59:56 2022 ] Top5: 93.16% +[ Tue Sep 13 17:59:56 2022 ] Training epoch: 90 +[ Tue Sep 13 18:00:28 2022 ] Batch(52/123) done. 
Loss: 0.0110 lr:0.001000 +[ Tue Sep 13 18:01:06 2022 ] Eval epoch: 90 +[ Tue Sep 13 18:02:05 2022 ] Mean test loss of 296 batches: 2.019383668899536. +[ Tue Sep 13 18:02:05 2022 ] Top1: 66.68% +[ Tue Sep 13 18:02:05 2022 ] Top5: 92.75% +[ Tue Sep 13 18:02:05 2022 ] Training epoch: 91 +[ Tue Sep 13 18:02:24 2022 ] Batch(29/123) done. Loss: 0.0462 lr:0.001000 +[ Tue Sep 13 18:03:14 2022 ] Eval epoch: 91 +[ Tue Sep 13 18:04:13 2022 ] Mean test loss of 296 batches: 1.8380054235458374. +[ Tue Sep 13 18:04:13 2022 ] Top1: 69.08% +[ Tue Sep 13 18:04:13 2022 ] Top5: 93.97% +[ Tue Sep 13 18:04:13 2022 ] Training epoch: 92 +[ Tue Sep 13 18:04:20 2022 ] Batch(6/123) done. Loss: 0.0188 lr:0.001000 +[ Tue Sep 13 18:05:13 2022 ] Batch(106/123) done. Loss: 0.0166 lr:0.001000 +[ Tue Sep 13 18:05:22 2022 ] Eval epoch: 92 +[ Tue Sep 13 18:06:21 2022 ] Mean test loss of 296 batches: 1.809983491897583. +[ Tue Sep 13 18:06:21 2022 ] Top1: 69.28% +[ Tue Sep 13 18:06:21 2022 ] Top5: 94.13% +[ Tue Sep 13 18:06:21 2022 ] Training epoch: 93 +[ Tue Sep 13 18:07:09 2022 ] Batch(83/123) done. Loss: 0.0236 lr:0.001000 +[ Tue Sep 13 18:07:30 2022 ] Eval epoch: 93 +[ Tue Sep 13 18:08:29 2022 ] Mean test loss of 296 batches: 1.8779041767120361. +[ Tue Sep 13 18:08:29 2022 ] Top1: 68.76% +[ Tue Sep 13 18:08:30 2022 ] Top5: 93.79% +[ Tue Sep 13 18:08:30 2022 ] Training epoch: 94 +[ Tue Sep 13 18:09:05 2022 ] Batch(60/123) done. Loss: 0.0127 lr:0.001000 +[ Tue Sep 13 18:09:39 2022 ] Eval epoch: 94 +[ Tue Sep 13 18:10:38 2022 ] Mean test loss of 296 batches: 1.895398497581482. +[ Tue Sep 13 18:10:38 2022 ] Top1: 68.33% +[ Tue Sep 13 18:10:38 2022 ] Top5: 93.71% +[ Tue Sep 13 18:10:38 2022 ] Training epoch: 95 +[ Tue Sep 13 18:11:01 2022 ] Batch(37/123) done. Loss: 0.0281 lr:0.001000 +[ Tue Sep 13 18:11:47 2022 ] Eval epoch: 95 +[ Tue Sep 13 18:12:45 2022 ] Mean test loss of 296 batches: 1.8090356588363647. 
+[ Tue Sep 13 18:12:46 2022 ] Top1: 69.23% +[ Tue Sep 13 18:12:46 2022 ] Top5: 93.98% +[ Tue Sep 13 18:12:46 2022 ] Training epoch: 96 +[ Tue Sep 13 18:12:56 2022 ] Batch(14/123) done. Loss: 0.0346 lr:0.001000 +[ Tue Sep 13 18:13:50 2022 ] Batch(114/123) done. Loss: 0.0111 lr:0.001000 +[ Tue Sep 13 18:13:55 2022 ] Eval epoch: 96 +[ Tue Sep 13 18:14:54 2022 ] Mean test loss of 296 batches: 1.85809326171875. +[ Tue Sep 13 18:14:54 2022 ] Top1: 69.14% +[ Tue Sep 13 18:14:54 2022 ] Top5: 93.88% +[ Tue Sep 13 18:14:54 2022 ] Training epoch: 97 +[ Tue Sep 13 18:15:46 2022 ] Batch(91/123) done. Loss: 0.0274 lr:0.001000 +[ Tue Sep 13 18:16:03 2022 ] Eval epoch: 97 +[ Tue Sep 13 18:17:02 2022 ] Mean test loss of 296 batches: 1.843998908996582. +[ Tue Sep 13 18:17:02 2022 ] Top1: 69.16% +[ Tue Sep 13 18:17:02 2022 ] Top5: 93.98% +[ Tue Sep 13 18:17:02 2022 ] Training epoch: 98 +[ Tue Sep 13 18:17:42 2022 ] Batch(68/123) done. Loss: 0.0090 lr:0.001000 +[ Tue Sep 13 18:18:12 2022 ] Eval epoch: 98 +[ Tue Sep 13 18:19:11 2022 ] Mean test loss of 296 batches: 1.807930588722229. +[ Tue Sep 13 18:19:11 2022 ] Top1: 69.45% +[ Tue Sep 13 18:19:11 2022 ] Top5: 94.14% +[ Tue Sep 13 18:19:11 2022 ] Training epoch: 99 +[ Tue Sep 13 18:19:38 2022 ] Batch(45/123) done. Loss: 0.0195 lr:0.001000 +[ Tue Sep 13 18:20:20 2022 ] Eval epoch: 99 +[ Tue Sep 13 18:21:18 2022 ] Mean test loss of 296 batches: 1.8673957586288452. +[ Tue Sep 13 18:21:18 2022 ] Top1: 68.56% +[ Tue Sep 13 18:21:18 2022 ] Top5: 93.84% +[ Tue Sep 13 18:21:19 2022 ] Training epoch: 100 +[ Tue Sep 13 18:21:34 2022 ] Batch(22/123) done. Loss: 0.0654 lr:0.001000 +[ Tue Sep 13 18:22:28 2022 ] Batch(122/123) done. Loss: 0.0594 lr:0.001000 +[ Tue Sep 13 18:22:28 2022 ] Eval epoch: 100 +[ Tue Sep 13 18:23:27 2022 ] Mean test loss of 296 batches: 1.878853678703308. 
+[ Tue Sep 13 18:23:27 2022 ] Top1: 68.12% +[ Tue Sep 13 18:23:27 2022 ] Top5: 93.71% diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/config.yaml b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c6ae50c1d660de1d62e3c4a8257300c0ae401d8 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/config.yaml @@ -0,0 +1,59 @@ +Experiment_name: ntu_joint_xview +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-view/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +groups: 8 +ignore_weights: [] +keep_rate: 0.9 +log_interval: 100 +model: model.decouple_gcn.Model +model_args: + block_size: 41 + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + groups: 8 + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_joint_xview +nesterov: true +num_epoch: 100 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_joint_xview diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/decouple_gcn.py b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/decouple_gcn.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dcce4552ced280fe5b2060df92daebd2452cf7c --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/decouple_gcn.py @@ -0,0 +1,235 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = 
nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * 
D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob): + x = self.tcn1(self.gcn1(x), keep_prob, self.A) + self.dropT_skip( + self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph 
= import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/eval_results/best_acc.pkl b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..61fbd90d75c718e52bd4b32cb31bdb201be74a82 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/eval_results/best_acc.pkl @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ff5736a198dcde62172fb41a86614c4165f83b402247a9270244a6507721ff3 +size 5718404 diff --git a/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/log.txt b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..f9e3047aafd7b9ba6ba2d8dd3eea974416a61cd5 --- /dev/null +++ b/ckpt/Others/DC-GCN+ADG/ntu60_xview/ntu_joint_xview/log.txt @@ -0,0 +1,626 @@ +[ Tue Sep 13 14:49:58 2022 ] Parameters: +{'work_dir': './work_dir/ntu_joint_xview', 'model_saved_name': './save_models/ntu_joint_xview', 'Experiment_name': 'ntu_joint_xview', 'config': './config/nturgbd-cross-view/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.decouple_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'groups': 8, 'block_size': 41, 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 100, 'weight_decay': 0.0001, 'keep_rate': 0.9, 'groups': 8, 'only_train_part': True, 'only_train_epoch': 1, 
'warm_up_epoch': 0} + +[ Tue Sep 13 14:49:58 2022 ] Training epoch: 1 +[ Tue Sep 13 14:50:48 2022 ] Batch(99/123) done. Loss: 3.1884 lr:0.100000 +[ Tue Sep 13 14:50:58 2022 ] Eval epoch: 1 +[ Tue Sep 13 14:51:55 2022 ] Mean test loss of 296 batches: 4.3766398429870605. +[ Tue Sep 13 14:51:55 2022 ] Top1: 5.55% +[ Tue Sep 13 14:51:56 2022 ] Top5: 21.01% +[ Tue Sep 13 14:51:56 2022 ] Training epoch: 2 +[ Tue Sep 13 14:52:39 2022 ] Batch(76/123) done. Loss: 2.5260 lr:0.100000 +[ Tue Sep 13 14:53:03 2022 ] Eval epoch: 2 +[ Tue Sep 13 14:54:01 2022 ] Mean test loss of 296 batches: 3.7300477027893066. +[ Tue Sep 13 14:54:01 2022 ] Top1: 11.47% +[ Tue Sep 13 14:54:01 2022 ] Top5: 37.80% +[ Tue Sep 13 14:54:01 2022 ] Training epoch: 3 +[ Tue Sep 13 14:54:33 2022 ] Batch(53/123) done. Loss: 2.5027 lr:0.100000 +[ Tue Sep 13 14:55:10 2022 ] Eval epoch: 3 +[ Tue Sep 13 14:56:08 2022 ] Mean test loss of 296 batches: 3.5141148567199707. +[ Tue Sep 13 14:56:08 2022 ] Top1: 16.17% +[ Tue Sep 13 14:56:08 2022 ] Top5: 45.18% +[ Tue Sep 13 14:56:08 2022 ] Training epoch: 4 +[ Tue Sep 13 14:56:27 2022 ] Batch(30/123) done. Loss: 2.0806 lr:0.100000 +[ Tue Sep 13 14:57:16 2022 ] Eval epoch: 4 +[ Tue Sep 13 14:58:13 2022 ] Mean test loss of 296 batches: 3.117856740951538. +[ Tue Sep 13 14:58:13 2022 ] Top1: 18.34% +[ Tue Sep 13 14:58:13 2022 ] Top5: 53.24% +[ Tue Sep 13 14:58:13 2022 ] Training epoch: 5 +[ Tue Sep 13 14:58:20 2022 ] Batch(7/123) done. Loss: 1.9279 lr:0.100000 +[ Tue Sep 13 14:59:13 2022 ] Batch(107/123) done. Loss: 1.9794 lr:0.100000 +[ Tue Sep 13 14:59:22 2022 ] Eval epoch: 5 +[ Tue Sep 13 15:00:19 2022 ] Mean test loss of 296 batches: 2.858928680419922. +[ Tue Sep 13 15:00:19 2022 ] Top1: 24.23% +[ Tue Sep 13 15:00:19 2022 ] Top5: 61.59% +[ Tue Sep 13 15:00:19 2022 ] Training epoch: 6 +[ Tue Sep 13 15:01:07 2022 ] Batch(84/123) done. 
Loss: 1.5759 lr:0.100000 +[ Tue Sep 13 15:01:27 2022 ] Eval epoch: 6 +[ Tue Sep 13 15:02:25 2022 ] Mean test loss of 296 batches: 2.667224407196045. +[ Tue Sep 13 15:02:25 2022 ] Top1: 27.09% +[ Tue Sep 13 15:02:25 2022 ] Top5: 65.89% +[ Tue Sep 13 15:02:25 2022 ] Training epoch: 7 +[ Tue Sep 13 15:03:01 2022 ] Batch(61/123) done. Loss: 1.8206 lr:0.100000 +[ Tue Sep 13 15:03:33 2022 ] Eval epoch: 7 +[ Tue Sep 13 15:04:31 2022 ] Mean test loss of 296 batches: 2.482271432876587. +[ Tue Sep 13 15:04:31 2022 ] Top1: 30.85% +[ Tue Sep 13 15:04:31 2022 ] Top5: 68.32% +[ Tue Sep 13 15:04:31 2022 ] Training epoch: 8 +[ Tue Sep 13 15:04:55 2022 ] Batch(38/123) done. Loss: 1.9349 lr:0.100000 +[ Tue Sep 13 15:05:39 2022 ] Eval epoch: 8 +[ Tue Sep 13 15:06:37 2022 ] Mean test loss of 296 batches: 2.896385908126831. +[ Tue Sep 13 15:06:37 2022 ] Top1: 25.35% +[ Tue Sep 13 15:06:37 2022 ] Top5: 66.09% +[ Tue Sep 13 15:06:37 2022 ] Training epoch: 9 +[ Tue Sep 13 15:06:48 2022 ] Batch(15/123) done. Loss: 1.0630 lr:0.100000 +[ Tue Sep 13 15:07:41 2022 ] Batch(115/123) done. Loss: 1.6229 lr:0.100000 +[ Tue Sep 13 15:07:45 2022 ] Eval epoch: 9 +[ Tue Sep 13 15:08:42 2022 ] Mean test loss of 296 batches: 2.4414126873016357. +[ Tue Sep 13 15:08:42 2022 ] Top1: 32.14% +[ Tue Sep 13 15:08:43 2022 ] Top5: 75.02% +[ Tue Sep 13 15:08:43 2022 ] Training epoch: 10 +[ Tue Sep 13 15:09:34 2022 ] Batch(92/123) done. Loss: 1.6600 lr:0.100000 +[ Tue Sep 13 15:09:50 2022 ] Eval epoch: 10 +[ Tue Sep 13 15:10:48 2022 ] Mean test loss of 296 batches: 2.316817045211792. +[ Tue Sep 13 15:10:48 2022 ] Top1: 35.35% +[ Tue Sep 13 15:10:48 2022 ] Top5: 77.60% +[ Tue Sep 13 15:10:48 2022 ] Training epoch: 11 +[ Tue Sep 13 15:11:28 2022 ] Batch(69/123) done. Loss: 1.4171 lr:0.100000 +[ Tue Sep 13 15:11:56 2022 ] Eval epoch: 11 +[ Tue Sep 13 15:12:54 2022 ] Mean test loss of 296 batches: 1.9401909112930298. 
+[ Tue Sep 13 15:12:54 2022 ] Top1: 42.82% +[ Tue Sep 13 15:12:54 2022 ] Top5: 79.94% +[ Tue Sep 13 15:12:54 2022 ] Training epoch: 12 +[ Tue Sep 13 15:13:22 2022 ] Batch(46/123) done. Loss: 1.6078 lr:0.100000 +[ Tue Sep 13 15:14:02 2022 ] Eval epoch: 12 +[ Tue Sep 13 15:15:00 2022 ] Mean test loss of 296 batches: 1.904836654663086. +[ Tue Sep 13 15:15:00 2022 ] Top1: 42.88% +[ Tue Sep 13 15:15:00 2022 ] Top5: 81.99% +[ Tue Sep 13 15:15:00 2022 ] Training epoch: 13 +[ Tue Sep 13 15:15:15 2022 ] Batch(23/123) done. Loss: 1.0612 lr:0.100000 +[ Tue Sep 13 15:16:08 2022 ] Eval epoch: 13 +[ Tue Sep 13 15:17:05 2022 ] Mean test loss of 296 batches: 1.8985826969146729. +[ Tue Sep 13 15:17:05 2022 ] Top1: 46.00% +[ Tue Sep 13 15:17:05 2022 ] Top5: 81.51% +[ Tue Sep 13 15:17:06 2022 ] Training epoch: 14 +[ Tue Sep 13 15:17:09 2022 ] Batch(0/123) done. Loss: 1.4777 lr:0.100000 +[ Tue Sep 13 15:18:02 2022 ] Batch(100/123) done. Loss: 0.8827 lr:0.100000 +[ Tue Sep 13 15:18:14 2022 ] Eval epoch: 14 +[ Tue Sep 13 15:19:11 2022 ] Mean test loss of 296 batches: 2.1175553798675537. +[ Tue Sep 13 15:19:11 2022 ] Top1: 38.57% +[ Tue Sep 13 15:19:11 2022 ] Top5: 82.37% +[ Tue Sep 13 15:19:11 2022 ] Training epoch: 15 +[ Tue Sep 13 15:19:55 2022 ] Batch(77/123) done. Loss: 1.0710 lr:0.100000 +[ Tue Sep 13 15:20:19 2022 ] Eval epoch: 15 +[ Tue Sep 13 15:21:17 2022 ] Mean test loss of 296 batches: 1.6494616270065308. +[ Tue Sep 13 15:21:17 2022 ] Top1: 50.55% +[ Tue Sep 13 15:21:17 2022 ] Top5: 87.49% +[ Tue Sep 13 15:21:17 2022 ] Training epoch: 16 +[ Tue Sep 13 15:21:49 2022 ] Batch(54/123) done. Loss: 0.5488 lr:0.100000 +[ Tue Sep 13 15:22:25 2022 ] Eval epoch: 16 +[ Tue Sep 13 15:23:23 2022 ] Mean test loss of 296 batches: 1.9867146015167236. +[ Tue Sep 13 15:23:23 2022 ] Top1: 46.45% +[ Tue Sep 13 15:23:23 2022 ] Top5: 80.89% +[ Tue Sep 13 15:23:23 2022 ] Training epoch: 17 +[ Tue Sep 13 15:23:42 2022 ] Batch(31/123) done. 
Loss: 1.1490 lr:0.100000 +[ Tue Sep 13 15:24:31 2022 ] Eval epoch: 17 +[ Tue Sep 13 15:25:28 2022 ] Mean test loss of 296 batches: 1.5977210998535156. +[ Tue Sep 13 15:25:28 2022 ] Top1: 52.20% +[ Tue Sep 13 15:25:28 2022 ] Top5: 87.51% +[ Tue Sep 13 15:25:28 2022 ] Training epoch: 18 +[ Tue Sep 13 15:25:36 2022 ] Batch(8/123) done. Loss: 1.0305 lr:0.100000 +[ Tue Sep 13 15:26:29 2022 ] Batch(108/123) done. Loss: 0.9323 lr:0.100000 +[ Tue Sep 13 15:26:37 2022 ] Eval epoch: 18 +[ Tue Sep 13 15:27:35 2022 ] Mean test loss of 296 batches: 1.5797696113586426. +[ Tue Sep 13 15:27:35 2022 ] Top1: 55.25% +[ Tue Sep 13 15:27:35 2022 ] Top5: 87.59% +[ Tue Sep 13 15:27:35 2022 ] Training epoch: 19 +[ Tue Sep 13 15:28:23 2022 ] Batch(85/123) done. Loss: 0.7420 lr:0.100000 +[ Tue Sep 13 15:28:43 2022 ] Eval epoch: 19 +[ Tue Sep 13 15:29:41 2022 ] Mean test loss of 296 batches: 1.6598565578460693. +[ Tue Sep 13 15:29:41 2022 ] Top1: 52.79% +[ Tue Sep 13 15:29:41 2022 ] Top5: 87.68% +[ Tue Sep 13 15:29:41 2022 ] Training epoch: 20 +[ Tue Sep 13 15:30:17 2022 ] Batch(62/123) done. Loss: 0.8692 lr:0.100000 +[ Tue Sep 13 15:30:49 2022 ] Eval epoch: 20 +[ Tue Sep 13 15:31:46 2022 ] Mean test loss of 296 batches: 1.6920138597488403. +[ Tue Sep 13 15:31:46 2022 ] Top1: 53.17% +[ Tue Sep 13 15:31:46 2022 ] Top5: 88.18% +[ Tue Sep 13 15:31:47 2022 ] Training epoch: 21 +[ Tue Sep 13 15:32:10 2022 ] Batch(39/123) done. Loss: 1.2199 lr:0.100000 +[ Tue Sep 13 15:32:54 2022 ] Eval epoch: 21 +[ Tue Sep 13 15:33:52 2022 ] Mean test loss of 296 batches: 1.6357831954956055. +[ Tue Sep 13 15:33:52 2022 ] Top1: 55.24% +[ Tue Sep 13 15:33:52 2022 ] Top5: 90.38% +[ Tue Sep 13 15:33:52 2022 ] Training epoch: 22 +[ Tue Sep 13 15:34:04 2022 ] Batch(16/123) done. Loss: 0.6507 lr:0.100000 +[ Tue Sep 13 15:34:57 2022 ] Batch(116/123) done. Loss: 0.8574 lr:0.100000 +[ Tue Sep 13 15:35:00 2022 ] Eval epoch: 22 +[ Tue Sep 13 15:35:58 2022 ] Mean test loss of 296 batches: 1.5388163328170776. 
+[ Tue Sep 13 15:35:58 2022 ] Top1: 57.19% +[ Tue Sep 13 15:35:58 2022 ] Top5: 89.48% +[ Tue Sep 13 15:35:58 2022 ] Training epoch: 23 +[ Tue Sep 13 15:36:51 2022 ] Batch(93/123) done. Loss: 0.9820 lr:0.100000 +[ Tue Sep 13 15:37:06 2022 ] Eval epoch: 23 +[ Tue Sep 13 15:38:04 2022 ] Mean test loss of 296 batches: 1.4746057987213135. +[ Tue Sep 13 15:38:05 2022 ] Top1: 59.40% +[ Tue Sep 13 15:38:05 2022 ] Top5: 92.52% +[ Tue Sep 13 15:38:05 2022 ] Training epoch: 24 +[ Tue Sep 13 15:38:45 2022 ] Batch(70/123) done. Loss: 0.7119 lr:0.100000 +[ Tue Sep 13 15:39:13 2022 ] Eval epoch: 24 +[ Tue Sep 13 15:40:10 2022 ] Mean test loss of 296 batches: 1.6110554933547974. +[ Tue Sep 13 15:40:10 2022 ] Top1: 56.26% +[ Tue Sep 13 15:40:11 2022 ] Top5: 89.07% +[ Tue Sep 13 15:40:11 2022 ] Training epoch: 25 +[ Tue Sep 13 15:40:39 2022 ] Batch(47/123) done. Loss: 0.5947 lr:0.100000 +[ Tue Sep 13 15:41:18 2022 ] Eval epoch: 25 +[ Tue Sep 13 15:42:16 2022 ] Mean test loss of 296 batches: 1.559248685836792. +[ Tue Sep 13 15:42:16 2022 ] Top1: 57.54% +[ Tue Sep 13 15:42:16 2022 ] Top5: 90.80% +[ Tue Sep 13 15:42:16 2022 ] Training epoch: 26 +[ Tue Sep 13 15:42:32 2022 ] Batch(24/123) done. Loss: 0.9254 lr:0.100000 +[ Tue Sep 13 15:43:24 2022 ] Eval epoch: 26 +[ Tue Sep 13 15:44:22 2022 ] Mean test loss of 296 batches: 1.7099621295928955. +[ Tue Sep 13 15:44:22 2022 ] Top1: 54.86% +[ Tue Sep 13 15:44:22 2022 ] Top5: 90.08% +[ Tue Sep 13 15:44:22 2022 ] Training epoch: 27 +[ Tue Sep 13 15:44:26 2022 ] Batch(1/123) done. Loss: 0.5434 lr:0.100000 +[ Tue Sep 13 15:45:19 2022 ] Batch(101/123) done. Loss: 0.6810 lr:0.100000 +[ Tue Sep 13 15:45:30 2022 ] Eval epoch: 27 +[ Tue Sep 13 15:46:28 2022 ] Mean test loss of 296 batches: 1.5202960968017578. +[ Tue Sep 13 15:46:28 2022 ] Top1: 57.26% +[ Tue Sep 13 15:46:28 2022 ] Top5: 89.31% +[ Tue Sep 13 15:46:28 2022 ] Training epoch: 28 +[ Tue Sep 13 15:47:13 2022 ] Batch(78/123) done. 
Loss: 0.6278 lr:0.100000 +[ Tue Sep 13 15:47:37 2022 ] Eval epoch: 28 +[ Tue Sep 13 15:48:34 2022 ] Mean test loss of 296 batches: 1.549084186553955. +[ Tue Sep 13 15:48:34 2022 ] Top1: 58.46% +[ Tue Sep 13 15:48:34 2022 ] Top5: 90.65% +[ Tue Sep 13 15:48:34 2022 ] Training epoch: 29 +[ Tue Sep 13 15:49:07 2022 ] Batch(55/123) done. Loss: 0.5749 lr:0.100000 +[ Tue Sep 13 15:49:43 2022 ] Eval epoch: 29 +[ Tue Sep 13 15:50:40 2022 ] Mean test loss of 296 batches: 1.4429047107696533. +[ Tue Sep 13 15:50:40 2022 ] Top1: 61.26% +[ Tue Sep 13 15:50:40 2022 ] Top5: 91.90% +[ Tue Sep 13 15:50:40 2022 ] Training epoch: 30 +[ Tue Sep 13 15:51:00 2022 ] Batch(32/123) done. Loss: 0.7543 lr:0.100000 +[ Tue Sep 13 15:51:48 2022 ] Eval epoch: 30 +[ Tue Sep 13 15:52:46 2022 ] Mean test loss of 296 batches: 1.6172176599502563. +[ Tue Sep 13 15:52:46 2022 ] Top1: 57.52% +[ Tue Sep 13 15:52:46 2022 ] Top5: 89.48% +[ Tue Sep 13 15:52:46 2022 ] Training epoch: 31 +[ Tue Sep 13 15:52:54 2022 ] Batch(9/123) done. Loss: 0.4652 lr:0.100000 +[ Tue Sep 13 15:53:47 2022 ] Batch(109/123) done. Loss: 0.4831 lr:0.100000 +[ Tue Sep 13 15:53:54 2022 ] Eval epoch: 31 +[ Tue Sep 13 15:54:51 2022 ] Mean test loss of 296 batches: 1.422489047050476. +[ Tue Sep 13 15:54:51 2022 ] Top1: 60.91% +[ Tue Sep 13 15:54:51 2022 ] Top5: 91.80% +[ Tue Sep 13 15:54:51 2022 ] Training epoch: 32 +[ Tue Sep 13 15:55:40 2022 ] Batch(86/123) done. Loss: 0.6568 lr:0.100000 +[ Tue Sep 13 15:55:59 2022 ] Eval epoch: 32 +[ Tue Sep 13 15:56:57 2022 ] Mean test loss of 296 batches: 1.5977848768234253. +[ Tue Sep 13 15:56:57 2022 ] Top1: 59.62% +[ Tue Sep 13 15:56:57 2022 ] Top5: 91.45% +[ Tue Sep 13 15:56:57 2022 ] Training epoch: 33 +[ Tue Sep 13 15:57:33 2022 ] Batch(63/123) done. Loss: 0.4892 lr:0.100000 +[ Tue Sep 13 15:58:05 2022 ] Eval epoch: 33 +[ Tue Sep 13 15:59:03 2022 ] Mean test loss of 296 batches: 1.362181544303894. 
+[ Tue Sep 13 15:59:03 2022 ] Top1: 62.98% +[ Tue Sep 13 15:59:03 2022 ] Top5: 93.10% +[ Tue Sep 13 15:59:03 2022 ] Training epoch: 34 +[ Tue Sep 13 15:59:27 2022 ] Batch(40/123) done. Loss: 0.3139 lr:0.100000 +[ Tue Sep 13 16:00:11 2022 ] Eval epoch: 34 +[ Tue Sep 13 16:01:09 2022 ] Mean test loss of 296 batches: 1.5716958045959473. +[ Tue Sep 13 16:01:09 2022 ] Top1: 58.34% +[ Tue Sep 13 16:01:09 2022 ] Top5: 91.20% +[ Tue Sep 13 16:01:09 2022 ] Training epoch: 35 +[ Tue Sep 13 16:01:21 2022 ] Batch(17/123) done. Loss: 0.4035 lr:0.100000 +[ Tue Sep 13 16:02:14 2022 ] Batch(117/123) done. Loss: 0.5426 lr:0.100000 +[ Tue Sep 13 16:02:17 2022 ] Eval epoch: 35 +[ Tue Sep 13 16:03:15 2022 ] Mean test loss of 296 batches: 1.9306046962738037. +[ Tue Sep 13 16:03:15 2022 ] Top1: 56.88% +[ Tue Sep 13 16:03:15 2022 ] Top5: 89.80% +[ Tue Sep 13 16:03:15 2022 ] Training epoch: 36 +[ Tue Sep 13 16:04:08 2022 ] Batch(94/123) done. Loss: 0.5866 lr:0.100000 +[ Tue Sep 13 16:04:23 2022 ] Eval epoch: 36 +[ Tue Sep 13 16:05:20 2022 ] Mean test loss of 296 batches: 1.621425986289978. +[ Tue Sep 13 16:05:20 2022 ] Top1: 58.30% +[ Tue Sep 13 16:05:21 2022 ] Top5: 90.25% +[ Tue Sep 13 16:05:21 2022 ] Training epoch: 37 +[ Tue Sep 13 16:06:01 2022 ] Batch(71/123) done. Loss: 0.4631 lr:0.100000 +[ Tue Sep 13 16:06:29 2022 ] Eval epoch: 37 +[ Tue Sep 13 16:07:27 2022 ] Mean test loss of 296 batches: 1.6041808128356934. +[ Tue Sep 13 16:07:27 2022 ] Top1: 59.83% +[ Tue Sep 13 16:07:27 2022 ] Top5: 91.42% +[ Tue Sep 13 16:07:27 2022 ] Training epoch: 38 +[ Tue Sep 13 16:07:55 2022 ] Batch(48/123) done. Loss: 0.5514 lr:0.100000 +[ Tue Sep 13 16:08:35 2022 ] Eval epoch: 38 +[ Tue Sep 13 16:09:33 2022 ] Mean test loss of 296 batches: 1.4260668754577637. +[ Tue Sep 13 16:09:33 2022 ] Top1: 62.21% +[ Tue Sep 13 16:09:33 2022 ] Top5: 91.88% +[ Tue Sep 13 16:09:33 2022 ] Training epoch: 39 +[ Tue Sep 13 16:09:49 2022 ] Batch(25/123) done. 
Loss: 0.4886 lr:0.100000 +[ Tue Sep 13 16:10:41 2022 ] Eval epoch: 39 +[ Tue Sep 13 16:11:39 2022 ] Mean test loss of 296 batches: 1.2637546062469482. +[ Tue Sep 13 16:11:39 2022 ] Top1: 65.39% +[ Tue Sep 13 16:11:39 2022 ] Top5: 93.23% +[ Tue Sep 13 16:11:39 2022 ] Training epoch: 40 +[ Tue Sep 13 16:11:44 2022 ] Batch(2/123) done. Loss: 0.2903 lr:0.100000 +[ Tue Sep 13 16:12:37 2022 ] Batch(102/123) done. Loss: 0.8331 lr:0.100000 +[ Tue Sep 13 16:12:47 2022 ] Eval epoch: 40 +[ Tue Sep 13 16:13:45 2022 ] Mean test loss of 296 batches: 1.3597897291183472. +[ Tue Sep 13 16:13:45 2022 ] Top1: 63.64% +[ Tue Sep 13 16:13:45 2022 ] Top5: 93.05% +[ Tue Sep 13 16:13:45 2022 ] Training epoch: 41 +[ Tue Sep 13 16:14:30 2022 ] Batch(79/123) done. Loss: 0.3577 lr:0.100000 +[ Tue Sep 13 16:14:53 2022 ] Eval epoch: 41 +[ Tue Sep 13 16:15:51 2022 ] Mean test loss of 296 batches: 1.693901777267456. +[ Tue Sep 13 16:15:51 2022 ] Top1: 59.77% +[ Tue Sep 13 16:15:51 2022 ] Top5: 91.34% +[ Tue Sep 13 16:15:51 2022 ] Training epoch: 42 +[ Tue Sep 13 16:16:24 2022 ] Batch(56/123) done. Loss: 0.3879 lr:0.100000 +[ Tue Sep 13 16:16:59 2022 ] Eval epoch: 42 +[ Tue Sep 13 16:17:57 2022 ] Mean test loss of 296 batches: 1.3235911130905151. +[ Tue Sep 13 16:17:57 2022 ] Top1: 65.72% +[ Tue Sep 13 16:17:57 2022 ] Top5: 93.21% +[ Tue Sep 13 16:17:57 2022 ] Training epoch: 43 +[ Tue Sep 13 16:18:18 2022 ] Batch(33/123) done. Loss: 0.4165 lr:0.100000 +[ Tue Sep 13 16:19:06 2022 ] Eval epoch: 43 +[ Tue Sep 13 16:20:03 2022 ] Mean test loss of 296 batches: 1.346187710762024. +[ Tue Sep 13 16:20:03 2022 ] Top1: 66.73% +[ Tue Sep 13 16:20:03 2022 ] Top5: 93.70% +[ Tue Sep 13 16:20:03 2022 ] Training epoch: 44 +[ Tue Sep 13 16:20:12 2022 ] Batch(10/123) done. Loss: 0.3119 lr:0.100000 +[ Tue Sep 13 16:21:05 2022 ] Batch(110/123) done. Loss: 0.4660 lr:0.100000 +[ Tue Sep 13 16:21:11 2022 ] Eval epoch: 44 +[ Tue Sep 13 16:22:09 2022 ] Mean test loss of 296 batches: 1.4843089580535889. 
+[ Tue Sep 13 16:22:09 2022 ] Top1: 65.13% +[ Tue Sep 13 16:22:09 2022 ] Top5: 93.55% +[ Tue Sep 13 16:22:09 2022 ] Training epoch: 45 +[ Tue Sep 13 16:22:58 2022 ] Batch(87/123) done. Loss: 0.4042 lr:0.100000 +[ Tue Sep 13 16:23:17 2022 ] Eval epoch: 45 +[ Tue Sep 13 16:24:15 2022 ] Mean test loss of 296 batches: 1.552656888961792. +[ Tue Sep 13 16:24:15 2022 ] Top1: 61.40% +[ Tue Sep 13 16:24:15 2022 ] Top5: 91.85% +[ Tue Sep 13 16:24:15 2022 ] Training epoch: 46 +[ Tue Sep 13 16:24:52 2022 ] Batch(64/123) done. Loss: 0.3977 lr:0.100000 +[ Tue Sep 13 16:25:23 2022 ] Eval epoch: 46 +[ Tue Sep 13 16:26:21 2022 ] Mean test loss of 296 batches: 1.632838487625122. +[ Tue Sep 13 16:26:21 2022 ] Top1: 61.35% +[ Tue Sep 13 16:26:21 2022 ] Top5: 91.91% +[ Tue Sep 13 16:26:21 2022 ] Training epoch: 47 +[ Tue Sep 13 16:26:46 2022 ] Batch(41/123) done. Loss: 0.3813 lr:0.100000 +[ Tue Sep 13 16:27:29 2022 ] Eval epoch: 47 +[ Tue Sep 13 16:28:27 2022 ] Mean test loss of 296 batches: 1.4422348737716675. +[ Tue Sep 13 16:28:28 2022 ] Top1: 64.92% +[ Tue Sep 13 16:28:28 2022 ] Top5: 92.73% +[ Tue Sep 13 16:28:28 2022 ] Training epoch: 48 +[ Tue Sep 13 16:28:40 2022 ] Batch(18/123) done. Loss: 0.1751 lr:0.100000 +[ Tue Sep 13 16:29:33 2022 ] Batch(118/123) done. Loss: 0.4498 lr:0.100000 +[ Tue Sep 13 16:29:36 2022 ] Eval epoch: 48 +[ Tue Sep 13 16:30:33 2022 ] Mean test loss of 296 batches: 1.610722303390503. +[ Tue Sep 13 16:30:33 2022 ] Top1: 63.75% +[ Tue Sep 13 16:30:33 2022 ] Top5: 92.87% +[ Tue Sep 13 16:30:33 2022 ] Training epoch: 49 +[ Tue Sep 13 16:31:27 2022 ] Batch(95/123) done. Loss: 0.4541 lr:0.100000 +[ Tue Sep 13 16:31:42 2022 ] Eval epoch: 49 +[ Tue Sep 13 16:32:39 2022 ] Mean test loss of 296 batches: 1.5844802856445312. +[ Tue Sep 13 16:32:39 2022 ] Top1: 63.33% +[ Tue Sep 13 16:32:39 2022 ] Top5: 91.78% +[ Tue Sep 13 16:32:39 2022 ] Training epoch: 50 +[ Tue Sep 13 16:33:21 2022 ] Batch(72/123) done. 
Loss: 0.3608 lr:0.100000 +[ Tue Sep 13 16:33:48 2022 ] Eval epoch: 50 +[ Tue Sep 13 16:34:45 2022 ] Mean test loss of 296 batches: 1.6047006845474243. +[ Tue Sep 13 16:34:45 2022 ] Top1: 62.63% +[ Tue Sep 13 16:34:45 2022 ] Top5: 92.83% +[ Tue Sep 13 16:34:45 2022 ] Training epoch: 51 +[ Tue Sep 13 16:35:14 2022 ] Batch(49/123) done. Loss: 0.4394 lr:0.100000 +[ Tue Sep 13 16:35:53 2022 ] Eval epoch: 51 +[ Tue Sep 13 16:36:51 2022 ] Mean test loss of 296 batches: 1.3082096576690674. +[ Tue Sep 13 16:36:51 2022 ] Top1: 67.72% +[ Tue Sep 13 16:36:51 2022 ] Top5: 93.76% +[ Tue Sep 13 16:36:51 2022 ] Training epoch: 52 +[ Tue Sep 13 16:37:08 2022 ] Batch(26/123) done. Loss: 0.2040 lr:0.100000 +[ Tue Sep 13 16:37:59 2022 ] Eval epoch: 52 +[ Tue Sep 13 16:38:57 2022 ] Mean test loss of 296 batches: 1.4719151258468628. +[ Tue Sep 13 16:38:57 2022 ] Top1: 66.08% +[ Tue Sep 13 16:38:57 2022 ] Top5: 93.39% +[ Tue Sep 13 16:38:57 2022 ] Training epoch: 53 +[ Tue Sep 13 16:39:02 2022 ] Batch(3/123) done. Loss: 0.1306 lr:0.100000 +[ Tue Sep 13 16:39:55 2022 ] Batch(103/123) done. Loss: 0.4051 lr:0.100000 +[ Tue Sep 13 16:40:05 2022 ] Eval epoch: 53 +[ Tue Sep 13 16:41:03 2022 ] Mean test loss of 296 batches: 1.7468684911727905. +[ Tue Sep 13 16:41:03 2022 ] Top1: 63.28% +[ Tue Sep 13 16:41:03 2022 ] Top5: 91.85% +[ Tue Sep 13 16:41:03 2022 ] Training epoch: 54 +[ Tue Sep 13 16:41:49 2022 ] Batch(80/123) done. Loss: 0.2765 lr:0.100000 +[ Tue Sep 13 16:42:12 2022 ] Eval epoch: 54 +[ Tue Sep 13 16:43:09 2022 ] Mean test loss of 296 batches: 3.891664981842041. +[ Tue Sep 13 16:43:09 2022 ] Top1: 44.03% +[ Tue Sep 13 16:43:09 2022 ] Top5: 77.75% +[ Tue Sep 13 16:43:09 2022 ] Training epoch: 55 +[ Tue Sep 13 16:43:43 2022 ] Batch(57/123) done. Loss: 0.3309 lr:0.100000 +[ Tue Sep 13 16:44:17 2022 ] Eval epoch: 55 +[ Tue Sep 13 16:45:15 2022 ] Mean test loss of 296 batches: 1.3102078437805176. 
+[ Tue Sep 13 16:45:15 2022 ] Top1: 68.13% +[ Tue Sep 13 16:45:15 2022 ] Top5: 93.68% +[ Tue Sep 13 16:45:15 2022 ] Training epoch: 56 +[ Tue Sep 13 16:45:36 2022 ] Batch(34/123) done. Loss: 0.2110 lr:0.100000 +[ Tue Sep 13 16:46:23 2022 ] Eval epoch: 56 +[ Tue Sep 13 16:47:21 2022 ] Mean test loss of 296 batches: 1.5310816764831543. +[ Tue Sep 13 16:47:21 2022 ] Top1: 66.83% +[ Tue Sep 13 16:47:21 2022 ] Top5: 93.13% +[ Tue Sep 13 16:47:21 2022 ] Training epoch: 57 +[ Tue Sep 13 16:47:30 2022 ] Batch(11/123) done. Loss: 0.2695 lr:0.100000 +[ Tue Sep 13 16:48:23 2022 ] Batch(111/123) done. Loss: 0.6230 lr:0.100000 +[ Tue Sep 13 16:48:29 2022 ] Eval epoch: 57 +[ Tue Sep 13 16:49:26 2022 ] Mean test loss of 296 batches: 1.5271742343902588. +[ Tue Sep 13 16:49:26 2022 ] Top1: 63.74% +[ Tue Sep 13 16:49:27 2022 ] Top5: 92.57% +[ Tue Sep 13 16:49:27 2022 ] Training epoch: 58 +[ Tue Sep 13 16:50:16 2022 ] Batch(88/123) done. Loss: 0.2602 lr:0.100000 +[ Tue Sep 13 16:50:34 2022 ] Eval epoch: 58 +[ Tue Sep 13 16:51:32 2022 ] Mean test loss of 296 batches: 1.7830654382705688. +[ Tue Sep 13 16:51:32 2022 ] Top1: 64.37% +[ Tue Sep 13 16:51:32 2022 ] Top5: 92.28% +[ Tue Sep 13 16:51:32 2022 ] Training epoch: 59 +[ Tue Sep 13 16:52:10 2022 ] Batch(65/123) done. Loss: 0.3332 lr:0.100000 +[ Tue Sep 13 16:52:41 2022 ] Eval epoch: 59 +[ Tue Sep 13 16:53:38 2022 ] Mean test loss of 296 batches: 1.5311118364334106. +[ Tue Sep 13 16:53:39 2022 ] Top1: 63.96% +[ Tue Sep 13 16:53:39 2022 ] Top5: 92.34% +[ Tue Sep 13 16:53:39 2022 ] Training epoch: 60 +[ Tue Sep 13 16:54:04 2022 ] Batch(42/123) done. Loss: 0.2504 lr:0.100000 +[ Tue Sep 13 16:54:47 2022 ] Eval epoch: 60 +[ Tue Sep 13 16:55:44 2022 ] Mean test loss of 296 batches: 1.9407597780227661. +[ Tue Sep 13 16:55:44 2022 ] Top1: 63.88% +[ Tue Sep 13 16:55:44 2022 ] Top5: 92.01% +[ Tue Sep 13 16:55:44 2022 ] Training epoch: 61 +[ Tue Sep 13 16:55:57 2022 ] Batch(19/123) done. 
Loss: 0.2671 lr:0.010000 +[ Tue Sep 13 16:56:50 2022 ] Batch(119/123) done. Loss: 0.1170 lr:0.010000 +[ Tue Sep 13 16:56:52 2022 ] Eval epoch: 61 +[ Tue Sep 13 16:57:50 2022 ] Mean test loss of 296 batches: 1.1426656246185303. +[ Tue Sep 13 16:57:50 2022 ] Top1: 73.05% +[ Tue Sep 13 16:57:50 2022 ] Top5: 95.08% +[ Tue Sep 13 16:57:51 2022 ] Training epoch: 62 +[ Tue Sep 13 16:58:45 2022 ] Batch(96/123) done. Loss: 0.1653 lr:0.010000 +[ Tue Sep 13 16:58:59 2022 ] Eval epoch: 62 +[ Tue Sep 13 16:59:56 2022 ] Mean test loss of 296 batches: 1.1293224096298218. +[ Tue Sep 13 16:59:57 2022 ] Top1: 73.89% +[ Tue Sep 13 16:59:57 2022 ] Top5: 95.25% +[ Tue Sep 13 16:59:57 2022 ] Training epoch: 63 +[ Tue Sep 13 17:00:39 2022 ] Batch(73/123) done. Loss: 0.0826 lr:0.010000 +[ Tue Sep 13 17:01:05 2022 ] Eval epoch: 63 +[ Tue Sep 13 17:02:03 2022 ] Mean test loss of 296 batches: 1.159111499786377. +[ Tue Sep 13 17:02:03 2022 ] Top1: 74.00% +[ Tue Sep 13 17:02:03 2022 ] Top5: 95.25% +[ Tue Sep 13 17:02:03 2022 ] Training epoch: 64 +[ Tue Sep 13 17:02:33 2022 ] Batch(50/123) done. Loss: 0.1334 lr:0.010000 +[ Tue Sep 13 17:03:11 2022 ] Eval epoch: 64 +[ Tue Sep 13 17:04:09 2022 ] Mean test loss of 296 batches: 1.1813373565673828. +[ Tue Sep 13 17:04:09 2022 ] Top1: 73.84% +[ Tue Sep 13 17:04:09 2022 ] Top5: 95.27% +[ Tue Sep 13 17:04:09 2022 ] Training epoch: 65 +[ Tue Sep 13 17:04:26 2022 ] Batch(27/123) done. Loss: 0.0609 lr:0.010000 +[ Tue Sep 13 17:05:17 2022 ] Eval epoch: 65 +[ Tue Sep 13 17:06:15 2022 ] Mean test loss of 296 batches: 1.2154911756515503. +[ Tue Sep 13 17:06:15 2022 ] Top1: 73.48% +[ Tue Sep 13 17:06:15 2022 ] Top5: 95.16% +[ Tue Sep 13 17:06:15 2022 ] Training epoch: 66 +[ Tue Sep 13 17:06:20 2022 ] Batch(4/123) done. Loss: 0.0548 lr:0.010000 +[ Tue Sep 13 17:07:13 2022 ] Batch(104/123) done. Loss: 0.1356 lr:0.010000 +[ Tue Sep 13 17:07:23 2022 ] Eval epoch: 66 +[ Tue Sep 13 17:08:21 2022 ] Mean test loss of 296 batches: 1.173687219619751. 
+[ Tue Sep 13 17:08:21 2022 ] Top1: 74.49% +[ Tue Sep 13 17:08:21 2022 ] Top5: 95.33% +[ Tue Sep 13 17:08:21 2022 ] Training epoch: 67 +[ Tue Sep 13 17:09:07 2022 ] Batch(81/123) done. Loss: 0.0797 lr:0.010000 +[ Tue Sep 13 17:09:29 2022 ] Eval epoch: 67 +[ Tue Sep 13 17:10:27 2022 ] Mean test loss of 296 batches: 1.1732313632965088. +[ Tue Sep 13 17:10:27 2022 ] Top1: 74.54% +[ Tue Sep 13 17:10:27 2022 ] Top5: 95.26% +[ Tue Sep 13 17:10:27 2022 ] Training epoch: 68 +[ Tue Sep 13 17:11:01 2022 ] Batch(58/123) done. Loss: 0.0586 lr:0.010000 +[ Tue Sep 13 17:11:35 2022 ] Eval epoch: 68 +[ Tue Sep 13 17:12:32 2022 ] Mean test loss of 296 batches: 1.1837102174758911. +[ Tue Sep 13 17:12:32 2022 ] Top1: 74.41% +[ Tue Sep 13 17:12:33 2022 ] Top5: 95.28% +[ Tue Sep 13 17:12:33 2022 ] Training epoch: 69 +[ Tue Sep 13 17:12:54 2022 ] Batch(35/123) done. Loss: 0.0683 lr:0.010000 +[ Tue Sep 13 17:13:41 2022 ] Eval epoch: 69 +[ Tue Sep 13 17:14:38 2022 ] Mean test loss of 296 batches: 1.1771076917648315. +[ Tue Sep 13 17:14:39 2022 ] Top1: 75.01% +[ Tue Sep 13 17:14:39 2022 ] Top5: 95.43% +[ Tue Sep 13 17:14:39 2022 ] Training epoch: 70 +[ Tue Sep 13 17:14:48 2022 ] Batch(12/123) done. Loss: 0.0953 lr:0.010000 +[ Tue Sep 13 17:15:41 2022 ] Batch(112/123) done. Loss: 0.1024 lr:0.010000 +[ Tue Sep 13 17:15:47 2022 ] Eval epoch: 70 +[ Tue Sep 13 17:16:45 2022 ] Mean test loss of 296 batches: 1.184678554534912. +[ Tue Sep 13 17:16:45 2022 ] Top1: 74.52% +[ Tue Sep 13 17:16:45 2022 ] Top5: 95.32% +[ Tue Sep 13 17:16:45 2022 ] Training epoch: 71 +[ Tue Sep 13 17:17:35 2022 ] Batch(89/123) done. Loss: 0.0544 lr:0.010000 +[ Tue Sep 13 17:17:53 2022 ] Eval epoch: 71 +[ Tue Sep 13 17:18:50 2022 ] Mean test loss of 296 batches: 1.218309760093689. +[ Tue Sep 13 17:18:51 2022 ] Top1: 74.55% +[ Tue Sep 13 17:18:51 2022 ] Top5: 95.47% +[ Tue Sep 13 17:18:51 2022 ] Training epoch: 72 +[ Tue Sep 13 17:19:29 2022 ] Batch(66/123) done. 
Loss: 0.0243 lr:0.010000 +[ Tue Sep 13 17:19:59 2022 ] Eval epoch: 72 +[ Tue Sep 13 17:20:56 2022 ] Mean test loss of 296 batches: 1.1953452825546265. +[ Tue Sep 13 17:20:57 2022 ] Top1: 74.90% +[ Tue Sep 13 17:20:57 2022 ] Top5: 95.46% +[ Tue Sep 13 17:20:57 2022 ] Training epoch: 73 +[ Tue Sep 13 17:21:22 2022 ] Batch(43/123) done. Loss: 0.1260 lr:0.010000 +[ Tue Sep 13 17:22:04 2022 ] Eval epoch: 73 +[ Tue Sep 13 17:23:02 2022 ] Mean test loss of 296 batches: 1.2192496061325073. +[ Tue Sep 13 17:23:02 2022 ] Top1: 74.83% +[ Tue Sep 13 17:23:02 2022 ] Top5: 95.39% +[ Tue Sep 13 17:23:02 2022 ] Training epoch: 74 +[ Tue Sep 13 17:23:16 2022 ] Batch(20/123) done. Loss: 0.0956 lr:0.010000 +[ Tue Sep 13 17:24:09 2022 ] Batch(120/123) done. Loss: 0.0303 lr:0.010000 +[ Tue Sep 13 17:24:10 2022 ] Eval epoch: 74 +[ Tue Sep 13 17:25:08 2022 ] Mean test loss of 296 batches: 1.2168002128601074. +[ Tue Sep 13 17:25:08 2022 ] Top1: 74.79% +[ Tue Sep 13 17:25:08 2022 ] Top5: 95.51% +[ Tue Sep 13 17:25:08 2022 ] Training epoch: 75 +[ Tue Sep 13 17:26:03 2022 ] Batch(97/123) done. Loss: 0.0504 lr:0.010000 +[ Tue Sep 13 17:26:16 2022 ] Eval epoch: 75 +[ Tue Sep 13 17:27:13 2022 ] Mean test loss of 296 batches: 1.26762855052948. +[ Tue Sep 13 17:27:14 2022 ] Top1: 74.45% +[ Tue Sep 13 17:27:14 2022 ] Top5: 95.30% +[ Tue Sep 13 17:27:14 2022 ] Training epoch: 76 +[ Tue Sep 13 17:27:56 2022 ] Batch(74/123) done. Loss: 0.0367 lr:0.010000 +[ Tue Sep 13 17:28:22 2022 ] Eval epoch: 76 +[ Tue Sep 13 17:29:19 2022 ] Mean test loss of 296 batches: 1.2466528415679932. +[ Tue Sep 13 17:29:19 2022 ] Top1: 74.88% +[ Tue Sep 13 17:29:19 2022 ] Top5: 95.40% +[ Tue Sep 13 17:29:20 2022 ] Training epoch: 77 +[ Tue Sep 13 17:29:49 2022 ] Batch(51/123) done. Loss: 0.0850 lr:0.010000 +[ Tue Sep 13 17:30:27 2022 ] Eval epoch: 77 +[ Tue Sep 13 17:31:25 2022 ] Mean test loss of 296 batches: 1.2405892610549927. 
+[ Tue Sep 13 17:31:25 2022 ] Top1: 74.42% +[ Tue Sep 13 17:31:25 2022 ] Top5: 95.38% +[ Tue Sep 13 17:31:25 2022 ] Training epoch: 78 +[ Tue Sep 13 17:31:43 2022 ] Batch(28/123) done. Loss: 0.0360 lr:0.010000 +[ Tue Sep 13 17:32:33 2022 ] Eval epoch: 78 +[ Tue Sep 13 17:33:31 2022 ] Mean test loss of 296 batches: 1.272119402885437. +[ Tue Sep 13 17:33:31 2022 ] Top1: 74.66% +[ Tue Sep 13 17:33:31 2022 ] Top5: 95.32% +[ Tue Sep 13 17:33:31 2022 ] Training epoch: 79 +[ Tue Sep 13 17:33:37 2022 ] Batch(5/123) done. Loss: 0.1068 lr:0.010000 +[ Tue Sep 13 17:34:30 2022 ] Batch(105/123) done. Loss: 0.0722 lr:0.010000 +[ Tue Sep 13 17:34:39 2022 ] Eval epoch: 79 +[ Tue Sep 13 17:35:37 2022 ] Mean test loss of 296 batches: 1.276414155960083. +[ Tue Sep 13 17:35:37 2022 ] Top1: 74.57% +[ Tue Sep 13 17:35:37 2022 ] Top5: 95.40% +[ Tue Sep 13 17:35:37 2022 ] Training epoch: 80 +[ Tue Sep 13 17:36:23 2022 ] Batch(82/123) done. Loss: 0.0984 lr:0.010000 +[ Tue Sep 13 17:36:45 2022 ] Eval epoch: 80 +[ Tue Sep 13 17:37:42 2022 ] Mean test loss of 296 batches: 1.2540721893310547. +[ Tue Sep 13 17:37:42 2022 ] Top1: 74.84% +[ Tue Sep 13 17:37:42 2022 ] Top5: 95.42% +[ Tue Sep 13 17:37:42 2022 ] Training epoch: 81 +[ Tue Sep 13 17:38:17 2022 ] Batch(59/123) done. Loss: 0.0303 lr:0.001000 +[ Tue Sep 13 17:38:50 2022 ] Eval epoch: 81 +[ Tue Sep 13 17:39:48 2022 ] Mean test loss of 296 batches: 1.2480909824371338. +[ Tue Sep 13 17:39:48 2022 ] Top1: 75.04% +[ Tue Sep 13 17:39:48 2022 ] Top5: 95.44% +[ Tue Sep 13 17:39:48 2022 ] Training epoch: 82 +[ Tue Sep 13 17:40:10 2022 ] Batch(36/123) done. Loss: 0.0671 lr:0.001000 +[ Tue Sep 13 17:40:56 2022 ] Eval epoch: 82 +[ Tue Sep 13 17:41:53 2022 ] Mean test loss of 296 batches: 1.2933716773986816. +[ Tue Sep 13 17:41:53 2022 ] Top1: 74.54% +[ Tue Sep 13 17:41:53 2022 ] Top5: 95.17% +[ Tue Sep 13 17:41:53 2022 ] Training epoch: 83 +[ Tue Sep 13 17:42:04 2022 ] Batch(13/123) done. 
Loss: 0.1154 lr:0.001000 +[ Tue Sep 13 17:42:57 2022 ] Batch(113/123) done. Loss: 0.0463 lr:0.001000 +[ Tue Sep 13 17:43:02 2022 ] Eval epoch: 83 +[ Tue Sep 13 17:43:59 2022 ] Mean test loss of 296 batches: 1.2759150266647339. +[ Tue Sep 13 17:43:59 2022 ] Top1: 74.79% +[ Tue Sep 13 17:44:00 2022 ] Top5: 95.43% +[ Tue Sep 13 17:44:00 2022 ] Training epoch: 84 +[ Tue Sep 13 17:44:51 2022 ] Batch(90/123) done. Loss: 0.0780 lr:0.001000 +[ Tue Sep 13 17:45:08 2022 ] Eval epoch: 84 +[ Tue Sep 13 17:46:06 2022 ] Mean test loss of 296 batches: 1.2783634662628174. +[ Tue Sep 13 17:46:06 2022 ] Top1: 75.06% +[ Tue Sep 13 17:46:06 2022 ] Top5: 95.37% +[ Tue Sep 13 17:46:06 2022 ] Training epoch: 85 +[ Tue Sep 13 17:46:45 2022 ] Batch(67/123) done. Loss: 0.0580 lr:0.001000 +[ Tue Sep 13 17:47:14 2022 ] Eval epoch: 85 +[ Tue Sep 13 17:48:11 2022 ] Mean test loss of 296 batches: 1.2869073152542114. +[ Tue Sep 13 17:48:11 2022 ] Top1: 74.67% +[ Tue Sep 13 17:48:12 2022 ] Top5: 95.26% +[ Tue Sep 13 17:48:12 2022 ] Training epoch: 86 +[ Tue Sep 13 17:48:38 2022 ] Batch(44/123) done. Loss: 0.0697 lr:0.001000 +[ Tue Sep 13 17:49:20 2022 ] Eval epoch: 86 +[ Tue Sep 13 17:50:17 2022 ] Mean test loss of 296 batches: 1.2630976438522339. +[ Tue Sep 13 17:50:17 2022 ] Top1: 74.84% +[ Tue Sep 13 17:50:17 2022 ] Top5: 95.34% +[ Tue Sep 13 17:50:17 2022 ] Training epoch: 87 +[ Tue Sep 13 17:50:31 2022 ] Batch(21/123) done. Loss: 0.1118 lr:0.001000 +[ Tue Sep 13 17:51:25 2022 ] Batch(121/123) done. Loss: 0.0595 lr:0.001000 +[ Tue Sep 13 17:51:25 2022 ] Eval epoch: 87 +[ Tue Sep 13 17:52:23 2022 ] Mean test loss of 296 batches: 1.273455023765564. +[ Tue Sep 13 17:52:23 2022 ] Top1: 75.11% +[ Tue Sep 13 17:52:23 2022 ] Top5: 95.38% +[ Tue Sep 13 17:52:23 2022 ] Training epoch: 88 +[ Tue Sep 13 17:53:19 2022 ] Batch(98/123) done. Loss: 0.0829 lr:0.001000 +[ Tue Sep 13 17:53:31 2022 ] Eval epoch: 88 +[ Tue Sep 13 17:54:29 2022 ] Mean test loss of 296 batches: 1.2803764343261719. 
+[ Tue Sep 13 17:54:29 2022 ] Top1: 74.74% +[ Tue Sep 13 17:54:29 2022 ] Top5: 95.30% +[ Tue Sep 13 17:54:29 2022 ] Training epoch: 89 +[ Tue Sep 13 17:55:12 2022 ] Batch(75/123) done. Loss: 0.0622 lr:0.001000 +[ Tue Sep 13 17:55:37 2022 ] Eval epoch: 89 +[ Tue Sep 13 17:56:35 2022 ] Mean test loss of 296 batches: 1.2550736665725708. +[ Tue Sep 13 17:56:35 2022 ] Top1: 75.01% +[ Tue Sep 13 17:56:35 2022 ] Top5: 95.42% +[ Tue Sep 13 17:56:35 2022 ] Training epoch: 90 +[ Tue Sep 13 17:57:06 2022 ] Batch(52/123) done. Loss: 0.0119 lr:0.001000 +[ Tue Sep 13 17:57:43 2022 ] Eval epoch: 90 +[ Tue Sep 13 17:58:41 2022 ] Mean test loss of 296 batches: 1.2952762842178345. +[ Tue Sep 13 17:58:41 2022 ] Top1: 74.68% +[ Tue Sep 13 17:58:41 2022 ] Top5: 95.34% +[ Tue Sep 13 17:58:41 2022 ] Training epoch: 91 +[ Tue Sep 13 17:59:00 2022 ] Batch(29/123) done. Loss: 0.0761 lr:0.001000 +[ Tue Sep 13 17:59:49 2022 ] Eval epoch: 91 +[ Tue Sep 13 18:00:47 2022 ] Mean test loss of 296 batches: 1.2834513187408447. +[ Tue Sep 13 18:00:47 2022 ] Top1: 74.60% +[ Tue Sep 13 18:00:47 2022 ] Top5: 95.36% +[ Tue Sep 13 18:00:47 2022 ] Training epoch: 92 +[ Tue Sep 13 18:00:53 2022 ] Batch(6/123) done. Loss: 0.0737 lr:0.001000 +[ Tue Sep 13 18:01:46 2022 ] Batch(106/123) done. Loss: 0.0351 lr:0.001000 +[ Tue Sep 13 18:01:55 2022 ] Eval epoch: 92 +[ Tue Sep 13 18:02:52 2022 ] Mean test loss of 296 batches: 1.2560278177261353. +[ Tue Sep 13 18:02:53 2022 ] Top1: 75.11% +[ Tue Sep 13 18:02:53 2022 ] Top5: 95.36% +[ Tue Sep 13 18:02:53 2022 ] Training epoch: 93 +[ Tue Sep 13 18:03:40 2022 ] Batch(83/123) done. Loss: 0.0754 lr:0.001000 +[ Tue Sep 13 18:04:01 2022 ] Eval epoch: 93 +[ Tue Sep 13 18:04:59 2022 ] Mean test loss of 296 batches: 1.2574870586395264. +[ Tue Sep 13 18:04:59 2022 ] Top1: 75.05% +[ Tue Sep 13 18:04:59 2022 ] Top5: 95.50% +[ Tue Sep 13 18:04:59 2022 ] Training epoch: 94 +[ Tue Sep 13 18:05:34 2022 ] Batch(60/123) done. 
Loss: 0.0386 lr:0.001000 +[ Tue Sep 13 18:06:07 2022 ] Eval epoch: 94 +[ Tue Sep 13 18:07:05 2022 ] Mean test loss of 296 batches: 1.273984432220459. +[ Tue Sep 13 18:07:05 2022 ] Top1: 74.86% +[ Tue Sep 13 18:07:05 2022 ] Top5: 95.31% +[ Tue Sep 13 18:07:05 2022 ] Training epoch: 95 +[ Tue Sep 13 18:07:28 2022 ] Batch(37/123) done. Loss: 0.0756 lr:0.001000 +[ Tue Sep 13 18:08:13 2022 ] Eval epoch: 95 +[ Tue Sep 13 18:09:10 2022 ] Mean test loss of 296 batches: 1.2880080938339233. +[ Tue Sep 13 18:09:10 2022 ] Top1: 74.99% +[ Tue Sep 13 18:09:10 2022 ] Top5: 95.28% +[ Tue Sep 13 18:09:10 2022 ] Training epoch: 96 +[ Tue Sep 13 18:09:21 2022 ] Batch(14/123) done. Loss: 0.1205 lr:0.001000 +[ Tue Sep 13 18:10:14 2022 ] Batch(114/123) done. Loss: 0.0359 lr:0.001000 +[ Tue Sep 13 18:10:19 2022 ] Eval epoch: 96 +[ Tue Sep 13 18:11:16 2022 ] Mean test loss of 296 batches: 1.2954365015029907. +[ Tue Sep 13 18:11:16 2022 ] Top1: 74.67% +[ Tue Sep 13 18:11:16 2022 ] Top5: 95.35% +[ Tue Sep 13 18:11:16 2022 ] Training epoch: 97 +[ Tue Sep 13 18:12:08 2022 ] Batch(91/123) done. Loss: 0.0634 lr:0.001000 +[ Tue Sep 13 18:12:25 2022 ] Eval epoch: 97 +[ Tue Sep 13 18:13:22 2022 ] Mean test loss of 296 batches: 1.2973577976226807. +[ Tue Sep 13 18:13:22 2022 ] Top1: 74.78% +[ Tue Sep 13 18:13:22 2022 ] Top5: 95.27% +[ Tue Sep 13 18:13:22 2022 ] Training epoch: 98 +[ Tue Sep 13 18:14:01 2022 ] Batch(68/123) done. Loss: 0.0339 lr:0.001000 +[ Tue Sep 13 18:14:30 2022 ] Eval epoch: 98 +[ Tue Sep 13 18:15:27 2022 ] Mean test loss of 296 batches: 1.2871785163879395. +[ Tue Sep 13 18:15:27 2022 ] Top1: 74.89% +[ Tue Sep 13 18:15:27 2022 ] Top5: 95.32% +[ Tue Sep 13 18:15:27 2022 ] Training epoch: 99 +[ Tue Sep 13 18:15:54 2022 ] Batch(45/123) done. Loss: 0.1499 lr:0.001000 +[ Tue Sep 13 18:16:35 2022 ] Eval epoch: 99 +[ Tue Sep 13 18:17:33 2022 ] Mean test loss of 296 batches: 1.3037185668945312. 
+[ Tue Sep 13 18:17:33 2022 ] Top1: 74.56% +[ Tue Sep 13 18:17:33 2022 ] Top5: 95.35% +[ Tue Sep 13 18:17:33 2022 ] Training epoch: 100 +[ Tue Sep 13 18:17:48 2022 ] Batch(22/123) done. Loss: 0.0956 lr:0.001000 +[ Tue Sep 13 18:18:41 2022 ] Batch(122/123) done. Loss: 0.1634 lr:0.001000 +[ Tue Sep 13 18:18:41 2022 ] Eval epoch: 100 +[ Tue Sep 13 18:19:38 2022 ] Mean test loss of 296 batches: 1.285120964050293. +[ Tue Sep 13 18:19:39 2022 ] Top1: 74.90% +[ Tue Sep 13 18:19:39 2022 ] Top5: 95.33%