code | apis
---|---|
from pathlib import Path
import sys
sys.path.append(str(Path().absolute()))
import logging
log_level = "INFO"
logging.basicConfig(
filename=str(snakemake.log),
filemode="w",
level=log_level,
format="[%(asctime)s]:%(levelname)s: %(message)s",
datefmt="%d/%m/%Y %I:%M:%S %p",
)
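# With this config, log lines look like:
# [25/03/2021 01:02:03 PM]:INFO: Loading report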
from evaluate.calculator import RecallCalculator
from evaluate.report import RecallReport
import pandas as pd
# setup
all_recall_reports_for_one_sample = snakemake.input.all_recall_reports_for_one_sample
sample = snakemake.wildcards.sample_id
tool = snakemake.wildcards.tool
coverage = snakemake.wildcards.coverage
coverage_threshold = snakemake.wildcards.coverage_threshold
strand_bias_threshold = snakemake.wildcards.strand_bias_threshold
gaps_threshold = snakemake.wildcards.gaps_threshold
list_with_number_of_samples = snakemake.params.list_with_number_of_samples
recall_file_for_one_sample_vs_nb_samples_filename = Path(snakemake.output.recall_file_for_one_sample_vs_nb_samples)
# API usage
logging.info("Loading report")
recall_report = RecallReport.from_files(all_recall_reports_for_one_sample,
concatenate_dfs_one_by_one_keeping_only_best_mappings=True)
logging.info("Creating calculator")
recall_calculator = RecallCalculator(recall_report)
logging.info("Calculating recall")
recall_df = recall_calculator.get_recall_report_wrt_truth_probes_for_those_present_in_a_given_nb_of_samples(list_with_number_of_samples)
metadata_df = pd.DataFrame(
data={
"tool": [tool] * len(recall_df),
"coverage": [coverage] * len(recall_df),
"coverage_threshold": [coverage_threshold] * len(recall_df),
"strand_bias_threshold": [strand_bias_threshold] * len(recall_df),
"gaps_threshold": [gaps_threshold] * len(recall_df),
"sample": [sample] * len(recall_df)
}
)
output_df = pd.concat([recall_df, metadata_df], axis=1)
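# The output frame keeps whatever columns the calculator produced and appends
# the constant run metadata (tool, coverage, the three filter thresholds and
# sample), so downstream rules can concatenate these per-sample CSVs directly.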
# output
logging.info("Outputting recall file")
output_df.to_csv(recall_file_for_one_sample_vs_nb_samples_filename, index=False)
logging.info("Done")
|
[
"evaluate.calculator.RecallCalculator",
"evaluate.report.RecallReport.from_files"
] |
import argparse
import os
import random
import shutil
import time
import warnings
import pickle
from collections import OrderedDict
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
from utils.meters import AverageMeter
from evaluate import Evaluator, ClassifierGenerator
from utils.data.datasets import img_list_dataloader
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('-f', '--use-feat', dest='use_feat', action='store_true',
help='evaluate model with feature')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='pre-trained model dir')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://172.16.17.32:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--old-fc', default=None, type=str, metavar='PATH',
help='old-classifier dir')
parser.add_argument('--n2o-map', default=None, type=str, metavar='PATH',
help='new to old label mapping dictionary dir')
parser.add_argument('--cross-eval', action='store_true',
help='conduct cross evaluation between diff models')
parser.add_argument('--old-arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('--old-checkpoint', default=None, type=str, metavar='PATH',
help='old backbone dir')
parser.add_argument('-g', '--generate-cls', action='store_true',
help='generate a pseudo classifier on current training set'
' with a trained model')
parser.add_argument('--train-img-list', default=None, type=str, metavar='PATH',
help='train images txt')
parser.add_argument('--l2', action='store_true',
help='use l2 loss for compatible learning')
parser.add_argument('--lwf', action='store_true',
                    help='use learning without forgetting for compatible learning')
parser.add_argument('--val', action='store_true',
help='conduct validating when an epoch is finished')
parser.add_argument('--triplet', action='store_true',
help='use triplet loss for compatible learning')
parser.add_argument('--contra', action='store_true',
help='use contrastive loss for compatible learning')
parser.add_argument('--use-norm-sm', action='store_true',
help='use normed softmax for training')
parser.add_argument('--temp', default=0.05, type=float,
help='temperature for contrastive loss (default: 0.05)')
best_acc1 = 0.
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
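        # Worked example (assumed values): 2 nodes with 4 GPUs each give
        # world_size 8; the process on node rank 1, local gpu 2 receives
        # global rank 1 * 4 + 2 = 6, a per-process batch of 256 / 4 = 64
        # (for the default --batch-size 256) and a quarter of the workers.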
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_trans = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
train_dataset = datasets.ImageFolder(traindir, train_trans)
if args.train_img_list is None:
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
cls_num = len([d.name for d in os.scandir(traindir) if d.is_dir()])
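        # cls_num is the number of class sub-directories under train/,
        # matching the ImageFolder layout used to build train_dataset.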
else:
train_loader, cls_num, train_sampler = img_list_dataloader(traindir, args.train_img_list, train_trans,
args.distributed, batch_size=args.batch_size,
num_workers=args.workers)
print('==> Using {} for loading data!'.format(args.train_img_list))
print('==> Data loading is done!')
if args.use_feat or args.cross_eval:
cls_num = 0
print('==> Using feature distance, no classifier will be used!')
else:
print('==> Total {} classes!'.format(cls_num))
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
print("=> loading model from '{}'".format(args.checkpoint))
model = models.__dict__[args.arch](old_fc=args.old_fc,
use_feat=args.use_feat,
num_classes=cls_num,
norm_sm=args.use_norm_sm)
checkpoint = torch.load(args.checkpoint)
c_state_dict = OrderedDict()
if 'state_dict' in checkpoint:
checkpoint_dict = checkpoint['state_dict']
else:
checkpoint_dict = checkpoint
for key, value in checkpoint_dict.items():
if 'module.' in key:
# remove 'module.' of data parallel
name = key[7:]
c_state_dict[name] = value
else:
c_state_dict[key] = value
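        # e.g. a DataParallel key 'module.fc.weight' becomes 'fc.weight',
        # so the checkpoint matches the not-yet-wrapped model's state dict.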
unfit_keys = model.load_state_dict(c_state_dict, strict=False)
print('=> these keys in model are not in state dict: {}'.format(unfit_keys.missing_keys))
print('=> these keys in state dict are not in model: {}'.format(unfit_keys.unexpected_keys))
print("=> loading done!")
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](old_fc=args.old_fc,
use_feat=args.use_feat,
num_classes=cls_num,
norm_sm=args.use_norm_sm)
if args.lwf:
        # According to the original Learning without Forgetting paper (Li et al., 2016),
# the old classifier should be finetuned. However, it will not work for BCT.
# So we freeze the old classifier.
for para in model.old_fc.parameters():
para.requires_grad = False
model = cudalize(model, ngpus_per_node, args)
if args.old_checkpoint is not None:
print("=> using old model '{}'".format(args.old_arch))
print("=> loading old model from '{}'".format(args.old_checkpoint))
old_model = models.__dict__[args.old_arch](use_feat=True,
num_classes=0)
old_checkpoint = torch.load(args.old_checkpoint)
oc_state_dict = OrderedDict()
if 'state_dict' in old_checkpoint:
old_checkpoint_dict = old_checkpoint['state_dict']
else:
old_checkpoint_dict = old_checkpoint
for key, value in old_checkpoint_dict.items():
if 'module.' in key:
# remove 'module.' of data parallel
name = key[7:]
oc_state_dict[name] = value
else:
oc_state_dict[key] = value
unfit_keys = old_model.load_state_dict(oc_state_dict, strict=False)
print('=> these keys in model are not in state dict: {}'.format(unfit_keys.missing_keys))
print('=> these keys in state dict are not in model: {}'.format(unfit_keys.unexpected_keys))
print("=> loading done!")
old_model = cudalize(old_model, ngpus_per_node, args)
else:
old_model = None
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
if args.cross_eval:
print("==> cross test start...")
validate(val_loader, model, criterion, args, old_model=old_model)
return
print("==> self test start...")
validate(val_loader, model, criterion, args, cls_num=cls_num)
return
if args.generate_cls:
print('==> generating the pseudo classifier on current training data')
if args.train_img_list is not None:
extract_loader, cls_num = img_list_dataloader(traindir, args.train_img_list,
transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize]),
distributed=args.distributed, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=False,
)
else:
extract_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
s_clsfier = generate_pseudo_classifier(extract_loader, old_model,
cls_num=cls_num)
if not os.path.isdir('./results/'):
os.mkdir('./results/')
    with open('results/synth_clsfier.npy', 'wb') as f:
np.save(f, s_clsfier)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node,
old_model=old_model)
if args.val:
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args, cls_num=cls_num)
else:
acc1 = 100.0 # always save the newest one
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
if not os.path.isdir('./results'):
os.mkdir('./results')
dirname = './results/' + '_'.join([str(args.arch),
'dataset:' + str(args.train_img_list).split('/')[-1],
'bct:' + str(args.old_fc).split('/')[-1],
'lr:' + str(args.lr),
'bs:' + str(args.batch_size),
])
if not os.path.isdir(dirname):
os.mkdir(dirname)
print('==> Saving checkpoint to {}'.format(dirname))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, filename=dirname + '/' + '_'.join(['epoch:' + str(epoch),
datetime.now().strftime("%Y-%m-%d-%H:%M:%S"),
'checkpoint.pth.tar'
]))
def train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node, old_model=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
if args.old_fc is not None:
n2o_map = np.load(args.n2o_map, allow_pickle=True).item() if args.n2o_map is not None else None
old_losses = AverageMeter('Old Loss', ':.4e')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, old_losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
if args.triplet:
tri_losses = AverageMeter('Triplet Loss', ':.4e')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, old_losses, tri_losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
if args.contra:
contra_losses = AverageMeter('Contrastive Loss', ':.4e')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, old_losses, contra_losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
else:
if args.l2:
l2_losses = AverageMeter('L2 Loss', ':.4e')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, l2_losses],
prefix="Epoch: [{}]".format(epoch))
else:
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
if old_model is not None:
old_model.eval() # fix old model
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
if args.old_fc is None:
# if use l2 loss between new and old model
if args.l2:
l2_criterion = nn.MSELoss().cuda(args.gpu)
output_feat = model(images)
old_output_feat = old_model(images)
old_dim = old_output_feat.size(1)
new_dim = output_feat.size(1)
if old_dim < new_dim:
output_feat = F.normalize(output_feat[:, :old_dim], dim=1)
if old_dim > new_dim:
old_output_feat = F.normalize(old_output_feat[:, :new_dim], dim=1)
l2_loss = l2_criterion(output_feat, old_output_feat)
loss = 0.
else:
output = model(images)
loss = criterion(output, target)
old_loss = 0.
else:
output, old_output, output_feat = model(images)
loss = criterion(output, target)
valid_ind = []
o_target = []
if n2o_map is not None:
for ind, t in enumerate(target):
if int(t) in n2o_map:
o_target.append(n2o_map[int(t)])
valid_ind.append(ind)
if torch.cuda.is_available():
o_target = torch.LongTensor(o_target).cuda()
else:
o_target = torch.LongTensor(o_target)
else:
# If there is no overlap, please use learning without forgetting,
# or create pseudo old classifier with feature extraction.
valid_ind = range(len(target))
o_target = target
if len(valid_ind) != 0:
if args.lwf:
old_output_feat = old_model(images)
if torch.cuda.is_available():
pseudo_score = model.module.old_fc(old_output_feat)
else:
pseudo_score = model.old_fc(old_output_feat)
pseudo_label = F.softmax(pseudo_score, dim=1)
                        old_loss = -torch.sum(F.log_softmax(old_output[valid_ind], dim=1) * pseudo_label) / images.size(0)
else:
old_loss = criterion(old_output[valid_ind], o_target)
else:
old_loss = 0.
# if use triplet loss between new and old model
if args.triplet:
tri_criterion = nn.TripletMarginLoss().cuda(args.gpu)
pos_old_output_feat = old_model(images)
# find the hardest negative
n = target.size(0)
mask = target.expand(n, n).eq(target.expand(n, n).t())
dist = torch.pow(output_feat, 2).sum(dim=1, keepdim=True).expand(n, n) + \
torch.pow(pos_old_output_feat, 2).sum(dim=1, keepdim=True).expand(n, n).t()
dist = dist - 2 * torch.mm(output_feat, pos_old_output_feat.t())
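            # dist[i][j] is now the squared Euclidean distance
            # ||new_i - old_j||^2 = ||new_i||^2 + ||old_j||^2 - 2<new_i, old_j>
            # between every new embedding and every old embedding in the batch;
            # the hardest negative for i is the closest old embedding with a
            # different label.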
hardest_neg = []
for index in range(n):
hardest_neg.append(pos_old_output_feat[dist[index][mask[index] == 0].argmin()])
hardest_neg = torch.stack(hardest_neg)
tri_loss = tri_criterion(output_feat, pos_old_output_feat, hardest_neg)
# if use contrastive loss between old and new model
if args.contra:
old_output_feat = old_model(images)
n = target.size(0)
contra_loss = 0.
old_output_feat = F.normalize(old_output_feat, dim=1)
output_feat = F.normalize(output_feat, dim=1)
for index in range(n):
                # This follows supervised contrastive learning (Khosla et al., NeurIPS 2020)
contra_loss_inside = 0.
pos_scores = torch.mm(output_feat[index].unsqueeze(0), old_output_feat[target[index] == target].t())
neg_scores = torch.mm(output_feat[index].unsqueeze(0), old_output_feat[target[index] != target].t())
                # pos_scores has shape (1, #positives); iterate its entries so
                # each positive is contrasted against all negatives in turn
                pos_set_size = pos_scores.size(1)
                for pos_score in pos_scores[0]:
                    all_scores = torch.cat((pos_score.view(1, 1), neg_scores), 1)
                    all_scores /= args.temp
                    # the current positive sample sits at the 0-th position
                    p_label = torch.zeros(1, dtype=torch.long).cuda()
                    contra_loss_inside += criterion(all_scores, p_label)
contra_loss += contra_loss_inside / pos_set_size
contra_loss /= n
if args.l2:
l2_losses.update(l2_loss.item(), images.size(0))
else:
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
if args.old_fc is not None and len(valid_ind) != 0:
old_losses.update(old_loss.item(), len(valid_ind))
if args.triplet:
tri_losses.update(tri_loss.item(), images.size(0))
if args.contra:
contra_losses.update(contra_loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.triplet:
loss = loss + tri_loss
elif args.contra:
loss = loss + contra_loss
elif args.l2:
loss = l2_loss
else:
loss = loss + old_loss
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
if args.multiprocessing_distributed:
if args.rank % ngpus_per_node == 0:
progress.display(i)
else:
pass
else:
progress.display(i)
def validate(val_loader, model, criterion, args, old_model=None, cls_num=1000):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
if args.use_feat:
if args.cross_eval and old_model is not None:
old_model.eval()
evaluator = Evaluator(model, old_model)
else:
evaluator = Evaluator(model)
top1, top5 = evaluator.evaluate(val_loader)
print(' * Acc@1 {top1:.3f} Acc@5 {top5:.3f}'
.format(top1=top1, top5=top5))
return top1
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
if cls_num in target:
print('Only have {} classes, test stop!'.format(cls_num))
break
# compute output
if args.old_fc is None:
output = model(images)
else:
output, _, _ = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '_'.join([filename.split('_epoch')[0], 'model_best.pth.tar']))
def cudalize(model, ngpus_per_node, args):
"""Select cuda or cpu mode on different machine"""
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
return model
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
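# With AverageMeter's usual '{name} {val} ({avg})' formatting (an assumption
# about utils.meters), display(17) prints a tab-joined line such as
# 'Epoch: [3][  17/5005]  Time 0.123 ( 0.140)  Loss 2.3e+00 (2.5e+00) ...'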
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
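# e.g. with the default --lr 0.1: epochs 0-29 run at 0.1, epochs 30-59 at
# 0.01, and epochs 60-89 at 0.001.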
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
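# e.g. accuracy(output, target, topk=(1, 5)) on a 256-image batch with 190
# correct top-1 and 240 correct top-5 predictions returns roughly
# [74.22, 93.75] (in percent).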
def generate_pseudo_classifier(train_loader, old_model, cls_num=1000):
"""Generate the pseudo classifier with new training data and old embedding model"""
old_model.eval()
cls_generator = ClassifierGenerator(old_model, cls_num)
saved_classifier = cls_generator.generate_classifier(train_loader)
return saved_classifier
if __name__ == '__main__':
main()
|
[
"evaluate.Evaluator",
"evaluate.ClassifierGenerator"
] |
# repo originally forked from https://github.com/Confusezius/Deep-Metric-Learning-Baselines
"""to do:
clean all of the files, particularly main.py, the losses and dataset files, and the dataloading file
-- fast loading etc
update the copyright headers at the top of all of the files
"""
#################### LIBRARIES ########################
import warnings
warnings.filterwarnings("ignore")
import os, numpy as np, argparse, random, matplotlib, datetime, time
os.chdir(os.path.dirname(os.path.realpath(__file__)))
from pathlib import Path
matplotlib.use('agg')
from tqdm import tqdm
import auxiliaries as aux
import datasets as data
import netlib as netlib
import losses as losses
import evaluate as eval
from tensorboardX import SummaryWriter
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
################### INPUT ARGUMENTS ###################
parser = argparse.ArgumentParser()
####### Main Parameter: Dataset to use for Training
parser.add_argument('--dataset', default='vehicle_id', type=str, help='Dataset to use.', choices=['Inaturalist','vehicle_id'])
### General Training Parameters
parser.add_argument('--lr', default=0.00001, type=float, help='Learning Rate for network parameters.')
parser.add_argument('--fc_lr_mul', default=5, type=float, help='OPTIONAL: Multiply the embedding layer learning rate by this value. If set to 0, the embedding layer shares the same learning rate.')
parser.add_argument('--n_epochs', default=400, type=int, help='Number of training epochs.')
parser.add_argument('--kernels', default=8, type=int, help='Number of workers for pytorch dataloader.')
parser.add_argument('--bs', default=112 , type=int, help='Mini-Batchsize to use.')
parser.add_argument('--samples_per_class', default=4, type=int, help='Number of samples in one class drawn before choosing the next class')
parser.add_argument('--seed', default=1, type=int, help='Random seed for reproducibility.')
parser.add_argument('--scheduler', default='step', type=str, help='Type of learning rate scheduling. Currently: step & exp.')
parser.add_argument('--gamma', default=0.3, type=float, help='Learning rate reduction after tau epochs.')
parser.add_argument('--decay', default=0.0004, type=float, help='Weight decay for optimizer.')
parser.add_argument('--tau', default= [200,300,300,120,220,250,280],nargs='+',type=int,help='Stepsize(s) before reducing learning rate.')
parser.add_argument('--infrequent_eval', default=0,type=int, help='only compute evaluation metrics every 10 epochs')
parser.add_argument('--opt', default = 'adam',help='adam or sgd')
##### Loss-specific Settings
parser.add_argument('--loss', default='smoothap', type=str)
parser.add_argument('--sigmoid_temperature', default=0.01, type=float, help='SmoothAP: the temperature of the sigmoid used in SmoothAP loss')
##### Evaluation Settings
parser.add_argument('--k_vals', nargs='+', default=[1,2,4,8], type=int, help='Recall @ Values.')
parser.add_argument('--resume', default='', type=str, help='path to checkpoint to load weights from (if empty then ImageNet pre-trained weights are loaded)')
##### Network parameters
parser.add_argument('--embed_dim', default=512, type=int, help='Embedding dimensionality of the network')
parser.add_argument('--arch', default='resnet50', type=str, help='Network backend choice: resnet50, googlenet, BNinception')
parser.add_argument('--grad_measure', action='store_true', help='If added, gradients passed from embedding layer to the last conv-layer are stored in each iteration.')
parser.add_argument('--dist_measure', action='store_true', help='If added, the ratio between intra- and interclass distances is stored after each epoch.')
parser.add_argument('--not_pretrained', action='store_true', help='If added, the network will be trained WITHOUT ImageNet-pretrained weights.')
##### Setup Parameters
parser.add_argument('--gpu', default=0, type=int, help='GPU-id for GPU to use.')
parser.add_argument('--savename', default='', type=str, help='Save folder name if any special information is to be included.')
### Paths to datasets and storage folder
parser.add_argument('--source_path', default='/scratch/shared/beegfs/abrown/datasets', type=str, help='Path to data')
parser.add_argument('--save_path', default=os.getcwd()+'/Training_Results', type=str, help='Where to save the checkpoints')
opt = parser.parse_args()
"""============================================================================"""
opt.source_path += '/'+opt.dataset
opt.save_path += '/'+opt.dataset
if opt.dataset == 'Inaturalist':
    opt.n_epochs = 90
    opt.tau = [40, 70]
    opt.k_vals = [1, 4, 16, 32]
if opt.dataset == 'vehicle_id':
    opt.k_vals = [1, 5]
"""==========================================================================="""
################### TensorBoard Settings ##################
timestamp = datetime.datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S")
exp_name = aux.args2exp_name(opt)
opt.save_name = f"weights_{exp_name}" +'/'+ timestamp
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
tensorboard_path = Path(f"logs/logs_{exp_name}") / timestamp
tensorboard_path.parent.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(tensorboard_path)
"""============================================================================"""
################### GPU SETTINGS ###########################
os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= str(opt.gpu)
"""============================================================================"""
##################### NETWORK SETUP ##################
opt.device = torch.device('cuda')
model = netlib.networkselect(opt)
#Push to Device
_ = model.to(opt.device)
#Place trainable parameter in list of parameters to train:
if 'fc_lr_mul' in vars(opt).keys() and opt.fc_lr_mul!=0:
all_but_fc_params = list(filter(lambda x: 'last_linear' not in x[0],model.named_parameters()))
for ind, param in enumerate(all_but_fc_params):
all_but_fc_params[ind] = param[1]
fc_params = model.model.last_linear.parameters()
to_optim = [{'params':all_but_fc_params,'lr':opt.lr,'weight_decay':opt.decay},
{'params':fc_params,'lr':opt.lr*opt.fc_lr_mul,'weight_decay':opt.decay}]
else:
to_optim = [{'params':model.parameters(),'lr':opt.lr,'weight_decay':opt.decay}]
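# Example: with the defaults --lr 1e-5 and --fc_lr_mul 5, the backbone trains
# at 1e-5 while the last_linear embedding layer trains at 5e-5.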
"""============================================================================"""
#################### DATALOADER SETUPS ##################
#Returns a dictionary containing 'training', 'testing', and 'evaluation' dataloaders.
#The 'testing' dataloader corresponds to the validation set, and the 'evaluation' dataloader
#simply uses the training set, but runs under the same rules as the 'testing' dataloader,
#i.e. no shuffling and no random cropping.
dataloaders = data.give_dataloaders(opt.dataset, opt)
#Because the number of supervised classes is dataset dependent, we store them after
#initializing the dataloader
opt.num_classes = len(dataloaders['training'].dataset.avail_classes)
"""============================================================================"""
#################### CREATE LOGGING FILES ###############
#Each dataset usually has a set of standard metrics to log. aux.metrics_to_examine()
#returns a dict which lists metrics to log for training ('train') and validation/testing ('val')
metrics_to_log = aux.metrics_to_examine(opt.dataset, opt.k_vals)
# example output: {'train': ['Epochs', 'Time', 'Train Loss', 'Time'],
# 'val': ['Epochs','Time','NMI','F1', 'Recall @ 1','Recall @ 2','Recall @ 4','Recall @ 8']}
#Using the provided metrics of interest, we generate a LOGGER instance.
#Note that 'start_new' denotes that a new folder should be made in which everything will be stored.
#This includes network weights as well.
LOG = aux.LOGGER(opt, metrics_to_log, name='Base', start_new=True)
#If graphviz is installed on the system, a computational graph of the underlying
#network will be made as well.
"""============================================================================"""
#################### LOSS SETUP ####################
#Depending on opt.loss and opt.sampling, the respective criterion is returned,
#and if the loss has trainable parameters, to_optim is appended.
criterion, to_optim = losses.loss_select(opt.loss, opt, to_optim)
_ = criterion.to(opt.device)
"""============================================================================"""
##################### OPTIONAL EVALUATIONS #####################
#Store the averaged gradients returned from the embedding to the last conv. layer.
if opt.grad_measure:
grad_measure = eval.GradientMeasure(opt, name='baseline')
#Store the relative distances between average intra- and inter-class distance.
if opt.dist_measure:
#Add a distance measure for training distance ratios
distance_measure = eval.DistanceMeasure(dataloaders['evaluation'], opt, name='Train', update_epochs=1)
# #If uncommented: Do the same for the test set
# distance_measure_test = eval.DistanceMeasure(dataloaders['testing'], opt, name='Train', update_epochs=1)
"""============================================================================"""
#################### OPTIM SETUP ####################
#As optimizer, Adam with standard parameters is used.
if opt.opt == 'adam':
optimizer = torch.optim.Adam(to_optim)
elif opt.opt == 'sgd':
optimizer = torch.optim.SGD(to_optim)
else:
raise Exception('unknown optimiser')
# for the SOA measures in the paper - need to use SGD and 0.05 learning rate
#optimizer = torch.optim.Adam(to_optim)
#optimizer = torch.optim.SGD(to_optim)
if opt.scheduler=='exp':
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=opt.gamma)
elif opt.scheduler=='step':
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.tau, gamma=opt.gamma)
elif opt.scheduler=='none':
print('No scheduling used!')
else:
raise Exception('No scheduling option for input: {}'.format(opt.scheduler))
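# Example: '--scheduler step --tau 40 70 --gamma 0.3' multiplies the learning
# rate by 0.3 at epochs 40 and 70 (the Inaturalist settings applied above).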
def same_model(model1,model2):
for p1, p2 in zip(model1.parameters(), model2.parameters()):
if p1.data.ne(p2.data).sum() > 0:
return False
return True
"""============================================================================"""
#################### TRAINER FUNCTION ############################
def train_one_epoch(train_dataloader, model, optimizer, criterion, opt, epoch):
"""
This function is called every epoch to perform training of the network over one full
(randomized) iteration of the dataset.
Args:
train_dataloader: torch.utils.data.DataLoader, returns (augmented) training data.
model: Network to train.
optimizer: Optimizer to use for training.
criterion: criterion to use during training.
opt: argparse.Namespace, Contains all relevant parameters.
epoch: int, Current epoch.
Returns:
Nothing!
"""
loss_collect = []
start = time.time()
data_iterator = tqdm(train_dataloader, desc='Epoch {} Training...'.format(epoch))
for i,(class_labels, input) in enumerate(data_iterator):
#Compute embeddings for input batch
features = model(input.to(opt.device))
#Compute loss.
loss = criterion(features)
#Ensure gradients are set to zero at beginning
optimizer.zero_grad()
#Compute gradient
loss.backward()
train_dataloader.dataset.classes_visited = []
if opt.grad_measure:
#If desired, save computed gradients.
grad_measure.include(model.model.last_linear)
#Update weights using comp. gradients.
optimizer.step()
#Store loss per iteration.
loss_collect.append(loss.item())
if i==len(train_dataloader)-1:
data_iterator.set_description('Epoch (Train) {0}: Mean Loss [{1:.4f}]'.format(epoch, np.mean(loss_collect)))
#Save metrics
LOG.log('train', LOG.metrics_to_log['train'], [epoch, np.round(time.time()-start,4), np.mean(loss_collect)])
writer.add_scalar('global/training_loss',np.mean(loss_collect),epoch)
if opt.grad_measure:
#Dump stored gradients to Pickle-File.
grad_measure.dump(epoch)
"""============================================================================"""
"""========================== MAIN TRAINING PART =============================="""
"""============================================================================"""
################### SCRIPT MAIN ##########################
print('\n-----\n')
# Each dataset requires slightly different dataloaders.
if opt.dataset == 'Inaturalist':
eval_params = {'dataloader': dataloaders['testing'], 'model': model, 'opt': opt, 'epoch': 0}
elif opt.dataset == 'vehicle_id':
eval_params = {
'dataloaders': [dataloaders['testing_set1'], dataloaders['testing_set2'], dataloaders['testing_set3']],
'model': model, 'opt': opt, 'epoch': 0}
# Compute Evaluation metrics, print them and store in LOG.
print('epochs -> '+str(opt.n_epochs))
import time  # must precede the loop below, since train_one_epoch() calls time.time()
for epoch in range(opt.n_epochs):
### Print current learning rates for all parameters
if opt.scheduler!='none': print('Running with learning rates {}...'.format(' | '.join('{}'.format(x) for x in scheduler.get_lr())))
### Train one epoch
_ = model.train()
train_one_epoch(dataloaders['training'], model, optimizer, criterion, opt, epoch)
dataloaders['training'].dataset.reshuffle()
### Evaluate
_ = model.eval()
#Each dataset requires slightly different dataloaders.
if opt.dataset == 'Inaturalist':
eval_params = {'dataloader':dataloaders['testing'], 'model':model, 'opt':opt, 'epoch':epoch}
elif opt.dataset=='vehicle_id':
eval_params = {'dataloaders':[dataloaders['testing_set1'], dataloaders['testing_set2'], dataloaders['testing_set3']], 'model':model, 'opt':opt, 'epoch':epoch}
#Compute Evaluation metrics, print them and store in LOG.
if opt.infrequent_eval == 1:
epoch_freq = 10
else:
epoch_freq = 1
if not opt.dataset == 'vehicle_id':
if epoch%epoch_freq == 0:
results = eval.evaluate(opt.dataset, LOG, save=True, **eval_params)
writer.add_scalar('global/recall1',results[0][0],epoch+1)
writer.add_scalar('global/recall2',results[0][1],epoch+1)
writer.add_scalar('global/recall3',results[0][2],epoch+1)
writer.add_scalar('global/recall4',results[0][3],epoch+1)
writer.add_scalar('global/NMI',results[1],epoch+1)
writer.add_scalar('global/F1',results[2],epoch+1)
else:
results = eval.evaluate(opt.dataset, LOG, save=True, **eval_params)
writer.add_scalar('global/recall1',results[2],epoch+1)
        writer.add_scalar('global/recall2',results[3],epoch+1)
writer.add_scalar('global/recall3',results[6],epoch+1)
writer.add_scalar('global/recall4',results[7],epoch+1)
writer.add_scalar('global/recall5',results[10],epoch+1)
writer.add_scalar('global/recall6',results[11],epoch+1)
#Update the Metric Plot and save it.
#LOG.update_info_plot()
#(optional) compute ratio of intra- to interdistances.
if opt.dist_measure:
distance_measure.measure(model, epoch)
# distance_measure_test.measure(model, epoch)
### Learning Rate Scheduling Step
if opt.scheduler != 'none':
scheduler.step()
print('\n-----\n')
|
[
"evaluate.DistanceMeasure",
"evaluate.evaluate",
"evaluate.GradientMeasure"
] |
[((399, 432), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (422, 432), False, 'import warnings\n'), ((576, 597), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (590, 597), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((935, 960), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (958, 960), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((5271, 5293), 'auxiliaries.args2exp_name', 'aux.args2exp_name', (['opt'], {}), '(opt)\n', (5288, 5293), True, 'import auxiliaries as aux\n'), ((5348, 5369), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (5359, 5369), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((5370, 5394), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (5384, 5394), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((5639, 5670), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['tensorboard_path'], {}), '(tensorboard_path)\n', (5652, 5670), False, 'from tensorboardX import SummaryWriter\n'), ((6099, 6124), 'netlib.networkselect', 'netlib.networkselect', (['opt'], {}), '(opt)\n', (6119, 6124), True, 'import netlib as netlib\n'), ((7312, 7351), 'datasets.give_dataloaders', 'data.give_dataloaders', (['opt.dataset', 'opt'], {}), '(opt.dataset, opt)\n', (7333, 7351), True, 'import datasets as data\n'), ((7877, 7924), 'auxiliaries.metrics_to_examine', 'aux.metrics_to_examine', (['opt.dataset', 'opt.k_vals'], {}), '(opt.dataset, opt.k_vals)\n', (7899, 7924), True, 'import auxiliaries as aux\n'), ((8323, 8383), 'auxiliaries.LOGGER', 'aux.LOGGER', (['opt', 'metrics_to_log'], {'name': '"""Base"""', 'start_new': '(True)'}), "(opt, metrics_to_log, name='Base', start_new=True)\n", (8333, 8383), True, 'import auxiliaries as aux\n'), ((8799, 8842), 'losses.loss_select', 'losses.loss_select', (['opt.loss', 'opt', 'to_optim'], {}), '(opt.loss, opt, to_optim)\n', (8817, 8842), True, 'import losses as losses\n'), ((5513, 5542), 'pathlib.Path', 'Path', (['f"""logs/logs_{exp_name}"""'], {}), "(f'logs/logs_{exp_name}')\n", (5517, 5542), False, 'from pathlib import Path\n'), ((9144, 9186), 'evaluate.GradientMeasure', 'eval.GradientMeasure', (['opt'], {'name': '"""baseline"""'}), "(opt, name='baseline')\n", (9164, 9186), True, 'import evaluate as eval\n'), ((9367, 9454), 'evaluate.DistanceMeasure', 'eval.DistanceMeasure', (["dataloaders['evaluation']", 'opt'], {'name': '"""Train"""', 'update_epochs': '(1)'}), "(dataloaders['evaluation'], opt, name='Train',\n update_epochs=1)\n", (9387, 9454), True, 'import evaluate as eval\n'), ((11556, 11567), 'time.time', 'time.time', ([], {}), '()\n', (11565, 11567), False, 'import time\n'), ((522, 548), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (538, 548), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((5205, 5228), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5226, 5228), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((12689, 12710), 'numpy.mean', 'np.mean', (['loss_collect'], {}), '(loss_collect)\n', (12696, 12710), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((15264, 15321), 'evaluate.evaluate', 'eval.evaluate', (['opt.dataset', 'LOG'], {'save': '(True)'}), '(opt.dataset, LOG, save=True, **eval_params)\n', (15277, 15321), True, 'import evaluate as eval\n'), ((4629, 
4640), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4638, 4640), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((12620, 12641), 'numpy.mean', 'np.mean', (['loss_collect'], {}), '(loss_collect)\n', (12627, 12641), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((14772, 14829), 'evaluate.evaluate', 'eval.evaluate', (['opt.dataset', 'LOG'], {'save': '(True)'}), '(opt.dataset, LOG, save=True, **eval_params)\n', (14785, 14829), True, 'import evaluate as eval\n'), ((12488, 12509), 'numpy.mean', 'np.mean', (['loss_collect'], {}), '(loss_collect)\n', (12495, 12509), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((12598, 12609), 'time.time', 'time.time', ([], {}), '()\n', (12607, 12609), False, 'import time\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 14:33:25 2018
@author: iswariya
"""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from dataloader import SoccerDataset, get_dataloader
from evaluate import predict
from metric import plot_confusion_matrix
from model import UNet
from train import fit, train
from utils import load_sample_image, CyclicLearningRate, lr_finder_plot, plot_mask
if __name__ == '__main__':
cuda0 = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(cuda0)
print(torch.cuda.is_available())
PATH = '/opt/datasets/Soccer_dataset/Train_1/'
image_folder = 'JPEG_images'
mask_folder = 'Annotations'
BATCH_SIZE = 8
full_resolution = (640, 480)
downsampled_resolution = (320, 160)
soccer_dataset_low_resolution = SoccerDataset(PATH, image_folder,
mask_folder, downsampled_resolution,
transform=['Horizontal_Flip', 'Brightness_adjust'])
soccer_dataset_full_resolution = SoccerDataset(PATH, image_folder,
mask_folder, full_resolution,
transform=['Horizontal_Flip', 'Brightness_adjust'])
train_loader_low_resolution, val_loader_low_resolution = get_dataloader(soccer_dataset_low_resolution, BATCH_SIZE)
train_loader_full_resolution, val_loader_full_resolution = get_dataloader(soccer_dataset_full_resolution, BATCH_SIZE)
load_sample_image(train_loader_low_resolution)
resnet_layers = models.resnet18(pretrained=True)
for param in resnet_layers.parameters():
param.requires_grad = False
model = UNet(resnet_layers)
model = nn.DataParallel(model)
model = model.to(cuda0)
params = list(model.parameters())[61:]
learning_rate = 0.001
optimizer = optim.Adam(params, lr=learning_rate)
criterion = nn.CrossEntropyLoss().to(cuda0)
    cyclic_lr = CyclicLearningRate(train_loader_low_resolution, 0.1, 1e-5)  # renamed to avoid shadowing the float learning_rate above
    learning_rate_array = cyclic_lr.get_learning_rate()
_, batch_loss, _ = fit(model, criterion, optimizer, parameters=params,
dataloader=train_loader_low_resolution,
phase='Training', lr_range_val=learning_rate_array,
lr_finder=True, device=cuda0)
lr_finder_plot(learning_rate_array, batch_loss)
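    # Note (added): the fit(..., lr_finder=True) pass above sweeps the learning-rate
    # range (1e-5 to 0.1) produced by CyclicLearningRate over one epoch, and
    # lr_finder_plot then plots loss against learning rate so a good rate can be
    # read off before the real training run below.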
model_checkpoint = train(train_loader_low_resolution,
train_loader_full_resolution,
val_loader_low_resolution,
val_loader_full_resolution,
model, criterion, params, cuda0)
model.load_state_dict(model_checkpoint)
torch.save(model.state_dict(), "Unet.th")
prediction, label, iou, cm = predict(model, val_loader_full_resolution,
criterion, device=cuda0,
batchsize=BATCH_SIZE)
cmap = np.array([[0, 0, 0],
[245, 130, 48],
[0, 130, 200],
[60, 180, 75]], dtype=np.uint8)
x = np.array(prediction[9], dtype=np.uint8)
plot_mask(prediction[9:15], label[9:15], cmap)
    classes = ('Background', 'Ball', 'Field Lines', 'Field')
plot_confusion_matrix(cm.T, classes)
|
[
"evaluate.predict"
] |
[((890, 1016), 'dataloader.SoccerDataset', 'SoccerDataset', (['PATH', 'image_folder', 'mask_folder', 'downsampled_resolution'], {'transform': "['Horizontal_Flip', 'Brightness_adjust']"}), "(PATH, image_folder, mask_folder, downsampled_resolution,\n transform=['Horizontal_Flip', 'Brightness_adjust'])\n", (903, 1016), False, 'from dataloader import SoccerDataset, get_dataloader\n'), ((1150, 1270), 'dataloader.SoccerDataset', 'SoccerDataset', (['PATH', 'image_folder', 'mask_folder', 'full_resolution'], {'transform': "['Horizontal_Flip', 'Brightness_adjust']"}), "(PATH, image_folder, mask_folder, full_resolution, transform=[\n 'Horizontal_Flip', 'Brightness_adjust'])\n", (1163, 1270), False, 'from dataloader import SoccerDataset, get_dataloader\n'), ((1430, 1487), 'dataloader.get_dataloader', 'get_dataloader', (['soccer_dataset_low_resolution', 'BATCH_SIZE'], {}), '(soccer_dataset_low_resolution, BATCH_SIZE)\n', (1444, 1487), False, 'from dataloader import SoccerDataset, get_dataloader\n'), ((1551, 1609), 'dataloader.get_dataloader', 'get_dataloader', (['soccer_dataset_full_resolution', 'BATCH_SIZE'], {}), '(soccer_dataset_full_resolution, BATCH_SIZE)\n', (1565, 1609), False, 'from dataloader import SoccerDataset, get_dataloader\n'), ((1615, 1661), 'utils.load_sample_image', 'load_sample_image', (['train_loader_low_resolution'], {}), '(train_loader_low_resolution)\n', (1632, 1661), False, 'from utils import load_sample_image, CyclicLearningRate, lr_finder_plot, plot_mask\n'), ((1683, 1715), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1698, 1715), True, 'import torchvision.models as models\n'), ((1811, 1830), 'model.UNet', 'UNet', (['resnet_layers'], {}), '(resnet_layers)\n', (1815, 1830), False, 'from model import UNet\n'), ((1843, 1865), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (1858, 1865), True, 'import torch.nn as nn\n'), ((1981, 2017), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'learning_rate'}), '(params, lr=learning_rate)\n', (1991, 2017), True, 'import torch.optim as optim\n'), ((2087, 2146), 'utils.CyclicLearningRate', 'CyclicLearningRate', (['train_loader_low_resolution', '(0.1)', '(1e-05)'], {}), '(train_loader_low_resolution, 0.1, 1e-05)\n', (2105, 2146), False, 'from utils import load_sample_image, CyclicLearningRate, lr_finder_plot, plot_mask\n'), ((2230, 2413), 'train.fit', 'fit', (['model', 'criterion', 'optimizer'], {'parameters': 'params', 'dataloader': 'train_loader_low_resolution', 'phase': '"""Training"""', 'lr_range_val': 'learning_rate_array', 'lr_finder': '(True)', 'device': 'cuda0'}), "(model, criterion, optimizer, parameters=params, dataloader=\n train_loader_low_resolution, phase='Training', lr_range_val=\n learning_rate_array, lr_finder=True, device=cuda0)\n", (2233, 2413), False, 'from train import fit, train\n'), ((2489, 2536), 'utils.lr_finder_plot', 'lr_finder_plot', (['learning_rate_array', 'batch_loss'], {}), '(learning_rate_array, batch_loss)\n', (2503, 2536), False, 'from utils import load_sample_image, CyclicLearningRate, lr_finder_plot, plot_mask\n'), ((2561, 2721), 'train.train', 'train', (['train_loader_low_resolution', 'train_loader_full_resolution', 'val_loader_low_resolution', 'val_loader_full_resolution', 'model', 'criterion', 'params', 'cuda0'], {}), '(train_loader_low_resolution, train_loader_full_resolution,\n val_loader_low_resolution, val_loader_full_resolution, model, criterion,\n params, cuda0)\n', (2566, 2721), False, 'from train import 
fit, train\n'), ((2954, 3047), 'evaluate.predict', 'predict', (['model', 'val_loader_full_resolution', 'criterion'], {'device': 'cuda0', 'batchsize': 'BATCH_SIZE'}), '(model, val_loader_full_resolution, criterion, device=cuda0,\n batchsize=BATCH_SIZE)\n', (2961, 3047), False, 'from evaluate import predict\n'), ((3137, 3225), 'numpy.array', 'np.array', (['[[0, 0, 0], [245, 130, 48], [0, 130, 200], [60, 180, 75]]'], {'dtype': 'np.uint8'}), '([[0, 0, 0], [245, 130, 48], [0, 130, 200], [60, 180, 75]], dtype=\n np.uint8)\n', (3145, 3225), True, 'import numpy as np\n'), ((3293, 3332), 'numpy.array', 'np.array', (['prediction[9]'], {'dtype': 'np.uint8'}), '(prediction[9], dtype=np.uint8)\n', (3301, 3332), True, 'import numpy as np\n'), ((3337, 3383), 'utils.plot_mask', 'plot_mask', (['prediction[9:15]', 'label[9:15]', 'cmap'], {}), '(prediction[9:15], label[9:15], cmap)\n', (3346, 3383), False, 'from utils import load_sample_image, CyclicLearningRate, lr_finder_plot, plot_mask\n'), ((3448, 3484), 'metric.plot_confusion_matrix', 'plot_confusion_matrix', (['cm.T', 'classes'], {}), '(cm.T, classes)\n', (3469, 3484), False, 'from metric import plot_confusion_matrix\n'), ((617, 642), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (640, 642), False, 'import torch\n'), ((552, 577), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (575, 577), False, 'import torch\n'), ((2034, 2055), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2053, 2055), True, 'import torch.nn as nn\n')]
|
from argparse import ArgumentParser
from pathlib import Path
import time
import os
import gin
import numpy as np
import torch
import torch.nn as nn
import torch.optim.lr_scheduler as lr_scheduler
from torch.nn import CrossEntropyLoss
from datasets import get_dataset
from models.gan import get_architecture
from torch.utils.data import DataLoader
from models.gan.base import LinearWrapper
from evaluate import AverageMeter
from evaluate.classifier import accuracy
from evaluate.classifier import test_classifier
from utils import init_logfile, fwrite
# import for gin binding
import augment
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def parse_args():
parser = ArgumentParser(description='Testing script: Linear evaluation')
parser.add_argument('model_path', type=str, help='Path to the (discriminator) model checkpoint')
parser.add_argument('architecture', type=str, help='Architecture')
parser.add_argument('--n_classes', type=int, default=10,
help='Number of classes (default: 10)')
parser.add_argument('--batch_size', default=256, type=int,
help='Batch size (default: 256)')
return parser.parse_args()
@gin.configurable("options")
def get_options_dict(dataset=gin.REQUIRED,
loss=gin.REQUIRED,
batch_size=64, fid_size=10000,
max_steps=200000, warmup=0, n_critic=1,
lr=2e-4, lr_d=None, beta=(.5, .999),
lbd=10., lbd2=10.):
if lr_d is None:
lr_d = lr
return {
"dataset": dataset,
"batch_size": batch_size,
"fid_size": fid_size,
"loss": loss,
"max_steps": max_steps, "warmup": warmup,
"n_critic": n_critic,
"lr": lr, "lr_d": lr_d, "beta": beta,
"lbd": lbd, "lbd2": lbd2
}
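# Hedged illustration (added): because of @gin.configurable("options") above, a
# .gin file can bind these keyword arguments by name, for example:
#   options.dataset = 'cifar10'
#   options.batch_size = 128
# (the binding values here are assumed placeholders, not taken from this repo)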
def train(epoch, loader, model, optimizer, criterion):
batch_time = AverageMeter()
data_time = AverageMeter()
train_loss = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
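    # Note (added, inferred from usage): AverageMeter from this repo's evaluate
    # module accumulates a running mean via .update(value, n) and exposes it as
    # .average, which the progress printout below reads.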
    # keep the frozen backbone in eval mode; only the linear head is trained
    model.eval()
end = time.time()
for i, (inputs, targets) in enumerate(loader):
# measure data loading time
inputs, targets = inputs.to(device), targets.to(device)
data_time.update(time.time() - end)
batch_size = inputs.size(0)
with torch.no_grad():
_, aux = model(inputs, penultimate=True)
penultimate = aux['penultimate'].detach()
outputs = model.linear(penultimate)
loss = criterion(outputs, targets)
# measure accuracy and record loss
acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
train_loss.update(loss.item(), batch_size)
top1.update(acc1.item(), batch_size)
top5.update(acc5.item(), batch_size)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 50 == 0:
print('Epoch {0}: [{1}/{2}]\t'
'Time {batch_time.average:.3f}\t'
'Data {data_time.average:.3f}\t'
'Loss {train_loss.average:.4f}\t'
'Acc@1 {top1.average:.3f}\t'
'Acc@5 {top5.average:.3f}'.format(
epoch, i, len(loader), batch_time=batch_time,
data_time=data_time, train_loss=train_loss, top1=top1, top5=top5))
return {
'loss': train_loss.average,
'time/batch': batch_time.average,
'acc@1': top1.average,
'acc@5': top5.average
}
if __name__ == '__main__':
P = parse_args()
logdir = Path(P.model_path).parent
gin_config = sorted(logdir.glob("*.gin"))[0]
gin.parse_config_files_and_bindings(['configs/defaults/gan.gin',
'configs/defaults/augment.gin',
gin_config], [])
options = get_options_dict()
if options['dataset'] in ['cifar10', 'cifar10_hflip']:
dataset = "cifar10_lin"
elif options['dataset'] in ['cifar100', 'cifar100_hflip']:
dataset = "cifar100_lin"
else:
raise NotImplementedError()
train_set, test_set, image_size = get_dataset(dataset=dataset)
pin_memory = ("imagenet" in options["dataset"])
train_loader = DataLoader(train_set, shuffle=True, batch_size=P.batch_size,
pin_memory=pin_memory)
test_loader = DataLoader(test_set, shuffle=False, batch_size=P.batch_size,
pin_memory=pin_memory)
_, model = get_architecture(P.architecture, image_size)
checkpoint = torch.load(P.model_path)
model.load_state_dict(checkpoint)
model.eval()
model.linear = LinearWrapper(model.d_penul, P.n_classes)
model.to(device)
optimizer = torch.optim.SGD(model.linear.parameters(), lr=0.1)
scheduler = lr_scheduler.MultiStepLR(optimizer, gamma=0.1, milestones=[60, 75, 90])
criterion = CrossEntropyLoss().to(device)
seed = np.random.randint(10000)
logfilename = os.path.join(logdir, f'lin_eval_{seed}.csv')
save_path = os.path.join(logdir, f'lin_eval_{seed}.pth.tar')
init_logfile(logfilename, "epoch,time,lr,train loss,train acc,test loss,test acc")
for epoch in range(100):
print("Epoch {}".format(epoch))
before = time.time()
train_out = train(epoch, train_loader, model, optimizer, criterion)
test_out = test_classifier(model, test_loader, ["loss", "error@1"])
after = time.time()
epoch_time = after - before
fwrite(logfilename, "{},{:.8},{:.4},{:.4},{:.4},{:.4},{:.4}".format(
epoch, epoch_time, scheduler.get_lr()[0],
train_out['loss'], train_out['acc@1'],
test_out['loss'], 100 - test_out['error@1']))
print(' * [Loss %.3f] [Err@1 %.3f]' % (test_out['loss'], test_out['error@1']))
# In PyTorch 1.1.0 and later, you should call `optimizer.step()` before `lr_scheduler.step()`.
# See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
scheduler.step()
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
}, save_path)
|
[
"evaluate.classifier.test_classifier",
"evaluate.classifier.accuracy",
"evaluate.AverageMeter"
] |
[((1217, 1244), 'gin.configurable', 'gin.configurable', (['"""options"""'], {}), "('options')\n", (1233, 1244), False, 'import gin\n'), ((699, 762), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Testing script: Linear evaluation"""'}), "(description='Testing script: Linear evaluation')\n", (713, 762), False, 'from argparse import ArgumentParser\n'), ((1945, 1959), 'evaluate.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1957, 1959), False, 'from evaluate import AverageMeter\n'), ((1976, 1990), 'evaluate.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1988, 1990), False, 'from evaluate import AverageMeter\n'), ((2008, 2022), 'evaluate.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2020, 2022), False, 'from evaluate import AverageMeter\n'), ((2034, 2048), 'evaluate.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2046, 2048), False, 'from evaluate import AverageMeter\n'), ((2060, 2074), 'evaluate.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2072, 2074), False, 'from evaluate import AverageMeter\n'), ((2130, 2141), 'time.time', 'time.time', ([], {}), '()\n', (2139, 2141), False, 'import time\n'), ((3874, 3991), 'gin.parse_config_files_and_bindings', 'gin.parse_config_files_and_bindings', (["['configs/defaults/gan.gin', 'configs/defaults/augment.gin', gin_config]", '[]'], {}), "(['configs/defaults/gan.gin',\n 'configs/defaults/augment.gin', gin_config], [])\n", (3909, 3991), False, 'import gin\n'), ((4376, 4404), 'datasets.get_dataset', 'get_dataset', ([], {'dataset': 'dataset'}), '(dataset=dataset)\n', (4387, 4404), False, 'from datasets import get_dataset\n'), ((4476, 4564), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'shuffle': '(True)', 'batch_size': 'P.batch_size', 'pin_memory': 'pin_memory'}), '(train_set, shuffle=True, batch_size=P.batch_size, pin_memory=\n pin_memory)\n', (4486, 4564), False, 'from torch.utils.data import DataLoader\n'), ((4608, 4696), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'shuffle': '(False)', 'batch_size': 'P.batch_size', 'pin_memory': 'pin_memory'}), '(test_set, shuffle=False, batch_size=P.batch_size, pin_memory=\n pin_memory)\n', (4618, 4696), False, 'from torch.utils.data import DataLoader\n'), ((4737, 4781), 'models.gan.get_architecture', 'get_architecture', (['P.architecture', 'image_size'], {}), '(P.architecture, image_size)\n', (4753, 4781), False, 'from models.gan import get_architecture\n'), ((4799, 4823), 'torch.load', 'torch.load', (['P.model_path'], {}), '(P.model_path)\n', (4809, 4823), False, 'import torch\n'), ((4899, 4940), 'models.gan.base.LinearWrapper', 'LinearWrapper', (['model.d_penul', 'P.n_classes'], {}), '(model.d_penul, P.n_classes)\n', (4912, 4940), False, 'from models.gan.base import LinearWrapper\n'), ((5046, 5117), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['optimizer'], {'gamma': '(0.1)', 'milestones': '[60, 75, 90]'}), '(optimizer, gamma=0.1, milestones=[60, 75, 90])\n', (5070, 5117), True, 'import torch.optim.lr_scheduler as lr_scheduler\n'), ((5176, 5200), 'numpy.random.randint', 'np.random.randint', (['(10000)'], {}), '(10000)\n', (5193, 5200), True, 'import numpy as np\n'), ((5219, 5263), 'os.path.join', 'os.path.join', (['logdir', 'f"""lin_eval_{seed}.csv"""'], {}), "(logdir, f'lin_eval_{seed}.csv')\n", (5231, 5263), False, 'import os\n'), ((5280, 5328), 'os.path.join', 'os.path.join', (['logdir', 'f"""lin_eval_{seed}.pth.tar"""'], {}), "(logdir, f'lin_eval_{seed}.pth.tar')\n", (5292, 5328), False, 'import os\n'), ((5333, 
5419), 'utils.init_logfile', 'init_logfile', (['logfilename', '"""epoch,time,lr,train loss,train acc,test loss,test acc"""'], {}), "(logfilename,\n 'epoch,time,lr,train loss,train acc,test loss,test acc')\n", (5345, 5419), False, 'from utils import init_logfile, fwrite\n'), ((628, 653), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (651, 653), False, 'import torch\n'), ((2659, 2698), 'evaluate.classifier.accuracy', 'accuracy', (['outputs', 'targets'], {'topk': '(1, 5)'}), '(outputs, targets, topk=(1, 5))\n', (2667, 2698), False, 'from evaluate.classifier import accuracy\n'), ((3054, 3065), 'time.time', 'time.time', ([], {}), '()\n', (3063, 3065), False, 'import time\n'), ((3795, 3813), 'pathlib.Path', 'Path', (['P.model_path'], {}), '(P.model_path)\n', (3799, 3813), False, 'from pathlib import Path\n'), ((5504, 5515), 'time.time', 'time.time', ([], {}), '()\n', (5513, 5515), False, 'import time\n'), ((5611, 5667), 'evaluate.classifier.test_classifier', 'test_classifier', (['model', 'test_loader', "['loss', 'error@1']"], {}), "(model, test_loader, ['loss', 'error@1'])\n", (5626, 5667), False, 'from evaluate.classifier import test_classifier\n'), ((5684, 5695), 'time.time', 'time.time', ([], {}), '()\n', (5693, 5695), False, 'import time\n'), ((2387, 2402), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2400, 2402), False, 'import torch\n'), ((5134, 5152), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (5150, 5152), False, 'from torch.nn import CrossEntropyLoss\n'), ((2318, 2329), 'time.time', 'time.time', ([], {}), '()\n', (2327, 2329), False, 'import time\n'), ((3021, 3032), 'time.time', 'time.time', ([], {}), '()\n', (3030, 3032), False, 'import time\n')]
|
import os
from generate_libfm_data import generate_libfm_data, tfidf2str
from evaluate import scoring
if not os.path.exists('news_tfidf.pkl') or not os.path.exists('user_tfidf.pkl'):
os.system('python generate_tf_idf_feature_file.py')
if not os.path.exists('train.libfm') or not os.path.exists('dev.libfm') or not os.path.exists('test.libfm'):
generate_libfm_data()
if not os.path.exists('./dev/res/libfm'):
    os.makedirs('./dev/res/libfm')  # makedirs: './dev' and './dev/res' may not exist yet
if not os.path.exists('./test'):
os.mkdir('./test')
if not os.path.exists('./test/ref'):
os.mkdir('./test/ref')
if not os.path.exists('./test/res'):
os.mkdir('./test/res')
if not os.path.exists('./test/res/libfm'):
os.mkdir('./test/res/libfm')
if not os.path.exists('./results'):
os.mkdir('./results')
if not os.path.exists('./results/libfm'):
os.mkdir('./results/libfm')
if not os.path.exists('./test/ref/truth.txt'):
with open('../../MIND/200000/test/behaviors.tsv', 'r', encoding='utf-8') as test_f:
with open('./test/ref/truth.txt', 'w', encoding='utf-8') as truth_f:
for test_ID, line in enumerate(test_f):
impression_ID, user_ID, time, history, impressions = line.split('\t')
labels = [int(impression[-1]) for impression in impressions.strip().split(' ')]
truth_f.write(('' if test_ID == 0 else '\n') + str(test_ID + 1) + ' ' + str(labels).replace(' ', ''))
def get_run_index():
max_index = 0
for result_file in os.listdir('./results/libfm'):
if result_file.strip()[0] == '#' and result_file.strip()[-5:] == '-test':
index = int(result_file.strip()[1:-5])
max_index = max(index, max_index)
with open('./results/libfm/#' + str(max_index + 1) + '-test', 'w', encoding='utf-8') as result_f:
pass
return max_index + 1
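# Worked example (added for clarity, file names assumed): if ./results/libfm
# already contains '#3-test' and '#7-test', get_run_index() returns 8 and
# creates an empty './results/libfm/#8-test' marker file.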
def write_result_file(probs, libfm_result_file):
k = 0
with open('../../MIND/200000/test/behaviors.tsv', 'r', encoding='utf-8') as behaviors_f:
with open(libfm_result_file, 'w', encoding='utf-8') as f:
for i, line in enumerate(behaviors_f):
impression_ID, user_ID, time, history, impressions = line.split('\t')
num = len(impressions.strip().split(' '))
scores = []
for j in range(num):
scores.append([probs[k], j])
k += 1
scores.sort(key=lambda x: x[0], reverse=True)
result = [0 for _ in range(num)]
for j in range(num):
result[scores[j][1]] = j + 1
f.write(('' if i == 0 else '\n') + str(i + 1) + ' ' + str(result).replace(' ', ''))
assert len(probs) == k, str(len(probs)) + ' - ' + str(k)
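# Worked example (added for clarity, values assumed): for one impression with
# probs [0.2, 0.9, 0.5], the candidates sort to order [1, 2, 0] by score, so
# the written ranks are [3,1,2] -- rank 1 goes to the highest-probability one.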
if __name__ == '__main__':
run_index = get_run_index()
os.mkdir('./test/res/libfm/%d' % run_index)
print('Running : libfm\t#' + str(run_index))
os.system('./libfm/bin/libFM -task r -train train.libfm -test test.libfm -out ./test/res/libfm/%d/libfm' % run_index)
probs = []
with open('./test/res/libfm/%d/libfm' % run_index, 'r', encoding='utf-8') as f:
for line in f:
if len(line.strip()) > 0:
probs.append(float(line.strip()))
write_result_file(probs, './test/res/libfm/%d/libfm.txt' % run_index)
with open('./test/ref/truth.txt', 'r', encoding='utf-8') as truth_f, open('./test/res/libfm/%d/libfm.txt' % run_index, 'r', encoding='utf-8') as res_f:
auc, mrr, ndcg, ndcg10 = scoring(truth_f, res_f)
print('AUC =', auc)
print('MRR =', mrr)
print('nDCG@5 =', ndcg)
print('nDCG@10 =', ndcg10)
with open('./results/libfm/#%d-test' % run_index, 'w', encoding='utf-8') as f:
f.write('#' + str(run_index) + '\t' + str(auc) + '\t' + str(mrr) + '\t' + str(ndcg) + '\t' + str(ndcg10) + '\n')
|
[
"evaluate.scoring"
] |
[((189, 240), 'os.system', 'os.system', (['"""python generate_tf_idf_feature_file.py"""'], {}), "('python generate_tf_idf_feature_file.py')\n", (198, 240), False, 'import os\n'), ((354, 375), 'generate_libfm_data.generate_libfm_data', 'generate_libfm_data', ([], {}), '()\n', (373, 375), False, 'from generate_libfm_data import generate_libfm_data, tfidf2str\n'), ((383, 416), 'os.path.exists', 'os.path.exists', (['"""./dev/res/libfm"""'], {}), "('./dev/res/libfm')\n", (397, 416), False, 'import os\n'), ((422, 449), 'os.mkdir', 'os.mkdir', (['"""./dev/res/libfm"""'], {}), "('./dev/res/libfm')\n", (430, 449), False, 'import os\n'), ((457, 481), 'os.path.exists', 'os.path.exists', (['"""./test"""'], {}), "('./test')\n", (471, 481), False, 'import os\n'), ((487, 505), 'os.mkdir', 'os.mkdir', (['"""./test"""'], {}), "('./test')\n", (495, 505), False, 'import os\n'), ((513, 541), 'os.path.exists', 'os.path.exists', (['"""./test/ref"""'], {}), "('./test/ref')\n", (527, 541), False, 'import os\n'), ((547, 569), 'os.mkdir', 'os.mkdir', (['"""./test/ref"""'], {}), "('./test/ref')\n", (555, 569), False, 'import os\n'), ((577, 605), 'os.path.exists', 'os.path.exists', (['"""./test/res"""'], {}), "('./test/res')\n", (591, 605), False, 'import os\n'), ((611, 633), 'os.mkdir', 'os.mkdir', (['"""./test/res"""'], {}), "('./test/res')\n", (619, 633), False, 'import os\n'), ((641, 675), 'os.path.exists', 'os.path.exists', (['"""./test/res/libfm"""'], {}), "('./test/res/libfm')\n", (655, 675), False, 'import os\n'), ((681, 709), 'os.mkdir', 'os.mkdir', (['"""./test/res/libfm"""'], {}), "('./test/res/libfm')\n", (689, 709), False, 'import os\n'), ((717, 744), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), "('./results')\n", (731, 744), False, 'import os\n'), ((750, 771), 'os.mkdir', 'os.mkdir', (['"""./results"""'], {}), "('./results')\n", (758, 771), False, 'import os\n'), ((779, 812), 'os.path.exists', 'os.path.exists', (['"""./results/libfm"""'], {}), "('./results/libfm')\n", (793, 812), False, 'import os\n'), ((818, 845), 'os.mkdir', 'os.mkdir', (['"""./results/libfm"""'], {}), "('./results/libfm')\n", (826, 845), False, 'import os\n'), ((853, 891), 'os.path.exists', 'os.path.exists', (['"""./test/ref/truth.txt"""'], {}), "('./test/ref/truth.txt')\n", (867, 891), False, 'import os\n'), ((1474, 1503), 'os.listdir', 'os.listdir', (['"""./results/libfm"""'], {}), "('./results/libfm')\n", (1484, 1503), False, 'import os\n'), ((2802, 2845), 'os.mkdir', 'os.mkdir', (["('./test/res/libfm/%d' % run_index)"], {}), "('./test/res/libfm/%d' % run_index)\n", (2810, 2845), False, 'import os\n'), ((2899, 3026), 'os.system', 'os.system', (["('./libfm/bin/libFM -task r -train train.libfm -test test.libfm -out ./test/res/libfm/%d/libfm'\n % run_index)"], {}), "(\n './libfm/bin/libFM -task r -train train.libfm -test test.libfm -out ./test/res/libfm/%d/libfm'\n % run_index)\n", (2908, 3026), False, 'import os\n'), ((111, 143), 'os.path.exists', 'os.path.exists', (['"""news_tfidf.pkl"""'], {}), "('news_tfidf.pkl')\n", (125, 143), False, 'import os\n'), ((151, 183), 'os.path.exists', 'os.path.exists', (['"""user_tfidf.pkl"""'], {}), "('user_tfidf.pkl')\n", (165, 183), False, 'import os\n'), ((248, 277), 'os.path.exists', 'os.path.exists', (['"""train.libfm"""'], {}), "('train.libfm')\n", (262, 277), False, 'import os\n'), ((285, 312), 'os.path.exists', 'os.path.exists', (['"""dev.libfm"""'], {}), "('dev.libfm')\n", (299, 312), False, 'import os\n'), ((320, 348), 'os.path.exists', 'os.path.exists', 
(['"""test.libfm"""'], {}), "('test.libfm')\n", (334, 348), False, 'import os\n'), ((3490, 3513), 'evaluate.scoring', 'scoring', (['truth_f', 'res_f'], {}), '(truth_f, res_f)\n', (3497, 3513), False, 'from evaluate import scoring\n')]
|
'''
Usage:
quote_detection.py <model-name> -t <corpus-type> -c <corpus-path> -e <embedding-path>
Options:
-t Corpus type (either parc, rwg, or stop)
-c Path to corpus
-e Path to embeddings file
'''
import random
from docopt import docopt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parameter import Parameter
import numpy as np
from evaluate import evaluate, report
from progressify import progressify
try:
    # ConditionalRandomField is used below but never defined in this file; the
    # AllenNLP implementation is an assumed match (only needed when use_crf=True).
    from allennlp.modules.conditional_random_field import ConditionalRandomField
except ImportError:
    ConditionalRandomField = None
class LSTMSeq2Seq(nn.Module):
def __init__(self, embedding_dim, hidden_dim, n_labels, layers=2, bidirectional=True):
super().__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.n_labels = n_labels
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=layers, bidirectional=bidirectional, dropout=0.5, batch_first=True)
self.linear = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, n_labels)
def forward(self, x):
x = self.lstm(x)[0]
if isinstance(x, nn.utils.rnn.PackedSequence):
data = self.linear(x.data)
return nn.utils.rnn.PackedSequence(data, x.batch_sizes)
else:
x = self.linear(x)
return x
class SelfAttentionLayer(nn.Module):
def __init__(self, d_x, d_k, d_v):
super().__init__()
self.d_x = d_x
self.d_k = d_k
self.d_v = d_v
self.w_q = nn.Linear(d_x, d_k)
self.w_k = nn.Linear(d_x, d_k)
self.w_v = nn.Linear(d_x, d_v)
def forward(self, x):
# x: float[batch, sequence_length, d_x]
Q = self.w_q(x)
# Q: float[batch, sequence_length, d_k]
K = self.w_k(x)
# K: float[batch, sequence_length, d_k]
V = self.w_v(x)
# V: float[batch, sequence_length, d_v]
        logits = torch.bmm(Q, K.permute(0, 2, 1)) / np.sqrt(self.d_k)  # scaled dot-product QK^T (the original mistakenly used K and V here, leaving Q unused)
# logits float[batch, sequence_length, sequence_length]
return torch.bmm(torch.softmax(logits, dim=-1), V)
# return float[batch, sequence_length, d_v]
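def _self_attention_shape_check():
    # Hedged sanity check (added; the sizes are illustrative assumptions, not
    # from the original script): the layer should map an input of shape
    # [batch, seq, d_x] to an output of shape [batch, seq, d_v].
    layer = SelfAttentionLayer(d_x=16, d_k=8, d_v=8)
    x = torch.randn(2, 5, 16)
    assert layer(x).shape == (2, 5, 8)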
class MultiHeadedAttentionLayer(nn.Module):
def __init__(self, d_x, n_heads):
super().__init__()
assert d_x % n_heads == 0
self.n_heads = n_heads
        self.heads = nn.ModuleList([
            SelfAttentionLayer(d_x, d_x // n_heads, d_x // n_heads)
            for _ in range(n_heads)
        ])  # ModuleList so the heads' parameters are registered with this module
def forward(self, x):
# x: float[batch, sequence_length, d_x]
return torch.cat([
head(x) for head in self.heads
], dim=-1)
class TransformerLayer(nn.Module):
def __init__(self, d_x, n_heads, activation=F.relu):
super().__init__()
self.d_x = d_x
self.n_heads = n_heads
self.activation = activation
self.attention = MultiHeadedAttentionLayer(d_x, n_heads)
self.linear = nn.Linear(d_x, d_x)
self.ln1 = nn.LayerNorm([d_x])
self.ln2 = nn.LayerNorm([d_x])
self.dropout = nn.Dropout(p=0.1)
def forward(self, x):
# x: float[batch, sequence, d_x]
x_attn = self.dropout(self.attention(x))
# x_attn: float[batch, sequence, d_x]
x = self.ln1(x + x_attn)
x_ff = self.dropout(self.activation(self.linear(x)))
x = self.ln2(x + x_ff)
return x
class TransformerSeq2Seq(nn.Module):
def __init__(self, d_x, n_layers, n_heads, d_out, pos_encode=False):
super().__init__()
self.d_x = d_x
self.d_out = d_out
self.n_heads = n_heads
self.pos_encode = pos_encode
        self.layers = nn.ModuleList([TransformerLayer(d_x, n_heads) for _ in range(n_layers)])  # registered submodules, so .to(device) and optimizers see them
self.out = nn.Linear(d_x, d_out)
self.dropout = nn.Dropout(p=0.1)
def _pos_encode(self, x):
# x: float[batch, sequence_length, n_dims]
batch, sequence_length, n_dims = x.shape
positions = torch.arange(sequence_length, dtype=torch.float)
# positions: float[sequence_length]
frequencies = 10000 ** (-torch.arange(n_dims/2, dtype=torch.float) / (n_dims/2))
# frequencies: float[n_dims / 2]
coss = torch.cos(torch.ger(positions, frequencies))
sins = torch.sin(torch.ger(positions, frequencies))
pes = torch.cat([coss, sins], -1)
# pes: float[sequence_length, n_dims]
return self.dropout(x + pes)
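    # Note (added): this is the standard sinusoidal positional encoding -- the
    # first n_dims/2 channels hold cos(pos * f_k) and the rest sin(pos * f_k),
    # with frequencies f_k = 10000^(-k/(n_dims/2)) as computed above.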
def forward(self, x):
was_packed = False
if isinstance(x, nn.utils.rnn.PackedSequence):
was_packed = True
x, lengths = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
# x float:[batch, sequence_length, d_x]
if self.pos_encode:
x = self._pos_encode(x)
for layer in self.layers:
x = layer(x)
x = self.out(x)
if was_packed:
x = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
return x
class ComposedSeq2Seq(nn.Module):
def __init__(self, dim, lstm_layers=2, transformer_layers=6):
super().__init__()
self.lstm = LSTMSeq2Seq(dim, dim, dim, layers=lstm_layers)
self.transformer = TransformerSeq2Seq(dim, transformer_layers, 8, dim)
def forward(self, x):
return self.lstm(self.transformer(x))
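# Note (added): despite the constructor naming the LSTM first, forward() runs
# the transformer encoder first and feeds its output into the LSTM.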
class IdentityLayer(nn.Module):
def forward(self, x):
return x
class LSTMCRF(nn.Module):
    # only supports PackedSequences of length 1...
    # NOTE: CRFLayer (used below) is not defined anywhere in this file; this
    # class is dead code and is never instantiated by the script.
def __init__(self, dim, n_tags, lstm_layers=2):
super().__init__()
self.lstm = LSTMSeq2Seq(dim, dim, dim, layers=lstm_layers)
self.crf = CRFLayer(dim, n_tags)
# hack so we can print these nicely
self.transitions = self.crf.transitions
def forward(self, x):
x = self.lstm(x)
if isinstance(x, nn.utils.rnn.PackedSequence):
return self.crf(x.data)
else:
return self.crf(x)
def neg_log_likelihood(self, x, tags):
x = self.lstm(x)
if isinstance(x, nn.utils.rnn.PackedSequence):
return self.crf.neg_log_likelihood(x.data, tags.data)
else:
return self.crf.neg_log_likelihood(x, tags)
class QuoteDetectionModel(nn.Module):
def __init__(
self,
n_features,
dim=300,
lam=1e-4,
encoder=None,
use_crf=False,
transformer=False,
sample_steps=1,
label_scheme='BE',
viterbi=False,
pipeline=True,
):
super().__init__()
self.n_features = n_features
self.dim = dim
self.lam = lam
self.use_crf = use_crf
self.sample_steps = sample_steps
self.label_scheme = label_scheme
self.viterbi = viterbi
self.pipeline = pipeline
self.embedding = nn.EmbeddingBag(n_features, dim, mode='sum')
self.roles = list(roles)
if encoder is None:
self.encoder = lambda x: x
elif encoder == 'transformer':
self.encoder = TransformerSeq2Seq(dim, 6, 10, dim, pos_encode=True)
if use_crf:
self.seq2seqs = nn.ModuleList()
self.crf_encoders = nn.ModuleList()
self.crfs = nn.ModuleList()
for role in self.roles:
if transformer:
self.seq2seqs.append(TransformerSeq2Seq(dim, 6, 10, dim, pos_encode=True))
else:
self.seq2seqs.append(LSTMSeq2Seq(dim, dim, dim, layers=2, bidirectional=True))
#self.crfs.append(CRFLayer(dim, 3))
self.crf_encoders.append(nn.Linear(dim, 3))
self.crfs.append(ConditionalRandomField(3))
else:
self.seq2seqs = nn.ModuleList()
self.outputs = nn.ModuleList()
for role in self.roles:
if transformer:
self.seq2seqs.append(TransformerSeq2Seq(dim, 6, 10, dim, pos_encode=True))
else:
self.seq2seqs.append(LSTMSeq2Seq(dim, dim, dim, layers=2, bidirectional=True))
self.outputs.append(nn.Linear(dim, 3))
        # sized len(self.roles) x 3 x dim (the original hard-coded 3 roles, which
        # under-allocates when four roles are used, e.g. for the rwg/stop corpora)
        self.tag_embeddings = Parameter(torch.Tensor(len(self.roles), 3, dim))
        nn.init.normal_(self.tag_embeddings)
def forward(self, x):
# x: long[batch_size, sequence_length, bag_size]
nn.init.zeros_(self.embedding.weight[0])
batch_size, sequence_length, bag_size = x.shape
embedded = self.embedding(x.view(-1, bag_size)).view(batch_size, sequence_length, self.dim)
# embedded: float[batch_size, sequence_length, self.dim]
embedded = self.encoder(embedded)
prediction_embeddings = torch.zeros_like(embedded)
if self.use_crf:
for step in range(self.sample_steps):
paths = []
for i, (seq2seq, crf_encoder, crf) in enumerate(zip(self.seq2seqs, self.crf_encoders, self.crfs)):
res = seq2seq(embedded + prediction_embeddings)
# res: float[batch_size, sequence_length, self.dim]
sequence_length = res.shape[1]
path, score = crf.viterbi_tags(crf_encoder(res).cpu(), torch.tensor([sequence_length], dtype=torch.int64).cpu())[0]
path = torch.tensor([path])
if self.pipeline:
prediction_embeddings = prediction_embeddings + F.embedding(path, self.tag_embeddings[i])
paths.append(path)
return torch.stack(paths, dim=1)
# return float:[batch_size, len(self.roles), sequence_length]
else:
for step in range(self.sample_steps):
paths = []
for i, (seq2seq, output) in enumerate(zip(self.seq2seqs, self.outputs)):
res = seq2seq(embedded + prediction_embeddings)
# res: float[batch_size, sequence_length, self.dim]
logits = output(res)
# logits: float[batch_size, sequence_length, 3]
if self.viterbi:
probs = torch.softmax(logits, dim=-1)
path = self.viterbi_sequence(probs)
else:
path = torch.argmax(logits, dim=-1)
if self.pipeline:
prediction_embeddings = prediction_embeddings + F.embedding(path, self.tag_embeddings[i])
paths.append(path)
return torch.stack(paths, dim=1)
# return float:[batch_size, len(self.roles), sequence_length]
def neg_log_likelihood(self, x, tags):
# x: long[batch_size, sequence_length, bag_size]
# tags: int[batch_size, len(self.roles), sequence_length]
nn.init.zeros_(self.embedding.weight[0])
batch_size, sequence_length, bag_size = x.shape
embedded = self.embedding(x.view(-1, bag_size)).view(batch_size, sequence_length, self.dim)
# embedded: float[batch_size, sequence_length, self.dim]
embedded = self.encoder(embedded)
nll = 0
prediction_embeddings = torch.zeros_like(embedded)
if self.use_crf:
for step in range(self.sample_steps):
for i, (seq2seq, crf_encoder, crf) in enumerate(zip(self.seq2seqs, self.crf_encoders, self.crfs)):
role_tags = tags[:,i,:]
res = seq2seq(embedded + prediction_embeddings)
nll -= torch.sum(crf(crf_encoder(res), role_tags))
                    if self.pipeline:
                        # unreachable with the settings used below (pipeline=False);
                        # note that crf(res) does not match the CRF's call signature
                        score, path = crf(res)
prediction_embeddings = prediction_embeddings + F.embedding(path, self.tag_embeddings[i])
return nll
else:
for step in range(self.sample_steps):
for i, (seq2seq, output) in enumerate(zip(self.seq2seqs, self.outputs)):
role_tags = tags[:,i,:]
# role_tags: long[batch_size, sequence_length]
res = seq2seq(embedded + prediction_embeddings)
# res: float[batch_size, sequence_length, self.dim]
logits = output(res)
# logits: float[batch_size, sequence_length, 3]
nll += F.cross_entropy(logits.view(-1, 3), role_tags.view(-1), reduction='sum')
path = torch.argmax(logits, dim=-1)
if self.pipeline:
if self.viterbi:
                            # Viterbi decoding at every training step would be
                            # too expensive, so gold-standard labels are fed
                            # forward instead (accepting some error propagation)
prediction_embeddings = prediction_embeddings + F.embedding(role_tags, self.tag_embeddings[i])
else:
prediction_embeddings = prediction_embeddings + F.embedding(path, self.tag_embeddings[i])
return nll
def viterbi_sequence(self, probs):
if self.label_scheme == "BE":
#naive = [" BE"[m] for m in torch.argmax(probs, -1)]
paths = torch.zeros(probs.shape[:-1], dtype=torch.long)
inf = float('inf')
# OIB probabilities
for i, pi in enumerate(probs):
state_lps = [(0, -inf, -inf)]
for lp_, lpb, lpe in torch.log(pi):
state_lpb = max(state_lps[-1]) + lpb
state_lpi = max(state_lps[-1][1], state_lps[-1][2]) + lp_
state_lpo = max(
state_lps[-1][0] + lp_,
state_lps[-1][1] + lpe,
state_lps[-1][2] + lpe
)
state_lps.append((state_lpo, state_lpi, state_lpb))
#construct the most likely BIO sequence from back to front
bios = []
outside = True
for (lpo, lpi, lpb) in reversed(state_lps[1:]):
if outside:
if lpo > lpi and lpo > lpb:
bios.append('O')
elif lpb > lpo and lpb > lpi:
bios.append('B')
else:
bios.append('I')
outside = False
else:
if lpb > lpi:
bios.append('B')
outside = True
else:
bios.append('I')
bios.reverse()
outside = True
for j, bio in enumerate(bios):
if bio == 'B':
#final_labels.append('B')
paths[i,j] = 1
outside = False
elif bio == 'I':
#final_labels.append(' ')
paths[i,j] = 0
assert not outside
elif bio == 'O':
if not outside:
#final_labels.append('E')
paths[i,j] = 2
else:
#final_labels.append(' ')
paths[i,j] = 0
outside = True
return paths
def jointshuffle(l1, l2):
print("shuffling...")
zipped = list(zip(l1, l2))
random.shuffle(zipped)
c, d = zip(*zipped)
print("done shuffling")
return list(c), list(d)
def batchify(feats, labels, batch_size):
    feats, labels = jointshuffle(feats, labels)
    # sort jointly by sequence length so the (feats, labels) pairing survives
    # (the original sorted the two lists independently and returned nothing)
    pairs = sorted(zip(feats, labels), key=lambda p: len(p[0]))
    return [tuple(zip(*pairs[i:i + batch_size])) for i in range(0, len(pairs), batch_size)]
def encode_feats(*corpora):
seen_features = {None: 0}
encoded_corpora = []
for corpus in corpora:
encoded_corpus = []
for document in corpus:
bag_size = max([len(token) for token in document])
document_tensor = []
for token in document:
bag = []
for feature in token:
if feature not in seen_features:
seen_features[feature] = len(seen_features)
bag.append(seen_features[feature])
while len(bag) < bag_size:
bag.append(0)
document_tensor.append(bag)
document_tensor = torch.tensor(document_tensor, dtype=torch.long)
encoded_corpus.append(document_tensor)
encoded_corpora.append(encoded_corpus)
return encoded_corpora, seen_features
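# Note (added): feature index 0 is reserved for the None padding feature above,
# which is why QuoteDetectionModel.forward() re-zeroes embedding.weight[0] on
# every call -- padded bag slots then contribute nothing to the summed embedding.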
def encode_labels(*corpora, scheme='BE'):
if scheme == 'BE':
tags = ' BE'
elif scheme == 'BIO':
tags = 'OIB'
encoded_corpora = []
for corpus in corpora:
encoded_corpus = []
for document in corpus:
document_tensor = []
for role in roles:
role_tensor = []
for token in document:
role_tensor.append(tags.index(token[role]))
document_tensor.append(role_tensor)
document_tensor = torch.tensor(document_tensor, dtype=torch.long)
encoded_corpus.append(document_tensor)
encoded_corpora.append(encoded_corpus)
return encoded_corpora
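# Hedged note (added, inferred from viterbi_sequence above): under the 'BE'
# scheme a token is tagged ' ' (no boundary), 'B' (a span begins here) or 'E'
# (the first token after a span closes); 'BIO' uses the usual
# Outside/Inside/Begin tags, in the index order of the 'OIB' string above.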
def inject_pretrained_embeddings(model, embedding_path, feat_indices):
print("loading pre-trained embeddings...")
with open(embedding_path) as f:
for line in f:
line = line.split(' ')
word = line[0]
if ('word', word) in feat_indices:
index = feat_indices[('word', word)]
v = torch.Tensor([float(li) for li in line[1:]])
model.embedding.weight.data[index] = v
if ('lemma', word) in feat_indices:
index = feat_indices[('lemma', word)]
v = torch.Tensor([float(li) for li in reversed(line[1:])])
model.embedding.weight.data[index] = v
print("done!")
def eval(loss_func, feats, labels):  # note: shadows the builtin eval(); defined but unused below
with torch.no_grad():
loss = 0
for fi, li in zip(feats, progressify(labels, "Evaluating datum %%i / %d" % len(labels))):
loss += loss_func(fi, li)
return loss / len(feats)
def train(loss_func, optimizer, feats, labels, lamb=1e-4):
feats, labels = jointshuffle(feats, labels)
mean_loss = None
def progressify_str(i, _):
s = "training datum %d / %d." % (i, len(labels))
if i > 0:
s += " Mean training loss: %f" % mean_loss
return s
for fi, li in zip(feats, progressify(labels, progressify_str)):
optimizer.zero_grad()
f = fi.unsqueeze(0)
loss = loss_func(f, li.unsqueeze(0))
if mean_loss is None:
mean_loss = loss
else:
mean_loss = .995 * mean_loss + .005 * loss
# l2 regularization
for param_group in optimizer.param_groups:
for param in param_group['params']:
loss += lamb * torch.sum(param ** 2)
loss.backward()
optimizer.step()
def predict(forward_func, feats):
predictions = []
with torch.no_grad():
for datum in feats:
batch = datum.unsqueeze(0)
#batch_feats = torch.stack(feats[i:i+predict_batch_size])
batch_predictions = forward_func(batch)
for pi in batch_predictions:
predictions.append(pi)
return predictions
def train_loop(model, optimizer, train_feats, train_labels, dev_feats, dev_labels, gamma=0.75, callback=None):
loss_func = model.neg_log_likelihood
running_average = -1
epoch = 0
while True:
print("Epoch %d" % epoch)
model.eval()
dev_score = callback()
#print("Dev loss: %f" % dev_loss)
running_average = gamma * running_average + (1-gamma) * dev_score
print("Running average: %f" % running_average)
if dev_score < running_average:
break
model.train()
train(loss_func, optimizer, train_feats, train_labels)
epoch += 1
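# Note (added): train_loop early-stops when the dev score falls below its own
# exponential moving average (running_average with gamma=0.75), i.e. when the
# callback's F1 stops improving from epoch to epoch.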
def get_ev(model, feats, raw_labels, eval_mode='exact'):
print("Predicting spans...")
predicted = predict(model, feats)
predicted_processed = []
for doc in predicted:
doc_processed = []
for i in range(len(doc[0])):
token_processed = {}
for r, role in enumerate(roles):
if scheme == 'BE':
token_processed[role] = ' BE '[doc[r][i]]
elif scheme == 'BIO':
token_processed[role] = 'OIBOO'[doc[r][i]]
doc_processed.append(token_processed)
predicted_processed.append(doc_processed)
    # pass the label scheme through (the original flagged its omission as a bug:
    # "BUG HERE!! should say scheme=scheme")
    return evaluate(predicted_processed, raw_labels, roles=roles, mode=eval_mode, scheme=scheme)
def run_model(
    raw_train_feats, raw_train_labels, raw_dev_feats, raw_dev_labels, raw_test_feats, raw_test_labels  # fixed: the original dropped the trailing 's' and silently fell back to the global
):
(train_feats, dev_feats, test_feats), feat_indices = encode_feats(raw_train_feats, raw_dev_feats, raw_test_feats)
train_labels, dev_labels, test_labels = encode_labels(raw_train_labels, raw_dev_labels, raw_test_labels)
n_feats = len(feat_indices)
model = QuoteDetectionModel(n_feats, use_crf=use_crf, sample_steps=1, label_scheme=scheme, viterbi=False, transformer=False, pipeline=False)
if embedding_path is not None:
inject_pretrained_embeddings(model, embedding_path, feat_indices)
optimizer = optim.Adam(model.parameters())
best_f1 = -1
def training_callback():
nonlocal best_f1
if check_presence:
ev = get_ev(model, dev_feats, raw_dev_labels, eval_mode='presence')
else:
ev = get_ev(model, dev_feats, raw_dev_labels)
print(report(ev, roles=roles))
f1 = 0
if 'content' in ev:
tp = ev['content']['tp']
fp = ev['content']['fp']
fn = ev['content']['fn']
else:
tp = 0
fp = 0
fn = 0
for role in ev:
tp += ev[role]['tp']
fp += ev[role]['fp']
fn += ev[role]['fn']
if tp != 0:
p = tp / (tp + fp)
r = tp / (tp + fn)
f1 = 2 / (1/p + 1/r)
if f1 > best_f1:
print('Best model so far! Saving...')
best_f1 = f1
torch.save(model.state_dict(), model_path)
return f1
train_loop(model, optimizer, train_feats, train_labels, dev_feats, dev_labels, callback=training_callback)
print("Loading best model...")
model.load_state_dict(torch.load(model_path))
print("Evaluating on test-set...")
ev = get_ev(model, test_feats, raw_test_labels, eval_mode='exact')
print(report(ev, roles=roles))
if check_presence:
ev_presence = get_ev(model, test_feats, raw_test_labels, eval_mode='presence')
print('presence/absence:')
print(report(ev_presence, roles=roles))
return ev, ev_presence
else:
return ev
if __name__ == '__main__':
arguments = docopt(__doc__)
model_name = arguments['<model-name>']
corpus_type = arguments['<corpus-type>']
corpus_path = arguments['<corpus-path>']
embedding_path = arguments['<embedding-path>']
assert corpus_type in {'parc', 'stop', 'rwg'}
xvalidate = (corpus_type in {'stop', 'rwg'})
check_presence = (corpus_type == 'rwg')
if corpus_type in {'rwg', 'stop'}:
roles = [
'direct',
'indirect',
'free_indirect',
'reported'
]
elif corpus_type == 'parc':
roles = ['content']
cuda = True
if cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if corpus_type == 'rwg':
from rwg2feat import corpus_feats_and_labels
elif corpus_type == 'stop':
from stop2feat import corpus_feats_and_labels
elif corpus_type == 'parc':
from parc2feat import corpus_feats_and_labels
import sys
import os
use_crf = False
scheme = 'BE'
model_path = model_name + '.pkl'
if xvalidate:
raw_feats, raw_labels = corpus_feats_and_labels(corpus_path, label_scheme=scheme)
i_dev = len(raw_feats) // 10
i_train = 2 * i_dev
raw_feats, raw_labels = jointshuffle(raw_feats, raw_labels)
for step in range(10):
print("Cross-validation step %d" % step)
raw_test_feats = raw_feats[:i_dev]
raw_dev_feats = raw_feats[i_dev:i_train]
raw_train_feats = raw_feats[i_train:]
raw_test_labels = raw_labels[:i_dev]
raw_dev_labels = raw_labels[i_dev:i_train]
raw_train_labels = raw_labels[i_train:]
run_model(
raw_train_feats, raw_train_labels, raw_dev_feats, raw_dev_labels, raw_test_feats, raw_test_labels,
)
# cycle
raw_feats = raw_feats[i_dev:] + raw_feats[:i_dev]
raw_labels = raw_labels[i_dev:] + raw_labels[:i_dev]
else:
print('loading training data')
raw_train_feats, raw_train_labels = corpus_feats_and_labels(os.path.join(corpus_path, 'train'), label_scheme=scheme)
print('loading dev data')
raw_dev_feats, raw_dev_labels = corpus_feats_and_labels(os.path.join(corpus_path, 'dev'), label_scheme=scheme)
print('loading test data')
raw_test_feats, raw_test_labels = corpus_feats_and_labels(os.path.join(corpus_path, 'test'), label_scheme=scheme)
(train_feats, dev_feats, test_feats), feat_indices = encode_feats(raw_train_feats, raw_dev_feats, raw_test_feats)
n_feats = len(feat_indices)
train_labels, dev_labels, test_labels = encode_labels(raw_train_labels, raw_dev_labels, raw_test_labels, scheme=scheme)
model = QuoteDetectionModel(n_feats, use_crf=use_crf, sample_steps=1, label_scheme=scheme, viterbi=False, transformer=False, pipeline=False)
if embedding_path is not None:
inject_pretrained_embeddings(model, embedding_path, feat_indices)
optimizer = optim.Adam(model.parameters())
best_f1 = -1
def training_callback():
global best_f1
if check_presence:
ev = get_ev(model, dev_feats, raw_dev_labels, eval_mode='presence')
else:
ev = get_ev(model, dev_feats, raw_dev_labels)
print(report(ev, roles=roles))
f1 = 0
if 'content' in ev:
tp = ev['content']['tp']
fp = ev['content']['fp']
fn = ev['content']['fn']
else:
tp = 0
fp = 0
fn = 0
for role in ev:
tp += ev[role]['tp']
fp += ev[role]['fp']
fn += ev[role]['fn']
if tp != 0:
p = tp / (tp + fp)
r = tp / (tp + fn)
f1 = 2 / (1/p + 1/r)
if f1 > best_f1:
print('Best model so far! Saving...')
best_f1 = f1
torch.save(model.state_dict(), model_path)
return f1
train_loop(model, optimizer, train_feats, train_labels, dev_feats, dev_labels, callback=training_callback)
print("Loading best model...")
model.load_state_dict(torch.load(model_path))
print("Evaluating on test-set...")
ev = get_ev(model, test_feats, raw_test_labels)
print(report(ev, roles=roles))
|
[
"evaluate.report",
"evaluate.evaluate"
] |
[((15507, 15529), 'random.shuffle', 'random.shuffle', (['zipped'], {}), '(zipped)\n', (15521, 15529), False, 'import random\n'), ((20864, 20934), 'evaluate.evaluate', 'evaluate', (['predicted_processed', 'raw_labels'], {'roles': 'roles', 'mode': 'eval_mode'}), '(predicted_processed, raw_labels, roles=roles, mode=eval_mode)\n', (20872, 20934), False, 'from evaluate import evaluate, report\n'), ((23231, 23246), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (23237, 23246), False, 'from docopt import docopt\n'), ((794, 912), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim'], {'num_layers': 'layers', 'bidirectional': 'bidirectional', 'dropout': '(0.5)', 'batch_first': '(True)'}), '(embedding_dim, hidden_dim, num_layers=layers, bidirectional=\n bidirectional, dropout=0.5, batch_first=True)\n', (801, 912), True, 'import torch.nn as nn\n'), ((930, 998), 'torch.nn.Linear', 'nn.Linear', (['(hidden_dim * 2 if bidirectional else hidden_dim)', 'n_labels'], {}), '(hidden_dim * 2 if bidirectional else hidden_dim, n_labels)\n', (939, 998), True, 'import torch.nn as nn\n'), ((1482, 1501), 'torch.nn.Linear', 'nn.Linear', (['d_x', 'd_k'], {}), '(d_x, d_k)\n', (1491, 1501), True, 'import torch.nn as nn\n'), ((1521, 1540), 'torch.nn.Linear', 'nn.Linear', (['d_x', 'd_k'], {}), '(d_x, d_k)\n', (1530, 1540), True, 'import torch.nn as nn\n'), ((1560, 1579), 'torch.nn.Linear', 'nn.Linear', (['d_x', 'd_v'], {}), '(d_x, d_v)\n', (1569, 1579), True, 'import torch.nn as nn\n'), ((2891, 2910), 'torch.nn.Linear', 'nn.Linear', (['d_x', 'd_x'], {}), '(d_x, d_x)\n', (2900, 2910), True, 'import torch.nn as nn\n'), ((2930, 2949), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['[d_x]'], {}), '([d_x])\n', (2942, 2949), True, 'import torch.nn as nn\n'), ((2969, 2988), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['[d_x]'], {}), '([d_x])\n', (2981, 2988), True, 'import torch.nn as nn\n'), ((3012, 3029), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.1)'}), '(p=0.1)\n', (3022, 3029), True, 'import torch.nn as nn\n'), ((3707, 3728), 'torch.nn.Linear', 'nn.Linear', (['d_x', 'd_out'], {}), '(d_x, d_out)\n', (3716, 3728), True, 'import torch.nn as nn\n'), ((3752, 3769), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.1)'}), '(p=0.1)\n', (3762, 3769), True, 'import torch.nn as nn\n'), ((3921, 3969), 'torch.arange', 'torch.arange', (['sequence_length'], {'dtype': 'torch.float'}), '(sequence_length, dtype=torch.float)\n', (3933, 3969), False, 'import torch\n'), ((4278, 4305), 'torch.cat', 'torch.cat', (['[coss, sins]', '(-1)'], {}), '([coss, sins], -1)\n', (4287, 4305), False, 'import torch\n'), ((6781, 6825), 'torch.nn.EmbeddingBag', 'nn.EmbeddingBag', (['n_features', 'dim'], {'mode': '"""sum"""'}), "(n_features, dim, mode='sum')\n", (6796, 6825), True, 'import torch.nn as nn\n'), ((8166, 8202), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.tag_embeddings'], {}), '(self.tag_embeddings)\n', (8181, 8202), True, 'import torch.nn as nn\n'), ((8295, 8335), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.embedding.weight[0]'], {}), '(self.embedding.weight[0])\n', (8309, 8335), True, 'import torch.nn as nn\n'), ((8631, 8657), 'torch.zeros_like', 'torch.zeros_like', (['embedded'], {}), '(embedded)\n', (8647, 8657), False, 'import torch\n'), ((10769, 10809), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.embedding.weight[0]'], {}), '(self.embedding.weight[0])\n', (10783, 10809), True, 'import torch.nn as nn\n'), ((11121, 11147), 'torch.zeros_like', 'torch.zeros_like', (['embedded'], {}), '(embedded)\n', (11137, 11147), 
False, 'import torch\n'), ((18115, 18130), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18128, 18130), False, 'import torch\n'), ((18654, 18690), 'progressify.progressify', 'progressify', (['labels', 'progressify_str'], {}), '(labels, progressify_str)\n', (18665, 18690), False, 'from progressify import progressify\n'), ((19227, 19242), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19240, 19242), False, 'import torch\n'), ((22761, 22783), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (22771, 22783), False, 'import torch\n'), ((22905, 22928), 'evaluate.report', 'report', (['ev'], {'roles': 'roles'}), '(ev, roles=roles)\n', (22911, 22928), False, 'from evaluate import evaluate, report\n'), ((23841, 23896), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (23870, 23896), False, 'import torch\n'), ((24314, 24371), 'parc2feat.corpus_feats_and_labels', 'corpus_feats_and_labels', (['corpus_path'], {'label_scheme': 'scheme'}), '(corpus_path, label_scheme=scheme)\n', (24337, 24371), False, 'from parc2feat import corpus_feats_and_labels\n'), ((1175, 1223), 'torch.nn.utils.rnn.PackedSequence', 'nn.utils.rnn.PackedSequence', (['data', 'x.batch_sizes'], {}), '(data, x.batch_sizes)\n', (1202, 1223), True, 'import torch.nn as nn\n'), ((1923, 1940), 'numpy.sqrt', 'np.sqrt', (['self.d_k'], {}), '(self.d_k)\n', (1930, 1940), True, 'import numpy as np\n'), ((2030, 2059), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (2043, 2059), False, 'import torch\n'), ((4169, 4202), 'torch.ger', 'torch.ger', (['positions', 'frequencies'], {}), '(positions, frequencies)\n', (4178, 4202), False, 'import torch\n'), ((4229, 4262), 'torch.ger', 'torch.ger', (['positions', 'frequencies'], {}), '(positions, frequencies)\n', (4238, 4262), False, 'import torch\n'), ((4561, 4614), 'torch.nn.utils.rnn.pad_packed_sequence', 'nn.utils.rnn.pad_packed_sequence', (['x'], {'batch_first': '(True)'}), '(x, batch_first=True)\n', (4593, 4614), True, 'import torch.nn as nn\n'), ((4849, 4912), 'torch.nn.utils.rnn.pack_padded_sequence', 'nn.utils.rnn.pack_padded_sequence', (['x', 'lengths'], {'batch_first': '(True)'}), '(x, lengths, batch_first=True)\n', (4882, 4912), True, 'import torch.nn as nn\n'), ((7093, 7108), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7106, 7108), True, 'import torch.nn as nn\n'), ((7141, 7156), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7154, 7156), True, 'import torch.nn as nn\n'), ((7181, 7196), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7194, 7196), True, 'import torch.nn as nn\n'), ((7695, 7710), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7708, 7710), True, 'import torch.nn as nn\n'), ((7738, 7753), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7751, 7753), True, 'import torch.nn as nn\n'), ((8133, 8156), 'torch.Tensor', 'torch.Tensor', (['(3)', '(3)', 'dim'], {}), '(3, 3, dim)\n', (8145, 8156), False, 'import torch\n'), ((9460, 9485), 'torch.stack', 'torch.stack', (['paths'], {'dim': '(1)'}), '(paths, dim=1)\n', (9471, 9485), False, 'import torch\n'), ((10482, 10507), 'torch.stack', 'torch.stack', (['paths'], {'dim': '(1)'}), '(paths, dim=1)\n', (10493, 10507), False, 'import torch\n'), ((13173, 13220), 'torch.zeros', 'torch.zeros', (['probs.shape[:-1]'], {'dtype': 'torch.long'}), '(probs.shape[:-1], dtype=torch.long)\n', (13184, 13220), False, 'import torch\n'), ((16468, 
16515), 'torch.tensor', 'torch.tensor', (['document_tensor'], {'dtype': 'torch.long'}), '(document_tensor, dtype=torch.long)\n', (16480, 16515), False, 'import torch\n'), ((17185, 17232), 'torch.tensor', 'torch.tensor', (['document_tensor'], {'dtype': 'torch.long'}), '(document_tensor, dtype=torch.long)\n', (17197, 17232), False, 'import torch\n'), ((21894, 21917), 'evaluate.report', 'report', (['ev'], {'roles': 'roles'}), '(ev, roles=roles)\n', (21900, 21917), False, 'from evaluate import evaluate, report\n'), ((23089, 23121), 'evaluate.report', 'report', (['ev_presence'], {'roles': 'roles'}), '(ev_presence, roles=roles)\n', (23095, 23121), False, 'from evaluate import evaluate, report\n'), ((25327, 25361), 'os.path.join', 'os.path.join', (['corpus_path', '"""train"""'], {}), "(corpus_path, 'train')\n", (25339, 25361), False, 'import os\n'), ((25482, 25514), 'os.path.join', 'os.path.join', (['corpus_path', '"""dev"""'], {}), "(corpus_path, 'dev')\n", (25494, 25514), False, 'import os\n'), ((25638, 25671), 'os.path.join', 'os.path.join', (['corpus_path', '"""test"""'], {}), "(corpus_path, 'test')\n", (25650, 25671), False, 'import os\n'), ((27547, 27569), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (27557, 27569), False, 'import torch\n'), ((27684, 27707), 'evaluate.report', 'report', (['ev'], {'roles': 'roles'}), '(ev, roles=roles)\n', (27690, 27707), False, 'from evaluate import evaluate, report\n'), ((13410, 13423), 'torch.log', 'torch.log', (['pi'], {}), '(pi)\n', (13419, 13423), False, 'import torch\n'), ((26596, 26619), 'evaluate.report', 'report', (['ev'], {'roles': 'roles'}), '(ev, roles=roles)\n', (26602, 26619), False, 'from evaluate import evaluate, report\n'), ((4047, 4090), 'torch.arange', 'torch.arange', (['(n_dims / 2)'], {'dtype': 'torch.float'}), '(n_dims / 2, dtype=torch.float)\n', (4059, 4090), False, 'import torch\n'), ((7574, 7591), 'torch.nn.Linear', 'nn.Linear', (['dim', '(3)'], {}), '(dim, 3)\n', (7583, 7591), True, 'import torch.nn as nn\n'), ((8074, 8091), 'torch.nn.Linear', 'nn.Linear', (['dim', '(3)'], {}), '(dim, 3)\n', (8083, 8091), True, 'import torch.nn as nn\n'), ((9229, 9249), 'torch.tensor', 'torch.tensor', (['[path]'], {}), '([path])\n', (9241, 9249), False, 'import torch\n'), ((12383, 12411), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (12395, 12411), False, 'import torch\n'), ((19091, 19112), 'torch.sum', 'torch.sum', (['(param ** 2)'], {}), '(param ** 2)\n', (19100, 19112), False, 'import torch\n'), ((10096, 10125), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (10109, 10125), False, 'import torch\n'), ((10243, 10271), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (10255, 10271), False, 'import torch\n'), ((9360, 9401), 'torch.nn.functional.embedding', 'F.embedding', (['path', 'self.tag_embeddings[i]'], {}), '(path, self.tag_embeddings[i])\n', (9371, 9401), True, 'import torch.nn.functional as F\n'), ((10382, 10423), 'torch.nn.functional.embedding', 'F.embedding', (['path', 'self.tag_embeddings[i]'], {}), '(path, self.tag_embeddings[i])\n', (10393, 10423), True, 'import torch.nn.functional as F\n'), ((11678, 11719), 'torch.nn.functional.embedding', 'F.embedding', (['path', 'self.tag_embeddings[i]'], {}), '(path, self.tag_embeddings[i])\n', (11689, 11719), True, 'import torch.nn.functional as F\n'), ((12792, 12838), 'torch.nn.functional.embedding', 'F.embedding', (['role_tags', 'self.tag_embeddings[i]'], {}), 
'(role_tags, self.tag_embeddings[i])\n', (12803, 12838), True, 'import torch.nn.functional as F\n'), ((12945, 12986), 'torch.nn.functional.embedding', 'F.embedding', (['path', 'self.tag_embeddings[i]'], {}), '(path, self.tag_embeddings[i])\n', (12956, 12986), True, 'import torch.nn.functional as F\n'), ((9141, 9191), 'torch.tensor', 'torch.tensor', (['[sequence_length]'], {'dtype': 'torch.int64'}), '([sequence_length], dtype=torch.int64)\n', (9153, 9191), False, 'import torch\n')]
|
import evaluate
evaluate.dump_aggregated_period()
|
[
"evaluate.dump_aggregated_period"
] |
[((17, 50), 'evaluate.dump_aggregated_period', 'evaluate.dump_aggregated_period', ([], {}), '()\n', (48, 50), False, 'import evaluate\n')]
|
from data_processing import preprocess_28D
import autoencoders.autoencoder as ae
import autoencoders.variational_autoencoder as vae
import autoencoders.sparse_autoencoder as sae
from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot
from evaluate import evaluate_model
import pandas as pd
import argparse
from data_loader import load_cms_data
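# Note: argparse's type=bool treats any non-empty string (even "False") as True,
# so the boolean flags below use this explicit parser instead.
def str2bool(value):
    return str(value).lower() in ('true', '1', 'yes')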
if __name__ == "__main__":
    # construct the argument parser
ap = argparse.ArgumentParser()
ap.add_argument('-e', '--epochs', type=int, default=50,
help='number of epochs to train our autoencoder for')
ap.add_argument('-v', '--num_variables', type=int, default=24,
help='Number of variables we want to compress (either 19 or 24)')
    ap.add_argument('-cn', '--custom_norm', type=str2bool, default=False,
                    help='Whether to normalize all variables with min_max scaler or also use custom normalization for 4-momentum')
    ap.add_argument('-vae', '--use_vae', type=str2bool, default=True,
                    help='Whether to use Variational AE')
    ap.add_argument('-sae', '--use_sae', type=str2bool, default=False,
                    help='Whether to use Sparse AE')
    ap.add_argument('-l1', '--l1', type=str2bool, default=True,
                    help='Whether to use L1 loss or KL-divergence in the Sparse AE')
    ap.add_argument('-p', '--plot', type=str2bool, default=False,
                    help='Whether to make plots')
args = vars(ap.parse_args())
epochs = args['epochs']
use_vae = args['use_vae']
use_sae = args['use_sae']
custom_norm = args['custom_norm']
num_of_variables = args['num_variables']
create_plots = args['plot']
l1 = args['l1']
reg_param = 0.001
# sparsity parameter for KL loss in SAE
RHO = 0.05
# learning rate
lr = 0.001
cms_data_df = load_cms_data(filename="open_cms_data.root")
data_df = pd.read_csv('27D_openCMS_data.csv')
if create_plots:
# Plot the original data
plot_initial_data(input_data=data_df, num_variables=num_of_variables)
# Plot correlation matrix between the input variables of the data
correlation_plot(data_df)
# Preprocess data
data_df, train_data, test_data, scaler = preprocess_28D(data_df=data_df, num_variables=num_of_variables, custom_norm=custom_norm)
if create_plots:
# Plot preprocessed data
plot_initial_data(input_data=data_df, num_variables=num_of_variables, normalized=True)
if use_vae:
# Run the Variational Autoencoder and obtain the reconstructed data
test_data, reconstructed_data = vae.train(variables=num_of_variables, train_data=train_data, test_data=test_data, epochs=epochs, learning_rate=lr)
# Plot the reconstructed along with the initial data
plot_test_pred_data(test_data, reconstructed_data, num_variables=num_of_variables, vae=True)
elif use_sae:
# Run the Sparse Autoencoder and obtain the reconstructed data
test_data, reconstructed_data = sae.train(variables=num_of_variables, train_data=train_data,
test_data=test_data, learning_rate=lr, reg_param=reg_param, epochs=epochs, RHO=RHO, l1=l1)
# Plot the reconstructed along with the initial data
plot_test_pred_data(test_data, reconstructed_data, num_variables=num_of_variables, sae=True)
else:
# Initialize the Autoencoder
standard_ae = ae.Autoencoder(train_data, test_data, num_variables=num_of_variables)
# Train the standard Autoencoder and obtain the reconstructions
test_data, reconstructed_data = standard_ae.train(test_data, epochs=epochs)
# Plot the reconstructed along with the initial data
plot_test_pred_data(test_data, reconstructed_data, num_variables=num_of_variables)
# Evaluate the reconstructions of the network based on various metrics
evaluate_model(y_true=test_data, y_predicted=reconstructed_data)
|
[
"evaluate.evaluate_model"
] |
[((444, 469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (467, 469), False, 'import argparse\n'), ((1854, 1898), 'data_loader.load_cms_data', 'load_cms_data', ([], {'filename': '"""open_cms_data.root"""'}), "(filename='open_cms_data.root')\n", (1867, 1898), False, 'from data_loader import load_cms_data\n'), ((1913, 1948), 'pandas.read_csv', 'pd.read_csv', (['"""27D_openCMS_data.csv"""'], {}), "('27D_openCMS_data.csv')\n", (1924, 1948), True, 'import pandas as pd\n'), ((2259, 2352), 'data_processing.preprocess_28D', 'preprocess_28D', ([], {'data_df': 'data_df', 'num_variables': 'num_of_variables', 'custom_norm': 'custom_norm'}), '(data_df=data_df, num_variables=num_of_variables, custom_norm\n =custom_norm)\n', (2273, 2352), False, 'from data_processing import preprocess_28D\n'), ((3929, 3993), 'evaluate.evaluate_model', 'evaluate_model', ([], {'y_true': 'test_data', 'y_predicted': 'reconstructed_data'}), '(y_true=test_data, y_predicted=reconstructed_data)\n', (3943, 3993), False, 'from evaluate import evaluate_model\n'), ((2012, 2081), 'create_plots.plot_initial_data', 'plot_initial_data', ([], {'input_data': 'data_df', 'num_variables': 'num_of_variables'}), '(input_data=data_df, num_variables=num_of_variables)\n', (2029, 2081), False, 'from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot\n'), ((2165, 2190), 'create_plots.correlation_plot', 'correlation_plot', (['data_df'], {}), '(data_df)\n', (2181, 2190), False, 'from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot\n'), ((2411, 2501), 'create_plots.plot_initial_data', 'plot_initial_data', ([], {'input_data': 'data_df', 'num_variables': 'num_of_variables', 'normalized': '(True)'}), '(input_data=data_df, num_variables=num_of_variables,\n normalized=True)\n', (2428, 2501), False, 'from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot\n'), ((2631, 2750), 'autoencoders.variational_autoencoder.train', 'vae.train', ([], {'variables': 'num_of_variables', 'train_data': 'train_data', 'test_data': 'test_data', 'epochs': 'epochs', 'learning_rate': 'lr'}), '(variables=num_of_variables, train_data=train_data, test_data=\n test_data, epochs=epochs, learning_rate=lr)\n', (2640, 2750), True, 'import autoencoders.variational_autoencoder as vae\n'), ((2815, 2912), 'create_plots.plot_test_pred_data', 'plot_test_pred_data', (['test_data', 'reconstructed_data'], {'num_variables': 'num_of_variables', 'vae': '(True)'}), '(test_data, reconstructed_data, num_variables=\n num_of_variables, vae=True)\n', (2834, 2912), False, 'from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot\n'), ((3037, 3198), 'autoencoders.sparse_autoencoder.train', 'sae.train', ([], {'variables': 'num_of_variables', 'train_data': 'train_data', 'test_data': 'test_data', 'learning_rate': 'lr', 'reg_param': 'reg_param', 'epochs': 'epochs', 'RHO': 'RHO', 'l1': 'l1'}), '(variables=num_of_variables, train_data=train_data, test_data=\n test_data, learning_rate=lr, reg_param=reg_param, epochs=epochs, RHO=\n RHO, l1=l1)\n', (3046, 3198), True, 'import autoencoders.sparse_autoencoder as sae\n'), ((3308, 3405), 'create_plots.plot_test_pred_data', 'plot_test_pred_data', (['test_data', 'reconstructed_data'], {'num_variables': 'num_of_variables', 'sae': '(True)'}), '(test_data, reconstructed_data, num_variables=\n num_of_variables, sae=True)\n', (3327, 3405), False, 'from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot\n'), ((3470, 
3539), 'autoencoders.autoencoder.Autoencoder', 'ae.Autoencoder', (['train_data', 'test_data'], {'num_variables': 'num_of_variables'}), '(train_data, test_data, num_variables=num_of_variables)\n', (3484, 3539), True, 'import autoencoders.autoencoder as ae\n'), ((3766, 3853), 'create_plots.plot_test_pred_data', 'plot_test_pred_data', (['test_data', 'reconstructed_data'], {'num_variables': 'num_of_variables'}), '(test_data, reconstructed_data, num_variables=\n num_of_variables)\n', (3785, 3853), False, 'from create_plots import plot_initial_data, plot_test_pred_data, correlation_plot\n')]
|
import os
import sys
sys.path.append(os.path.dirname(__file__))
import yaml
import logging
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.cuda import amp
from tools.datasets import create_dataloader, preprocess
from tqdm import tqdm
import math
import numpy as np
import time
import evaluate
from tools.general import (set_logging,
init_seeds,
check_dataset,
check_img_size,
torch_distributed_zero_first,
plot_labels,
labels_to_class_weights,
compute_loss,
plot_images,
fitness,
check_anchors
)
from tools.torch_utils import select_device, ModelEMA
logger = logging.getLogger(__name__)
class obj(object):
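    """Recursively wraps a (possibly nested) dict as an object with attribute access."""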
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [obj(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, obj(b) if isinstance(b, dict) else b)
def train(modelWrapper, data, hyp, opt, device):
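    """Trains the wrapped YOLO model on the given data and returns the training image size."""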
model = modelWrapper.model
ckpt = modelWrapper.config['ckpt']
logger.info(f'Hyperparameters {hyp}')
log_dir = opt.modelPath
wdir = log_dir + '/weights'
os.makedirs(wdir, exist_ok=True)
last = wdir + '/last.pt'
best = wdir + '/best.pt'
results_file = log_dir + '/results.txt'
epochs, batch_size, total_batch_size, weights, rank = \
opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
with open(log_dir + '/hyp-train.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(log_dir + '/opt-train.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader)
with torch_distributed_zero_first(rank):
check_dataset(data_dict)
train_path = data_dict['train']
test_path = data_dict['val']
nc, names = (int(data_dict['nc']), data_dict['names'])
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)
# Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss over n batches before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay to effective batch size
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups: other, weights (decayed), biases
for k, v in model.named_parameters():
v.requires_grad = True
if '.bias' in k:
pg2.append(v)
elif '.weight' in k and '.bn' not in k:
pg1.append(v)
else:
pg0.append(v)
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})
optimizer.add_param_group({'params': pg2})
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
start_epoch, best_fitness = 0, 0.0
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# Results
if ckpt.get('training_results') is not None:
with open(results_file, 'w') as file:
file.write(ckpt['training_results'])
# Epochs
start_epoch = ckpt['epoch'] + 1
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch']
del ckpt
# Image sizes
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify image sizes are gs-multiples
# Exponential moving average
ema = ModelEMA(model)
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
ema.updates = start_epoch * nb // accumulate
labels = np.concatenate(dataset.labels, 0)
    c = torch.tensor(labels[:, 0])  # classes (not used further here)
plot_labels(labels, save_dir=log_dir)
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
# Model parameters
    hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)
model.names = names
# Start training
t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1e3)  # number of warmup iterations (at least 1000 batches)
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, [email protected], [email protected], val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
    scaler = amp.GradScaler(enabled=cuda)  # mixed-precision loss scaler (disabled on CPU)
logger.info('Image sizes %g train, %g test\n'
'Using %g dataloader workers\nLogging results to %s\n'
'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, log_dir, epochs))
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
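    # Training loop: each epoch ramps LR/momentum during warmup, runs mixed-precision
    # forward/backward with gradient accumulation, updates the EMA, and evaluates mAP.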
for epoch in range(start_epoch, epochs):
logger.info('Epoch: ' + str(epoch))
model.train()
mloss = torch.zeros(4, device=device) # mean losses
pbar = enumerate(dataloader)
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar:
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
# Plot
if ni < 3:
                f = os.path.join(log_dir, 'train_batch%g.jpg' % ni)  # filename
result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
# end batch ------------------------------------------------------------------------------------------------
logger.info(s)
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
# mAP
if ema:
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
final_epoch = epoch + 1 == epochs
results, maps, times = evaluate.test(opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=dataloader,
save_dir=log_dir,
plots=epoch == 0 or final_epoch)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # P, R, [email protected], [email protected], val_loss(box, obj, cls)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, [email protected], [email protected]]
if fi > best_fitness:
best_fitness = fi
logger.info('Current Best Map: ' + str(fi))
# Save model
with open(results_file, 'r') as f: # create checkpoint
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': f.read(),
'model': ema.ema,
'optimizer': None if final_epoch else optimizer.state_dict()}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
return imgsz
# end training
def main(data, model, args):
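    """Builds the training options from args/model and runs train()."""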
opt = obj({})
    opt.total_batch_size = 16 if not hasattr(args, 'batchSize') else args.batchSize
opt.epochs = 300 if not hasattr(args, 'epochs') else args.epochs
opt.batch_size = opt.total_batch_size
opt.world_size = 1
opt.global_rank = -1
opt.hyp = os.path.join(os.path.dirname(__file__), 'config/hyp.scratch.yaml')
opt.device = ''
opt.weights = 'yolov5s.pt'
opt.single_cls = False
opt.modelPath = args.modelPath
opt.img_size = model.config['img_size']
set_logging(opt.global_rank)
    opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # duplicate to [train, test] if only one size given
device = select_device(opt.device, batch_size=opt.batch_size)
logger.info(opt)
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.FullLoader)
dataconfig = preprocess(data)
model.cfg = obj({})
model.cfg.data = opt.data = dataconfig
imgsz = train(model, data, hyp, opt, device)
model.cfg.imgsz = imgsz
sys.path.pop()
return model
|
[
"evaluate.test"
] |
[((674, 701), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (691, 701), False, 'import logging\n'), ((1182, 1214), 'os.makedirs', 'os.makedirs', (['wdir'], {'exist_ok': '(True)'}), '(wdir, exist_ok=True)\n', (1193, 1214), False, 'import os\n'), ((1689, 1709), 'tools.general.init_seeds', 'init_seeds', (['(2 + rank)'], {}), '(2 + rank)\n', (1699, 1709), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((2491, 2561), 'torch.optim.SGD', 'optim.SGD', (['pg0'], {'lr': "hyp['lr0']", 'momentum': "hyp['momentum']", 'nesterov': '(True)'}), "(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)\n", (2500, 2561), True, 'import torch.optim as optim\n'), ((2930, 2976), 'torch.optim.lr_scheduler.LambdaLR', 'lr_scheduler.LambdaLR', (['optimizer'], {'lr_lambda': 'lf'}), '(optimizer, lr_lambda=lf)\n', (2951, 2976), True, 'import torch.optim.lr_scheduler as lr_scheduler\n'), ((3726, 3741), 'tools.torch_utils.ModelEMA', 'ModelEMA', (['model'], {}), '(model)\n', (3734, 3741), False, 'from tools.torch_utils import select_device, ModelEMA\n'), ((3767, 3852), 'tools.datasets.create_dataloader', 'create_dataloader', (['train_path', 'imgsz', 'batch_size', 'gs', 'opt'], {'hyp': 'hyp', 'augment': '(True)'}), '(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True\n )\n', (3784, 3852), False, 'from tools.datasets import create_dataloader, preprocess\n'), ((4145, 4178), 'numpy.concatenate', 'np.concatenate', (['dataset.labels', '(0)'], {}), '(dataset.labels, 0)\n', (4159, 4178), True, 'import numpy as np\n'), ((4185, 4211), 'torch.tensor', 'torch.tensor', (['labels[:, 0]'], {}), '(labels[:, 0])\n', (4197, 4211), False, 'import torch\n'), ((4214, 4251), 'tools.general.plot_labels', 'plot_labels', (['labels'], {'save_dir': 'log_dir'}), '(labels, save_dir=log_dir)\n', (4225, 4251), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((4254, 4323), 'tools.general.check_anchors', 'check_anchors', (['dataset'], {'model': 'model', 'thr': "hyp['anchor_t']", 'imgsz': 'imgsz'}), "(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)\n", (4267, 4323), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((4550, 4561), 'time.time', 'time.time', ([], {}), '()\n', (4559, 4561), False, 'import time\n'), ((4621, 4633), 'numpy.zeros', 'np.zeros', (['nc'], {}), '(nc)\n', (4629, 4633), True, 'import numpy as np\n'), ((4805, 4833), 'torch.cuda.amp.GradScaler', 'amp.GradScaler', ([], {'enabled': 'cuda'}), '(enabled=cuda)\n', (4819, 4833), False, 'from torch.cuda import amp\n'), ((9589, 9617), 'tools.general.set_logging', 'set_logging', (['opt.global_rank'], {}), '(opt.global_rank)\n', (9600, 9617), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((9698, 9750), 'tools.torch_utils.select_device', 'select_device', (['opt.device'], {'batch_size': 'opt.batch_size'}), '(opt.device, batch_size=opt.batch_size)\n', (9711, 9750), False, 'from tools.torch_utils 
import select_device, ModelEMA\n'), ((9859, 9875), 'tools.datasets.preprocess', 'preprocess', (['data'], {}), '(data)\n', (9869, 9875), False, 'from tools.datasets import create_dataloader, preprocess\n'), ((10015, 10029), 'sys.path.pop', 'sys.path.pop', ([], {}), '()\n', (10027, 10029), False, 'import sys\n'), ((50, 75), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (65, 75), False, 'import os\n'), ((1510, 1544), 'yaml.dump', 'yaml.dump', (['hyp', 'f'], {'sort_keys': '(False)'}), '(hyp, f, sort_keys=False)\n', (1519, 1544), False, 'import yaml\n'), ((1755, 1791), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1764, 1791), False, 'import yaml\n'), ((1799, 1833), 'tools.general.torch_distributed_zero_first', 'torch_distributed_zero_first', (['rank'], {}), '(rank)\n', (1827, 1833), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((1839, 1863), 'tools.general.check_dataset', 'check_dataset', (['data_dict'], {}), '(data_dict)\n', (1852, 1863), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((3641, 3662), 'tools.general.check_img_size', 'check_img_size', (['x', 'gs'], {}), '(x, gs)\n', (3655, 3662), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((5291, 5320), 'torch.zeros', 'torch.zeros', (['(4)'], {'device': 'device'}), '(4, device=device)\n', (5302, 5320), False, 'import torch\n'), ((7855, 8049), 'evaluate.test', 'evaluate.test', (['opt.data'], {'batch_size': 'total_batch_size', 'imgsz': 'imgsz_test', 'model': 'ema.ema', 'single_cls': 'opt.single_cls', 'dataloader': 'dataloader', 'save_dir': 'log_dir', 'plots': '(epoch == 0 or final_epoch)'}), '(opt.data, batch_size=total_batch_size, imgsz=imgsz_test,\n model=ema.ema, single_cls=opt.single_cls, dataloader=dataloader,\n save_dir=log_dir, plots=epoch == 0 or final_epoch)\n', (7868, 8049), False, 'import evaluate\n'), ((8833, 8855), 'torch.save', 'torch.save', (['ckpt', 'last'], {}), '(ckpt, last)\n', (8843, 8855), False, 'import torch\n'), ((9384, 9409), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9399, 9409), False, 'import os\n'), ((9807, 9843), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (9816, 9843), False, 'import yaml\n'), ((4446, 4489), 'tools.general.labels_to_class_weights', 'labels_to_class_weights', (['dataset.labels', 'nc'], {}), '(dataset.labels, nc)\n', (4469, 4489), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((8891, 8913), 'torch.save', 'torch.save', (['ckpt', 'best'], {}), '(ckpt, best)\n', (8901, 8913), False, 'import torch\n'), ((3898, 3931), 'numpy.concatenate', 'np.concatenate', (['dataset.labels', '(0)'], {}), '(dataset.labels, 0)\n', (3912, 3931), True, 'import numpy as np\n'), ((6313, 6339), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {'enabled': 'cuda'}), '(enabled=cuda)\n', (6325, 6339), False, 
'from torch.cuda import amp\n'), ((7337, 7400), 'tools.general.plot_images', 'plot_images', ([], {'images': 'imgs', 'targets': 'targets', 'paths': 'paths', 'fname': 'f'}), '(images=imgs, targets=targets, paths=paths, fname=f)\n', (7348, 7400), False, 'from tools.general import set_logging, init_seeds, check_dataset, check_img_size, torch_distributed_zero_first, plot_labels, labels_to_class_weights, compute_loss, plot_images, fitness, check_anchors\n'), ((7037, 7062), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7060, 7062), False, 'import torch\n'), ((8293, 8310), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (8301, 8310), True, 'import numpy as np\n'), ((2837, 2867), 'math.cos', 'math.cos', (['(x * math.pi / epochs)'], {}), '(x * math.pi / epochs)\n', (2845, 2867), False, 'import math\n'), ((6220, 6280), 'numpy.interp', 'np.interp', (['ni', 'xi', "[hyp['warmup_momentum'], hyp['momentum']]"], {}), "(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])\n", (6229, 6280), True, 'import numpy as np\n'), ((6999, 7027), 'torch.cuda.memory_reserved', 'torch.cuda.memory_reserved', ([], {}), '()\n', (7025, 7027), False, 'import torch\n'), ((5832, 5878), 'numpy.interp', 'np.interp', (['ni', 'xi', '[1, nbs / total_batch_size]'], {}), '(ni, xi, [1, nbs / total_batch_size])\n', (5841, 5878), True, 'import numpy as np\n')]
|
import json
from typing import List
from resource_mapping.result_analyzer import ResultAnalyzer
from execution_handler.execution_handler import ExecutionHandler
from aggregator.aggregator import Aggregator, AggregatorResults
from datetime import datetime, date
from quantum_execution_job import QuantumExecutionJob
from queue import Queue
from evaluate.util import counts_to_probability, sv_to_probability
import numpy as np
from qiskit import execute
from evaluate.circuit_gen import circ_gen
import os
import ibmq_account
import config.load_config as cfg
from resource_mapping.backend_chooser import Backend_Data
import logger
from qiskit.providers.aer import Aer, AerJob
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, complex):
return str(obj)
    raise TypeError("Type %s not serializable" % type(obj))
def write_file(dir_path, backend, results, agg_results, sv_res_prob: List[np.ndarray], n_qubits: int, circuits, circuit_type, shots):
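    """Converts measured and aggregated counts to probabilities and writes them, together with the statevector reference and backend metadata, to a JSON file."""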
res_prob = [counts_to_probability(r.get_counts(), n_qubits) for r in results]
agg_res_prob = [counts_to_probability(r.get_counts(), n_qubits) for r in agg_results]
data = []
n_circuits = len(circuits)
for i in range(n_circuits):
data.append({"circuit":circuits[i].qasm(), "sv-result":sv_res_prob[i].tolist(), "result":res_prob[i].tolist(), "agg-result":agg_res_prob[i].tolist()})
    backend_dict = {"name": backend.name()}
    if backend.configuration() is not None:
        backend_dict["config"] = backend.configuration().to_dict()
    if backend.status() is not None:
        backend_dict["status"] = backend.status().to_dict()
    if backend.properties() is not None:
        backend_dict["properties"] = backend.properties().to_dict()
now = datetime.now()
now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
with open(f'{dir_path}/{backend.name()}/{backend.name()}_{circuit_type}.json', 'w') as f:
json.dump({"date":now_str, "circuit_type":circuit_type, "n_circuits":n_circuits, "n_qubits":n_qubits, "shots":shots, "backend":backend_dict, "data":data}, f, indent=4, default=json_serial)
log.info("Wrote results to file.")
if __name__ == "__main__":
"""
Configure the evaluation here:
"""
# backend_names = ['ibmq_qasm_simulator' , 'ibmq_athens', 'ibmq_santiago', 'ibmq_belem']
# backend_names = ['ibmq_qasm_simulator' , 'ibmq_athens', 'ibmq_santiago', 'ibmq_quito', 'ibmq_lima', 'ibmq_belem']
backend_names = ['ibmq_qasm_simulator']
circuit_types = ["grover", "bv", "qft", "hwea", "uccsd", "supremacy_linear"]
shots = 8192
n_circuits = 2
n_qubits = 2
"""
Configuration End
"""
config = cfg.load_or_create()
logger.set_log_level_from_config(config)
provider = ibmq_account.get_provider(config)
log = logger.get_logger("Evaluate")
now = datetime.now()
now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
dir_path = f"agg_data_circ/{now_str}"
os.makedirs(dir_path)
log.info(f"Created directory {dir_path}")
circuits = {}
for type in circuit_types:
circ, _ = circ_gen(type, n_qubits, 1)
circ = circ[0]
circuits[type] = circ
log.info(f"Generated circuits for the types: {circuit_types}")
statevector_backend = Aer.get_backend('statevector_simulator')
sv_results = {}
for type, circ in circuits.items():
        sv_job: AerJob = execute(circ, statevector_backend)
sv_res = sv_job.result()
sv_result = sv_res.get_statevector(circ)
sv_results[type] = sv_to_probability(sv_result)
log.info("Executed the circuits with local statevector simulator")
backend_data_list = []
backends = {}
for backend_name in backend_names:
backend = provider.get_backend(backend_name)
backend_data = Backend_Data(backend)
backend_data_list.append(backend_data)
backends[backend_name] = {"backend":backend, "backend_data":backend_data}
os.makedirs(f"{dir_path}/{backend.name()}")
for type in circuit_types:
circuits[type] = [circuits[type]]*n_circuits
sv_results[type] = [sv_results[type]]*n_circuits
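    # Queues wiring the pipeline stages together:
    # input_pipeline -> Aggregator -> input_exec -> ExecutionHandler -> output_exec
    #   -> ResultAnalyzer -> output_pipline (aggregated jobs detour via agg_results -> AggregatorResults)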
input_pipeline = Queue()
input_exec = Queue()
output_exec = Queue()
agg_results = Queue()
output_pipline = Queue()
for backend_data in backend_data_list:
for type in circuit_types:
for circ in circuits[type]:
input_pipeline.put(QuantumExecutionJob(circuit=circ.measure_all(inplace=False), shots=shots, backend_data=backend_data))
input_exec.put(QuantumExecutionJob(circuit=circ.measure_all(inplace=False), shots=shots, backend_data=backend_data))
agg_job_dict = {}
aggregator = Aggregator(input=input_pipeline, output=input_exec, job_dict=agg_job_dict, timeout=10)
aggregator.start()
exec_handler = ExecutionHandler(provider, input=input_exec, output=output_exec, batch_timeout=5)
exec_handler.start()
result_analyzer = ResultAnalyzer(input=output_exec, output=output_pipline, output_agg=agg_results, output_part=None)
result_analyzer.start()
aggregator_results = AggregatorResults(input=agg_results, output=output_pipline, job_dict=agg_job_dict)
aggregator_results.start()
log.info("Started the Aggrgator pipeline")
result_counter = {}
results = {}
agg_results = {}
n_results = 2*n_circuits*len(backend_names)*len(circuits)
for backend_name in backend_names:
result_counter[backend_name] = 0
results[backend_name] = {}
agg_results[backend_name] = {}
for type in circuit_types:
results[backend_name][type] = []
agg_results[backend_name][type] = []
for i in range(n_results):
job = output_pipline.get()
r = job.result
backend_name = job.backend_data.name
log.debug(f"{i}: Got job {job.id},type {job.type}, from backend {backend_name}, success: {r.success}")
        count = result_counter[backend_name]
        count = count % (len(circuit_types)*n_circuits)  # position within this backend's current round of results
        type_index = int(count/n_circuits)  # jobs were enqueued grouped by circuit type, n_circuits each
        type = circuit_types[type_index]
        result_counter[backend_name] += 1
if len(results[backend_name][type]) < n_circuits:
results[backend_name][type].append(r)
else:
agg_results[backend_name][type].append(r)
if len(results[backend_name][type]) == n_circuits and len(agg_results[backend_name][type]) == 0:
log.info(f"All results for not aggregated circuits {type} are available for backend {backend_name}")
elif len(agg_results[backend_name][type]) == n_circuits:
log.info(f"All results for aggregated circuits {type} are available for backend {backend_name}")
write_file(dir_path, backends[backend_name]["backend"], results[backend_name].pop(type), agg_results[backend_name].pop(type), sv_results[type], n_qubits, circuits[type], type, shots)
|
[
"evaluate.circuit_gen.circ_gen",
"evaluate.util.sv_to_probability"
] |
[((1883, 1897), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1895, 1897), False, 'from datetime import datetime, date\n'), ((2802, 2822), 'config.load_config.load_or_create', 'cfg.load_or_create', ([], {}), '()\n', (2820, 2822), True, 'import config.load_config as cfg\n'), ((2827, 2867), 'logger.set_log_level_from_config', 'logger.set_log_level_from_config', (['config'], {}), '(config)\n', (2859, 2867), False, 'import logger\n'), ((2883, 2916), 'ibmq_account.get_provider', 'ibmq_account.get_provider', (['config'], {}), '(config)\n', (2908, 2916), False, 'import ibmq_account\n'), ((2928, 2957), 'logger.get_logger', 'logger.get_logger', (['"""Evaluate"""'], {}), "('Evaluate')\n", (2945, 2957), False, 'import logger\n'), ((2970, 2984), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2982, 2984), False, 'from datetime import datetime, date\n'), ((3079, 3100), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (3090, 3100), False, 'import os\n'), ((3393, 3433), 'qiskit.providers.aer.Aer.get_backend', 'Aer.get_backend', (['"""statevector_simulator"""'], {}), "('statevector_simulator')\n", (3408, 3433), False, 'from qiskit.providers.aer import Aer, AerJob\n'), ((4292, 4299), 'queue.Queue', 'Queue', ([], {}), '()\n', (4297, 4299), False, 'from queue import Queue\n'), ((4317, 4324), 'queue.Queue', 'Queue', ([], {}), '()\n', (4322, 4324), False, 'from queue import Queue\n'), ((4343, 4350), 'queue.Queue', 'Queue', ([], {}), '()\n', (4348, 4350), False, 'from queue import Queue\n'), ((4369, 4376), 'queue.Queue', 'Queue', ([], {}), '()\n', (4374, 4376), False, 'from queue import Queue\n'), ((4398, 4405), 'queue.Queue', 'Queue', ([], {}), '()\n', (4403, 4405), False, 'from queue import Queue\n'), ((4837, 4927), 'aggregator.aggregator.Aggregator', 'Aggregator', ([], {'input': 'input_pipeline', 'output': 'input_exec', 'job_dict': 'agg_job_dict', 'timeout': '(10)'}), '(input=input_pipeline, output=input_exec, job_dict=agg_job_dict,\n timeout=10)\n', (4847, 4927), False, 'from aggregator.aggregator import Aggregator, AggregatorResults\n'), ((4967, 5052), 'execution_handler.execution_handler.ExecutionHandler', 'ExecutionHandler', (['provider'], {'input': 'input_exec', 'output': 'output_exec', 'batch_timeout': '(5)'}), '(provider, input=input_exec, output=output_exec,\n batch_timeout=5)\n', (4983, 5052), False, 'from execution_handler.execution_handler import ExecutionHandler\n'), ((5097, 5200), 'resource_mapping.result_analyzer.ResultAnalyzer', 'ResultAnalyzer', ([], {'input': 'output_exec', 'output': 'output_pipline', 'output_agg': 'agg_results', 'output_part': 'None'}), '(input=output_exec, output=output_pipline, output_agg=\n agg_results, output_part=None)\n', (5111, 5200), False, 'from resource_mapping.result_analyzer import ResultAnalyzer\n'), ((5250, 5337), 'aggregator.aggregator.AggregatorResults', 'AggregatorResults', ([], {'input': 'agg_results', 'output': 'output_pipline', 'job_dict': 'agg_job_dict'}), '(input=agg_results, output=output_pipline, job_dict=\n agg_job_dict)\n', (5267, 5337), False, 'from aggregator.aggregator import Aggregator, AggregatorResults\n'), ((2048, 2251), 'json.dump', 'json.dump', (["{'date': now_str, 'circuit_type': circuit_type, 'n_circuits': n_circuits,\n 'n_qubits': n_qubits, 'shots': shots, 'backend': backend_dict, 'data': data\n }", 'f'], {'indent': '(4)', 'default': 'json_serial'}), "({'date': now_str, 'circuit_type': circuit_type, 'n_circuits':\n n_circuits, 'n_qubits': n_qubits, 'shots': shots, 'backend':\n backend_dict, 
'data': data}, f, indent=4, default=json_serial)\n", (2057, 2251), False, 'import json\n'), ((3217, 3244), 'evaluate.circuit_gen.circ_gen', 'circ_gen', (['type', 'n_qubits', '(1)'], {}), '(type, n_qubits, 1)\n', (3225, 3244), False, 'from evaluate.circuit_gen import circ_gen\n'), ((3519, 3553), 'qiskit.execute', 'execute', (['circ', 'statevector_backend'], {}), '(circ, statevector_backend)\n', (3526, 3553), False, 'from qiskit import execute\n'), ((3663, 3691), 'evaluate.util.sv_to_probability', 'sv_to_probability', (['sv_result'], {}), '(sv_result)\n', (3680, 3691), False, 'from evaluate.util import counts_to_probability, sv_to_probability\n'), ((3925, 3946), 'resource_mapping.backend_chooser.Backend_Data', 'Backend_Data', (['backend'], {}), '(backend)\n', (3937, 3946), False, 'from resource_mapping.backend_chooser import Backend_Data\n')]
|
#!/usr/bin/env python3
import argparse
import os
import time
import numpy as np
import pandas as pd
import torch
from torch import optim
import callbacks as cb
import evaluate
import utils
import visual_plt
from continual_learner import ContinualLearner
from data import get_multitask_experiment
from encoder import Classifier
from exemplars import ExemplarHandler
from param_stamp import get_param_stamp, get_param_stamp_from_args
from param_values import set_default_values
from replayer import Replayer
from train import train_cl
from vae_models import AutoEncoder
parser = argparse.ArgumentParser('./main.py', description='Run individual continual learning experiment.')
parser.add_argument('--get-stamp', action='store_true', help='print param-stamp & exit')
parser.add_argument('--seed', type=int, default=0, help='random seed (for each random-module used)')
parser.add_argument('--no-gpus', action='store_false', dest='cuda', help="don't use GPUs")
parser.add_argument('--data-dir', type=str, default='./datasets', dest='d_dir', help="default: %(default)s")
parser.add_argument('--plot-dir', type=str, default='./plots', dest='p_dir', help="default: %(default)s")
parser.add_argument('--results-dir', type=str, default='./results', dest='r_dir', help="default: %(default)s")
# experimental task parameters
task_params = parser.add_argument_group('Task Parameters')
task_params.add_argument('--experiment', type=str, default='splitMNIST', choices=['permMNIST', 'splitMNIST'])
task_params.add_argument('--scenario', type=str, default='class', choices=['task', 'domain', 'class'])
task_params.add_argument('--tasks', type=int, help='number of tasks')
# specify loss functions to be used
loss_params = parser.add_argument_group('Loss Parameters')
loss_params.add_argument('--bce', action='store_true', help="use binary (instead of multi-class) classication loss")
loss_params.add_argument('--bce-distill', action='store_true', help='distilled loss on previous classes for new'
' examples (only if --bce & --scenario="class")')
# model architecture parameters
model_params = parser.add_argument_group('Model Parameters')
model_params.add_argument('--fc-layers', type=int, default=3, dest='fc_lay', help="# of fully-connected layers")
model_params.add_argument('--fc-units', type=int, metavar="N", help="# of units in first fc-layers")
model_params.add_argument('--fc-drop', type=float, default=0., help="dropout probability for fc-units")
model_params.add_argument('--fc-bn', type=str, default="no", help="use batch-norm in the fc-layers (no|yes)")
model_params.add_argument('--fc-nl', type=str, default="relu", choices=["relu", "leakyrelu"])
model_params.add_argument('--singlehead', action='store_true', help="for Task-IL: use a 'single-headed' output layer "
" (instead of a 'multi-headed' one)")
# training hyperparameters / initialization
train_params = parser.add_argument_group('Training Parameters')
train_params.add_argument('--iters', type=int, help="# batches to optimize solver")
train_params.add_argument('--lr', type=float, help="learning rate")
train_params.add_argument('--batch', type=int, default=128, help="batch-size")
train_params.add_argument('--optimizer', type=str, choices=['adam', 'adam_reset', 'sgd'], default='adam')
# "memory replay" parameters
replay_params = parser.add_argument_group('Replay Parameters')
replay_params.add_argument('--feedback', action="store_true", help="equip model with feedback connections")
replay_params.add_argument('--z-dim', type=int, default=100, help='size of latent representation (default: 100)')
replay_choices = ['offline', 'exact', 'generative', 'none', 'current', 'exemplars']
replay_params.add_argument('--replay', type=str, default='none', choices=replay_choices)
replay_params.add_argument('--distill', action='store_true', help="use distillation for replay?")
replay_params.add_argument('--temp', type=float, default=2., dest='temp', help="temperature for distillation")
replay_params.add_argument('--agem', action='store_true', help="use gradient of replay as inequality constraint")
# -generative model parameters (if separate model)
genmodel_params = parser.add_argument_group('Generative Model Parameters')
genmodel_params.add_argument('--g-z-dim', type=int, default=100, help='size of latent representation (default: 100)')
genmodel_params.add_argument('--g-fc-lay', type=int, help='[fc_layers] in generator (default: same as classifier)')
genmodel_params.add_argument('--g-fc-uni', type=int, help='[fc_units] in generator (default: same as classifier)')
# - hyper-parameters for generative model (if separate model)
gen_params = parser.add_argument_group('Generator Hyper Parameters')
gen_params.add_argument('--g-iters', type=int, help="# batches to train generator (default: as classifier)")
gen_params.add_argument('--lr-gen', type=float, help="learning rate generator (default: lr)")
# "memory allocation" parameters
cl_params = parser.add_argument_group('Memory Allocation Parameters')
cl_params.add_argument('--ewc', action='store_true', help="use 'EWC' (Kirkpatrick et al, 2017)")
cl_params.add_argument('--lambda', type=float, dest="ewc_lambda", help="--> EWC: regularisation strength")
cl_params.add_argument('--fisher-n', type=int, help="--> EWC: sample size estimating Fisher Information")
cl_params.add_argument('--online', action='store_true', help="--> EWC: perform 'online EWC'")
cl_params.add_argument('--gamma', type=float, help="--> EWC: forgetting coefficient (for 'online EWC')")
cl_params.add_argument('--emp-fi', action='store_true', help="--> EWC: estimate FI with provided labels")
cl_params.add_argument('--si', action='store_true', help="use 'Synaptic Intelligence' (Zenke, Poole et al, 2017)")
cl_params.add_argument('--c', type=float, dest="si_c", help="--> SI: regularisation strength")
cl_params.add_argument('--epsilon', type=float, default=0.1, dest="epsilon", help="--> SI: dampening parameter")
cl_params.add_argument('--xdg', action='store_true', help="Use 'Context-dependent Gating' (Masse et al, 2018)")
cl_params.add_argument('--gating-prop', type=float, metavar="PROP", help="--> XdG: prop neurons per layer to gate")
# data storage ('exemplars') parameters
store_params = parser.add_argument_group('Data Storage Parameters')
store_params.add_argument('--icarl', action='store_true', help="bce-distill, use-exemplars & add-exemplars")
store_params.add_argument('--use-exemplars', action='store_true', help="use exemplars for classification")
store_params.add_argument('--add-exemplars', action='store_true', help="add exemplars to current task's training set")
store_params.add_argument('--budget', type=int, default=1000, dest="budget", help="how many samples can be stored?")
store_params.add_argument('--herding', action='store_true',
help="use herding to select stored data (instead of random)")
store_params.add_argument('--norm-exemplars', action='store_true', help="normalize features/averages of exemplars")
# evaluation parameters
eval_params = parser.add_argument_group('Evaluation Parameters')
eval_params.add_argument('--time', action='store_true', help="keep track of total training time")
eval_params.add_argument('--metrics', action='store_true', help="calculate additional metrics (e.g., BWT, forgetting)")
eval_params.add_argument('--pdf', action='store_true', help="generate pdf with results")
eval_params.add_argument('--visdom', action='store_true', help="use visdom for on-the-fly plots")
eval_params.add_argument('--log-per-task', action='store_true', help="set all visdom-logs to [iters]")
eval_params.add_argument('--loss-log', type=int, default=200, metavar="N", help="# iters after which to plot loss")
eval_params.add_argument('--prec-log', type=int, default=200, metavar="N", help="# iters after which to plot precision")
eval_params.add_argument('--prec-n', type=int, default=1024, help="# samples for evaluating solver's precision")
eval_params.add_argument('--sample-log', type=int, default=500, metavar="N", help="# iters after which to plot samples")
eval_params.add_argument('--sample-n', type=int, default=64, help="# images to show")
def run(args, verbose=False):
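    """Runs a single continual-learning experiment as specified by the parsed arguments."""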
# Set default arguments & check for incompatible options
args.lr_gen = args.lr if args.lr_gen is None else args.lr_gen
args.g_iters = args.iters if args.g_iters is None else args.g_iters
args.g_fc_lay = args.fc_lay if args.g_fc_lay is None else args.g_fc_lay
args.g_fc_uni = args.fc_units if args.g_fc_uni is None else args.g_fc_uni
# -if [log_per_task], reset all logs
if args.log_per_task:
args.prec_log = args.iters
args.loss_log = args.iters
args.sample_log = args.iters
# -if [iCaRL] is selected, select all accompanying options
if hasattr(args, "icarl") and args.icarl:
args.use_exemplars = True
args.add_exemplars = True
args.bce = True
args.bce_distill = True
# -if XdG is selected but not the Task-IL scenario, give error
if (not args.scenario == "task") and args.xdg:
raise ValueError("'XdG' is only compatible with the Task-IL scenario.")
# -if EWC, SI, XdG, A-GEM or iCaRL is selected together with 'feedback', give error
if args.feedback and (args.ewc or args.si or args.xdg or args.icarl or args.agem):
raise NotImplementedError("EWC, SI, XdG, A-GEM and iCaRL are not supported with feedback connections.")
# -if A-GEM is selected without any replay, give warning
if args.agem and args.replay == "none":
raise Warning("The '--agem' flag is selected, but without any type of replay. "
"For the original A-GEM method, also select --replay='exemplars'.")
# -if EWC, SI, XdG, A-GEM or iCaRL is selected together with offline-replay, give error
if args.replay == "offline" and (args.ewc or args.si or args.xdg or args.icarl or args.agem):
raise NotImplementedError("Offline replay cannot be combined with EWC, SI, XdG, A-GEM or iCaRL.")
# -if binary classification loss is selected together with 'feedback', give error
if args.feedback and args.bce:
raise NotImplementedError("Binary classification loss not supported with feedback connections.")
# -if XdG is selected together with both replay and EWC, give error (either one of them alone with XdG is fine)
if (args.xdg and args.gating_prop > 0) and (not args.replay == "none") and (args.ewc or args.si):
raise NotImplementedError("XdG is not supported with both '{}' replay and EWC / SI.".format(args.replay))
# --> problem is that applying different task-masks interferes with gradient calculation
# (should be possible to overcome by calculating backward step on EWC/SI-loss also for each mask separately)
# -if 'BCEdistill' is selected for other than scenario=="class", give error
if args.bce_distill and not args.scenario == "class":
raise ValueError("BCE-distill can only be used for class-incremental learning.")
# -create plots- and results-directories if needed
if not os.path.isdir(args.r_dir):
os.mkdir(args.r_dir)
if args.pdf and not os.path.isdir(args.p_dir):
os.mkdir(args.p_dir)
scenario = args.scenario
# If Task-IL scenario is chosen with single-headed output layer, set args.scenario to "domain"
# (but note that when XdG is used, task-identity information is being used so the actual scenario is still Task-IL)
if args.singlehead and args.scenario == "task":
scenario = "domain"
# If only want param-stamp, get it printed to screen and exit
if hasattr(args, "get_stamp") and args.get_stamp:
print(get_param_stamp_from_args(args=args))
exit()
# Use cuda?
cuda = torch.cuda.is_available() and args.cuda
device = torch.device("cuda" if cuda else "cpu")
if verbose:
print("CUDA is {}used".format("" if cuda else "NOT(!!) "))
# Set random seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
# -------------------------------------------------------------------------------------------------#
# ----------------#
# ----- DATA -----#
# ----------------#
# Prepare data for chosen experiment
if verbose:
print("\nPreparing the data...")
(train_datasets, test_datasets), config, classes_per_task = get_multitask_experiment(
name=args.experiment, scenario=scenario, tasks=args.tasks, data_dir=args.d_dir,
verbose=verbose, exception=True if args.seed == 0 else False,
)
# -------------------------------------------------------------------------------------------------#
# ------------------------------#
# ----- MODEL (CLASSIFIER) -----#
# ------------------------------#
# Define main model (i.e., classifier, if requested with feedback connections)
if args.feedback:
model = AutoEncoder(
image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
fc_layers=args.fc_lay, fc_units=args.fc_units, z_dim=args.z_dim,
fc_drop=args.fc_drop, fc_bn=True if args.fc_bn == "yes" else False, fc_nl=args.fc_nl,
).to(device)
model.lamda_pl = 1. # --> to make that this VAE is also trained to classify
else:
model = Classifier(
image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
fc_layers=args.fc_lay, fc_units=args.fc_units, fc_drop=args.fc_drop, fc_nl=args.fc_nl,
fc_bn=True if args.fc_bn == "yes" else False,
excit_buffer=True if args.xdg and args.gating_prop > 0 else False,
binaryCE=args.bce, binaryCE_distill=args.bce_distill, AGEM=args.agem,
).to(device)
# Define optimizer (only include parameters that "requires_grad")
model.optim_list = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': args.lr}]
model.optim_type = args.optimizer
if model.optim_type in ("adam", "adam_reset"):
model.optimizer = optim.Adam(model.optim_list, betas=(0.9, 0.999))
elif model.optim_type == "sgd":
model.optimizer = optim.SGD(model.optim_list)
else:
raise ValueError("Unrecognized optimizer, '{}' is not currently a valid option".format(args.optimizer))
# -------------------------------------------------------------------------------------------------#
# ----------------------------------#
# ----- CL-STRATEGY: EXEMPLARS -----#
# ----------------------------------#
    # Store in the model whether, how many, and in what way to store exemplars
if isinstance(model, ExemplarHandler) and (args.use_exemplars or args.add_exemplars or args.replay == "exemplars"):
model.memory_budget = args.budget
model.norm_exemplars = args.norm_exemplars
model.herding = args.herding
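    # (note: herding greedily selects exemplars whose feature mean best approximates
    #  the class mean, in the spirit of iCaRL; without herding, exemplars are
    #  presumably sampled at random)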
# -------------------------------------------------------------------------------------------------#
# -----------------------------------#
# ----- CL-STRATEGY: ALLOCATION -----#
# -----------------------------------#
# Elastic Weight Consolidation (EWC)
if isinstance(model, ContinualLearner):
model.ewc_lambda = args.ewc_lambda if args.ewc else 0
if args.ewc:
model.fisher_n = args.fisher_n
model.gamma = args.gamma
model.online = args.online
model.emp_FI = args.emp_fi
    # Synaptic Intelligence (SI)
if isinstance(model, ContinualLearner):
model.si_c = args.si_c if args.si else 0
if args.si:
model.epsilon = args.epsilon
# XdG: create for every task a "mask" for each hidden fully connected layer
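    # (sketch of the idea: for every task, a random fraction [gating_prop] of the
    #  units in each hidden fully-connected layer is gated off via that layer's
    #  excit_buffer, so different tasks use partly disjoint sub-networks)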
if isinstance(model, ContinualLearner) and (args.xdg and args.gating_prop > 0):
mask_dict = {}
excit_buffer_list = []
for task_id in range(args.tasks):
mask_dict[task_id + 1] = {}
for i in range(model.fcE.layers):
layer = getattr(model.fcE, "fcLayer{}".format(i + 1)).linear
if task_id == 0:
excit_buffer_list.append(layer.excit_buffer)
n_units = len(layer.excit_buffer)
gated_units = np.random.choice(n_units, size=int(args.gating_prop * n_units), replace=False)
mask_dict[task_id + 1][i] = gated_units
model.mask_dict = mask_dict
model.excit_buffer_list = excit_buffer_list
# -------------------------------------------------------------------------------------------------#
# -------------------------------#
# ----- CL-STRATEGY: REPLAY -----#
# -------------------------------#
# Use distillation loss (i.e., soft targets) for replayed data? (and set temperature)
if isinstance(model, Replayer):
model.replay_targets = "soft" if args.distill else "hard"
model.KD_temp = args.temp
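    # (with "soft" targets, replayed inputs are trained to match the previous
    #  model's temperature-softened output distribution instead of hard labels)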
# If needed, specify separate model for the generator
train_gen = True if (args.replay == "generative" and not args.feedback) else False
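    # (generative replay: a separate VAE is trained alongside the classifier and
    #  sampled during later tasks to rehearse pseudo-data from earlier tasks; with
    #  feedback connections the main model doubles as its own generator, so no
    #  separate generator is needed)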
if train_gen:
# -specify architecture
generator = AutoEncoder(
image_size=config['size'], image_channels=config['channels'],
fc_layers=args.g_fc_lay, fc_units=args.g_fc_uni, z_dim=args.g_z_dim, classes=config['classes'],
fc_drop=args.fc_drop, fc_bn=True if args.fc_bn == "yes" else False, fc_nl=args.fc_nl,
).to(device)
# -set optimizer(s)
generator.optim_list = [
{'params': filter(lambda p: p.requires_grad, generator.parameters()), 'lr': args.lr_gen}]
generator.optim_type = args.optimizer
if generator.optim_type in ("adam", "adam_reset"):
generator.optimizer = optim.Adam(generator.optim_list, betas=(0.9, 0.999))
elif generator.optim_type == "sgd":
generator.optimizer = optim.SGD(generator.optim_list)
else:
generator = None
# -------------------------------------------------------------------------------------------------#
# ---------------------#
# ----- REPORTING -----#
# ---------------------#
# Get parameter-stamp (and print on screen)
if verbose:
print("\nParameter-stamp...")
param_stamp = get_param_stamp(
args, model.name, verbose=verbose, replay=True if (not args.replay == "none") else False,
replay_model_name=generator.name if (args.replay == "generative" and not args.feedback) else None,
)
# Print some model-characteristics on the screen
if verbose:
# -main model
utils.print_model_info(model, title="MAIN MODEL")
# -generator
if generator is not None:
utils.print_model_info(generator, title="GENERATOR")
# Prepare for keeping track of statistics required for metrics (also used for plotting in pdf)
if args.pdf or args.metrics:
# -define [metrics_dict] to keep track of performance during training for storing & for later plotting in pdf
metrics_dict = evaluate.initiate_metrics_dict(n_tasks=args.tasks, scenario=args.scenario)
        # -evaluate randomly initialized model on all tasks & store accuracies in [metrics_dict] (for calculating metrics)
if not args.use_exemplars:
metrics_dict = evaluate.intial_accuracy(model, test_datasets, metrics_dict,
classes_per_task=classes_per_task, scenario=scenario,
test_size=None, no_task_mask=False)
else:
metrics_dict = None
# Prepare for plotting in visdom
# -visdom-settings
if args.visdom:
env_name = "{exp}{tasks}-{scenario}".format(exp=args.experiment, tasks=args.tasks, scenario=args.scenario)
graph_name = "{fb}{replay}{syn}{ewc}{xdg}{icarl}{bud}".format(
fb="1M-" if args.feedback else "",
replay="{}{}{}".format(args.replay, "D" if args.distill else "", "-aGEM" if args.agem else ""),
syn="-si{}".format(args.si_c) if args.si else "",
ewc="-ewc{}{}".format(args.ewc_lambda,
"-O{}".format(args.gamma) if args.online else "") if args.ewc else "",
xdg="" if (not args.xdg) or args.gating_prop == 0 else "-XdG{}".format(args.gating_prop),
icarl="-iCaRL" if (args.use_exemplars and args.add_exemplars and args.bce and args.bce_distill) else "",
bud="-bud{}".format(args.budget) if (
args.use_exemplars or args.add_exemplars or args.replay == "exemplars"
) else "",
)
visdom = {'env': env_name, 'graph': graph_name}
else:
visdom = None
# -------------------------------------------------------------------------------------------------#
# ---------------------#
# ----- CALLBACKS -----#
# ---------------------#
# Callbacks for reporting on and visualizing loss
generator_loss_cbs = [
cb._VAE_loss_cb(log=args.loss_log, visdom=visdom, model=model if args.feedback else generator, tasks=args.tasks,
iters_per_task=args.iters if args.feedback else args.g_iters,
replay=False if args.replay == "none" else True)
] if (train_gen or args.feedback) else [None]
solver_loss_cbs = [
cb._solver_loss_cb(log=args.loss_log, visdom=visdom, model=model, tasks=args.tasks,
iters_per_task=args.iters, replay=False if args.replay == "none" else True)
] if (not args.feedback) else [None]
# Callbacks for evaluating and plotting generated / reconstructed samples
sample_cbs = [
cb._sample_cb(log=args.sample_log, visdom=visdom, config=config, test_datasets=test_datasets,
sample_size=args.sample_n, iters_per_task=args.iters if args.feedback else args.g_iters)
] if (train_gen or args.feedback) else [None]
# Callbacks for reporting and visualizing accuracy
    # -visdom (i.e., after every [prec_log] iterations)
eval_cbs = [
cb._eval_cb(log=args.prec_log, test_datasets=test_datasets, visdom=visdom,
iters_per_task=args.iters, test_size=args.prec_n, classes_per_task=classes_per_task,
scenario=scenario, with_exemplars=False)
] if (not args.use_exemplars) else [None]
# --> during training on a task, evaluation cannot be with exemplars as those are only selected after training
# (instead, evaluation for visdom is only done after each task, by including callback-function into [metric_cbs])
    # Callbacks for calculating statistics required for metrics
    # -pdf / reporting: summary plots (i.e., only after each task) (when using exemplars, also for visdom)
metric_cbs = [
cb._metric_cb(log=args.iters, test_datasets=test_datasets,
classes_per_task=classes_per_task, metrics_dict=metrics_dict, scenario=scenario,
iters_per_task=args.iters, with_exemplars=args.use_exemplars),
cb._eval_cb(log=args.iters, test_datasets=test_datasets, visdom=visdom,
iters_per_task=args.iters, test_size=args.prec_n, classes_per_task=classes_per_task,
scenario=scenario, with_exemplars=True) if args.use_exemplars else None
]
# -------------------------------------------------------------------------------------------------#
# --------------------#
# ----- TRAINING -----#
# --------------------#
if verbose:
print("\nTraining...")
# Keep track of training-time
start = time.time()
# Train model
train_cl(
model, train_datasets, replay_mode=args.replay, scenario=scenario, classes_per_task=classes_per_task,
iters=args.iters, batch_size=args.batch,
generator=generator, gen_iters=args.g_iters, gen_loss_cbs=generator_loss_cbs,
sample_cbs=sample_cbs, eval_cbs=eval_cbs, loss_cbs=generator_loss_cbs if args.feedback else solver_loss_cbs,
metric_cbs=metric_cbs, use_exemplars=args.use_exemplars, add_exemplars=args.add_exemplars,
param_stamp=param_stamp,
)
# Get total training-time in seconds, and write to file
if args.time:
training_time = time.time() - start
time_file = open("{}/time-{}.txt".format(args.r_dir, param_stamp), 'w')
time_file.write('{}\n'.format(training_time))
time_file.close()
# -------------------------------------------------------------------------------------------------#
# ----------------------#
# ----- EVALUATION -----#
# ----------------------#
if verbose:
print("\n\nEVALUATION RESULTS:")
# Evaluate precision of final model on full test-set
precs = [evaluate.validate(
model, test_datasets[i], verbose=False, test_size=None, task=i + 1, with_exemplars=False,
allowed_classes=list(range(classes_per_task * i, classes_per_task * (i + 1))) if scenario == "task" else None
) for i in range(args.tasks)]
average_precs = sum(precs) / args.tasks
# -print on screen
if verbose:
print("\n Precision on test-set{}:".format(" (softmax classification)" if args.use_exemplars else ""))
for i in range(args.tasks):
print(
" - Task {} [{}-{}]: {:.4f}".format(i + 1, classes_per_task * i, classes_per_task * (i + 1) - 1, precs[i]))
print('=> Average precision over all {} tasks: {:.4f}\n'.format(args.tasks, average_precs))
# -with exemplars
if args.use_exemplars:
precs = [evaluate.validate(
model, test_datasets[i], verbose=False, test_size=None, task=i + 1, with_exemplars=True,
allowed_classes=list(
range(classes_per_task * i, classes_per_task * (i + 1))) if scenario == "task" else None
) for i in range(args.tasks)]
average_precs_ex = sum(precs) / args.tasks
# -print on screen
if verbose:
print(" Precision on test-set (classification using exemplars):")
for i in range(args.tasks):
print(" - Task {}: {:.4f}".format(i + 1, precs[i]))
print('=> Average precision over all {} tasks: {:.4f}\n'.format(args.tasks, average_precs_ex))
if args.metrics:
# Accuracy matrix
if args.scenario in ('task', 'domain'):
R = pd.DataFrame(data=metrics_dict['acc per task'],
index=['after task {}'.format(i + 1) for i in range(args.tasks)])
R.loc['at start'] = metrics_dict['initial acc per task'] if (not args.use_exemplars) else [
'NA' for _ in range(args.tasks)
]
R = R.reindex(['at start'] + ['after task {}'.format(i + 1) for i in range(args.tasks)])
BWTs = [(R.loc['after task {}'.format(args.tasks), 'task {}'.format(i + 1)] - \
R.loc['after task {}'.format(i + 1), 'task {}'.format(i + 1)]) for i in range(args.tasks - 1)]
FWTs = [0. if args.use_exemplars else (
R.loc['after task {}'.format(i + 1), 'task {}'.format(i + 2)] - R.loc[
'at start', 'task {}'.format(i + 2)]
) for i in range(args.tasks - 1)]
forgetting = []
for i in range(args.tasks - 1):
forgetting.append(max(R.iloc[1:args.tasks, i]) - R.iloc[args.tasks, i])
R.loc['FWT (per task)'] = ['NA'] + FWTs
R.loc['BWT (per task)'] = BWTs + ['NA']
R.loc['F (per task)'] = forgetting + ['NA']
BWT = sum(BWTs) / (args.tasks - 1)
F = sum(forgetting) / (args.tasks - 1)
FWT = sum(FWTs) / (args.tasks - 1)
metrics_dict['BWT'] = BWT
metrics_dict['F'] = F
metrics_dict['FWT'] = FWT
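            # (definitions used above: BWT averages acc(after final task, task i)
            #  minus acc(right after task i, task i); F averages the drop from each
            #  task's best accuracy during training to its final accuracy; FWT
            #  averages acc on task i+1 right after task i minus its accuracy at
            #  initialization)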
# -print on screen
if verbose:
print("Accuracy matrix")
print(R)
print("\nFWT = {:.4f}".format(FWT))
print("BWT = {:.4f}".format(BWT))
print(" F = {:.4f}\n\n".format(F))
else:
if verbose:
# Accuracy matrix based only on classes in that task (i.e., evaluation as if Task-IL scenario)
R = pd.DataFrame(data=metrics_dict['acc per task (only classes in task)'],
index=['after task {}'.format(i + 1) for i in range(args.tasks)])
R.loc['at start'] = metrics_dict[
'initial acc per task (only classes in task)'
] if not args.use_exemplars else ['NA' for _ in range(args.tasks)]
R = R.reindex(['at start'] + ['after task {}'.format(i + 1) for i in range(args.tasks)])
print("Accuracy matrix, based on only classes in that task ('as if Task-IL scenario')")
print(R)
# Accuracy matrix, always based on all classes
R = pd.DataFrame(data=metrics_dict['acc per task (all classes)'],
index=['after task {}'.format(i + 1) for i in range(args.tasks)])
R.loc['at start'] = metrics_dict[
'initial acc per task (only classes in task)'
] if not args.use_exemplars else ['NA' for _ in range(args.tasks)]
R = R.reindex(['at start'] + ['after task {}'.format(i + 1) for i in range(args.tasks)])
print("\nAccuracy matrix, always based on all classes")
print(R)
# Accuracy matrix, based on all classes thus far
R = pd.DataFrame(data=metrics_dict['acc per task (all classes up to trained task)'],
index=['after task {}'.format(i + 1) for i in range(args.tasks)])
print("\nAccuracy matrix, based on all classes up to the trained task")
print(R)
# Accuracy matrix, based on all classes up to the task being evaluated
# (this is the accuracy-matrix used for calculating the metrics in the Class-IL scenario)
R = pd.DataFrame(data=metrics_dict['acc per task (all classes up to evaluated task)'],
index=['after task {}'.format(i + 1) for i in range(args.tasks)])
R.loc['at start'] = metrics_dict[
'initial acc per task (only classes in task)'
] if not args.use_exemplars else ['NA' for _ in range(args.tasks)]
R = R.reindex(['at start'] + ['after task {}'.format(i + 1) for i in range(args.tasks)])
BWTs = [(R.loc['after task {}'.format(args.tasks), 'task {}'.format(i + 1)] - \
R.loc['after task {}'.format(i + 1), 'task {}'.format(i + 1)]) for i in range(args.tasks - 1)]
FWTs = [0. if args.use_exemplars else (
R.loc['after task {}'.format(i + 1), 'task {}'.format(i + 2)] - R.loc[
'at start', 'task {}'.format(i + 2)]
) for i in range(args.tasks - 1)]
forgetting = []
for i in range(args.tasks - 1):
forgetting.append(max(R.iloc[1:args.tasks, i]) - R.iloc[args.tasks, i])
R.loc['FWT (per task)'] = ['NA'] + FWTs
R.loc['BWT (per task)'] = BWTs + ['NA']
R.loc['F (per task)'] = forgetting + ['NA']
BWT = sum(BWTs) / (args.tasks - 1)
F = sum(forgetting) / (args.tasks - 1)
FWT = sum(FWTs) / (args.tasks - 1)
metrics_dict['BWT'] = BWT
metrics_dict['F'] = F
metrics_dict['FWT'] = FWT
# -print on screen
if verbose:
print("\nAccuracy matrix, based on all classes up to the evaluated task")
print(R)
print("\n=> FWT = {:.4f}".format(FWT))
print("=> BWT = {:.4f}".format(BWT))
print("=> F = {:.4f}\n".format(F))
if verbose and args.time:
print("=> Total training time = {:.1f} seconds\n".format(training_time))
# -------------------------------------------------------------------------------------------------#
# ------------------#
# ----- OUTPUT -----#
# ------------------#
# Average precision on full test set
output_file = open("{}/prec-{}.txt".format(args.r_dir, param_stamp), 'w')
output_file.write('{}\n'.format(average_precs_ex if args.use_exemplars else average_precs))
output_file.close()
# -metrics-dict
if args.metrics:
file_name = "{}/dict-{}".format(args.r_dir, param_stamp)
utils.save_object(metrics_dict, file_name)
# -------------------------------------------------------------------------------------------------#
# --------------------#
# ----- PLOTTING -----#
# --------------------#
# If requested, generate pdf
if args.pdf:
# -open pdf
plot_name = "{}/{}.pdf".format(args.p_dir, param_stamp)
pp = visual_plt.open_pdf(plot_name)
# -show samples and reconstructions (either from main model or from separate generator)
if args.feedback or args.replay == "generative":
evaluate.show_samples(model if args.feedback else generator, config, size=args.sample_n, pdf=pp)
for i in range(args.tasks):
evaluate.show_reconstruction(model if args.feedback else generator, test_datasets[i], config, pdf=pp,
task=i + 1)
# -show metrics reflecting progression during training
figure_list = [] # -> create list to store all figures to be plotted
# -generate all figures (and store them in [figure_list])
key = "acc per task ({} task)".format("all classes up to trained" if scenario == 'class' else "only classes in")
plot_list = []
for i in range(args.tasks):
plot_list.append(metrics_dict[key]["task {}".format(i + 1)])
figure = visual_plt.plot_lines(
plot_list, x_axes=metrics_dict["x_task"],
line_names=['task {}'.format(i + 1) for i in range(args.tasks)]
)
figure_list.append(figure)
figure = visual_plt.plot_lines(
[metrics_dict["average"]], x_axes=metrics_dict["x_task"],
line_names=['average all tasks so far']
)
figure_list.append(figure)
        # -add figures to pdf
for figure in figure_list:
pp.savefig(figure)
# -close pdf
pp.close()
# -print name of generated plot on screen
if verbose:
print("\nGenerated plot: {}\n".format(plot_name))
if __name__ == '__main__':
# -load input-arguments
args = parser.parse_args()
# -set default-values for certain arguments based on chosen scenario & experiment
args = set_default_values(args)
# -run experiment
print(f"Running with args:\n{args}")
run(args, verbose=True)
|
[
"evaluate.intial_accuracy",
"evaluate.show_reconstruction",
"evaluate.show_samples",
"evaluate.initiate_metrics_dict"
] |
[((580, 682), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""./main.py"""'], {'description': '"""Run individual continual learning experiment."""'}), "('./main.py', description=\n 'Run individual continual learning experiment.')\n", (603, 682), False, 'import argparse\n'), ((11920, 11959), 'torch.device', 'torch.device', (["('cuda' if cuda else 'cpu')"], {}), "('cuda' if cuda else 'cpu')\n", (11932, 11959), False, 'import torch\n'), ((12071, 12096), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (12085, 12096), True, 'import numpy as np\n'), ((12101, 12129), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (12118, 12129), False, 'import torch\n'), ((12527, 12703), 'data.get_multitask_experiment', 'get_multitask_experiment', ([], {'name': 'args.experiment', 'scenario': 'scenario', 'tasks': 'args.tasks', 'data_dir': 'args.d_dir', 'verbose': 'verbose', 'exception': '(True if args.seed == 0 else False)'}), '(name=args.experiment, scenario=scenario, tasks=\n args.tasks, data_dir=args.d_dir, verbose=verbose, exception=True if \n args.seed == 0 else False)\n', (12551, 12703), False, 'from data import get_multitask_experiment\n'), ((18416, 18626), 'param_stamp.get_param_stamp', 'get_param_stamp', (['args', 'model.name'], {'verbose': 'verbose', 'replay': "(True if not args.replay == 'none' else False)", 'replay_model_name': "(generator.name if args.replay == 'generative' and not args.feedback else None)"}), "(args, model.name, verbose=verbose, replay=True if not args.\n replay == 'none' else False, replay_model_name=generator.name if args.\n replay == 'generative' and not args.feedback else None)\n", (18431, 18626), False, 'from param_stamp import get_param_stamp, get_param_stamp_from_args\n'), ((23745, 23756), 'time.time', 'time.time', ([], {}), '()\n', (23754, 23756), False, 'import time\n'), ((23779, 24262), 'train.train_cl', 'train_cl', (['model', 'train_datasets'], {'replay_mode': 'args.replay', 'scenario': 'scenario', 'classes_per_task': 'classes_per_task', 'iters': 'args.iters', 'batch_size': 'args.batch', 'generator': 'generator', 'gen_iters': 'args.g_iters', 'gen_loss_cbs': 'generator_loss_cbs', 'sample_cbs': 'sample_cbs', 'eval_cbs': 'eval_cbs', 'loss_cbs': '(generator_loss_cbs if args.feedback else solver_loss_cbs)', 'metric_cbs': 'metric_cbs', 'use_exemplars': 'args.use_exemplars', 'add_exemplars': 'args.add_exemplars', 'param_stamp': 'param_stamp'}), '(model, train_datasets, replay_mode=args.replay, scenario=scenario,\n classes_per_task=classes_per_task, iters=args.iters, batch_size=args.\n batch, generator=generator, gen_iters=args.g_iters, gen_loss_cbs=\n generator_loss_cbs, sample_cbs=sample_cbs, eval_cbs=eval_cbs, loss_cbs=\n generator_loss_cbs if args.feedback else solver_loss_cbs, metric_cbs=\n metric_cbs, use_exemplars=args.use_exemplars, add_exemplars=args.\n add_exemplars, param_stamp=param_stamp)\n', (23787, 24262), False, 'from train import train_cl\n'), ((34940, 34964), 'param_values.set_default_values', 'set_default_values', (['args'], {}), '(args)\n', (34958, 34964), False, 'from param_values import set_default_values\n'), ((11186, 11211), 'os.path.isdir', 'os.path.isdir', (['args.r_dir'], {}), '(args.r_dir)\n', (11199, 11211), False, 'import os\n'), ((11221, 11241), 'os.mkdir', 'os.mkdir', (['args.r_dir'], {}), '(args.r_dir)\n', (11229, 11241), False, 'import os\n'), ((11301, 11321), 'os.mkdir', 'os.mkdir', (['args.p_dir'], {}), '(args.p_dir)\n', (11309, 11321), False, 'import os\n'), ((11867, 
11892), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11890, 11892), False, 'import torch\n'), ((12151, 12184), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (12173, 12184), False, 'import torch\n'), ((14225, 14273), 'torch.optim.Adam', 'optim.Adam', (['model.optim_list'], {'betas': '(0.9, 0.999)'}), '(model.optim_list, betas=(0.9, 0.999))\n', (14235, 14273), False, 'from torch import optim\n'), ((18744, 18793), 'utils.print_model_info', 'utils.print_model_info', (['model'], {'title': '"""MAIN MODEL"""'}), "(model, title='MAIN MODEL')\n", (18766, 18793), False, 'import utils\n'), ((19188, 19262), 'evaluate.initiate_metrics_dict', 'evaluate.initiate_metrics_dict', ([], {'n_tasks': 'args.tasks', 'scenario': 'args.scenario'}), '(n_tasks=args.tasks, scenario=args.scenario)\n', (19218, 19262), False, 'import evaluate\n'), ((22930, 23140), 'callbacks._metric_cb', 'cb._metric_cb', ([], {'log': 'args.iters', 'test_datasets': 'test_datasets', 'classes_per_task': 'classes_per_task', 'metrics_dict': 'metrics_dict', 'scenario': 'scenario', 'iters_per_task': 'args.iters', 'with_exemplars': 'args.use_exemplars'}), '(log=args.iters, test_datasets=test_datasets, classes_per_task\n =classes_per_task, metrics_dict=metrics_dict, scenario=scenario,\n iters_per_task=args.iters, with_exemplars=args.use_exemplars)\n', (22943, 23140), True, 'import callbacks as cb\n'), ((32687, 32729), 'utils.save_object', 'utils.save_object', (['metrics_dict', 'file_name'], {}), '(metrics_dict, file_name)\n', (32704, 32729), False, 'import utils\n'), ((33069, 33099), 'visual_plt.open_pdf', 'visual_plt.open_pdf', (['plot_name'], {}), '(plot_name)\n', (33088, 33099), False, 'import visual_plt\n'), ((34272, 34397), 'visual_plt.plot_lines', 'visual_plt.plot_lines', (["[metrics_dict['average']]"], {'x_axes': "metrics_dict['x_task']", 'line_names': "['average all tasks so far']"}), "([metrics_dict['average']], x_axes=metrics_dict[\n 'x_task'], line_names=['average all tasks so far'])\n", (34293, 34397), False, 'import visual_plt\n'), ((11266, 11291), 'os.path.isdir', 'os.path.isdir', (['args.p_dir'], {}), '(args.p_dir)\n', (11279, 11291), False, 'import os\n'), ((11786, 11822), 'param_stamp.get_param_stamp_from_args', 'get_param_stamp_from_args', ([], {'args': 'args'}), '(args=args)\n', (11811, 11822), False, 'from param_stamp import get_param_stamp, get_param_stamp_from_args\n'), ((14336, 14363), 'torch.optim.SGD', 'optim.SGD', (['model.optim_list'], {}), '(model.optim_list)\n', (14345, 14363), False, 'from torch import optim\n'), ((17903, 17955), 'torch.optim.Adam', 'optim.Adam', (['generator.optim_list'], {'betas': '(0.9, 0.999)'}), '(generator.optim_list, betas=(0.9, 0.999))\n', (17913, 17955), False, 'from torch import optim\n'), ((18861, 18913), 'utils.print_model_info', 'utils.print_model_info', (['generator'], {'title': '"""GENERATOR"""'}), "(generator, title='GENERATOR')\n", (18883, 18913), False, 'import utils\n'), ((19446, 19604), 'evaluate.intial_accuracy', 'evaluate.intial_accuracy', (['model', 'test_datasets', 'metrics_dict'], {'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'test_size': 'None', 'no_task_mask': '(False)'}), '(model, test_datasets, metrics_dict,\n classes_per_task=classes_per_task, scenario=scenario, test_size=None,\n no_task_mask=False)\n', (19470, 19604), False, 'import evaluate\n'), ((21144, 21380), 'callbacks._VAE_loss_cb', 'cb._VAE_loss_cb', ([], {'log': 'args.loss_log', 'visdom': 'visdom', 'model': '(model if 
args.feedback else generator)', 'tasks': 'args.tasks', 'iters_per_task': '(args.iters if args.feedback else args.g_iters)', 'replay': "(False if args.replay == 'none' else True)"}), "(log=args.loss_log, visdom=visdom, model=model if args.\n feedback else generator, tasks=args.tasks, iters_per_task=args.iters if\n args.feedback else args.g_iters, replay=False if args.replay == 'none' else\n True)\n", (21159, 21380), True, 'import callbacks as cb\n'), ((21498, 21666), 'callbacks._solver_loss_cb', 'cb._solver_loss_cb', ([], {'log': 'args.loss_log', 'visdom': 'visdom', 'model': 'model', 'tasks': 'args.tasks', 'iters_per_task': 'args.iters', 'replay': "(False if args.replay == 'none' else True)"}), "(log=args.loss_log, visdom=visdom, model=model, tasks=\n args.tasks, iters_per_task=args.iters, replay=False if args.replay ==\n 'none' else True)\n", (21516, 21666), True, 'import callbacks as cb\n'), ((21832, 22023), 'callbacks._sample_cb', 'cb._sample_cb', ([], {'log': 'args.sample_log', 'visdom': 'visdom', 'config': 'config', 'test_datasets': 'test_datasets', 'sample_size': 'args.sample_n', 'iters_per_task': '(args.iters if args.feedback else args.g_iters)'}), '(log=args.sample_log, visdom=visdom, config=config,\n test_datasets=test_datasets, sample_size=args.sample_n, iters_per_task=\n args.iters if args.feedback else args.g_iters)\n', (21845, 22023), True, 'import callbacks as cb\n'), ((22211, 22420), 'callbacks._eval_cb', 'cb._eval_cb', ([], {'log': 'args.prec_log', 'test_datasets': 'test_datasets', 'visdom': 'visdom', 'iters_per_task': 'args.iters', 'test_size': 'args.prec_n', 'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'with_exemplars': '(False)'}), '(log=args.prec_log, test_datasets=test_datasets, visdom=visdom,\n iters_per_task=args.iters, test_size=args.prec_n, classes_per_task=\n classes_per_task, scenario=scenario, with_exemplars=False)\n', (22222, 22420), True, 'import callbacks as cb\n'), ((23185, 23390), 'callbacks._eval_cb', 'cb._eval_cb', ([], {'log': 'args.iters', 'test_datasets': 'test_datasets', 'visdom': 'visdom', 'iters_per_task': 'args.iters', 'test_size': 'args.prec_n', 'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'with_exemplars': '(True)'}), '(log=args.iters, test_datasets=test_datasets, visdom=visdom,\n iters_per_task=args.iters, test_size=args.prec_n, classes_per_task=\n classes_per_task, scenario=scenario, with_exemplars=True)\n', (23196, 23390), True, 'import callbacks as cb\n'), ((24391, 24402), 'time.time', 'time.time', ([], {}), '()\n', (24400, 24402), False, 'import time\n'), ((33266, 33367), 'evaluate.show_samples', 'evaluate.show_samples', (['(model if args.feedback else generator)', 'config'], {'size': 'args.sample_n', 'pdf': 'pp'}), '(model if args.feedback else generator, config, size=\n args.sample_n, pdf=pp)\n', (33287, 33367), False, 'import evaluate\n'), ((13060, 13325), 'vae_models.AutoEncoder', 'AutoEncoder', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'classes': "config['classes']", 'fc_layers': 'args.fc_lay', 'fc_units': 'args.fc_units', 'z_dim': 'args.z_dim', 'fc_drop': 'args.fc_drop', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'fc_nl': 'args.fc_nl'}), "(image_size=config['size'], image_channels=config['channels'],\n classes=config['classes'], fc_layers=args.fc_lay, fc_units=args.\n fc_units, z_dim=args.z_dim, fc_drop=args.fc_drop, fc_bn=True if args.\n fc_bn == 'yes' else False, fc_nl=args.fc_nl)\n", (13071, 13325), False, 'from vae_models import AutoEncoder\n'), 
((13481, 13874), 'encoder.Classifier', 'Classifier', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'classes': "config['classes']", 'fc_layers': 'args.fc_lay', 'fc_units': 'args.fc_units', 'fc_drop': 'args.fc_drop', 'fc_nl': 'args.fc_nl', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'excit_buffer': '(True if args.xdg and args.gating_prop > 0 else False)', 'binaryCE': 'args.bce', 'binaryCE_distill': 'args.bce_distill', 'AGEM': 'args.agem'}), "(image_size=config['size'], image_channels=config['channels'],\n classes=config['classes'], fc_layers=args.fc_lay, fc_units=args.\n fc_units, fc_drop=args.fc_drop, fc_nl=args.fc_nl, fc_bn=True if args.\n fc_bn == 'yes' else False, excit_buffer=True if args.xdg and args.\n gating_prop > 0 else False, binaryCE=args.bce, binaryCE_distill=args.\n bce_distill, AGEM=args.agem)\n", (13491, 13874), False, 'from encoder import Classifier\n'), ((17287, 17555), 'vae_models.AutoEncoder', 'AutoEncoder', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'fc_layers': 'args.g_fc_lay', 'fc_units': 'args.g_fc_uni', 'z_dim': 'args.g_z_dim', 'classes': "config['classes']", 'fc_drop': 'args.fc_drop', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'fc_nl': 'args.fc_nl'}), "(image_size=config['size'], image_channels=config['channels'],\n fc_layers=args.g_fc_lay, fc_units=args.g_fc_uni, z_dim=args.g_z_dim,\n classes=config['classes'], fc_drop=args.fc_drop, fc_bn=True if args.\n fc_bn == 'yes' else False, fc_nl=args.fc_nl)\n", (17298, 17555), False, 'from vae_models import AutoEncoder\n'), ((18034, 18065), 'torch.optim.SGD', 'optim.SGD', (['generator.optim_list'], {}), '(generator.optim_list)\n', (18043, 18065), False, 'from torch import optim\n'), ((33419, 33536), 'evaluate.show_reconstruction', 'evaluate.show_reconstruction', (['(model if args.feedback else generator)', 'test_datasets[i]', 'config'], {'pdf': 'pp', 'task': '(i + 1)'}), '(model if args.feedback else generator,\n test_datasets[i], config, pdf=pp, task=i + 1)\n', (33447, 33536), False, 'import evaluate\n')]
|
import argparse
import os
import sys
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from datasets.ct_dataset import get_dataloaders
from evaluate import evaluate
from config import *
from metrics import VolumeLoss
from models import get_model
def test(train_data_root, model_dir, ckpt_name='best', dump_debug_images=False):
"""
Test the model in a checkpoint on the entire dataset.
"""
ckpt = torch.load(f'{model_dir}/{ckpt_name}.pth')
config = ExperimentConfigs(**json.load(open(f"{model_dir}/exp_configs.json")))
config.data_path = train_data_root
# get dataloaders
config.batch_size = 1
train_loader, val_loader = get_dataloaders(config.get_data_config())
train_loader.dataset.transforms = val_loader.dataset.transforms # avoid slicing and use full volumes
# get model
model = get_model(config.get_model_config())
model.load_state_dict(ckpt['model'])
model.to(config.device)
volume_crieteria = VolumeLoss(config.dice_loss_weight, config.wce_loss_weight, config.ce_loss_weight)
outputs_dir = os.path.join(model_dir, f'ckpt-{ckpt_name}', "val_debug") if dump_debug_images else None
validation_report = evaluate(model, val_loader, config.device, volume_crieteria, outputs_dir=outputs_dir)
print(validation_report)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Preprocess Lits2017 dataset')
parser.add_argument('model_dir')
parser.add_argument('train_data_root')
parser.add_argument('--checkpoint_name', default='best')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
test(args.train_data_root, args.model_dir, args.checkpoint_name, dump_debug_images=args.debug)
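    # Example invocation (script name and paths are illustrative):
    #   python test.py models/lits/split0 data/lits --checkpoint_name best --debug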
|
[
"evaluate.evaluate"
] |
[((1011, 1098), 'metrics.VolumeLoss', 'VolumeLoss', (['config.dice_loss_weight', 'config.wce_loss_weight', 'config.ce_loss_weight'], {}), '(config.dice_loss_weight, config.wce_loss_weight, config.\n ce_loss_weight)\n', (1021, 1098), False, 'from metrics import VolumeLoss\n'), ((1226, 1316), 'evaluate.evaluate', 'evaluate', (['model', 'val_loader', 'config.device', 'volume_crieteria'], {'outputs_dir': 'outputs_dir'}), '(model, val_loader, config.device, volume_crieteria, outputs_dir=\n outputs_dir)\n', (1234, 1316), False, 'from evaluate import evaluate\n'), ((1383, 1449), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess Lits2017 dataset"""'}), "(description='Preprocess Lits2017 dataset')\n", (1406, 1449), False, 'import argparse\n'), ((1112, 1169), 'os.path.join', 'os.path.join', (['model_dir', 'f"""ckpt-{ckpt_name}"""', '"""val_debug"""'], {}), "(model_dir, f'ckpt-{ckpt_name}', 'val_debug')\n", (1124, 1169), False, 'import os\n'), ((95, 120), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (110, 120), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import time
import numpy as np
from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha
from clicks import *
import StartStatus as ss
class SingleSimulation(object):
def __init__(self, sim_args, output_queue, click_model, datafold):
self.train_only = sim_args.train_only
self.n_impressions = sim_args.n_impressions
self.n_results = sim_args.n_results
self.click_model = click_model
self.datafold = datafold
if not self.train_only:
self.test_idcg_vector = get_idcg_list(self.datafold.test_label_vector,
self.datafold.test_doclist_ranges,
self.n_results, spread=True)
self.train_idcg_vector = get_idcg_list(self.datafold.train_label_vector,
self.datafold.train_doclist_ranges,
self.n_results)
self.run_details = {
'data folder': str(self.datafold.data_path),
'held-out data': str(self.datafold.heldout_tag),
'click model': self.click_model.get_name(),
}
self.output_queue = output_queue
self.print_frequency = sim_args.print_freq
self.print_all_train = sim_args.all_train
self.print_logscale = sim_args.print_logscale
if self.print_logscale:
self.print_scale = self.print_frequency
self.print_next_scale = self.print_scale
self.print_frequency = 1
self.last_print = 0
self.next_print = 0
self.online_score = 0.0
self.cur_online_discount = 1.0
self.online_discount = 0.6
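        # online_score accumulates a geometrically discounted sum of the per-step
        # "display" nDCG: score = sum_t gamma^t * ndcg_t, with gamma = 0.6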
def timestep_evaluate(self, results, iteration, ranker, ranking_i,
train_ranking, ranking_labels, fea_mat,
alpha_result):
test_print = (not self.train_only
and (iteration == self.last_print
or iteration == self.next_print
or iteration == self.n_impressions))
if test_print:
cur_results = self.evaluate_ranker(iteration,
ranker,
ranking_i,
train_ranking,
ranking_labels,
fea_mat,
alpha_result)
self.online_score += cur_results['display']*self.cur_online_discount
cur_results['cumulative-display'] = self.online_score
results.append(cur_results)
else:
cur_results = self.evaluate_ranker_train_only(iteration,
ranker,
ranking_i,
train_ranking,
ranking_labels,
fea_mat,
alpha_result)
self.online_score += cur_results['display']*self.cur_online_discount
if self.print_all_train:
cur_results['cumulative-display'] = self.online_score
results.append(cur_results)
self.cur_online_discount *= self.online_discount
if iteration >= self.next_print:
if self.print_logscale and iteration >= self.print_next_scale:
self.print_next_scale *= self.print_scale
self.print_frequency *= self.print_scale
self.last_print = self.next_print
self.next_print = self.next_print + self.print_frequency
def evaluate_ranker(self, iteration, ranker,
ranking_i, train_ranking,
ranking_labels, fea_mat,
alpha_result):
test_rankings = ranker.get_test_rankings(
self.datafold.test_feature_matrix,
self.datafold.test_doclist_ranges,
inverted=True)
test_ndcg = evaluate(
test_rankings,
self.datafold.test_label_vector,
self.test_idcg_vector,
self.datafold.test_doclist_ranges.shape[0] - 1,
self.n_results)
train_ndcg = evaluate_ranking(
train_ranking,
ranking_labels,
self.train_idcg_vector[ranking_i],
self.n_results)
results = {
'iteration': iteration,
'heldout': np.mean(test_ndcg),
'display': np.mean(train_ndcg),
}
for name, value in ranker.get_messages().items():
results[name] = value
return results
def evaluate_ranker_train_only(self, iteration, ranker,
ranking_i, train_ranking,
ranking_labels, fea_mat,
alpha_result):
train_ndcg = evaluate_ranking(
train_ranking,
ranking_labels,
self.train_idcg_vector[ranking_i],
self.n_results)
train_alpha = evaluate_alpha(
train_ranking,
fea_mat,
alpha_result,
self.n_results
)
results = {
'iteration': iteration,
'display': np.mean(train_ndcg),
'alpha': np.mean(train_alpha)
}
for name, value in ranker.get_messages().items():
results[name] = value
return results
def sample_and_rank(self, ranker):
ranking_i = np.random.choice(self.datafold.n_train_queries())
train_ranking = ranker.get_train_query_ranking(ranking_i)
assert train_ranking.shape[0] <= self.n_results, 'Shape is %s' % (train_ranking.shape,)
assert len(train_ranking.shape) == 1, 'Shape is %s' % (train_ranking.shape,)
return ranking_i, train_ranking
def run(self, ranker, output_key):
starttime = time.time()
ranker.setup(train_features = self.datafold.train_feature_matrix,
train_query_ranges = self.datafold.train_doclist_ranges)
run_results = []
impressions = 0
for impressions in range(self.n_impressions):
ranking_i, train_ranking = self.sample_and_rank(ranker)
ranking_labels = self.datafold.train_query_labels(ranking_i)
if ss.isCold or impressions > 0:
clicks = self.click_model.generate_clicks(train_ranking, ranking_labels)
else:
train_ranking = np.array(self.datafold.coldstart)
# TODO HERE
clicks = self.click_model.generate_clicks(train_ranking, ranking_labels)
while np.sum(clicks) <= 0:
clicks = self.click_model.generate_clicks(train_ranking, ranking_labels)
self.timestep_evaluate(run_results, impressions, ranker,
ranking_i, train_ranking, ranking_labels,
self.datafold.train_query_fea_mat(ranking_i),
self.datafold.alpha_result)
ranker.process_clicks(clicks)
# evaluate after final iteration
ranking_i, train_ranking = self.sample_and_rank(ranker)
ranking_labels = self.datafold.train_query_labels(ranking_i)
impressions += 1
self.timestep_evaluate(run_results, impressions, ranker,
ranking_i, train_ranking, ranking_labels,
self.datafold.train_query_fea_mat(ranking_i),
self.datafold.alpha_result)
ranker.clean()
self.run_details['runtime'] = time.time() - starttime
output = {'run_details': self.run_details,
'run_results': run_results}
self.output_queue.put((output_key, output))
|
[
"evaluate.evaluate_ranking",
"evaluate.get_idcg_list",
"evaluate.evaluate_alpha",
"evaluate.evaluate"
] |
[((765, 869), 'evaluate.get_idcg_list', 'get_idcg_list', (['self.datafold.train_label_vector', 'self.datafold.train_doclist_ranges', 'self.n_results'], {}), '(self.datafold.train_label_vector, self.datafold.\n train_doclist_ranges, self.n_results)\n', (778, 869), False, 'from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha\n'), ((3997, 4150), 'evaluate.evaluate', 'evaluate', (['test_rankings', 'self.datafold.test_label_vector', 'self.test_idcg_vector', '(self.datafold.test_doclist_ranges.shape[0] - 1)', 'self.n_results'], {}), '(test_rankings, self.datafold.test_label_vector, self.\n test_idcg_vector, self.datafold.test_doclist_ranges.shape[0] - 1, self.\n n_results)\n', (4005, 4150), False, 'from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha\n'), ((4250, 4353), 'evaluate.evaluate_ranking', 'evaluate_ranking', (['train_ranking', 'ranking_labels', 'self.train_idcg_vector[ranking_i]', 'self.n_results'], {}), '(train_ranking, ranking_labels, self.train_idcg_vector[\n ranking_i], self.n_results)\n', (4266, 4353), False, 'from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha\n'), ((4871, 4974), 'evaluate.evaluate_ranking', 'evaluate_ranking', (['train_ranking', 'ranking_labels', 'self.train_idcg_vector[ranking_i]', 'self.n_results'], {}), '(train_ranking, ranking_labels, self.train_idcg_vector[\n ranking_i], self.n_results)\n', (4887, 4974), False, 'from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha\n'), ((5038, 5106), 'evaluate.evaluate_alpha', 'evaluate_alpha', (['train_ranking', 'fea_mat', 'alpha_result', 'self.n_results'], {}), '(train_ranking, fea_mat, alpha_result, self.n_results)\n', (5052, 5106), False, 'from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha\n'), ((5823, 5834), 'time.time', 'time.time', ([], {}), '()\n', (5832, 5834), False, 'import time\n'), ((537, 652), 'evaluate.get_idcg_list', 'get_idcg_list', (['self.datafold.test_label_vector', 'self.datafold.test_doclist_ranges', 'self.n_results'], {'spread': '(True)'}), '(self.datafold.test_label_vector, self.datafold.\n test_doclist_ranges, self.n_results, spread=True)\n', (550, 652), False, 'from evaluate import get_idcg_list, evaluate, evaluate_ranking, evaluate_alpha\n'), ((4462, 4480), 'numpy.mean', 'np.mean', (['test_ndcg'], {}), '(test_ndcg)\n', (4469, 4480), True, 'import numpy as np\n'), ((4499, 4518), 'numpy.mean', 'np.mean', (['train_ndcg'], {}), '(train_ndcg)\n', (4506, 4518), True, 'import numpy as np\n'), ((5225, 5244), 'numpy.mean', 'np.mean', (['train_ndcg'], {}), '(train_ndcg)\n', (5232, 5244), True, 'import numpy as np\n'), ((5261, 5281), 'numpy.mean', 'np.mean', (['train_alpha'], {}), '(train_alpha)\n', (5268, 5281), True, 'import numpy as np\n'), ((7411, 7422), 'time.time', 'time.time', ([], {}), '()\n', (7420, 7422), False, 'import time\n'), ((6357, 6390), 'numpy.array', 'np.array', (['self.datafold.coldstart'], {}), '(self.datafold.coldstart)\n', (6365, 6390), True, 'import numpy as np\n'), ((6506, 6520), 'numpy.sum', 'np.sum', (['clicks'], {}), '(clicks)\n', (6512, 6520), True, 'import numpy as np\n')]
|
import os
import time
import json
import logging
import argparse
import sys
sys.path.append("libs")
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from data import CollaborativeVAEDataGenerator
from train_vae import get_collabo_vae
from evaluate import EvaluateModel
from evaluate import Recall_at_k, NDCG_at_k
def predict_and_evaluate():
### Parse the console arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str,
help="specify the dataset for experiment")
parser.add_argument("--split", type=int,
help="specify the split of the dataset")
parser.add_argument("--batch_size", type=int, default=128,
help="specify the batch size prediction")
parser.add_argument("--device" , type=str, default="0",
help="specify the visible GPU device")
parser.add_argument("--model_root", type=str, default=None,
help="specify the trained model root (optional)")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
### Set up the tensorflow session.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
### Fix the random seeds.
np.random.seed(98765)
tf.set_random_seed(98765)
    ### Get the test data generator
data_root = os.path.join("data", args.dataset, str(args.split))
if args.model_root:
model_root = args.model_root
else:
model_root = os.path.join("models", args.dataset, str(args.split))
params_path = os.path.join(model_root, "hyperparams.json")
with open(params_path, "r") as params_file:
params = json.load(params_file)
bstep_test_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase = "test",
batch_size = args.batch_size, joint=True,
shuffle=False
)
### Build test model and load trained weights
collab_vae = get_collabo_vae(params, bstep_test_gen.num_items)
collab_vae.load_weights(os.path.join(model_root, "best_bstep.model"))
vae_eval = collab_vae.build_vae_eval()
### Evaluate and save the results
k4recalls = [20, 25, 30, 35, 40, 45, 50]
k4ndcgs = [50, 100]
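    # (reminder: Recall@k counts held-out items retrieved in the top-k, typically
    #  normalized by min(k, #held-out items); NDCG@k additionally discounts hits
    #  by 1/log2(rank + 1) and normalizes by the ideal DCG)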
recalls, NDCGs = [], []
for k in k4recalls:
recalls.append("{:.4f}".format(EvaluateModel(vae_eval, bstep_test_gen, Recall_at_k, k=k)))
for k in k4ndcgs:
NDCGs.append("{:.4f}".format(EvaluateModel(vae_eval, bstep_test_gen, NDCG_at_k, k=k)))
recall_table = pd.DataFrame({"k":k4recalls, "recalls":recalls}, columns=["k", "recalls"])
recall_table.to_csv(os.path.join(model_root, "recalls.csv"), index=False)
ndcg_table = pd.DataFrame({"k":k4ndcgs, "NDCGs": NDCGs}, columns=["k", "NDCGs"])
ndcg_table.to_csv(os.path.join(model_root, "NDCGs.csv"), index=False)
print("Done evaluation! Results saved to {}".format(model_root))
if __name__ == '__main__':
predict_and_evaluate()
|
[
"evaluate.EvaluateModel"
] |
[((84, 107), 'sys.path.append', 'sys.path.append', (['"""libs"""'], {}), "('libs')\n", (99, 107), False, 'import sys\n'), ((476, 501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (499, 501), False, 'import argparse\n'), ((1188, 1204), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1202, 1204), True, 'import tensorflow as tf\n'), ((1259, 1284), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1269, 1284), True, 'import tensorflow as tf\n'), ((1290, 1309), 'tensorflow.keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (1303, 1309), True, 'from tensorflow.keras import backend as K\n'), ((1348, 1369), 'numpy.random.seed', 'np.random.seed', (['(98765)'], {}), '(98765)\n', (1362, 1369), True, 'import numpy as np\n'), ((1375, 1400), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(98765)'], {}), '(98765)\n', (1393, 1400), True, 'import tensorflow as tf\n'), ((1694, 1738), 'os.path.join', 'os.path.join', (['model_root', '"""hyperparams.json"""'], {}), "(model_root, 'hyperparams.json')\n", (1706, 1738), False, 'import os\n'), ((1853, 1977), 'data.CollaborativeVAEDataGenerator', 'CollaborativeVAEDataGenerator', ([], {'data_root': 'data_root', 'phase': '"""test"""', 'batch_size': 'args.batch_size', 'joint': '(True)', 'shuffle': '(False)'}), "(data_root=data_root, phase='test', batch_size\n =args.batch_size, joint=True, shuffle=False)\n", (1882, 1977), False, 'from data import CollaborativeVAEDataGenerator\n'), ((2085, 2134), 'train_vae.get_collabo_vae', 'get_collabo_vae', (['params', 'bstep_test_gen.num_items'], {}), '(params, bstep_test_gen.num_items)\n', (2100, 2134), False, 'from train_vae import get_collabo_vae\n'), ((2661, 2737), 'pandas.DataFrame', 'pd.DataFrame', (["{'k': k4recalls, 'recalls': recalls}"], {'columns': "['k', 'recalls']"}), "({'k': k4recalls, 'recalls': recalls}, columns=['k', 'recalls'])\n", (2673, 2737), True, 'import pandas as pd\n'), ((2835, 2903), 'pandas.DataFrame', 'pd.DataFrame', (["{'k': k4ndcgs, 'NDCGs': NDCGs}"], {'columns': "['k', 'NDCGs']"}), "({'k': k4ndcgs, 'NDCGs': NDCGs}, columns=['k', 'NDCGs'])\n", (2847, 2903), True, 'import pandas as pd\n'), ((1806, 1828), 'json.load', 'json.load', (['params_file'], {}), '(params_file)\n', (1815, 1828), False, 'import json\n'), ((2164, 2208), 'os.path.join', 'os.path.join', (['model_root', '"""best_bstep.model"""'], {}), "(model_root, 'best_bstep.model')\n", (2176, 2208), False, 'import os\n'), ((2761, 2800), 'os.path.join', 'os.path.join', (['model_root', '"""recalls.csv"""'], {}), "(model_root, 'recalls.csv')\n", (2773, 2800), False, 'import os\n'), ((2926, 2963), 'os.path.join', 'os.path.join', (['model_root', '"""NDCGs.csv"""'], {}), "(model_root, 'NDCGs.csv')\n", (2938, 2963), False, 'import os\n'), ((2460, 2517), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_test_gen', 'Recall_at_k'], {'k': 'k'}), '(vae_eval, bstep_test_gen, Recall_at_k, k=k)\n', (2473, 2517), False, 'from evaluate import EvaluateModel\n'), ((2581, 2636), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_test_gen', 'NDCG_at_k'], {'k': 'k'}), '(vae_eval, bstep_test_gen, NDCG_at_k, k=k)\n', (2594, 2636), False, 'from evaluate import EvaluateModel\n')]
|
import operator
import numpy as np
import tensorflow as tf
from data import get_best_span, get_phrase
from evaluate import evaluate
class Evaluation(object):
pass
class SquadEvaluation(Evaluation):
def __init__(self, data, inputs=None, outputs=None, loss=None, global_step=None):
self.inputs = inputs or {}
self.outputs = outputs or {}
self.global_step = global_step
self.data = data
self.loss = loss
self.acc = None
self.score = None
@property
def num_examples(self):
if len(self.inputs) == 0:
return 0
return len(self.inputs['q'])
def __add__(self, other):
assert isinstance(other, SquadEvaluation)
if self.loss is None or other.loss is None:
loss = None
else:
if self.num_examples + other.num_examples == 0:
loss = 0
else:
loss = (self.loss * self.num_examples + other.loss * other.num_examples) / (self.num_examples + other.num_examples)
global_step = self.global_step or other.global_step
inputs, outputs = {}, {}
if other.inputs is not None:
for key, vals in other.inputs.items():
if key in self.inputs:
inputs[key] = np.append(self.inputs[key], vals, axis=0)
else:
inputs[key] = vals
if other.outputs is not None:
for key, vals in other.outputs.items():
if key in self.outputs:
outputs[key] = np.append(self.outputs[key], vals, axis=0)
else:
outputs[key] = vals
return SquadEvaluation(self.data, inputs=inputs, outputs=outputs, loss=loss, global_step=global_step)
def __repr__(self):
acc1, acc2 = self.get_acc()['acc1'], self.get_acc()['acc2']
em, f1 = self.get_score()['exact_match'], self.get_score()['f1']
return str('<{} at {}> loss: {:.4f}, acc1: {:.3f}%, acc2: {:.3f}%, EM: {:.3f}%, F1: {:.3f}%'.format(self.data.data_type, self.global_step, self.loss, acc1, acc2, em, f1))
def get_answers(self):
idxs = self.inputs['idxs']
logits1_list, logits2_list = self.outputs['logits1'], self.outputs['logits2']
answers = {}
for idx, logits1, logits2 in zip(idxs, logits1_list, logits2_list):
each = self.data.get(idx)
context, context_words, id_ = [each[key] for key in ['context', 'context_words', 'ids']]
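            # (get_best_span searches over start/end positions and returns the span
            #  whose combined score under `op`, here the sum of the two logits, is
            #  maximal)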
best_span, best_score = get_best_span(logits1, logits2, op=operator.add)
# rx = self.data.data['*x'][idx]
# context, context_words = self.data.shared['context'][rx], self.data.shared['context_words'][rx]
answer = get_phrase(context, context_words, best_span)
answers[id_] = answer
return answers
def get_score(self):
if self.score is not None:
return self.score
answers = self.get_answers()
official = evaluate(self.data.squad['data'], answers)
self.score = official
return official
def get_acc(self):
if self.acc is not None:
return self.acc
y1, y2 = self.inputs['y1'], self.inputs['y2'] # [N]
yp1, yp2 = self.outputs['yp1'], self.outputs['yp2'] # [N]
acc1 = 100 * np.mean(np.equal(y1, yp1))
acc2 = 100 * np.mean(np.equal(y2, yp2))
acc = {'acc1': acc1, 'acc2': acc2}
self.acc = acc
return acc
def get_summaries(self):
acc = self.get_acc()
score = self.get_score()
acc1, acc2 = acc['acc1'], acc['acc2']
em, f1 = score['exact_match'], score['f1']
loss = self.loss
data_type = self.data.config.data_type
loss_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=loss)])
acc1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc1'.format(data_type), simple_value=acc1)])
acc2_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc2'.format(data_type), simple_value=acc2)])
em_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/em'.format(data_type), simple_value=em)])
f1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/f1'.format(data_type), simple_value=f1)])
summaries = [loss_summary, acc1_summary, acc2_summary, em_summary, f1_summary]
return summaries
|
[
"evaluate.evaluate"
] |
[((3054, 3096), 'evaluate.evaluate', 'evaluate', (["self.data.squad['data']", 'answers'], {}), "(self.data.squad['data'], answers)\n", (3062, 3096), False, 'from evaluate import evaluate\n'), ((2549, 2597), 'data.get_best_span', 'get_best_span', (['logits1', 'logits2'], {'op': 'operator.add'}), '(logits1, logits2, op=operator.add)\n', (2562, 2597), False, 'from data import get_best_span, get_phrase\n'), ((2774, 2819), 'data.get_phrase', 'get_phrase', (['context', 'context_words', 'best_span'], {}), '(context, context_words, best_span)\n', (2784, 2819), False, 'from data import get_best_span, get_phrase\n'), ((3393, 3410), 'numpy.equal', 'np.equal', (['y1', 'yp1'], {}), '(y1, yp1)\n', (3401, 3410), True, 'import numpy as np\n'), ((3441, 3458), 'numpy.equal', 'np.equal', (['y2', 'yp2'], {}), '(y2, yp2)\n', (3449, 3458), True, 'import numpy as np\n'), ((1300, 1341), 'numpy.append', 'np.append', (['self.inputs[key]', 'vals'], {'axis': '(0)'}), '(self.inputs[key], vals, axis=0)\n', (1309, 1341), True, 'import numpy as np\n'), ((1568, 1610), 'numpy.append', 'np.append', (['self.outputs[key]', 'vals'], {'axis': '(0)'}), '(self.outputs[key], vals, axis=0)\n', (1577, 1610), True, 'import numpy as np\n')]
|
"""Train an encoder using Contrastive Learning."""
import argparse
import os
import subprocess
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from optimizers import LARS # from torchlars import LARS
from tqdm import tqdm
from dataset import get_datasets
from critic import LinearCriticSimCLR as LinearCritic
from evaluate import save_checkpoint, encode_train_set, train_clf, test
from models import *
from scheduler import CosineAnnealingWithLinearRampLR
import hydra
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from utils import *
import logging
log = logging.getLogger(__name__)
@hydra.main(config_path="conf", config_name="config")
def main(args):
# Check whether config already exists and job has been successful
run_path = os.getcwd()
success_path, checkpoint_path = manage_exisiting_config(run_path)
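    # linear learning-rate scaling: the effective lr grows proportionally with the
    # batch size, relative to a reference batch size of 256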
args.lr = args.base_lr * (args.batch_size / 256)
with open(".hydra/config.yaml", "r") as fp:
OmegaConf.save(config=args, f=fp.name)
# For reproducibility purposes
args.seed = args.run if args.seed == 0 else int(torch.randint(0, 2 ** 32 - 1, (1,)).item())
pl.seed_everything(args.seed)
save_reproduce(sys.argv, args.seed, run_path, git_hash)
log.info("Run in parallel with {} gpus".format(torch.cuda.device_count()))
device = "cuda" if torch.cuda.is_available() else "cpu"
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
clf = None
log.info("==> Preparing data..")
trainset, testset, clftrainset, num_classes, stem = get_datasets(args.dataset)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True
)
testloader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False, num_workers=args.num_workers, pin_memory=True)
clftrainloader = torch.utils.data.DataLoader(clftrainset, batch_size=1000, shuffle=False, num_workers=args.num_workers, pin_memory=True)
# Model
log.info("==> Building model..")
##############################################################
# Encoder
##############################################################
if args.arch in ["ResNet18", "ResNet34", "ResNet50"]:
net = eval(args.arch)(stem=stem, num_channels=args.dataset.shape[0]).to(device)
else:
raise ValueError("Bad architecture specification")
##############################################################
# Critic
##############################################################
critic = LinearCritic(net.representation_dim, temperature=args.temperature).to(device)
if device == "cuda":
repr_dim = net.representation_dim
net = torch.nn.DataParallel(net)
net.representation_dim = repr_dim
if checkpoint_path is not None:
# Load checkpoint.
log.info("==> Resuming from checkpoint..")
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint["net"])
critic.load_state_dict(checkpoint["critic"])
best_acc = checkpoint["acc"] if "acc" in checkpoint else 0.0
start_epoch = checkpoint["epoch"]
criterion = nn.CrossEntropyLoss()
base_optimizer = optim.SGD(list(net.parameters()) + list(critic.parameters()), lr=args.lr, weight_decay=1e-6, momentum=args.momentum)
if args.cosine_anneal:
scheduler = CosineAnnealingWithLinearRampLR(base_optimizer, args.num_epochs)
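    # (LARS rescales each layer's update by a trust ratio based on ||w|| / ||grad w||,
    #  which helps stabilize large-batch training; it wraps the SGD base optimizer)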
encoder_optimizer = LARS(base_optimizer, 1e-3)
# Training
def train(epoch):
log.info("\nEpoch: %d" % epoch)
net.train()
critic.train()
train_loss = 0
t = tqdm(enumerate(trainloader), desc="Loss: **** ", total=len(trainloader), bar_format="{desc}{bar}{r_bar}")
for batch_idx, (inputs, _, _) in t:
x1, x2 = inputs
x1, x2 = x1.to(device), x2.to(device)
encoder_optimizer.zero_grad()
representation1, representation2 = net(x1), net(x2)
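            # (NT-Xent-style objective: the critic scores all pairs of batch
            #  representations at the given temperature, and cross-entropy pulls each
            #  view towards its augmented counterpart among the other batch entries)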
raw_scores, pseudotargets = critic(representation1, representation2)
loss = criterion(raw_scores, pseudotargets)
loss.backward()
encoder_optimizer.step()
train_loss += loss.item()
t.set_description("Loss: %.3f " % (train_loss / (batch_idx + 1)))
for epoch in range(start_epoch, start_epoch + args.num_epochs):
train(epoch)
if (args.val_freq > 0) and (epoch % args.val_freq == (args.val_freq - 1)):
X, y = encode_train_set(clftrainloader, device, net)
clf = train_clf(X, y, net.representation_dim, num_classes, device, reg_weight=1e-5)
acc = test(testloader, device, net, clf)
if acc > best_acc:
best_acc = acc
save_checkpoint(net, clf, critic, epoch, args, os.path.basename(__file__))
elif args.val_freq == 0:
save_checkpoint(net, clf, critic, epoch, args, os.path.basename(__file__))
if args.cosine_anneal:
scheduler.step()
open(success_path, "w+").close()
if __name__ == "__main__":
cudnn.benchmark = True
# create directory in `scratch` drive and symlink experiments to it
# create_symlink('conf/config.yaml')
git_hash = subprocess.check_output(["git", "rev-parse", "--verify", "HEAD"])
main()
|
[
"evaluate.test",
"evaluate.encode_train_set",
"evaluate.train_clf"
] |
[((655, 682), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (672, 682), False, 'import logging\n'), ((686, 738), 'hydra.main', 'hydra.main', ([], {'config_path': '"""conf"""', 'config_name': '"""config"""'}), "(config_path='conf', config_name='config')\n", (696, 738), False, 'import hydra\n'), ((840, 851), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (849, 851), False, 'import os\n'), ((1207, 1236), 'pytorch_lightning.seed_everything', 'pl.seed_everything', (['args.seed'], {}), '(args.seed)\n', (1225, 1236), True, 'import pytorch_lightning as pl\n'), ((1652, 1678), 'dataset.get_datasets', 'get_datasets', (['args.dataset'], {}), '(args.dataset)\n', (1664, 1678), False, 'from dataset import get_datasets\n'), ((1698, 1829), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(trainset, batch_size=args.batch_size, shuffle=\n True, num_workers=args.num_workers, pin_memory=True)\n', (1725, 1829), False, 'import torch\n'), ((1856, 1975), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(testset, batch_size=1000, shuffle=False, num_workers=args.num_workers, pin_memory=True)\n', (1883, 1975), False, 'import torch\n'), ((1993, 2116), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['clftrainset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(clftrainset, batch_size=1000, shuffle=False, num_workers=args.num_workers, pin_memory=True)\n', (2020, 2116), False, 'import torch\n'), ((3366, 3387), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3385, 3387), True, 'import torch.nn as nn\n'), ((3662, 3689), 'optimizers.LARS', 'LARS', (['base_optimizer', '(0.001)'], {}), '(base_optimizer, 0.001)\n', (3666, 3689), False, 'from optimizers import LARS\n'), ((5437, 5502), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--verify', 'HEAD']"], {}), "(['git', 'rev-parse', '--verify', 'HEAD'])\n", (5460, 5502), False, 'import subprocess\n'), ((1400, 1425), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1423, 1425), False, 'import torch\n'), ((2905, 2931), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (2926, 2931), False, 'import torch\n'), ((3110, 3137), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3120, 3137), False, 'import torch\n'), ((3573, 3637), 'scheduler.CosineAnnealingWithLinearRampLR', 'CosineAnnealingWithLinearRampLR', (['base_optimizer', 'args.num_epochs'], {}), '(base_optimizer, args.num_epochs)\n', (3604, 3637), False, 'from scheduler import CosineAnnealingWithLinearRampLR\n'), ((1349, 1374), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1372, 1374), False, 'import torch\n'), ((2745, 2811), 'critic.LinearCriticSimCLR', 'LinearCritic', (['net.representation_dim'], {'temperature': 'args.temperature'}), '(net.representation_dim, temperature=args.temperature)\n', (2757, 2811), True, 'from critic import LinearCriticSimCLR as LinearCritic\n'), ((4691, 4736), 'evaluate.encode_train_set', 'encode_train_set', (['clftrainloader', 'device', 'net'], {}), '(clftrainloader, device, net)\n', (4707, 4736), False, 'from evaluate import save_checkpoint, encode_train_set, train_clf, test\n'), ((4755, 4833), 'evaluate.train_clf', 'train_clf', (['X', 'y', 'net.representation_dim', 'num_classes', 'device'], {'reg_weight': '(1e-05)'}), '(X, y, net.representation_dim, num_classes, device, reg_weight=1e-05)\n', (4764, 4833), False, 'from evaluate import save_checkpoint, encode_train_set, train_clf, test\n'), ((4851, 4885), 'evaluate.test', 'test', (['testloader', 'device', 'net', 'clf'], {}), '(testloader, device, net, clf)\n', (4855, 4885), False, 'from evaluate import save_checkpoint, encode_train_set, train_clf, test\n'), ((5007, 5033), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (5023, 5033), False, 'import os\n'), ((1159, 1194), 'torch.randint', 'torch.randint', (['(0)', '(2 ** 32 - 1)', '(1,)'], {}), '(0, 2 ** 32 - 1, (1,))\n', (1172, 1194), False, 'import torch\n'), ((5127, 5153), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (5143, 5153), False, 'import os\n')]
|
# from rdkit import Chem
# from torch_geometric.data import Data, Batch
from train import train
from data import ModalRetriever
# import torch
import os
# from train_utils import setup_common
# from utils import mkdir
# from model.load_model import get
# from transformers import BertTokenizer
# from transformers import AutoTokenizer
from options import read_args
from train_utils import *
from utils import dump_file, load_file
from pprint import pprint as pp
from sklearn.metrics import f1_score
from evaluate import evaluate
from pprint import pprint as pp
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
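# Trade throughput for reproducibility: disabling cuDNN autotuning and forcing
# deterministic kernels makes runs repeatable at some speed cost.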
# torch.use_deterministic_algorithms(True)
from data import ChemetDataset
if __name__ == '__main__':
args = read_args()
# Specializing args for the experiment
args.data_dir = "../data_online/chemet/"
fname = "anno"
args.train_file = args.data_dir + "distant_training_new.json"
args.val_file = args.data_dir + "dev_anno_unseen_removed.json"
args.test_file = args.data_dir + "test_anno_unseen_removed.json"
data_dir = args.data_dir
train_file = args.train_file
test_file = args.test_file
val_file = args.val_file
# args.val_file = data_dir + "dev.txt"
# args.test_file = data_dir + "test.txt"
# set params Fixed + Tunable
args.useful_params = [
# fixed
"exp",
"max_grad_norm",
"g_global_pooling",
"mult_mask",
"g_mult_mask",
"grad_accumulation_steps",
"model_name",
# tuning
"exp_id",
"activation",
"batch_size",
"cm_type",
"debug",
"dropout",
"gnn_type",
"g_dim",
"patience",
"plm",
"plm_hidden_dim",
"plm_lr",
"pool_type",
"lr",
"model_type",
"num_epochs",
"num_gnn_layers",
"use_cache",
"num_atom_types",
"num_edge_types",
]
args.downstream_layers = ["combiner", "gnn", "cm_attn", 'the_zero', 'the_one', 'rand_emb']
# args.model_path=""
# args.model_name = "fet_model"
# args.model_name = "fet_model"
args.exp = "fet"
args.plm = "sci"
args.plm = get_plm_fullname(args.plm)
print("torch.cuda.device_count()", torch.cuda.device_count())
# if torch.cuda.device_count() > 2:
# args.gpu_id = 2
# args.num_epoch = 50
# args.batch_size = 8
# args.g_dim = 128
# args.patience = 8
# args.lr = 1e-4
# args.plm_lr = 3e-5
# args.use_cache = True
# args.model_type = "tdg"
# args.activation = "gelu"
if args.debug:
print("Debug Mode ON")
args.plm = get_plm_fullname("tiny")
args.batch_size = 2
args.num_epochs = 2
args.patience = 3
# args.use_cache = False
print("PLM is", args.plm)
print("model is", args.model_name)
# Prepare Data and Model
set_seeds(args)
tokenizer = get_tokenizer(args.plm)
modal_retriever = ModalRetriever(data_dir + "mention2ent.json", data_dir + "cmpd_info.json")
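    # ModalRetriever presumably resolves each chemical mention to external
    # modalities (entity links via mention2ent.json, compound records via
    # cmpd_info.json) so structure-aware features can be attached per mention.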
labels_path = data_dir + fname + "_labels.json"
if not os.path.exists(labels_path):
labels = ChemetDataset.collect_labels([train_file, val_file, test_file], labels_path)
else:
labels = load_file(labels_path)
# print("labels", labels)
train_data, val_data, test_data = ChemetDataset(args, train_file, tokenizer, modal_retriever, labels), \
ChemetDataset(args, val_file, tokenizer, modal_retriever, labels), \
ChemetDataset(args, test_file, tokenizer, modal_retriever, labels)
#### for LSTM input
word_embed=None
if args.model_name=="lstm":
main_dir="/"
word_embed_type = "glove.840B.300d"
args.embed_dim=300
word_embed_type = "patent_w2v"
args.embed_dim=200
embed_file = os.path.join('../embeddings/' + word_embed_type + '.txt')
word_embed_path = os.path.join("../embeddings", word_embed_type + "word_embed.pkl")
word_vocab_path = os.path.join("../embeddings", word_embed_type + "word_vocab.pkl")
files_vocab_path = None
if not (os.path.isfile(word_embed_path)) or not (os.path.isfile(word_vocab_path)):
print("No word_embed or word_vocab save, dumping...")
word_embed, word_vocab = load_word_embed(embed_file,
args.embed_dim,
skip_first=True, file_vocab_path=files_vocab_path)
pkl.dump(word_embed, open(word_embed_path, "wb"))
pkl.dump(word_vocab, open(word_vocab_path, "wb"))
print("word_embed Saved")
word_embed = pkl.load(open(word_embed_path, "rb"))
word_vocab = pkl.load(open(word_vocab_path, "rb"))
# reversed_word_vocab = {value: key for (key, value) in word_vocab.items()}
# vocabs = {'word': word_vocab}
for data_split in [train_data, val_data, test_data]:
for i, sample in enumerate(data_split):
sample["word_ids"]=[word_vocab.get(t, word_vocab.get(t.lower(), 0))
for t in sample["original_text"]]
sample["mention_masks"]=[1 if sample["original_pos"][0] <= i < sample["original_pos"][1] else 0
for i in range(len(sample["original_text"]))]
sample["context_masks"]=[1 for _ in range(len(sample["original_text"]))]
sample["is_rnn"]=True
else:
for data_split in [train_data, val_data, test_data]:
for i, sample in enumerate(data_split):
sample["is_rnn"]=False
#
# # add glove indicies
# def load_glove_vectors(glove_file="./data/glove.6B/glove.6B.50d.txt"):
# """Load the glove word vectors"""
# word_vectors = {}
# with open(glove_file) as f:
# for line in f:
# split = line.split()
# word_vectors[split[0]] = np.array([float(x) for x in split[1:]])
# return word_vectors
#
#
# def get_emb_matrix(pretrained, word_counts, emb_size=50):
# """ Creates embedding matrix from word vectors"""
# vocab_size = len(word_counts) + 2
# vocab_to_idx = {}
# vocab = ["", "UNK"]
# W = np.zeros((vocab_size, emb_size), dtype="float32")
# W[0] = np.zeros(emb_size, dtype='float32') # adding a vector for padding
# W[1] = np.random.uniform(-0.25, 0.25, emb_size) # adding a vector for unknown words
# vocab_to_idx["UNK"] = 1
# i = 2
# for word in word_counts:
# if word in word_vecs:
# W[i] = word_vecs[word]
# else:
# W[i] = np.random.uniform(-0.25, 0.25, emb_size)
# vocab_to_idx[word] = i
# vocab.append(word)
# i += 1
# return W, np.array(vocab), vocab_to_idx
#
#
# word_vecs = load_glove_vectors()
# pretrained_weights, vocab, vocab2index = get_emb_matrix(word_vecs, counts)
# exit()
print("args.num_atom_types,args.num_edge_types", args.num_atom_types, args.num_edge_types)
args, model, optimizer = setup_common(args, word_embed)
# train or analyze
if not args.eval:
train(args, model, optimizer, (train_data, val_data, test_data))
else:
print("Eval")
model.load_state_dict(torch.load(args.model_path)['model_state_dict'])
test_score, output = evaluate(args, model, test_data)
print(test_score)
# val_score, output2 = evaluate(args, model, val_data)
# print(val_score)
if args.error_analysis:
sample_id = 0
original_data = load_file(test_file)
final_data = []
for idx in range(len(original_data)):
sample = original_data[idx]
text = sample["tokens"]
for mention in sample["annotations"]:
sample_id += 1
m_s, m_e = mention["start"], mention["end"]
m = " ".join(text[m_s:m_e])
                    m = m.replace("  ", " ")  # collapse double spaces left by the join above
final_data.append({"original_text": text, 'mention_name': m, "original_labels": mention["labels"]})
for id, pred, label in output:
print("\n\nsample", id)
sample = final_data[id]
# print("text is", sample["original_text"])
print(" ".join(sample["original_text"]) )
print("\nmention is", sample['mention_name'])
print("original labels are")
pp(sorted(sample["original_labels"]))
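                # pred and label are multi-hot vectors over the label vocabulary;
                # decode indices back to strings for the gold set (label[i] == 1)
                # and the predicted set (c == 1) before the set-difference analysis.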
here_labels = sorted([labels[i] for i, c in enumerate(pred) if label[i] == 1])
predicted_labels = sorted([labels[i] for i, c in enumerate(pred) if c == 1])
# print("has labels", sorted(here_labels))
print("predicted labels")
pp(sorted(predicted_labels))
here_labels = set(here_labels)
predicted_labels = set(predicted_labels)
missed_labels = here_labels.difference(predicted_labels)
incorrected_included_labels = predicted_labels.difference(here_labels)
if missed_labels:
print("missed_labels")
pp(missed_labels)
if incorrected_included_labels:
print("incorrected_included_labels")
pp(incorrected_included_labels)
# if args.attn_analysis:
# for i, c in enumerate(pred):
# if label[i] == 1:
# print("has label", labels[i])
# for i, c in enumerate(pred):
# if c == 1:
# print("predicted label", labels[i])
# if label[i] != c:
# print("pred")
# print(labels[i])
exit()
rels = ['AGONIST-ACTIVATOR',
'DOWNREGULATOR', 'SUBSTRATE_PRODUCT-OF',
'AGONIST', 'INHIBITOR',
'PRODUCT-OF', 'ANTAGONIST',
'ACTIVATOR', 'INDIRECT-UPREGULATOR',
'SUBSTRATE', 'INDIRECT-DOWNREGULATOR',
'AGONIST-INHIBITOR', 'UPREGULATOR', ]
output = load_file("analyze/output.json")
t0, t1 = [], []
null_cnt = 0
total_cnt = 0
for id, pred in output:
instance = test_data[id]
if instance["label"] != pred:
total_cnt += 1
if 0 in [instance["modal_data"][0][2], instance["modal_data"][0][3],
instance["modal_data"][1][1]]: null_cnt += 1
print("\nid:", id, "pred:", rels[pred], " label:", rels[instance["label"]])
print(str(instance["text"].encode(errors="ignore")))
t0.append(pred)
t1.append(instance["label"])
print("modal_data:")
pp(instance["modal_data"])
# for id, pred, tgt in output:
# instance = test_data[id]
# print("\nid:", id, " tgt:", tgt, "pred:", pred, " label:", instance["label"])
#
# t0.append(pred)
# t1.append(instance["label"])
# pp(" modal_data:")
# pp( instance["modal_data"])
print(null_cnt)
print(total_cnt)
print(f1_score(t1, t0, average="micro"))
|
[
"evaluate.evaluate"
] |
[((758, 769), 'options.read_args', 'read_args', ([], {}), '()\n', (767, 769), False, 'from options import read_args\n'), ((3010, 3084), 'data.ModalRetriever', 'ModalRetriever', (["(data_dir + 'mention2ent.json')", "(data_dir + 'cmpd_info.json')"], {}), "(data_dir + 'mention2ent.json', data_dir + 'cmpd_info.json')\n", (3024, 3084), False, 'from data import ModalRetriever\n'), ((3149, 3176), 'os.path.exists', 'os.path.exists', (['labels_path'], {}), '(labels_path)\n', (3163, 3176), False, 'import os\n'), ((3195, 3271), 'data.ChemetDataset.collect_labels', 'ChemetDataset.collect_labels', (['[train_file, val_file, test_file]', 'labels_path'], {}), '([train_file, val_file, test_file], labels_path)\n', (3223, 3271), False, 'from data import ChemetDataset\n'), ((3299, 3321), 'utils.load_file', 'load_file', (['labels_path'], {}), '(labels_path)\n', (3308, 3321), False, 'from utils import dump_file, load_file\n'), ((3391, 3458), 'data.ChemetDataset', 'ChemetDataset', (['args', 'train_file', 'tokenizer', 'modal_retriever', 'labels'], {}), '(args, train_file, tokenizer, modal_retriever, labels)\n', (3404, 3458), False, 'from data import ChemetDataset\n'), ((3500, 3565), 'data.ChemetDataset', 'ChemetDataset', (['args', 'val_file', 'tokenizer', 'modal_retriever', 'labels'], {}), '(args, val_file, tokenizer, modal_retriever, labels)\n', (3513, 3565), False, 'from data import ChemetDataset\n'), ((3607, 3673), 'data.ChemetDataset', 'ChemetDataset', (['args', 'test_file', 'tokenizer', 'modal_retriever', 'labels'], {}), '(args, test_file, tokenizer, modal_retriever, labels)\n', (3620, 3673), False, 'from data import ChemetDataset\n'), ((3930, 3987), 'os.path.join', 'os.path.join', (["('../embeddings/' + word_embed_type + '.txt')"], {}), "('../embeddings/' + word_embed_type + '.txt')\n", (3942, 3987), False, 'import os\n'), ((4014, 4079), 'os.path.join', 'os.path.join', (['"""../embeddings"""', "(word_embed_type + 'word_embed.pkl')"], {}), "('../embeddings', word_embed_type + 'word_embed.pkl')\n", (4026, 4079), False, 'import os\n'), ((4106, 4171), 'os.path.join', 'os.path.join', (['"""../embeddings"""', "(word_embed_type + 'word_vocab.pkl')"], {}), "('../embeddings', word_embed_type + 'word_vocab.pkl')\n", (4118, 4171), False, 'import os\n'), ((7367, 7431), 'train.train', 'train', (['args', 'model', 'optimizer', '(train_data, val_data, test_data)'], {}), '(args, model, optimizer, (train_data, val_data, test_data))\n', (7372, 7431), False, 'from train import train\n'), ((7574, 7606), 'evaluate.evaluate', 'evaluate', (['args', 'model', 'test_data'], {}), '(args, model, test_data)\n', (7582, 7606), False, 'from evaluate import evaluate\n'), ((10374, 10406), 'utils.load_file', 'load_file', (['"""analyze/output.json"""'], {}), "('analyze/output.json')\n", (10383, 10406), False, 'from utils import dump_file, load_file\n'), ((7813, 7833), 'utils.load_file', 'load_file', (['test_file'], {}), '(test_file)\n', (7822, 7833), False, 'from utils import dump_file, load_file\n'), ((11482, 11515), 'sklearn.metrics.f1_score', 'f1_score', (['t1', 't0'], {'average': '"""micro"""'}), "(t1, t0, average='micro')\n", (11490, 11515), False, 'from sklearn.metrics import f1_score\n'), ((4220, 4251), 'os.path.isfile', 'os.path.isfile', (['word_embed_path'], {}), '(word_embed_path)\n', (4234, 4251), False, 'import os\n'), ((4261, 4292), 'os.path.isfile', 'os.path.isfile', (['word_vocab_path'], {}), '(word_vocab_path)\n', (4275, 4292), False, 'import os\n'), ((11063, 11089), 'pprint.pprint', 'pp', (["instance['modal_data']"], {}), "(instance['modal_data'])\n", (11065, 11089), True, 'from pprint import pprint as pp\n'), ((9466, 9483), 'pprint.pprint', 'pp', (['missed_labels'], {}), '(missed_labels)\n', (9468, 9483), True, 'from pprint import pprint as pp\n'), ((9609, 9640), 'pprint.pprint', 'pp', (['incorrected_included_labels'], {}), '(incorrected_included_labels)\n', (9611, 9640), True, 'from pprint import pprint as pp\n')]
|
# python train.py anchor-based --model-dir ../models/ab_mobilenet --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn mobilenet --base-model attention --num-feature 1280 --num-head 10
# python train.py anchor-based --model-dir ../models/ab_mobilenet_lstm --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn mobilenet --base-model lstm --num-feature 1280
import logging
import numpy as np
import torch
from torch import nn
from anchor_based import anchor_helper
from anchor_based.dsnet import DSNet
from anchor_based.losses import calc_cls_loss, calc_loc_loss
from evaluate import evaluate
from helpers import data_helper, vsumm_helper, bbox_helper
from helpers import init_helper, data_helper, vsumm_helper, bbox_helper
logger = logging.getLogger()
def xavier_init(module):
cls_name = module.__class__.__name__
if 'Linear' in cls_name or 'Conv' in cls_name:
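        # gain=sqrt(2) is the recommended scaling for ReLU-family activations
        # (He-style), applied here on top of Xavier-uniform initialization.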
nn.init.xavier_uniform_(module.weight, gain=np.sqrt(2.0))
if module.bias is not None:
nn.init.constant_(module.bias, 0.1)
def train(args, split, save_path):
# print(args.num_feature)
model = DSNet(base_model=args.base_model, num_feature=args.num_feature,
num_hidden=args.num_hidden, anchor_scales=args.anchor_scales,
num_head=args.num_head)
model = model.to(args.device)
model.apply(xavier_init)
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(parameters, lr=args.lr,
weight_decay=args.weight_decay)
max_val_fscore = -1
train_set = data_helper.VideoDataset(split['train_keys'])
train_loader = data_helper.DataLoader(train_set, shuffle=True)
val_set = data_helper.VideoDataset(split['test_keys'])
val_loader = data_helper.DataLoader(val_set, shuffle=False)
cnn = args.cnn
for epoch in range(args.max_epoch):
model.train()
stats = data_helper.AverageMeter('loss', 'cls_loss', 'loc_loss')
#for _, seq, gtscore, cps, n_frames, nfps, picks, _ in train_loader:
for _, _, n_frames, picks, gtscore, _, _, \
seq_default, cps_default, nfps_default, \
seq_lenet, seq_alexnet, seq_mobilenet, seq_squeeze, seq_resnet, \
seq_lenet_c, seq_alexnet_c, seq_mobilenet_c, seq_squeeze_c, seq_resnet_c, \
cps_lenet_c, cps_alexnet_c, cps_mobilenet_c, cps_squeeze_c, cps_resnet_c, \
_, _, _, cps_lenet, cps_alexnet, cps_mobilenet, cps_squeeze, cps_resnet in train_loader:
if cnn == "default":
seq = seq_default
cps = cps_default
nfps = nfps_default
else:
if cnn == "lenet":
seq = seq_lenet_c
change_points = cps_lenet_c
if cnn == "alexnet":
seq = seq_alexnet_c
change_points = cps_alexnet_c
if cnn == "mobilenet":
seq = seq_mobilenet_c
change_points = cps_mobilenet_c
if cnn == "squeeze":
seq = seq_squeeze_c
change_points = cps_squeeze_c
if cnn == "resnet":
seq = seq_resnet_c
change_points = cps_resnet_c
begin_frames = change_points[:-1]
end_frames = change_points[1:]
cps = np.vstack((begin_frames, end_frames)).T
# Here, the change points are detected (Change-point positions t0, t1, ..., t_{m-1})
nfps = end_frames - begin_frames
#seq = seq_resnet
#cps = cps_default
#nfps = nfps_default
# Obtain a keyshot summary from gtscore (the 1D-array with shape (n_steps),
        # stores ground truth importance score (used for training)
keyshot_summ = vsumm_helper.get_keyshot_summ(
gtscore, cps, n_frames, nfps, picks)
target = vsumm_helper.downsample_summ(keyshot_summ)
if not target.any():
continue
target_bboxes = bbox_helper.seq2bbox(target)
target_bboxes = bbox_helper.lr2cw(target_bboxes)
anchors = anchor_helper.get_anchors(target.size, args.anchor_scales)
# Get class and location label for positive samples
cls_label, loc_label = anchor_helper.get_pos_label(
anchors, target_bboxes, args.pos_iou_thresh)
# Get negative samples
num_pos = cls_label.sum()
cls_label_neg, _ = anchor_helper.get_pos_label(
anchors, target_bboxes, args.neg_iou_thresh)
cls_label_neg = anchor_helper.get_neg_label(
cls_label_neg, int(args.neg_sample_ratio * num_pos))
# Get incomplete samples
cls_label_incomplete, _ = anchor_helper.get_pos_label(
anchors, target_bboxes, args.incomplete_iou_thresh)
cls_label_incomplete[cls_label_neg != 1] = 1
cls_label_incomplete = anchor_helper.get_neg_label(
cls_label_incomplete,
int(args.incomplete_sample_ratio * num_pos))
cls_label[cls_label_neg == -1] = -1
cls_label[cls_label_incomplete == -1] = -1
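            # Merged classification targets: 1 keeps positives, while entries set
            # to -1 (the sampled negative / incomplete anchors) appear to be
            # handled separately from unlabeled positions inside calc_cls_loss.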
cls_label = torch.tensor(cls_label, dtype=torch.float32).to(args.device)
loc_label = torch.tensor(loc_label, dtype=torch.float32).to(args.device)
seq = torch.tensor(seq, dtype=torch.float32).unsqueeze(0).to(args.device)
# Pass 2D-array features (seq) into the models
pred_cls, pred_loc = model(seq)
# Calculate loss functions
loc_loss = calc_loc_loss(pred_loc, loc_label, cls_label)
cls_loss = calc_cls_loss(pred_cls, cls_label)
loss = cls_loss + args.lambda_reg * loc_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Update loss functions for each video
stats.update(loss=loss.item(), cls_loss=cls_loss.item(),
loc_loss=loc_loss.item())
# For each epoch, evaluate the model
args = init_helper.get_arguments()
seg_algo = args.segment_algo
cnn = args.cnn
init_helper.init_logger(args.model_dir, args.log_file)
init_helper.set_random_seed(args.seed)
# logger.info(vars(args))
val_fscore, _ = evaluate(model, cnn, seg_algo, val_loader, args.nms_thresh, args.device)
# val_fscore, _ = evaluate(model, val_loader, args.nms_thresh, args.device)
if max_val_fscore < val_fscore:
max_val_fscore = val_fscore
torch.save(model.state_dict(), str(save_path))
logger.info(f'Epoch: {epoch}/{args.max_epoch} '
f'Loss: {stats.cls_loss:.4f}/{stats.loc_loss:.4f}/{stats.loss:.4f} '
f'F-score cur/max: {val_fscore:.4f}/{max_val_fscore:.4f}')
return max_val_fscore
|
[
"evaluate.evaluate"
] |
[((770, 789), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (787, 789), False, 'import logging\n'), ((1138, 1292), 'anchor_based.dsnet.DSNet', 'DSNet', ([], {'base_model': 'args.base_model', 'num_feature': 'args.num_feature', 'num_hidden': 'args.num_hidden', 'anchor_scales': 'args.anchor_scales', 'num_head': 'args.num_head'}), '(base_model=args.base_model, num_feature=args.num_feature, num_hidden=\n args.num_hidden, anchor_scales=args.anchor_scales, num_head=args.num_head)\n', (1143, 1292), False, 'from anchor_based.dsnet import DSNet\n'), ((1473, 1545), 'torch.optim.Adam', 'torch.optim.Adam', (['parameters'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(parameters, lr=args.lr, weight_decay=args.weight_decay)\n', (1489, 1545), False, 'import torch\n'), ((1621, 1666), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['train_keys']"], {}), "(split['train_keys'])\n", (1645, 1666), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((1686, 1733), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True)\n', (1708, 1733), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((1749, 1793), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['test_keys']"], {}), "(split['test_keys'])\n", (1773, 1793), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((1811, 1857), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['val_set'], {'shuffle': '(False)'}), '(val_set, shuffle=False)\n', (1833, 1857), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((1957, 2013), 'helpers.data_helper.AverageMeter', 'data_helper.AverageMeter', (['"""loss"""', '"""cls_loss"""', '"""loc_loss"""'], {}), "('loss', 'cls_loss', 'loc_loss')\n", (1981, 2013), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((6297, 6324), 'helpers.init_helper.get_arguments', 'init_helper.get_arguments', ([], {}), '()\n', (6322, 6324), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((6394, 6448), 'helpers.init_helper.init_logger', 'init_helper.init_logger', (['args.model_dir', 'args.log_file'], {}), '(args.model_dir, args.log_file)\n', (6417, 6448), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((6457, 6495), 'helpers.init_helper.set_random_seed', 'init_helper.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (6484, 6495), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((6556, 6628), 'evaluate.evaluate', 'evaluate', (['model', 'cnn', 'seg_algo', 'val_loader', 'args.nms_thresh', 'args.device'], {}), '(model, cnn, seg_algo, val_loader, args.nms_thresh, args.device)\n', (6564, 6628), False, 'from evaluate import evaluate\n'), ((1023, 1058), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.bias', '(0.1)'], {}), '(module.bias, 0.1)\n', (1040, 1058), False, 'from torch import nn\n'), ((3947, 4013), 'helpers.vsumm_helper.get_keyshot_summ', 'vsumm_helper.get_keyshot_summ', (['gtscore', 'cps', 'n_frames', 'nfps', 'picks'], {}), '(gtscore, cps, n_frames, nfps, picks)\n', (3976, 4013), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((4052, 4094), 'helpers.vsumm_helper.downsample_summ', 'vsumm_helper.downsample_summ', (['keyshot_summ'], {}), '(keyshot_summ)\n', (4080, 4094), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((4183, 4211), 'helpers.bbox_helper.seq2bbox', 'bbox_helper.seq2bbox', (['target'], {}), '(target)\n', (4203, 4211), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((4240, 4272), 'helpers.bbox_helper.lr2cw', 'bbox_helper.lr2cw', (['target_bboxes'], {}), '(target_bboxes)\n', (4257, 4272), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((4295, 4353), 'anchor_based.anchor_helper.get_anchors', 'anchor_helper.get_anchors', (['target.size', 'args.anchor_scales'], {}), '(target.size, args.anchor_scales)\n', (4320, 4353), False, 'from anchor_based import anchor_helper\n'), ((4466, 4538), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.pos_iou_thresh'], {}), '(anchors, target_bboxes, args.pos_iou_thresh)\n', (4493, 4538), False, 'from anchor_based import anchor_helper\n'), ((4661, 4733), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.neg_iou_thresh'], {}), '(anchors, target_bboxes, args.neg_iou_thresh)\n', (4688, 4733), False, 'from anchor_based import anchor_helper\n'), ((4953, 5032), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.incomplete_iou_thresh'], {}), '(anchors, target_bboxes, args.incomplete_iou_thresh)\n', (4980, 5032), False, 'from anchor_based import anchor_helper\n'), ((5799, 5844), 'anchor_based.losses.calc_loc_loss', 'calc_loc_loss', (['pred_loc', 'loc_label', 'cls_label'], {}), '(pred_loc, loc_label, cls_label)\n', (5812, 5844), False, 'from anchor_based.losses import calc_cls_loss, calc_loc_loss\n'), ((5868, 5902), 'anchor_based.losses.calc_cls_loss', 'calc_cls_loss', (['pred_cls', 'cls_label'], {}), '(pred_cls, cls_label)\n', (5881, 5902), False, 'from anchor_based.losses import calc_cls_loss, calc_loc_loss\n'), ((961, 973), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (968, 973), True, 'import numpy as np\n'), ((3474, 3511), 'numpy.vstack', 'np.vstack', (['(begin_frames, end_frames)'], {}), '((begin_frames, end_frames))\n', (3483, 3511), True, 'import numpy as np\n'), ((5399, 5443), 'torch.tensor', 'torch.tensor', (['cls_label'], {'dtype': 'torch.float32'}), '(cls_label, dtype=torch.float32)\n', (5411, 5443), False, 'import torch\n'), ((5484, 5528), 'torch.tensor', 'torch.tensor', (['loc_label'], {'dtype': 'torch.float32'}), '(loc_label, dtype=torch.float32)\n', (5496, 5528), False, 'import torch\n'), ((5564, 5602), 'torch.tensor', 'torch.tensor', (['seq'], {'dtype': 'torch.float32'}), '(seq, dtype=torch.float32)\n', (5576, 5602), False, 'import torch\n')]
|
from __future__ import print_function, division
import sys
sys.path.append('core')
import copy
from datetime import datetime
import argparse
import os
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
from network import CRAFT
from raft import RAFT
from craft_nogma import CRAFT_nogma
import datasets
import evaluate
from utils.utils import print0
from torch.cuda.amp import GradScaler
# exclude extremly large displacements
MAX_FLOW = 400
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def sequence_loss(flow_preds, flow_gt, valid, gamma):
""" Loss function defined over sequence of flow predictions """
# n_predictions = args.iters = 12
n_predictions = len(flow_preds)
flow_loss = 0.0
# exclude invalid pixels and extremely large displacements.
# MAX_FLOW = 400.
valid = (valid >= 0.5) & ((flow_gt**2).sum(dim=1).sqrt() < MAX_FLOW)
for i in range(n_predictions):
# Exponentially increasing weights. (Eq.7 in RAFT paper)
# As i increases, flow_preds[i] is expected to be more and more accurate,
# so we are less and less tolerant to errors through gradually increased i_weight.
i_weight = gamma**(n_predictions - i - 1)
i_loss = (flow_preds[i] - flow_gt).abs()
flow_loss += i_weight * (valid[:, None] * i_loss).mean()
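    # End-point error (EPE): per-pixel L2 distance between the final flow
    # prediction and ground truth, kept only at valid pixels.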
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)]
world_size = int(os.environ.get('WORLD_SIZE', 1))
if world_size > 1:
flow_loss = reduce_tensor(flow_loss, world_size)
epe = gather_tensor(epe, world_size)
metrics = {
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, metrics
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
pct_start = 0.05
scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=args.lr, total_steps=args.num_steps+100,
pct_start=pct_start, cycle_momentum=False, anneal_strategy='linear')
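    # One-cycle schedule: ramp the LR up over the first 5% of steps (pct_start)
    # and anneal it linearly afterwards; total_steps is padded by 100 so the
    # scheduler cannot be stepped past its horizon at the end of training.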
return optimizer, scheduler
def reduce_tensor(tensor, world_size):
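    # Average a metric across all DDP ranks: sum via all_reduce, then divide
    # by the number of ranks.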
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= world_size
return rt
def gather_tensor(tensor, world_size):
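    # Concatenate per-rank tensors so statistics (e.g. EPE quantiles) are
    # computed over the full distributed batch rather than one shard.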
tensor_list = [torch.zeros_like(tensor) for _ in range(world_size)]
dist.all_gather(tensor_list, tensor)
gathered_tensor = torch.cat(tensor_list, dim=0)
return gathered_tensor
class Logger:
def __init__(self, scheduler, args):
self.args = args
self.scheduler = scheduler
self.total_steps = 0
self.running_loss_dict = {}
self.train_epe_list = []
self.train_steps_list = []
self.val_steps_list = []
self.val_results_dict = {}
def _print_training_status(self):
metrics_data = [np.mean(self.running_loss_dict[k]) for k in sorted(self.running_loss_dict.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_lr()[0])
# metrics_data[:-1]: '1px', '3px', '5px', 'epe'. metrics_data[-1] is 'time'.
metrics_str = ("{:10.4f}, "*len(metrics_data[:-1])).format(*metrics_data[:-1])
# Compute time left
time_left_sec = (self.args.num_steps - (self.total_steps+1)) * metrics_data[-1]
time_left_sec = time_left_sec.astype(int)
time_left_hm = "{:02d}h{:02d}m".format(time_left_sec // 3600, time_left_sec % 3600 // 60)
time_left_hm = f"{time_left_hm:>9}"
# print the training status
print0(training_str + metrics_str + time_left_hm)
# logging running loss to total loss
self.train_epe_list.append(np.mean(self.running_loss_dict['epe']))
self.train_steps_list.append(self.total_steps)
for key in self.running_loss_dict:
self.running_loss_dict[key] = []
def push(self, metrics):
self.total_steps += 1
for key in metrics:
if key not in self.running_loss_dict:
self.running_loss_dict[key] = []
self.running_loss_dict[key].append(metrics[key])
if self.total_steps % self.args.print_freq == self.args.print_freq-1:
self._print_training_status()
self.running_loss_dict = {}
def save_checkpoint(cp_path, model, optimizer, lr_scheduler, logger):
logger_dict = copy.copy(logger.__dict__)
for key in ('args', 'scheduler'):
if key in logger_dict:
del logger_dict[key]
save_state = { 'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'logger': logger_dict
}
torch.save(save_state, cp_path)
print0(f"{cp_path} saved")
def load_checkpoint(args, model, optimizer, lr_scheduler, logger):
checkpoint = torch.load(args.restore_ckpt, map_location='cuda')
if 'model' in checkpoint:
msg = model.load_state_dict(checkpoint['model'], strict=False)
else:
# Load old checkpoint.
msg = model.load_state_dict(checkpoint, strict=False)
print0(f"Model checkpoint loaded from {args.restore_ckpt}: {msg}.")
if args.load_optimizer_state and 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
print0("Optimizer state loaded.")
else:
print0("Optimizer state NOT loaded.")
if args.load_scheduler_state and 'lr_scheduler' in checkpoint:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
print0("Scheduler state loaded.")
if 'logger' in checkpoint:
# https://stackoverflow.com/questions/243836/how-to-copy-all-properties-of-an-object-to-another-object-in-python
logger.__dict__.update(checkpoint['logger'])
print0("Logger loaded.")
else:
print0("Logger NOT loaded.")
else:
print0("Scheduler state NOT loaded.")
print0("Logger NOT loaded.")
def main(args):
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
if args.raft:
model = RAFT(args)
elif args.nogma:
model = CRAFT_nogma(args)
else:
model = CRAFT(args)
model.cuda()
model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
print0(f"Parameter Count: {count_parameters(model)}")
model.train()
train_loader = datasets.fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
logger = Logger(scheduler, args)
if args.restore_ckpt is not None:
load_checkpoint(args, model, optimizer, scheduler, logger)
if args.freeze_bn and args.stage != 'chairs':
model.module.freeze_bn()
while logger.total_steps <= args.num_steps:
train(model, train_loader, optimizer, scheduler, logger, args)
if logger.total_steps >= args.num_steps:
plot_train(logger, args)
plot_val(logger, args)
break
PATH = args.output+f'/{args.name}.pth'
if args.local_rank == 0:
save_checkpoint(PATH, model, optimizer, scheduler, logger)
return PATH
def train(model, train_loader, optimizer, scheduler, logger, args):
# Recreate scaler every epoch.
scaler = GradScaler(enabled=args.mixed_precision)
for i_batch, data_blob in enumerate(train_loader):
tic = time.time()
# the last element in data_blob is extra_info, which is a list of strings.
image1, image2, flow, valid = [x.cuda() for x in data_blob[:4]]
if args.add_noise:
stdv = np.random.uniform(0.0, 5.0)
image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
optimizer.zero_grad()
flow_pred = model(image1, image2, iters=args.iters)
loss, metrics = sequence_loss(flow_pred, flow, valid, args.gamma)
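        # Standard AMP recipe: scale the loss to avoid fp16 gradient underflow,
        # unscale before clipping so the threshold applies to true gradient
        # magnitudes, then step via the scaler so inf/NaN batches are skipped.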
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
scaler.step(optimizer)
if scheduler is not None:
scheduler.step()
scaler.update()
toc = time.time()
metrics['time'] = toc - tic
# metrics is a dict, with keys: 'epe', '1px', '3px', '5px', 'time'.
logger.push(metrics)
# Validate
if logger.total_steps % args.val_freq == args.val_freq - 1:
PATH = args.output + f'/{logger.total_steps+1}_{args.name}.pth'
if args.local_rank == 0:
save_checkpoint(PATH, model, optimizer, scheduler, logger)
validate(model, args, logger)
plot_train(logger, args)
plot_val(logger, args)
if logger.total_steps >= args.num_steps:
break
def validate(model, args, logger):
model.eval()
results = {}
# Evaluate results
for val_dataset in args.validation:
if val_dataset == 'chairs':
results.update(evaluate.validate_chairs(model.module, args.iters))
        elif val_dataset == 'things':
results.update(evaluate.validate_things(model.module, args.iters))
elif val_dataset == 'sintel':
results.update(evaluate.validate_sintel(model.module, args.iters))
elif val_dataset == 'kitti':
results.update(evaluate.validate_kitti(model.module, args.iters))
elif val_dataset == 'kittitrain':
results.update(evaluate.validate_kitti(model.module, args.iters, use_kitti_train=True))
elif val_dataset == 'viper':
results.update(evaluate.validate_viper(model.module, args.iters))
# Record results in logger
for key in results.keys():
if key not in logger.val_results_dict.keys():
logger.val_results_dict[key] = []
logger.val_results_dict[key].append(results[key])
logger.val_steps_list.append(logger.total_steps)
model.train()
if args.freeze_bn and args.stage != 'chairs':
model.module.freeze_bn()
def plot_val(logger, args):
for key in logger.val_results_dict.keys():
# plot validation curve
plt.figure()
plt.plot(logger.val_steps_list, logger.val_results_dict[key])
plt.xlabel('x_steps')
plt.ylabel(key)
plt.title(f'Results for {key} for the validation set')
plt.savefig(args.output+f"/{key}.png", bbox_inches='tight')
plt.close()
def plot_train(logger, args):
# plot training curve
plt.figure()
plt.plot(logger.train_steps_list, logger.train_epe_list)
plt.xlabel('x_steps')
plt.ylabel('EPE')
plt.title('Running training error (EPE)')
plt.savefig(args.output+"/train_epe.png", bbox_inches='tight')
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='craft', help="name your experiment")
parser.add_argument('--stage', help="determines which dataset to use for training")
parser.add_argument('--craft', dest='craft', action='store_true',
help='use craft (Cross-Attentional Flow Transformer)')
parser.add_argument('--setrans', dest='setrans', action='store_true',
help='use setrans (Squeeze-Expansion Transformer) as the intra-frame attention')
parser.add_argument('--raft', action='store_true', help='use raft')
parser.add_argument('--nogma', action='store_true', help='(ablation) Do not use GMA')
parser.add_argument('--validation', type=str, nargs='+')
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--loadopt', dest='load_optimizer_state', action='store_true',
                        help='Load optimizer state from checkpoint (default: not load)')
parser.add_argument('--loadsched', dest='load_scheduler_state', action='store_true',
help='Load scheduler state from checkpoint (default: not load)')
parser.add_argument('--output', type=str, default='checkpoints',
help='output directory to save checkpoints and plots')
parser.add_argument('--radius', dest='corr_radius', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.00002)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--workers', dest='num_workers', type=int, default=4)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--gpus', type=int, nargs='+', default=[0, 1])
parser.add_argument('--mixed_precision', default=False, action='store_true', help='use mixed precision')
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate for fnet and cnet')
parser.add_argument('--upsample-learn', action='store_true', default=False,
help='If True, use learned upsampling, otherwise, use bilinear upsampling.')
parser.add_argument('--gamma', type=float, default=0.8, help='exponential loss weighting of the sequential predictions')
parser.add_argument('--add_noise', action='store_true')
parser.add_argument('--shiftprob', dest='shift_aug_prob', type=float,
default=0.0, help='Probability of shifting augmentation')
parser.add_argument('--shiftsigmas', dest='shift_sigmas', default="16,10", type=str,
help='Stds of shifts for shifting consistency loss')
# default: not to freeze bn.
parser.add_argument('--freeze_bn', action='store_true')
parser.add_argument('--iters', type=int, default=12)
parser.add_argument('--val_freq', type=int, default=10000,
help='validation frequency')
parser.add_argument('--print_freq', type=int, default=100,
help='printing frequency')
parser.add_argument('--model_name', default='', help='specify model name')
parser.add_argument('--position_only', default=False, action='store_true',
help='(GMA) only use position-wise attention')
parser.add_argument('--position_and_content', default=False, action='store_true',
help='(GMA) use position and content-wise attention')
parser.add_argument('--num_heads', default=1, type=int,
help='(GMA) number of heads in attention and aggregation')
parser.add_argument('--posr', dest='pos_bias_radius', type=int, default=7,
help='The radius of positional biases')
# f1trans is for ablation only, not suggested.
parser.add_argument('--f1', dest='f1trans', type=str,
choices=['none', 'full', 'half'], default='none',
help='Whether to use transformer on frame 1 features. '
'Half: do self-attention only on half of the channels')
parser.add_argument('--f2', dest='f2trans', type=str,
choices=['none', 'full', 'half'], default='none',
help='Whether to use transformer on frame 2 features. '
'Half: do self-attention only on half of the channels')
parser.add_argument('--f2posw', dest='f2_pos_code_weight', type=float, default=0.5)
parser.add_argument('--f2radius', dest='f2_attn_mask_radius', type=int, default=-1)
parser.add_argument('--intermodes', dest='inter_num_modes', type=int, default=4,
help='Number of modes in inter-frame attention')
parser.add_argument('--intramodes', dest='intra_num_modes', type=int, default=4,
help='Number of modes in intra-frame attention')
parser.add_argument('--f2modes', dest='f2_num_modes', type=int, default=4,
help='Number of modes in F2 Transformer')
# In inter-frame attention, having QK biases performs slightly better.
parser.add_argument('--interqknobias', dest='inter_qk_have_bias', action='store_false',
help='Do not use biases in the QK projections in the inter-frame attention')
parser.add_argument('--interpos', dest='inter_pos_code_type', type=str,
choices=['lsinu', 'bias'], default='bias')
parser.add_argument('--interposw', dest='inter_pos_code_weight', type=float, default=0.5)
parser.add_argument('--intrapos', dest='intra_pos_code_type', type=str,
choices=['lsinu', 'bias'], default='bias')
parser.add_argument('--intraposw', dest='intra_pos_code_weight', type=float, default=1.0)
args = parser.parse_args()
args.ddp = True
torch.manual_seed(1234)
np.random.seed(1234)
args.local_rank = int(os.environ.get('LOCAL_RANK', 0))
if args.local_rank == 0 and not os.path.isdir(args.output):
os.makedirs(args.output)
args.shift_sigmas = [ int(s) for s in args.shift_sigmas.split(",") ]
timestamp = datetime.now().strftime("%m%d%H%M")
print0("Time: {}".format(timestamp))
print0("Args:\n{}".format(args))
main(args)
|
[
"evaluate.validate_chairs",
"evaluate.validate_kitti",
"evaluate.validate_viper",
"evaluate.validate_sintel",
"evaluate.validate_things"
] |
[((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((201, 222), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (215, 222), False, 'import matplotlib\n'), ((2289, 2467), 'torch.optim.lr_scheduler.OneCycleLR', 'optim.lr_scheduler.OneCycleLR', ([], {'optimizer': 'optimizer', 'max_lr': 'args.lr', 'total_steps': '(args.num_steps + 100)', 'pct_start': 'pct_start', 'cycle_momentum': '(False)', 'anneal_strategy': '"""linear"""'}), "(optimizer=optimizer, max_lr=args.lr,\n total_steps=args.num_steps + 100, pct_start=pct_start, cycle_momentum=\n False, anneal_strategy='linear')\n", (2318, 2467), True, 'import torch.optim as optim\n'), ((2604, 2645), 'torch.distributed.all_reduce', 'dist.all_reduce', (['rt'], {'op': 'dist.ReduceOp.SUM'}), '(rt, op=dist.ReduceOp.SUM)\n', (2619, 2645), True, 'import torch.distributed as dist\n'), ((2797, 2833), 'torch.distributed.all_gather', 'dist.all_gather', (['tensor_list', 'tensor'], {}), '(tensor_list, tensor)\n', (2812, 2833), True, 'import torch.distributed as dist\n'), ((2856, 2885), 'torch.cat', 'torch.cat', (['tensor_list'], {'dim': '(0)'}), '(tensor_list, dim=0)\n', (2865, 2885), False, 'import torch\n'), ((4814, 4840), 'copy.copy', 'copy.copy', (['logger.__dict__'], {}), '(logger.__dict__)\n', (4823, 4840), False, 'import copy\n'), ((5191, 5222), 'torch.save', 'torch.save', (['save_state', 'cp_path'], {}), '(save_state, cp_path)\n', (5201, 5222), False, 'import torch\n'), ((5227, 5253), 'utils.utils.print0', 'print0', (['f"""{cp_path} saved"""'], {}), "(f'{cp_path} saved')\n", (5233, 5253), False, 'from utils.utils import print0\n'), ((5339, 5389), 'torch.load', 'torch.load', (['args.restore_ckpt'], {'map_location': '"""cuda"""'}), "(args.restore_ckpt, map_location='cuda')\n", (5349, 5389), False, 'import torch\n'), ((5600, 5667), 'utils.utils.print0', 'print0', (['f"""Model checkpoint loaded from {args.restore_ckpt}: {msg}."""'], {}), "(f'Model checkpoint loaded from {args.restore_ckpt}: {msg}.')\n", (5606, 5667), False, 'from utils.utils import print0\n'), ((6504, 6542), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (6525, 6542), False, 'import torch\n'), ((6547, 6621), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (6583, 6621), False, 'import torch\n'), ((6832, 6968), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.local_rank],\n output_device=args.local_rank, find_unused_parameters=True)\n', (6867, 6968), True, 'import torch.nn as nn\n'), ((7159, 7190), 'datasets.fetch_dataloader', 'datasets.fetch_dataloader', (['args'], {}), '(args)\n', (7184, 7190), False, 'import datasets\n'), ((8008, 8048), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {'enabled': 'args.mixed_precision'}), '(enabled=args.mixed_precision)\n', (8018, 8048), False, 'from torch.cuda.amp import GradScaler\n'), ((11311, 11323), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11321, 11323), True, 'import matplotlib.pyplot as plt\n'), ((11328, 11384), 'matplotlib.pyplot.plot', 'plt.plot', (['logger.train_steps_list', 'logger.train_epe_list'], {}), '(logger.train_steps_list, logger.train_epe_list)\n', (11336, 11384), True, 'import matplotlib.pyplot as plt\n'), ((11389, 11410), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x_steps"""'], {}), "('x_steps')\n", (11399, 11410), True, 'import matplotlib.pyplot as plt\n'), ((11415, 11432), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""EPE"""'], {}), "('EPE')\n", (11425, 11432), True, 'import matplotlib.pyplot as plt\n'), ((11437, 11478), 'matplotlib.pyplot.title', 'plt.title', (['"""Running training error (EPE)"""'], {}), "('Running training error (EPE)')\n", (11446, 11478), True, 'import matplotlib.pyplot as plt\n'), ((11483, 11547), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + '/train_epe.png')"], {'bbox_inches': '"""tight"""'}), "(args.output + '/train_epe.png', bbox_inches='tight')\n", (11494, 11547), True, 'import matplotlib.pyplot as plt\n'), ((11550, 11561), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11559, 11561), True, 'import matplotlib.pyplot as plt\n'), ((11604, 11629), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11627, 11629), False, 'import argparse\n'), ((17800, 17823), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (17817, 17823), False, 'import torch\n'), ((17828, 17848), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (17842, 17848), True, 'import numpy as np\n'), ((1651, 1682), 'os.environ.get', 'os.environ.get', (['"""WORLD_SIZE"""', '(1)'], {}), "('WORLD_SIZE', 1)\n", (1665, 1682), False, 'import os\n'), ((2740, 2764), 'torch.zeros_like', 'torch.zeros_like', (['tensor'], {}), '(tensor)\n', (2756, 2764), False, 'import torch\n'), ((4000, 4049), 'utils.utils.print0', 'print0', (['(training_str + metrics_str + time_left_hm)'], {}), '(training_str + metrics_str + time_left_hm)\n', (4006, 4049), False, 'from utils.utils import print0\n'), ((5800, 5833), 'utils.utils.print0', 'print0', (['"""Optimizer state loaded."""'], {}), "('Optimizer state loaded.')\n", (5806, 5833), False, 'from utils.utils import print0\n'), ((5852, 5889), 'utils.utils.print0', 'print0', (['"""Optimizer state NOT loaded."""'], {}), "('Optimizer state NOT loaded.')\n", (5858, 5889), False, 'from utils.utils import print0\n'), ((6039, 6072), 'utils.utils.print0', 'print0', (['"""Scheduler state loaded."""'], {}), "('Scheduler state loaded.')\n", (6045, 6072), False, 'from utils.utils import print0\n'), ((6400, 6437), 'utils.utils.print0', 'print0', (['"""Scheduler state NOT loaded."""'], {}), "('Scheduler state NOT loaded.')\n", (6406, 6437), False, 'from utils.utils import print0\n'), ((6446, 6474), 'utils.utils.print0', 'print0', (['"""Logger NOT loaded."""'], {}), "('Logger NOT loaded.')\n", (6452, 6474), False, 'from utils.utils import print0\n'), ((6698, 6708), 'raft.RAFT', 'RAFT', (['args'], {}), '(args)\n', (6702, 6708), False, 'from raft import RAFT\n'), ((8119, 8130), 'time.time', 'time.time', ([], {}), '()\n', (8128, 8130), False, 'import time\n'), ((8986, 8997), 'time.time', 'time.time', ([], {}), '()\n', (8995, 8997), False, 'import time\n'), ((10961, 10973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10971, 10973), True, 'import matplotlib.pyplot as plt\n'), ((10982, 11043), 'matplotlib.pyplot.plot', 'plt.plot', (['logger.val_steps_list', 'logger.val_results_dict[key]'], {}), '(logger.val_steps_list, logger.val_results_dict[key])\n', (10990, 11043), True, 'import matplotlib.pyplot as plt\n'), ((11052, 11073), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x_steps"""'], {}), "('x_steps')\n", (11062, 11073), True, 'import matplotlib.pyplot as plt\n'), ((11082, 11097), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['key'], {}), '(key)\n', (11092, 11097), True, 'import matplotlib.pyplot as plt\n'), ((11106, 11160), 'matplotlib.pyplot.title', 'plt.title', (['f"""Results for {key} for the validation set"""'], {}), "(f'Results for {key} for the validation set')\n", (11115, 11160), True, 'import matplotlib.pyplot as plt\n'), ((11169, 11230), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + f'/{key}.png')"], {'bbox_inches': '"""tight"""'}), "(args.output + f'/{key}.png', bbox_inches='tight')\n", (11180, 11230), True, 'import matplotlib.pyplot as plt\n'), ((11237, 11248), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11246, 11248), True, 'import matplotlib.pyplot as plt\n'), ((17876, 17907), 'os.environ.get', 'os.environ.get', (['"""LOCAL_RANK"""', '(0)'], {}), "('LOCAL_RANK', 0)\n", (17890, 17907), False, 'import os\n'), ((17982, 18006), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (17993, 18006), False, 'import os\n'), ((1531, 1580), 'torch.sum', 'torch.sum', (['((flow_preds[-1] - flow_gt) ** 2)'], {'dim': '(1)'}), '((flow_preds[-1] - flow_gt) ** 2, dim=1)\n', (1540, 1580), False, 'import torch\n'), ((3293, 3327), 'numpy.mean', 'np.mean', (['self.running_loss_dict[k]'], {}), '(self.running_loss_dict[k])\n', (3300, 3327), True, 'import numpy as np\n'), ((4131, 4169), 'numpy.mean', 'np.mean', (["self.running_loss_dict['epe']"], {}), "(self.running_loss_dict['epe'])\n", (4138, 4169), True, 'import numpy as np\n'), ((6302, 6326), 'utils.utils.print0', 'print0', (['"""Logger loaded."""'], {}), "('Logger loaded.')\n", (6308, 6326), False, 'from utils.utils import print0\n'), ((6353, 6381), 'utils.utils.print0', 'print0', (['"""Logger NOT loaded."""'], {}), "('Logger NOT loaded.')\n", (6359, 6381), False, 'from utils.utils import print0\n'), ((6746, 6763), 'craft_nogma.CRAFT_nogma', 'CRAFT_nogma', (['args'], {}), '(args)\n', (6757, 6763), False, 'from craft_nogma import CRAFT_nogma\n'), ((6790, 6801), 'network.CRAFT', 'CRAFT', (['args'], {}), '(args)\n', (6795, 6801), False, 'from network import CRAFT\n'), ((8333, 8360), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(5.0)'], {}), '(0.0, 5.0)\n', (8350, 8360), True, 'import numpy as np\n'), ((17946, 17972), 'os.path.isdir', 'os.path.isdir', (['args.output'], {}), '(args.output)\n', (17959, 17972), False, 'import os\n'), ((18101, 18115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18113, 18115), False, 'from datetime import datetime\n'), ((9808, 9858), 'evaluate.validate_chairs', 'evaluate.validate_chairs', (['model.module', 'args.iters'], {}), '(model.module, args.iters)\n', (9832, 9858), False, 'import evaluate\n'), ((9923, 9973), 'evaluate.validate_things', 'evaluate.validate_things', (['model.module', 'args.iters'], {}), '(model.module, args.iters)\n', (9947, 9973), False, 'import evaluate\n'), ((10040, 10090), 'evaluate.validate_sintel', 'evaluate.validate_sintel', (['model.module', 'args.iters'], {}), '(model.module, args.iters)\n', (10064, 10090), False, 'import evaluate\n'), ((10156, 10205), 'evaluate.validate_kitti', 'evaluate.validate_kitti', (['model.module', 'args.iters'], {}), '(model.module, args.iters)\n', (10179, 10205), False, 'import evaluate\n'), ((10276, 10347), 'evaluate.validate_kitti', 'evaluate.validate_kitti', (['model.module', 'args.iters'], {'use_kitti_train': '(True)'}), '(model.module, args.iters, use_kitti_train=True)\n', (10299, 10347), False, 'import evaluate\n'), ((10413, 10462), 'evaluate.validate_viper', 'evaluate.validate_viper', (['model.module', 'args.iters'], {}), '(model.module, args.iters)\n', (10436, 10462), False, 'import evaluate\n'), ((8399, 8425), 'torch.randn', 'torch.randn', (['*image1.shape'], {}), '(*image1.shape)\n', (8410, 8425), False, 'import torch\n'), ((8490, 8516), 'torch.randn', 'torch.randn', (['*image2.shape'], {}), '(*image2.shape)\n', (8501, 8516), False, 'import torch\n')]
|
# python train.py anchor-free --model-dir ../models/af_mobilenet --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn mobilenet --base-model attention --num-feature 1280 --num-head 10 --nms-thresh 0.4
# python train.py anchor-free --model-dir ../models/af_default --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn default --base-model attention --num-feature 1024 --num-head 8 --num-hidden 128 --nms-thresh 0.4
# python train.py anchor-free --model-dir ../models/af_mobilenet_bilstm --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn mobilenet --base-model bilstm --num-feature 1280 --nms-thresh 0.4
# python train.py anchor-based --model-dir ../models/ab_mobilenet_bilstm --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn mobilenet --base-model bilstm --num-feature 1280 --nms-thresh 0.4
# python train.py anchor-based --model-dir ../models/ab_squeeze --splits ../splits/tvsum.yml ../splits/summe.yml --max-epoch 50 --cnn squeeze --base-model attention --num-feature 1000 --num-head 10 --num-hidden 125 --nms-thresh 0.4
import logging
import torch
import numpy as np
from anchor_free import anchor_free_helper
from anchor_free.dsnet_af import DSNetAF
from anchor_free.losses import calc_ctr_loss, calc_cls_loss, calc_loc_loss
from evaluate import evaluate
from helpers import init_helper, data_helper, vsumm_helper, bbox_helper
logger = logging.getLogger()
def train(args, split, save_path):
model = DSNetAF(base_model=args.base_model, num_feature=args.num_feature,
num_hidden=args.num_hidden, num_head=args.num_head)
model = model.to(args.device)
model.train()
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(parameters, lr=args.lr,
weight_decay=args.weight_decay)
max_val_fscore = -1
train_set = data_helper.VideoDataset(split['train_keys'])
train_loader = data_helper.DataLoader(train_set, shuffle=True)
val_set = data_helper.VideoDataset(split['test_keys'])
val_loader = data_helper.DataLoader(val_set, shuffle=False)
cnn = args.cnn
for epoch in range(args.max_epoch):
model.train()
stats = data_helper.AverageMeter('loss', 'cls_loss', 'loc_loss',
'ctr_loss')
#for _, seq, gtscore, change_points, n_frames, nfps, picks, _ in train_loader:
for _, _, n_frames, picks, gtscore, _, _, \
seq_default, cps_default, nfps_default, \
seq_lenet, seq_alexnet, seq_mobilenet, seq_squeeze, seq_resnet, \
seq_lenet_c, seq_alexnet_c, seq_mobilenet_c, seq_squeeze_c, seq_resnet_c, \
cps_lenet_c, cps_alexnet_c, cps_mobilenet_c, cps_squeeze_c, cps_resnet_c, \
_, _, _, cps_lenet, cps_alexnet, cps_mobilenet, cps_squeeze, cps_resnet in train_loader:
if cnn == "default":
seq = seq_default
cps = cps_default
nfps = nfps_default
            else:
                if cnn == "lenet":
                    seq = seq_lenet_c
                    change_points = cps_lenet_c
                elif cnn == "alexnet":
                    seq = seq_alexnet_c
                    change_points = cps_alexnet_c
                elif cnn == "mobilenet":
                    seq = seq_mobilenet_c
                    change_points = cps_mobilenet_c
                elif cnn == "squeeze":
                    seq = seq_squeeze_c
                    change_points = cps_squeeze_c
                elif cnn == "resnet":
                    seq = seq_resnet_c
                    change_points = cps_resnet_c
                else:
                    raise ValueError(f'Unknown cnn backbone: {cnn}')
                # Turn the change-point positions t0, t1, ..., t_{m-1} into
                # per-segment (begin, end) frame pairs and segment lengths.
                begin_frames = change_points[:-1]
                end_frames = change_points[1:]
                cps = np.vstack((begin_frames, end_frames)).T
                nfps = end_frames - begin_frames
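            # Worked example: change_points = [0, 40, 95, 160] gives
            # cps = [[0, 40], [40, 95], [95, 160]] (begin/end frame of each
            # segment) and nfps = [40, 55, 65] (frames per segment).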
keyshot_summ = vsumm_helper.get_keyshot_summ(
gtscore, cps, n_frames, nfps, picks)
target = vsumm_helper.downsample_summ(keyshot_summ)
if not target.any():
continue
seq = torch.tensor(seq, dtype=torch.float32).unsqueeze(0).to(args.device)
cls_label = target
loc_label = anchor_free_helper.get_loc_label(target)
ctr_label = anchor_free_helper.get_ctr_label(target, loc_label)
pred_cls, pred_loc, pred_ctr = model(seq)
cls_label = torch.tensor(cls_label, dtype=torch.float32).to(args.device)
loc_label = torch.tensor(loc_label, dtype=torch.float32).to(args.device)
ctr_label = torch.tensor(ctr_label, dtype=torch.float32).to(args.device)
cls_loss = calc_cls_loss(pred_cls, cls_label, args.cls_loss)
loc_loss = calc_loc_loss(pred_loc, loc_label, cls_label,
args.reg_loss)
ctr_loss = calc_ctr_loss(pred_ctr, ctr_label, cls_label)
loss = cls_loss + args.lambda_reg * loc_loss + args.lambda_ctr * ctr_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
stats.update(loss=loss.item(), cls_loss=cls_loss.item(),
loc_loss=loc_loss.item(), ctr_loss=ctr_loss.item())
        # Evaluate the model at the end of each epoch. `args` and `cnn` are
        # already set above; re-parsing arguments or re-seeding the RNG here
        # would reset the random state on every epoch.
        val_fscore, _ = evaluate(model, cnn, args.segment_algo, val_loader,
                                 args.nms_thresh, args.device)
if max_val_fscore < val_fscore:
max_val_fscore = val_fscore
torch.save(model.state_dict(), str(save_path))
logger.info(f'Epoch: {epoch}/{args.max_epoch} '
f'Loss: {stats.cls_loss:.4f}/{stats.loc_loss:.4f}/{stats.ctr_loss:.4f}/{stats.loss:.4f} '
f'F-score cur/max: {val_fscore:.4f}/{max_val_fscore:.4f}')
return max_val_fscore
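# Usage sketch (hypothetical; assumes `splits` is a list of dicts with
# 'train_keys'/'test_keys', as loaded from the YAML split files above):
#   args = init_helper.get_arguments()
#   for i, split in enumerate(splits):
#       fscore = train(args, split, save_path=f'{args.model_dir}/split{i}.pt')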
|
[
"evaluate.evaluate"
] |
[((1476, 1495), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1493, 1495), False, 'import logging\n'), ((1545, 1666), 'anchor_free.dsnet_af.DSNetAF', 'DSNetAF', ([], {'base_model': 'args.base_model', 'num_feature': 'args.num_feature', 'num_hidden': 'args.num_hidden', 'num_head': 'args.num_head'}), '(base_model=args.base_model, num_feature=args.num_feature,\n num_hidden=args.num_hidden, num_head=args.num_head)\n', (1552, 1666), False, 'from anchor_free.dsnet_af import DSNetAF\n'), ((1821, 1893), 'torch.optim.Adam', 'torch.optim.Adam', (['parameters'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(parameters, lr=args.lr, weight_decay=args.weight_decay)\n', (1837, 1893), False, 'import torch\n'), ((1969, 2014), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['train_keys']"], {}), "(split['train_keys'])\n", (1993, 2014), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((2034, 2081), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True)\n', (2056, 2081), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((2097, 2141), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['test_keys']"], {}), "(split['test_keys'])\n", (2121, 2141), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((2159, 2205), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['val_set'], {'shuffle': '(False)'}), '(val_set, shuffle=False)\n', (2181, 2205), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((2305, 2373), 'helpers.data_helper.AverageMeter', 'data_helper.AverageMeter', (['"""loss"""', '"""cls_loss"""', '"""loc_loss"""', '"""ctr_loss"""'], {}), "('loss', 'cls_loss', 'loc_loss', 'ctr_loss')\n", (2329, 2373), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((5627, 5654), 'helpers.init_helper.get_arguments', 'init_helper.get_arguments', ([], {}), '()\n', (5652, 5654), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((5724, 5778), 'helpers.init_helper.init_logger', 'init_helper.init_logger', (['args.model_dir', 'args.log_file'], {}), '(args.model_dir, args.log_file)\n', (5747, 5778), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((5787, 5825), 'helpers.init_helper.set_random_seed', 'init_helper.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (5814, 5825), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((5886, 5958), 'evaluate.evaluate', 'evaluate', (['model', 'cnn', 'seg_algo', 'val_loader', 'args.nms_thresh', 'args.device'], {}), '(model, cnn, seg_algo, val_loader, args.nms_thresh, args.device)\n', (5894, 5958), False, 'from evaluate import evaluate\n'), ((4198, 4264), 'helpers.vsumm_helper.get_keyshot_summ', 'vsumm_helper.get_keyshot_summ', (['gtscore', 'cps', 'n_frames', 'nfps', 'picks'], {}), '(gtscore, cps, n_frames, nfps, picks)\n', (4227, 4264), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((4303, 4345), 'helpers.vsumm_helper.downsample_summ', 'vsumm_helper.downsample_summ', (['keyshot_summ'], {}), '(keyshot_summ)\n', (4331, 4345), False, 'from helpers import init_helper, data_helper, vsumm_helper, bbox_helper\n'), ((4548, 4588), 'anchor_free.anchor_free_helper.get_loc_label', 'anchor_free_helper.get_loc_label', 
(['target'], {}), '(target)\n', (4580, 4588), False, 'from anchor_free import anchor_free_helper\n'), ((4613, 4664), 'anchor_free.anchor_free_helper.get_ctr_label', 'anchor_free_helper.get_ctr_label', (['target', 'loc_label'], {}), '(target, loc_label)\n', (4645, 4664), False, 'from anchor_free import anchor_free_helper\n'), ((5000, 5049), 'anchor_free.losses.calc_cls_loss', 'calc_cls_loss', (['pred_cls', 'cls_label', 'args.cls_loss'], {}), '(pred_cls, cls_label, args.cls_loss)\n', (5013, 5049), False, 'from anchor_free.losses import calc_ctr_loss, calc_cls_loss, calc_loc_loss\n'), ((5073, 5133), 'anchor_free.losses.calc_loc_loss', 'calc_loc_loss', (['pred_loc', 'loc_label', 'cls_label', 'args.reg_loss'], {}), '(pred_loc, loc_label, cls_label, args.reg_loss)\n', (5086, 5133), False, 'from anchor_free.losses import calc_ctr_loss, calc_cls_loss, calc_loc_loss\n'), ((5194, 5239), 'anchor_free.losses.calc_ctr_loss', 'calc_ctr_loss', (['pred_ctr', 'ctr_label', 'cls_label'], {}), '(pred_ctr, ctr_label, cls_label)\n', (5207, 5239), False, 'from anchor_free.losses import calc_ctr_loss, calc_cls_loss, calc_loc_loss\n'), ((3885, 3922), 'numpy.vstack', 'np.vstack', (['(begin_frames, end_frames)'], {}), '((begin_frames, end_frames))\n', (3894, 3922), True, 'import numpy as np\n'), ((4745, 4789), 'torch.tensor', 'torch.tensor', (['cls_label'], {'dtype': 'torch.float32'}), '(cls_label, dtype=torch.float32)\n', (4757, 4789), False, 'import torch\n'), ((4830, 4874), 'torch.tensor', 'torch.tensor', (['loc_label'], {'dtype': 'torch.float32'}), '(loc_label, dtype=torch.float32)\n', (4842, 4874), False, 'import torch\n'), ((4915, 4959), 'torch.tensor', 'torch.tensor', (['ctr_label'], {'dtype': 'torch.float32'}), '(ctr_label, dtype=torch.float32)\n', (4927, 4959), False, 'import torch\n'), ((4424, 4462), 'torch.tensor', 'torch.tensor', (['seq'], {'dtype': 'torch.float32'}), '(seq, dtype=torch.float32)\n', (4436, 4462), False, 'import torch\n')]
|
import pickle
import os
import sys
import pathlib
import numpy as np
from torch import optim
from torch.utils.data import DataLoader
import torch
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from tensorboardX import SummaryWriter
abs_path = pathlib.Path(__file__).parent.absolute()
sys.path.append(abs_path)
from dataset import PairDataset
from model import PGN
import config
from evaluate import evaluate
from dataset import collate_fn, SampleDataset
def train(dataset, val_dataset, v, start_epoch=0):
"""Train the model, evaluate it and store it.
Args:
dataset (dataset.PairDataset): The training dataset.
val_dataset (dataset.PairDataset): The evaluation dataset.
v (vocab.Vocab): The vocabulary built from the training dataset.
start_epoch (int, optional): The starting epoch number. Defaults to 0.
"""
DEVICE = torch.device("cuda" if config.is_cuda else "cpu")
model = PGN(v)
model.load_model()
model.to(DEVICE)
if config.fine_tune:
# In fine-tuning mode, we fix the weights of all parameters except attention.wc.
print('Fine-tuning mode.')
for name, params in model.named_parameters():
if name != 'attention.wc.weight':
                params.requires_grad = False
print("loading data")
train_data = SampleDataset(dataset.pairs, v)
val_data = SampleDataset(val_dataset.pairs, v)
print("initializing optimizer")
# Define the optimizer.
optimizer = optim.Adam(model.parameters(),
lr=config.learning_rate)
train_dataloader = DataLoader(dataset=train_data,
batch_size=config.batch_size,
shuffle=True,
collate_fn=collate_fn)
val_losses = np.inf
if (os.path.exists(config.losses_path)):
with open(config.losses_path, 'rb') as f:
val_losses = pickle.load(f)
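    # `val_losses` tracks the best (lowest) validation loss seen so far; it
    # stays at +inf when no previous record exists on disk.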
# torch.cuda.empty_cache()
# SummaryWriter: Log writer used for TensorboardX visualization.
writer = SummaryWriter(config.log_path)
# tqdm: A tool for drawing progress bars during training.
with tqdm(total=config.epochs) as epoch_progress:
for epoch in range(start_epoch, config.epochs):
batch_losses = [] # Get loss of each batch.
num_batches = len(train_dataloader)
with tqdm(total=num_batches//100) as batch_progress:
for batch, data in enumerate(tqdm(train_dataloader)):
x, y, x_len, y_len, oov, len_oovs = data
assert not np.any(np.isnan(x.numpy()))
if config.is_cuda: # Training with GPUs.
x = x.to(DEVICE)
y = y.to(DEVICE)
x_len = x_len.to(DEVICE)
len_oovs = len_oovs.to(DEVICE)
model.train() # Sets the module in training mode.
optimizer.zero_grad() # Clear gradients.
# Calculate loss.
loss = model(x, x_len, y, len_oovs, batch=batch, num_batches=num_batches)
batch_losses.append(loss.item())
loss.backward() # Backpropagation.
# Do gradient clipping to prevent gradient explosion.
clip_grad_norm_(model.encoder.parameters(),
config.max_grad_norm)
clip_grad_norm_(model.decoder.parameters(),
config.max_grad_norm)
clip_grad_norm_(model.attention.parameters(),
config.max_grad_norm)
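                    # Note: clipping per sub-module bounds each module's
                    # gradient norm separately; a single global clip over
                    # model.parameters() would bound the combined norm instead:
                    #   clip_grad_norm_(model.parameters(), config.max_grad_norm)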
optimizer.step() # Update weights.
# Output and record epoch loss every 100 batches.
if (batch % 100) == 0:
batch_progress.set_description(f'Epoch {epoch}')
batch_progress.set_postfix(Batch=batch,
Loss=loss.item())
batch_progress.update()
# Write loss for tensorboard.
writer.add_scalar(f'Average loss for epoch {epoch}',
np.mean(batch_losses),
global_step=batch)
# Calculate average loss over all batches in an epoch.
epoch_loss = np.mean(batch_losses)
epoch_progress.set_description(f'Epoch {epoch}')
epoch_progress.set_postfix(Loss=epoch_loss)
epoch_progress.update()
avg_val_loss = evaluate(model, val_data, epoch)
print('training loss:{}'.format(epoch_loss),
'validation loss:{}'.format(avg_val_loss))
# Update minimum evaluating loss.
if (avg_val_loss < val_losses):
torch.save(model.encoder, config.encoder_save_name)
torch.save(model.decoder, config.decoder_save_name)
torch.save(model.attention, config.attention_save_name)
torch.save(model.reduce_state, config.reduce_state_save_name)
val_losses = avg_val_loss
with open(config.losses_path, 'wb') as f:
pickle.dump(val_losses, f)
writer.close()
if __name__ == "__main__":
# Prepare dataset for training.
DEVICE = torch.device('cuda') if config.is_cuda else torch.device('cpu')
dataset = PairDataset(config.data_path,
max_src_len=config.max_src_len,
max_tgt_len=config.max_tgt_len,
truncate_src=config.truncate_src,
truncate_tgt=config.truncate_tgt)
val_dataset = PairDataset(config.val_data_path,
max_src_len=config.max_src_len,
max_tgt_len=config.max_tgt_len,
truncate_src=config.truncate_src,
truncate_tgt=config.truncate_tgt)
vocab = dataset.build_vocab(embed_file=config.embed_file)
train(dataset, val_dataset, vocab, start_epoch=0)
|
[
"evaluate.evaluate"
] |
[((320, 345), 'sys.path.append', 'sys.path.append', (['abs_path'], {}), '(abs_path)\n', (335, 345), False, 'import sys\n'), ((908, 957), 'torch.device', 'torch.device', (["('cuda' if config.is_cuda else 'cpu')"], {}), "('cuda' if config.is_cuda else 'cpu')\n", (920, 957), False, 'import torch\n'), ((971, 977), 'model.PGN', 'PGN', (['v'], {}), '(v)\n', (974, 977), False, 'from model import PGN\n'), ((1375, 1406), 'dataset.SampleDataset', 'SampleDataset', (['dataset.pairs', 'v'], {}), '(dataset.pairs, v)\n', (1388, 1406), False, 'from dataset import collate_fn, SampleDataset\n'), ((1422, 1457), 'dataset.SampleDataset', 'SampleDataset', (['val_dataset.pairs', 'v'], {}), '(val_dataset.pairs, v)\n', (1435, 1457), False, 'from dataset import collate_fn, SampleDataset\n'), ((1646, 1747), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_data', 'batch_size': 'config.batch_size', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), '(dataset=train_data, batch_size=config.batch_size, shuffle=True,\n collate_fn=collate_fn)\n', (1656, 1747), False, 'from torch.utils.data import DataLoader\n'), ((1879, 1913), 'os.path.exists', 'os.path.exists', (['config.losses_path'], {}), '(config.losses_path)\n', (1893, 1913), False, 'import os\n'), ((2120, 2150), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['config.log_path'], {}), '(config.log_path)\n', (2133, 2150), False, 'from tensorboardX import SummaryWriter\n'), ((5570, 5741), 'dataset.PairDataset', 'PairDataset', (['config.data_path'], {'max_src_len': 'config.max_src_len', 'max_tgt_len': 'config.max_tgt_len', 'truncate_src': 'config.truncate_src', 'truncate_tgt': 'config.truncate_tgt'}), '(config.data_path, max_src_len=config.max_src_len, max_tgt_len=\n config.max_tgt_len, truncate_src=config.truncate_src, truncate_tgt=\n config.truncate_tgt)\n', (5581, 5741), False, 'from dataset import PairDataset\n'), ((5854, 6027), 'dataset.PairDataset', 'PairDataset', (['config.val_data_path'], {'max_src_len': 'config.max_src_len', 'max_tgt_len': 'config.max_tgt_len', 'truncate_src': 'config.truncate_src', 'truncate_tgt': 'config.truncate_tgt'}), '(config.val_data_path, max_src_len=config.max_src_len,\n max_tgt_len=config.max_tgt_len, truncate_src=config.truncate_src,\n truncate_tgt=config.truncate_tgt)\n', (5865, 6027), False, 'from dataset import PairDataset\n'), ((2222, 2247), 'tqdm.tqdm', 'tqdm', ([], {'total': 'config.epochs'}), '(total=config.epochs)\n', (2226, 2247), False, 'from tqdm import tqdm\n'), ((5492, 5512), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5504, 5512), False, 'import torch\n'), ((5536, 5555), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5548, 5555), False, 'import torch\n'), ((263, 285), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (275, 285), False, 'import pathlib\n'), ((1991, 2005), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2002, 2005), False, 'import pickle\n'), ((4522, 4543), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (4529, 4543), True, 'import numpy as np\n'), ((4726, 4758), 'evaluate.evaluate', 'evaluate', (['model', 'val_data', 'epoch'], {}), '(model, val_data, epoch)\n', (4734, 4758), False, 'from evaluate import evaluate\n'), ((2445, 2475), 'tqdm.tqdm', 'tqdm', ([], {'total': '(num_batches // 100)'}), '(total=num_batches // 100)\n', (2449, 2475), False, 'from tqdm import tqdm\n'), ((4985, 5036), 'torch.save', 'torch.save', (['model.encoder', 'config.encoder_save_name'], {}), '(model.encoder, 
config.encoder_save_name)\n', (4995, 5036), False, 'import torch\n'), ((5053, 5104), 'torch.save', 'torch.save', (['model.decoder', 'config.decoder_save_name'], {}), '(model.decoder, config.decoder_save_name)\n', (5063, 5104), False, 'import torch\n'), ((5121, 5176), 'torch.save', 'torch.save', (['model.attention', 'config.attention_save_name'], {}), '(model.attention, config.attention_save_name)\n', (5131, 5176), False, 'import torch\n'), ((5193, 5254), 'torch.save', 'torch.save', (['model.reduce_state', 'config.reduce_state_save_name'], {}), '(model.reduce_state, config.reduce_state_save_name)\n', (5203, 5254), False, 'import torch\n'), ((5367, 5393), 'pickle.dump', 'pickle.dump', (['val_losses', 'f'], {}), '(val_losses, f)\n', (5378, 5393), False, 'import pickle\n'), ((2538, 2560), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), '(train_dataloader)\n', (2542, 2560), False, 'from tqdm import tqdm\n'), ((4346, 4367), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (4353, 4367), True, 'import numpy as np\n')]
|
""" Evaluate the output files to get the numbers reported in ACL18"""
import argparse
from os.path import join, abspath, dirname, exists
from evaluate import eval_meteor, eval_rouge
_REF_DIR = join(abspath(dirname(__file__)), 'acl18_results')
def main(args):
dec_dir = args.decode_dir
ref_dir = join(_REF_DIR, 'reference')
if args.rouge:
dec_pattern = r'(\d+).dec'
ref_pattern = '#ID#.ref'
output = eval_rouge(dec_pattern, dec_dir, ref_pattern, ref_dir)
else:
dec_pattern = '[0-9]+.dec'
ref_pattern = '[0-9]+.ref'
output = eval_meteor(dec_pattern, dec_dir, ref_pattern, ref_dir)
print(output)
if __name__ == '__main__':
assert exists(_REF_DIR)
parser = argparse.ArgumentParser(
description='Evaluate the output files to get the numbers reported'
' as in the ACL paper'
)
# choose metric to evaluate
metric_opt = parser.add_mutually_exclusive_group(required=True)
metric_opt.add_argument('--rouge', action='store_true',
help='ROUGE evaluation')
metric_opt.add_argument('--meteor', action='store_true',
help='METEOR evaluation')
parser.add_argument('--decode_dir', action='store', required=True,
help='directory of decoded summaries')
args = parser.parse_args()
main(args)
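# Example invocations (script and directory names are illustrative):
#   python eval_acl.py --rouge --decode_dir path/to/decoded
#   python eval_acl.py --meteor --decode_dir path/to/decoded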
|
[
"evaluate.eval_rouge",
"evaluate.eval_meteor"
] |
[((308, 335), 'os.path.join', 'join', (['_REF_DIR', '"""reference"""'], {}), "(_REF_DIR, 'reference')\n", (312, 335), False, 'from os.path import join, abspath, dirname, exists\n'), ((706, 722), 'os.path.exists', 'exists', (['_REF_DIR'], {}), '(_REF_DIR)\n', (712, 722), False, 'from os.path import join, abspath, dirname, exists\n'), ((736, 858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate the output files to get the numbers reported as in the ACL paper"""'}), "(description=\n 'Evaluate the output files to get the numbers reported as in the ACL paper'\n )\n", (759, 858), False, 'import argparse\n'), ((209, 226), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (216, 226), False, 'from os.path import join, abspath, dirname, exists\n'), ((440, 494), 'evaluate.eval_rouge', 'eval_rouge', (['dec_pattern', 'dec_dir', 'ref_pattern', 'ref_dir'], {}), '(dec_pattern, dec_dir, ref_pattern, ref_dir)\n', (450, 494), False, 'from evaluate import eval_meteor, eval_rouge\n'), ((592, 647), 'evaluate.eval_meteor', 'eval_meteor', (['dec_pattern', 'dec_dir', 'ref_pattern', 'ref_dir'], {}), '(dec_pattern, dec_dir, ref_pattern, ref_dir)\n', (603, 647), False, 'from evaluate import eval_meteor, eval_rouge\n')]
|
from torch.utils.data.dataloader import default_collate
import visual_visdom
import evaluate
#########################################################
## Callback-functions for evaluating model-performance ##
#########################################################
def _sample_cb(log, config, visdom=None, test_datasets=None, sample_size=64, iters_per_task=None):
'''Initiates function for evaluating samples of generative model.
[test_datasets] None or <list> of <Datasets> (if provided, also reconstructions are shown)'''
def sample_cb(generator, batch, task=1):
'''Callback-function, to evaluate sample (and reconstruction) ability of the model.'''
iteration = batch if task==1 else (task-1)*iters_per_task + batch
if iteration % log == 0:
# Evaluate reconstruction-ability of model on [test_dataset]
if test_datasets is not None:
# Reconstruct samples from current task
evaluate.show_reconstruction(generator, test_datasets[task-1], config, size=int(sample_size/2),
visdom=visdom, task=task)
# Generate samples
evaluate.show_samples(generator, config, visdom=visdom, size=sample_size,
title="Generated images after {} iters in task {}".format(batch, task))
# Return the callback-function (except if neither visdom or pdf is selected!)
return sample_cb if (visdom is not None) else None
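# Usage sketch (hypothetical trainer loop; `generator`, `config`,
# `test_datasets` and `task_id` are assumed to be defined elsewhere):
#   sample_cb = _sample_cb(log=500, config=config, visdom={'env': 'main', 'graph': 'g'},
#                          test_datasets=test_datasets, iters_per_task=2000)
#   for batch in range(1, iters_per_task + 1):
#       ...one generator training step...
#       if sample_cb is not None:
#           sample_cb(generator, batch, task=task_id)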
def _eval_cb(log, test_datasets, visdom=None, precision_dict=None, collate_fn=default_collate, iters_per_task=None,
test_size=None, classes_per_task=None, scenario="class", summary_graph=True, task_mask=False):
'''Initiates function for evaluating performance of classifier (in terms of precision).
[test_datasets] <list> of <Datasets>; also if only 1 task, it should be presented as a list!
[classes_per_task] <int> number of "active" classes per task
    [scenario] <str> how to decide which classes to include when evaluating precision'''
def eval_cb(classifier, batch, task=1):
'''Callback-function, to evaluate performance of classifier.'''
iteration = batch if task==1 else (task-1)*iters_per_task + batch
# evaluate the solver on multiple tasks (and log to visdom)
if iteration % log == 0:
evaluate.precision(classifier, test_datasets, task, iteration,
classes_per_task=classes_per_task, scenario=scenario, precision_dict=precision_dict,
collate_fn=collate_fn, test_size=test_size, visdom=visdom, summary_graph=summary_graph,
task_mask=task_mask)
## Return the callback-function (except if neither visdom or [precision_dict] is selected!)
return eval_cb if ((visdom is not None) or (precision_dict is not None)) else None
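# eval_cb is wired into the training loop the same way as sample_cb above and
# is invoked as eval_cb(classifier, batch, task=task) on every iteration; it
# only runs the actual evaluation every [log] iterations.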
##------------------------------------------------------------------------------------------------------------------##
###############################################################
## Callback-functions for keeping track of training-progress ##
###############################################################
def _solver_loss_cb(log, visdom, model=None, tasks=None, iters_per_task=None, replay=False):
'''Initiates function for keeping track of, and reporting on, the progress of the solver's training.'''
def cb(bar, iter, loss_dict, task=1):
'''Callback-function, to call on every iteration to keep track of training progress.'''
iteration = iter if task==1 else (task-1)*iters_per_task + iter
##--------------------------------PROGRESS BAR---------------------------------##
task_stm = "" if (tasks is None) else " Task: {}/{} |".format(task, tasks)
bar.set_description(
' <SOLVER> |{t_stm} training loss: {loss:.3} | training precision: {prec:.3} |'
.format(t_stm=task_stm, loss=loss_dict['loss_total'], prec=loss_dict['precision'])
)
bar.update()
##-----------------------------------------------------------------------------##
# log the loss of the solver (to visdom)
if (iteration % log == 0) and (visdom is not None):
plot_data = [loss_dict['pred']]
names = ['prediction']
if tasks is not None:
if tasks > 1:
plot_data += [loss_dict['ewc'], loss_dict['si_loss']]
names += ['EWC', 'SI']
if tasks is not None and replay:
if tasks>1:
plot_data += [loss_dict['pred_r'], loss_dict['distil_r']]
names += ['pred - r', 'KD - r']
visual_visdom.visualize_scalars(
plot_data, names, "solver: all losses ({})".format(visdom["graph"]),
iteration, env=visdom["env"], ylabel='training loss'
)
if tasks is not None:
if tasks>1:
weight_new_task = 1./task if replay else 1.
plot_data = [weight_new_task*loss_dict['pred']]
names = ['pred']
if replay:
if model.replay_targets=="hard":
plot_data += [(1-weight_new_task)*loss_dict['pred_r']]
names += ['pred - r']
elif model.replay_targets=="soft":
plot_data += [(1-weight_new_task)*loss_dict['distil_r']]
names += ['KD - r']
if model.ewc_lambda>0:
plot_data += [model.ewc_lambda * loss_dict['ewc']]
names += ['EWC (lambda={})'.format(model.ewc_lambda)]
if model.si_c>0:
plot_data += [model.si_c * loss_dict['si_loss']]
names += ['SI (c={})'.format(model.si_c)]
visual_visdom.visualize_scalars(
plot_data, names,
"solver: weighted loss ({})".format(visdom["graph"]),
iteration, env=visdom["env"], ylabel='training loss'
)
# Return the callback-function.
return cb
def _VAE_loss_cb(log, visdom, model, tasks=None, iters_per_task=None, replay=False):
'''Initiates functions for keeping track of, and reporting on, the progress of the generator's training.'''
def cb(bar, iter, loss_dict, task=1):
'''Callback-function, to perform on every iteration to keep track of training progress.'''
iteration = iter if task==1 else (task-1)*iters_per_task + iter
##--------------------------------PROGRESS BAR---------------------------------##
task_stm = "" if (tasks is None) else " Task: {}/{} |".format(task, tasks)
bar.set_description(
' <VAE> |{t_stm} training loss: {loss:.3} | training precision: {prec:.3} |'
.format(t_stm=task_stm, loss=loss_dict['loss_total'], prec=loss_dict['precision'])
)
bar.update()
##-----------------------------------------------------------------------------##
# plot training loss every [log]
if (iteration % log == 0) and (visdom is not None):
##--------------------------------PROGRESS PLOTS--------------------------------##
plot_data = [loss_dict['recon'], loss_dict['variat']]
names = ['Recon', 'Variat']
if model.lamda_pl>0:
plot_data += [loss_dict['pred']]
names += ['Prediction']
if tasks is not None and replay:
if tasks>1:
plot_data += [loss_dict['recon_r'], loss_dict['variat_r']]
names += ['Recon - r', 'Variat - r']
if model.lamda_pl>0:
plot_data += [loss_dict['pred_r'], loss_dict['distil_r']]
names += ['Pred - r', 'Distill - r']
visual_visdom.visualize_scalars(
plot_data, names, title="VAE: all losses ({})".format(visdom["graph"]),
iteration=iteration, env=visdom["env"], ylabel="training loss"
)
plot_data = list()
names = list()
weight_new_task = 1./task if replay else 1.
if model.lamda_rcl>0:
plot_data += [weight_new_task*model.lamda_rcl*loss_dict['recon']]
names += ['Recon (x{})'.format(model.lamda_rcl)]
if model.lamda_vl>0:
plot_data += [weight_new_task*model.lamda_vl*loss_dict['variat']]
names += ['Variat (x{})'.format(model.lamda_vl)]
if model.lamda_pl>0:
plot_data += [weight_new_task*model.lamda_pl*loss_dict['pred']]
names += ['Prediction (x{})'.format(model.lamda_pl)]
if tasks is not None and replay:
if tasks>1:
if model.lamda_rcl > 0:
plot_data += [(1-weight_new_task)*model.lamda_rcl * loss_dict['recon_r']]
names += ['Recon - r (x{})'.format(model.lamda_rcl)]
if model.lamda_vl > 0:
plot_data += [(1-weight_new_task)*model.lamda_vl * loss_dict['variat_r']]
names += ['Variat - r (x{})'.format(model.lamda_vl)]
if model.lamda_pl > 0:
if model.replay_targets=="hard":
plot_data += [(1-weight_new_task)*model.lamda_pl * loss_dict['pred_r']]
names += ['Prediction - r (x{})'.format(model.lamda_pl)]
elif model.replay_targets=="soft":
plot_data += [(1-weight_new_task)*model.lamda_pl * loss_dict['distil_r']]
names += ['Distill - r (x{})'.format(model.lamda_pl)]
visual_visdom.visualize_scalars(plot_data, names, title="VAE: weighted loss ({})".format(visdom["graph"]),
iteration=iteration, env=visdom["env"], ylabel="training loss")
##-----------------------------------------------------------------------------##
# Return the callback-function
return cb
|
[
"evaluate.precision"
] |
[((2402, 2672), 'evaluate.precision', 'evaluate.precision', (['classifier', 'test_datasets', 'task', 'iteration'], {'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'precision_dict': 'precision_dict', 'collate_fn': 'collate_fn', 'test_size': 'test_size', 'visdom': 'visdom', 'summary_graph': 'summary_graph', 'task_mask': 'task_mask'}), '(classifier, test_datasets, task, iteration,\n classes_per_task=classes_per_task, scenario=scenario, precision_dict=\n precision_dict, collate_fn=collate_fn, test_size=test_size, visdom=\n visdom, summary_graph=summary_graph, task_mask=task_mask)\n', (2420, 2672), False, 'import evaluate\n')]
|
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import argparse
import numpy as np
import os
from data import build_train_dataset
from gmflow.gmflow import GMFlow
from loss import flow_loss_func
from evaluate import (validate_chairs, validate_things, validate_sintel, validate_kitti,
create_sintel_submission, create_kitti_submission, inference_on_dir)
from utils.logger import Logger
from utils import misc
from utils.dist_utils import get_dist_info, init_dist, setup_for_distributed
def get_args_parser():
parser = argparse.ArgumentParser()
# dataset
parser.add_argument('--checkpoint_dir', default='tmp', type=str,
help='where to save the training log and models')
parser.add_argument('--stage', default='chairs', type=str,
help='training stage')
parser.add_argument('--image_size', default=[384, 512], type=int, nargs='+',
help='image size for training')
parser.add_argument('--padding_factor', default=16, type=int,
help='the input should be divisible by padding_factor, otherwise do padding')
parser.add_argument('--max_flow', default=400, type=int,
help='exclude very large motions during training')
parser.add_argument('--val_dataset', default=['chairs'], type=str, nargs='+',
help='validation dataset')
parser.add_argument('--with_speed_metric', action='store_true',
help='with speed metric when evaluation')
# training
parser.add_argument('--lr', default=4e-4, type=float)
parser.add_argument('--batch_size', default=12, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--grad_clip', default=1.0, type=float)
parser.add_argument('--num_steps', default=100000, type=int)
parser.add_argument('--seed', default=326, type=int)
parser.add_argument('--summary_freq', default=100, type=int)
parser.add_argument('--val_freq', default=10000, type=int)
parser.add_argument('--save_ckpt_freq', default=10000, type=int)
parser.add_argument('--save_latest_ckpt_freq', default=1000, type=int)
# resume pretrained model or resume training
parser.add_argument('--resume', default=None, type=str,
                        help='resume from a pretrained model for fine-tuning, or resume terminated training')
parser.add_argument('--strict_resume', action='store_true')
parser.add_argument('--no_resume_optimizer', action='store_true')
# GMFlow model
parser.add_argument('--num_scales', default=1, type=int,
help='basic gmflow model uses a single 1/8 feature, the refinement uses 1/4 feature')
parser.add_argument('--feature_channels', default=128, type=int)
parser.add_argument('--upsample_factor', default=8, type=int)
parser.add_argument('--num_transformer_layers', default=6, type=int)
parser.add_argument('--num_head', default=1, type=int)
parser.add_argument('--attention_type', default='swin', type=str)
parser.add_argument('--ffn_dim_expansion', default=4, type=int)
parser.add_argument('--attn_splits_list', default=[2], type=int, nargs='+',
help='number of splits in attention')
parser.add_argument('--corr_radius_list', default=[-1], type=int, nargs='+',
help='correlation radius for matching, -1 indicates global matching')
parser.add_argument('--prop_radius_list', default=[-1], type=int, nargs='+',
help='self-attention radius for flow propagation, -1 indicates global attention')
# loss
parser.add_argument('--gamma', default=0.9, type=float,
help='loss weight')
# evaluation
parser.add_argument('--eval', action='store_true')
parser.add_argument('--save_eval_to_file', action='store_true')
parser.add_argument('--evaluate_matched_unmatched', action='store_true')
# inference on a directory
parser.add_argument('--inference_dir', default=None, type=str)
parser.add_argument('--inference_size', default=None, type=int, nargs='+',
help='can specify the inference size')
parser.add_argument('--dir_paired_data', action='store_true',
help='Paired data in a dir instead of a sequence')
parser.add_argument('--save_flo_flow', action='store_true')
parser.add_argument('--pred_bidir_flow', action='store_true',
help='predict bidirectional flow')
parser.add_argument('--fwd_bwd_consistency_check', action='store_true',
help='forward backward consistency check with bidirection flow')
# predict on sintel and kitti test set for submission
parser.add_argument('--submission', action='store_true',
help='submission to sintel or kitti test sets')
parser.add_argument('--output_path', default='output', type=str,
help='where to save the prediction results')
parser.add_argument('--save_vis_flow', action='store_true',
help='visualize flow prediction as .png image')
parser.add_argument('--no_save_flo', action='store_true',
help='not save flow as .flo')
# distributed training
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--launcher', default='none', type=str, choices=['none', 'pytorch'])
parser.add_argument('--gpu_ids', default=0, type=int, nargs='+')
parser.add_argument('--count_time', action='store_true',
help='measure the inference time on sintel')
return parser
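# Typical invocations (paths and script name are illustrative):
#   python main.py --stage chairs --val_dataset chairs --checkpoint_dir ckpts/gmflow_chairs
#   python main.py --eval --resume ckpts/gmflow_chairs/step_100000.pth --val_dataset sintel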
def main(args):
if not args.eval and not args.submission and args.inference_dir is None:
if args.local_rank == 0:
print('pytorch version:', torch.__version__)
print(args)
misc.save_args(args)
misc.check_path(args.checkpoint_dir)
misc.save_command(args.checkpoint_dir)
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = True
if args.launcher == 'none':
args.distributed = False
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
args.distributed = True
# adjust batch size for each gpu
assert args.batch_size % torch.cuda.device_count() == 0
args.batch_size = args.batch_size // torch.cuda.device_count()
dist_params = dict(backend='nccl')
init_dist(args.launcher, **dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
args.gpu_ids = range(world_size)
device = torch.device('cuda:{}'.format(args.local_rank))
setup_for_distributed(args.local_rank == 0)
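        # Example: with --batch_size 12 on 4 GPUs, each process trains with a
        # per-GPU batch size of 12 // 4 = 3, and the DistributedSampler created
        # later in this script shards the training set across the 4 processes.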
# model
model = GMFlow(feature_channels=args.feature_channels,
num_scales=args.num_scales,
upsample_factor=args.upsample_factor,
num_head=args.num_head,
attention_type=args.attention_type,
ffn_dim_expansion=args.ffn_dim_expansion,
num_transformer_layers=args.num_transformer_layers,
).to(device)
if not args.eval and not args.submission and not args.inference_dir:
print('Model definition:')
print(model)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model.to(device),
device_ids=[args.local_rank],
output_device=args.local_rank)
model_without_ddp = model.module
else:
if torch.cuda.device_count() > 1:
print('Use %d GPUs' % torch.cuda.device_count())
model = torch.nn.DataParallel(model)
model_without_ddp = model.module
else:
model_without_ddp = model
num_params = sum(p.numel() for p in model.parameters())
print('Number of params:', num_params)
if not args.eval and not args.submission and args.inference_dir is None:
save_name = '%d_parameters' % num_params
open(os.path.join(args.checkpoint_dir, save_name), 'a').close()
optimizer = torch.optim.AdamW(model_without_ddp.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
start_epoch = 0
start_step = 0
# resume checkpoints
if args.resume:
print('Load checkpoint: %s' % args.resume)
loc = 'cuda:{}'.format(args.local_rank)
checkpoint = torch.load(args.resume, map_location=loc)
weights = checkpoint['model'] if 'model' in checkpoint else checkpoint
model_without_ddp.load_state_dict(weights, strict=args.strict_resume)
if 'optimizer' in checkpoint and 'step' in checkpoint and 'epoch' in checkpoint and not \
args.no_resume_optimizer:
print('Load optimizer')
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']
start_step = checkpoint['step']
print('start_epoch: %d, start_step: %d' % (start_epoch, start_step))
# evaluate
if args.eval:
val_results = {}
if 'chairs' in args.val_dataset:
results_dict = validate_chairs(model_without_ddp,
with_speed_metric=args.with_speed_metric,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
val_results.update(results_dict)
if 'things' in args.val_dataset:
results_dict = validate_things(model_without_ddp,
padding_factor=args.padding_factor,
with_speed_metric=args.with_speed_metric,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
val_results.update(results_dict)
if 'sintel' in args.val_dataset:
results_dict = validate_sintel(model_without_ddp,
count_time=args.count_time,
padding_factor=args.padding_factor,
with_speed_metric=args.with_speed_metric,
evaluate_matched_unmatched=args.evaluate_matched_unmatched,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
val_results.update(results_dict)
if 'kitti' in args.val_dataset:
results_dict = validate_kitti(model_without_ddp,
padding_factor=args.padding_factor,
with_speed_metric=args.with_speed_metric,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
val_results.update(results_dict)
if args.save_eval_to_file:
misc.check_path(args.checkpoint_dir)
val_file = os.path.join(args.checkpoint_dir, 'val_results.txt')
with open(val_file, 'a') as f:
f.write('\neval results after training done\n\n')
metrics = ['chairs_epe', 'chairs_s0_10', 'chairs_s10_40', 'chairs_s40+',
'things_clean_epe', 'things_clean_s0_10', 'things_clean_s10_40', 'things_clean_s40+',
'things_final_epe', 'things_final_s0_10', 'things_final_s10_40', 'things_final_s40+',
'sintel_clean_epe', 'sintel_clean_s0_10', 'sintel_clean_s10_40', 'sintel_clean_s40+',
'sintel_final_epe', 'sintel_final_s0_10', 'sintel_final_s10_40', 'sintel_final_s40+',
'kitti_epe', 'kitti_f1', 'kitti_s0_10', 'kitti_s10_40', 'kitti_s40+',
]
eval_metrics = []
for metric in metrics:
if metric in val_results.keys():
eval_metrics.append(metric)
metrics_values = [val_results[metric] for metric in eval_metrics]
num_metrics = len(eval_metrics)
# save as markdown format
f.write(("| {:>20} " * num_metrics + '\n').format(*eval_metrics))
f.write(("| {:20.3f} " * num_metrics).format(*metrics_values))
f.write('\n\n')
return
# Sintel and KITTI submission
if args.submission:
# NOTE: args.val_dataset is a list
if args.val_dataset[0] == 'sintel':
create_sintel_submission(model_without_ddp,
output_path=args.output_path,
padding_factor=args.padding_factor,
save_vis_flow=args.save_vis_flow,
no_save_flo=args.no_save_flo,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
elif args.val_dataset[0] == 'kitti':
create_kitti_submission(model_without_ddp,
output_path=args.output_path,
padding_factor=args.padding_factor,
save_vis_flow=args.save_vis_flow,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
else:
            raise ValueError('Unsupported dataset for submission: %s' % args.val_dataset[0])
return
    # inference on a directory
if args.inference_dir is not None:
inference_on_dir(model_without_ddp,
inference_dir=args.inference_dir,
output_path=args.output_path,
padding_factor=args.padding_factor,
inference_size=args.inference_size,
paired_data=args.dir_paired_data,
save_flo_flow=args.save_flo_flow,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
pred_bidir_flow=args.pred_bidir_flow,
fwd_bwd_consistency_check=args.fwd_bwd_consistency_check,
)
return
    # training dataset
train_dataset = build_train_dataset(args)
print('Number of training images:', len(train_dataset))
# Multi-processing
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset,
num_replicas=torch.cuda.device_count(),
rank=args.local_rank)
else:
train_sampler = None
shuffle = False if args.distributed else True
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
shuffle=shuffle, num_workers=args.num_workers,
pin_memory=True, drop_last=True,
sampler=train_sampler)
last_epoch = start_step if args.resume and start_step > 0 else -1
lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, args.lr,
args.num_steps + 10,
pct_start=0.05,
cycle_momentum=False,
anneal_strategy='cos',
last_epoch=last_epoch,
)
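    # With total_steps = args.num_steps + 10 and pct_start=0.05, the LR warms
    # up for roughly the first 5% of steps and then follows a cosine anneal;
    # lr_scheduler.step() is therefore called once per optimizer step below,
    # not once per epoch.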
if args.local_rank == 0:
summary_writer = SummaryWriter(args.checkpoint_dir)
logger = Logger(lr_scheduler, summary_writer, args.summary_freq,
start_step=start_step)
total_steps = start_step
epoch = start_epoch
print('Start training')
while total_steps < args.num_steps:
model.train()
        # manually advance the sampler's epoch so shuffling differs every epoch
if args.distributed:
train_sampler.set_epoch(epoch)
for i, sample in enumerate(train_loader):
img1, img2, flow_gt, valid = [x.to(device) for x in sample]
results_dict = model(img1, img2,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
flow_preds = results_dict['flow_preds']
loss, metrics = flow_loss_func(flow_preds, flow_gt, valid,
gamma=args.gamma,
max_flow=args.max_flow,
)
if isinstance(loss, float):
continue
if torch.isnan(loss):
continue
metrics.update({'total_loss': loss.item()})
# more efficient zero_grad
for param in model_without_ddp.parameters():
param.grad = None
loss.backward()
# Gradient clipping
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
lr_scheduler.step()
if args.local_rank == 0:
logger.push(metrics)
logger.add_image_summary(img1, img2, flow_preds, flow_gt)
total_steps += 1
if total_steps % args.save_ckpt_freq == 0 or total_steps == args.num_steps:
if args.local_rank == 0:
checkpoint_path = os.path.join(args.checkpoint_dir, 'step_%06d.pth' % total_steps)
torch.save({
'model': model_without_ddp.state_dict()
}, checkpoint_path)
if total_steps % args.save_latest_ckpt_freq == 0:
checkpoint_path = os.path.join(args.checkpoint_dir, 'checkpoint_latest.pth')
if args.local_rank == 0:
torch.save({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'step': total_steps,
'epoch': epoch,
}, checkpoint_path)
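                # Unlike the periodic 'step_*.pth' snapshots above, this
                # latest checkpoint also stores the optimizer state, step and
                # epoch, so training can resume exactly via --resume.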
if total_steps % args.val_freq == 0:
print('Start validation')
val_results = {}
# support validation on multiple datasets
if 'chairs' in args.val_dataset:
results_dict = validate_chairs(model_without_ddp,
with_speed_metric=args.with_speed_metric,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
if args.local_rank == 0:
val_results.update(results_dict)
if 'things' in args.val_dataset:
results_dict = validate_things(model_without_ddp,
padding_factor=args.padding_factor,
with_speed_metric=args.with_speed_metric,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
if args.local_rank == 0:
val_results.update(results_dict)
if 'sintel' in args.val_dataset:
results_dict = validate_sintel(model_without_ddp,
count_time=args.count_time,
padding_factor=args.padding_factor,
with_speed_metric=args.with_speed_metric,
evaluate_matched_unmatched=args.evaluate_matched_unmatched,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
if args.local_rank == 0:
val_results.update(results_dict)
if 'kitti' in args.val_dataset:
results_dict = validate_kitti(model_without_ddp,
padding_factor=args.padding_factor,
with_speed_metric=args.with_speed_metric,
attn_splits_list=args.attn_splits_list,
corr_radius_list=args.corr_radius_list,
prop_radius_list=args.prop_radius_list,
)
if args.local_rank == 0:
val_results.update(results_dict)
if args.local_rank == 0:
logger.write_dict(val_results)
# Save validation results
val_file = os.path.join(args.checkpoint_dir, 'val_results.txt')
with open(val_file, 'a') as f:
f.write('step: %06d\n' % total_steps)
if args.evaluate_matched_unmatched:
metrics = ['chairs_epe',
'chairs_s0_10', 'chairs_s10_40', 'chairs_s40+',
'things_clean_epe', 'things_clean_s0_10', 'things_clean_s10_40',
'things_clean_s40+',
'sintel_clean_epe', 'sintel_clean_matched', 'sintel_clean_unmatched',
'sintel_clean_s0_10', 'sintel_clean_s10_40',
'sintel_clean_s40+',
'sintel_final_epe', 'sintel_final_matched', 'sintel_final_unmatched',
'sintel_final_s0_10', 'sintel_final_s10_40',
'sintel_final_s40+',
'kitti_epe', 'kitti_f1', 'kitti_s0_10', 'kitti_s10_40', 'kitti_s40+',
]
else:
metrics = ['chairs_epe', 'chairs_s0_10', 'chairs_s10_40', 'chairs_s40+',
'things_clean_epe', 'things_clean_s0_10', 'things_clean_s10_40',
'things_clean_s40+',
'sintel_clean_epe', 'sintel_clean_s0_10', 'sintel_clean_s10_40',
'sintel_clean_s40+',
'sintel_final_epe', 'sintel_final_s0_10', 'sintel_final_s10_40',
'sintel_final_s40+',
'kitti_epe', 'kitti_f1', 'kitti_s0_10', 'kitti_s10_40', 'kitti_s40+',
]
eval_metrics = []
for metric in metrics:
if metric in val_results.keys():
eval_metrics.append(metric)
metrics_values = [val_results[metric] for metric in eval_metrics]
num_metrics = len(eval_metrics)
# save as markdown format
if args.evaluate_matched_unmatched:
f.write(("| {:>25} " * num_metrics + '\n').format(*eval_metrics))
f.write(("| {:25.3f} " * num_metrics).format(*metrics_values))
else:
f.write(("| {:>20} " * num_metrics + '\n').format(*eval_metrics))
f.write(("| {:20.3f} " * num_metrics).format(*metrics_values))
f.write('\n\n')
model.train()
if total_steps >= args.num_steps:
print('Training done')
return
epoch += 1
if __name__ == '__main__':
parser = get_args_parser()
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
main(args)
|
[
"evaluate.validate_chairs",
"evaluate.create_sintel_submission",
"evaluate.validate_kitti",
"evaluate.create_kitti_submission",
"evaluate.inference_on_dir",
"evaluate.validate_sintel",
"evaluate.validate_things"
] |
[((623, 648), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (646, 648), False, 'import argparse\n'), ((6394, 6417), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (6411, 6417), False, 'import torch\n'), ((6423, 6443), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6437, 6443), True, 'import numpy as np\n'), ((16120, 16145), 'data.build_train_dataset', 'build_train_dataset', (['args'], {}), '(args)\n', (16139, 16145), False, 'from data import build_train_dataset\n'), ((16563, 16744), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': 'shuffle', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'drop_last': '(True)', 'sampler': 'train_sampler'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=shuffle, num_workers=args.num_workers, pin_memory=True,\n drop_last=True, sampler=train_sampler)\n', (16590, 16744), False, 'import torch\n'), ((16974, 17143), 'torch.optim.lr_scheduler.OneCycleLR', 'torch.optim.lr_scheduler.OneCycleLR', (['optimizer', 'args.lr', '(args.num_steps + 10)'], {'pct_start': '(0.05)', 'cycle_momentum': '(False)', 'anneal_strategy': '"""cos"""', 'last_epoch': 'last_epoch'}), "(optimizer, args.lr, args.num_steps + 10,\n pct_start=0.05, cycle_momentum=False, anneal_strategy='cos', last_epoch\n =last_epoch)\n", (17009, 17143), False, 'import torch\n'), ((6917, 6956), 'utils.dist_utils.init_dist', 'init_dist', (['args.launcher'], {}), '(args.launcher, **dist_params)\n', (6926, 6956), False, 'from utils.dist_utils import get_dist_info, init_dist, setup_for_distributed\n'), ((7039, 7054), 'utils.dist_utils.get_dist_info', 'get_dist_info', ([], {}), '()\n', (7052, 7054), False, 'from utils.dist_utils import get_dist_info, init_dist, setup_for_distributed\n'), ((7174, 7217), 'utils.dist_utils.setup_for_distributed', 'setup_for_distributed', (['(args.local_rank == 0)'], {}), '(args.local_rank == 0)\n', (7195, 7217), False, 'from utils.dist_utils import get_dist_info, init_dist, setup_for_distributed\n'), ((8987, 9028), 'torch.load', 'torch.load', (['args.resume'], {'map_location': 'loc'}), '(args.resume, map_location=loc)\n', (8997, 9028), False, 'import torch\n'), ((15286, 15767), 'evaluate.inference_on_dir', 'inference_on_dir', (['model_without_ddp'], {'inference_dir': 'args.inference_dir', 'output_path': 'args.output_path', 'padding_factor': 'args.padding_factor', 'inference_size': 'args.inference_size', 'paired_data': 'args.dir_paired_data', 'save_flo_flow': 'args.save_flo_flow', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list', 'pred_bidir_flow': 'args.pred_bidir_flow', 'fwd_bwd_consistency_check': 'args.fwd_bwd_consistency_check'}), '(model_without_ddp, inference_dir=args.inference_dir,\n output_path=args.output_path, padding_factor=args.padding_factor,\n inference_size=args.inference_size, paired_data=args.dir_paired_data,\n save_flo_flow=args.save_flo_flow, attn_splits_list=args.\n attn_splits_list, corr_radius_list=args.corr_radius_list,\n prop_radius_list=args.prop_radius_list, pred_bidir_flow=args.\n pred_bidir_flow, fwd_bwd_consistency_check=args.fwd_bwd_consistency_check)\n', (15302, 15767), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((17255, 17289), 'torch.utils.tensorboard.SummaryWriter', 
'SummaryWriter', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (17268, 17289), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((17308, 17386), 'utils.logger.Logger', 'Logger', (['lr_scheduler', 'summary_writer', 'args.summary_freq'], {'start_step': 'start_step'}), '(lr_scheduler, summary_writer, args.summary_freq, start_step=start_step)\n', (17314, 17386), False, 'from utils.logger import Logger\n'), ((6242, 6262), 'utils.misc.save_args', 'misc.save_args', (['args'], {}), '(args)\n', (6256, 6262), False, 'from utils import misc\n'), ((6276, 6312), 'utils.misc.check_path', 'misc.check_path', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (6291, 6312), False, 'from utils import misc\n'), ((6326, 6364), 'utils.misc.save_command', 'misc.save_command', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (6343, 6364), False, 'from utils import misc\n'), ((6836, 6861), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6859, 6861), False, 'import torch\n'), ((7246, 7525), 'gmflow.gmflow.GMFlow', 'GMFlow', ([], {'feature_channels': 'args.feature_channels', 'num_scales': 'args.num_scales', 'upsample_factor': 'args.upsample_factor', 'num_head': 'args.num_head', 'attention_type': 'args.attention_type', 'ffn_dim_expansion': 'args.ffn_dim_expansion', 'num_transformer_layers': 'args.num_transformer_layers'}), '(feature_channels=args.feature_channels, num_scales=args.num_scales,\n upsample_factor=args.upsample_factor, num_head=args.num_head,\n attention_type=args.attention_type, ffn_dim_expansion=args.\n ffn_dim_expansion, num_transformer_layers=args.num_transformer_layers)\n', (7252, 7525), False, 'from gmflow.gmflow import GMFlow\n'), ((8071, 8096), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8094, 8096), False, 'import torch\n'), ((8185, 8213), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (8206, 8213), False, 'import torch\n'), ((9744, 9949), 'evaluate.validate_chairs', 'validate_chairs', (['model_without_ddp'], {'with_speed_metric': 'args.with_speed_metric', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, with_speed_metric=args.with_speed_metric,\n attn_splits_list=args.attn_splits_list, corr_radius_list=args.\n corr_radius_list, prop_radius_list=args.prop_radius_list)\n', (9759, 9949), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((10283, 10528), 'evaluate.validate_things', 'validate_things', (['model_without_ddp'], {'padding_factor': 'args.padding_factor', 'with_speed_metric': 'args.with_speed_metric', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, padding_factor=args.padding_factor,\n with_speed_metric=args.with_speed_metric, attn_splits_list=args.\n attn_splits_list, corr_radius_list=args.corr_radius_list,\n prop_radius_list=args.prop_radius_list)\n', (10298, 10528), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((10900, 11243), 'evaluate.validate_sintel', 'validate_sintel', (['model_without_ddp'], {'count_time': 'args.count_time', 'padding_factor': 'args.padding_factor', 'with_speed_metric': 
'args.with_speed_metric', 'evaluate_matched_unmatched': 'args.evaluate_matched_unmatched', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, count_time=args.count_time,\n padding_factor=args.padding_factor, with_speed_metric=args.\n with_speed_metric, evaluate_matched_unmatched=args.\n evaluate_matched_unmatched, attn_splits_list=args.attn_splits_list,\n corr_radius_list=args.corr_radius_list, prop_radius_list=args.\n prop_radius_list)\n', (10915, 11243), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((11692, 11936), 'evaluate.validate_kitti', 'validate_kitti', (['model_without_ddp'], {'padding_factor': 'args.padding_factor', 'with_speed_metric': 'args.with_speed_metric', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, padding_factor=args.padding_factor,\n with_speed_metric=args.with_speed_metric, attn_splits_list=args.\n attn_splits_list, corr_radius_list=args.corr_radius_list,\n prop_radius_list=args.prop_radius_list)\n', (11706, 11936), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((12281, 12317), 'utils.misc.check_path', 'misc.check_path', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (12296, 12317), False, 'from utils import misc\n'), ((12342, 12394), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""val_results.txt"""'], {}), "(args.checkpoint_dir, 'val_results.txt')\n", (12354, 12394), False, 'import os\n'), ((13923, 14233), 'evaluate.create_sintel_submission', 'create_sintel_submission', (['model_without_ddp'], {'output_path': 'args.output_path', 'padding_factor': 'args.padding_factor', 'save_vis_flow': 'args.save_vis_flow', 'no_save_flo': 'args.no_save_flo', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, output_path=args.output_path,\n padding_factor=args.padding_factor, save_vis_flow=args.save_vis_flow,\n no_save_flo=args.no_save_flo, attn_splits_list=args.attn_splits_list,\n corr_radius_list=args.corr_radius_list, prop_radius_list=args.\n prop_radius_list)\n', (13947, 14233), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((18222, 18311), 'loss.flow_loss_func', 'flow_loss_func', (['flow_preds', 'flow_gt', 'valid'], {'gamma': 'args.gamma', 'max_flow': 'args.max_flow'}), '(flow_preds, flow_gt, valid, gamma=args.gamma, max_flow=args.\n max_flow)\n', (18236, 18311), False, 'from loss import flow_loss_func\n'), ((18528, 18545), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (18539, 18545), False, 'import torch\n'), ((6599, 6624), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6622, 6624), False, 'import torch\n'), ((6759, 6784), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6782, 6784), False, 'import torch\n'), ((14582, 14857), 'evaluate.create_kitti_submission', 'create_kitti_submission', (['model_without_ddp'], {'output_path': 'args.output_path', 'padding_factor': 'args.padding_factor', 'save_vis_flow': 
'args.save_vis_flow', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, output_path=args.output_path,\n padding_factor=args.padding_factor, save_vis_flow=args.save_vis_flow,\n attn_splits_list=args.attn_splits_list, corr_radius_list=args.\n corr_radius_list, prop_radius_list=args.prop_radius_list)\n', (14605, 14857), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((16387, 16412), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (16410, 16412), False, 'import torch\n'), ((19644, 19702), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""checkpoint_latest.pth"""'], {}), "(args.checkpoint_dir, 'checkpoint_latest.pth')\n", (19656, 19702), False, 'import os\n'), ((8137, 8162), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8160, 8162), False, 'import torch\n'), ((8565, 8609), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', 'save_name'], {}), '(args.checkpoint_dir, save_name)\n', (8577, 8609), False, 'import os\n'), ((19339, 19403), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "('step_%06d.pth' % total_steps)"], {}), "(args.checkpoint_dir, 'step_%06d.pth' % total_steps)\n", (19351, 19403), False, 'import os\n'), ((20313, 20518), 'evaluate.validate_chairs', 'validate_chairs', (['model_without_ddp'], {'with_speed_metric': 'args.with_speed_metric', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, with_speed_metric=args.with_speed_metric,\n attn_splits_list=args.attn_splits_list, corr_radius_list=args.\n corr_radius_list, prop_radius_list=args.prop_radius_list)\n', (20328, 20518), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((20964, 21209), 'evaluate.validate_things', 'validate_things', (['model_without_ddp'], {'padding_factor': 'args.padding_factor', 'with_speed_metric': 'args.with_speed_metric', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, padding_factor=args.padding_factor,\n with_speed_metric=args.with_speed_metric, attn_splits_list=args.\n attn_splits_list, corr_radius_list=args.corr_radius_list,\n prop_radius_list=args.prop_radius_list)\n', (20979, 21209), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((21703, 22046), 'evaluate.validate_sintel', 'validate_sintel', (['model_without_ddp'], {'count_time': 'args.count_time', 'padding_factor': 'args.padding_factor', 'with_speed_metric': 'args.with_speed_metric', 'evaluate_matched_unmatched': 'args.evaluate_matched_unmatched', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, count_time=args.count_time,\n padding_factor=args.padding_factor, with_speed_metric=args.\n with_speed_metric, evaluate_matched_unmatched=args.\n evaluate_matched_unmatched, attn_splits_list=args.attn_splits_list,\n corr_radius_list=args.corr_radius_list, prop_radius_list=args.\n prop_radius_list)\n', 
(21718, 22046), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((22633, 22877), 'evaluate.validate_kitti', 'validate_kitti', (['model_without_ddp'], {'padding_factor': 'args.padding_factor', 'with_speed_metric': 'args.with_speed_metric', 'attn_splits_list': 'args.attn_splits_list', 'corr_radius_list': 'args.corr_radius_list', 'prop_radius_list': 'args.prop_radius_list'}), '(model_without_ddp, padding_factor=args.padding_factor,\n with_speed_metric=args.with_speed_metric, attn_splits_list=args.\n attn_splits_list, corr_radius_list=args.corr_radius_list,\n prop_radius_list=args.prop_radius_list)\n', (22647, 22877), False, 'from evaluate import validate_chairs, validate_things, validate_sintel, validate_kitti, create_sintel_submission, create_kitti_submission, inference_on_dir\n'), ((23454, 23506), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""val_results.txt"""'], {}), "(args.checkpoint_dir, 'val_results.txt')\n", (23466, 23506), False, 'import os\n')]
|
#!/usr/bin/env python2
from __future__ import print_function
import os
import sys
from joblib import Parallel, delayed
import numpy as np
from tqdm import tqdm
sys.path.insert(0, '../prog_data/')
import evaluate
sys.path.append('%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args
sys.path.append('%s/../prog_vae' % os.path.dirname(os.path.realpath(__file__)))
from prog_vae import ProgVAE, ProgAutoEncoder
sys.path.append('%s/../cfg_parser' % os.path.dirname(os.path.realpath(__file__)))
import cfg_parser as parser
def main():
seed = 10960817
np.random.seed(seed)
from att_model_proxy import AttProgProxy, batch_decode
model = AttProgProxy()
# 0. Constants
nb_latent_point = 1000
sample_times = 100
chunk_size = 100
def cal_valid_prior(model, latent_dim):
        parser = evaluate.get_parser(cmd_args.grammar_file)  # grammar parser (shadows the cfg_parser module imported above)
whole_valid, whole_total = 0, 0
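        # A decoded program counts as "valid" below if it tokenizes, parses, and
        # either evaluates to a value or fails only with a runtime error.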
latent_points = np.random.normal(size=(nb_latent_point, latent_dim)).astype(np.float32)
raw_logits = model.pred_raw_logits(latent_points)
result_list = batch_decode(raw_logits, True, sample_times)
pbar = tqdm(list(range(nb_latent_point)), desc='sampling')
for _sample in pbar:
_result = result_list[_sample]
assert len(_result) == sample_times
for index, s in enumerate(_result):
prog = s
# trying evaluate it
try:
tokens = evaluate.tokenize(prog)
tree = evaluate.parse(parser, tokens)
if tree is not None:
x = 0.12345
y, msg = evaluate.eval_at(tree, v0_val=x)
if y is not None or (y is None and msg.startswith('runtime error:')):
whole_valid += 1
except ValueError:
pass
whole_total += 1
pbar.set_description(
'valid : total = %d : %d = %.5f' % (whole_valid, whole_total, whole_valid * 1.0 / whole_total)
)
return 1.0 * whole_valid / whole_total
# 2. test model
valid_prior = cal_valid_prior(model, cmd_args.latent_dim)
valid_prior_save_file = cmd_args.saved_model + '_valid_prior.txt'
print('valid prior:', valid_prior)
with open(valid_prior_save_file, 'w') as fout:
print('valid prior:', valid_prior, file=fout)
import pdb, traceback, sys, code
if __name__ == '__main__':
try:
main()
    except Exception:
        exc_type, exc_value, tb = sys.exc_info()
        traceback.print_exc()
        pdb.post_mortem(tb)
|
[
"evaluate.tokenize",
"evaluate.parse",
"evaluate.get_parser",
"evaluate.eval_at"
] |
[((164, 199), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../prog_data/"""'], {}), "(0, '../prog_data/')\n", (179, 199), False, 'import pdb, traceback, sys, code\n'), ((606, 626), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (620, 626), True, 'import numpy as np\n'), ((699, 713), 'att_model_proxy.AttProgProxy', 'AttProgProxy', ([], {}), '()\n', (711, 713), False, 'from att_model_proxy import AttProgProxy, batch_decode\n'), ((867, 909), 'evaluate.get_parser', 'evaluate.get_parser', (['cmd_args.grammar_file'], {}), '(cmd_args.grammar_file)\n', (886, 909), False, 'import evaluate\n'), ((1130, 1174), 'att_model_proxy.batch_decode', 'batch_decode', (['raw_logits', '(True)', 'sample_times'], {}), '(raw_logits, True, sample_times)\n', (1142, 1174), False, 'from att_model_proxy import AttProgProxy, batch_decode\n'), ((271, 297), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (287, 297), False, 'import os\n'), ((382, 408), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (398, 408), False, 'import os\n'), ((511, 537), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (527, 537), False, 'import os\n'), ((2617, 2631), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2629, 2631), False, 'import pdb, traceback, sys, code\n'), ((2640, 2661), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2659, 2661), False, 'import pdb, traceback, sys, code\n'), ((2670, 2689), 'pdb.post_mortem', 'pdb.post_mortem', (['tb'], {}), '(tb)\n', (2685, 2689), False, 'import pdb, traceback, sys, code\n'), ((976, 1028), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(nb_latent_point, latent_dim)'}), '(size=(nb_latent_point, latent_dim))\n', (992, 1028), True, 'import numpy as np\n'), ((1524, 1547), 'evaluate.tokenize', 'evaluate.tokenize', (['prog'], {}), '(prog)\n', (1541, 1547), False, 'import evaluate\n'), ((1575, 1605), 'evaluate.parse', 'evaluate.parse', (['parser', 'tokens'], {}), '(parser, tokens)\n', (1589, 1605), False, 'import evaluate\n'), ((1716, 1748), 'evaluate.eval_at', 'evaluate.eval_at', (['tree'], {'v0_val': 'x'}), '(tree, v0_val=x)\n', (1732, 1748), False, 'import evaluate\n')]
|
import itertools
import math
import os
import random
import time
import datetime
from statistics import mean
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter # tensorboard --logdir /home/isaeng/Exjobb/states/runs/
from torch.utils.data import DataLoader
from pytorch_metric_learning import losses, miners, distances, reducers
from learn import learn
from evaluate import evaluate
from models.RNN import GenRNNNet
from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers
from sequence_transforms import FilterJoints, ChangePoseOrigin, ToTensor, NormalisePoses, ReshapePoses # AddNoise
from helpers import write_to_json, print_setup, make_save_dirs_for_net
from helpers.paths import EXTR_PATH_SSD, TB_RUNS_PATH, BACKUP_MODELS_PATH, RUNS_INFO_PATH
from helpers.result_formatter import mean_confidence_interval
# Const paths
JSON_PATH_SSD = os.path.join(EXTR_PATH_SSD, "final_data_info.json")
ROOT_DIR_SSD = os.path.join(EXTR_PATH_SSD, "final/")
# Data limiter: Go to definition for more info
DATA_LOAD_LIMITER = DataLimiter(
subjects=None,
sessions=[0],
views=None,
)
# Load the data of the dataset into memory from json
print(f"| Loading data into memory..")
LOAD_START_TIME = time.time()
LOADED_DATA = LoadData(root_dir=ROOT_DIR_SSD, data_limiter=DATA_LOAD_LIMITER, num_workers=8)
print(f"| Loading finished in {time.time() - LOAD_START_TIME:0.1f}s")
print('-' * 72)
def parameters():
"""
Initialise the hyperparameters (+ some other params)
- Batch size - tightly linked with gradient descent. The number of samples worked through before the params of the
model are updated.
    - Advice from Yann LeCun, batch_size <= 32: arxiv.org/abs/1804.07612
:return: params: dict()
"""
params = dict()
# Pick OpenPose joints for the model,
# these are used in the FilterPose() transform, as well as when deciding the input_size/number of features
params['joints_activator'] = "op_idx"
# OpenPose indices, same as in the OpenPose repo.
if params['joints_activator'] == "op_idx":
params['joint_filter'] = [1, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 24] # Select OpenPose indices
elif params['joints_activator'] == "name":
params['joint_filter'] = ["nose", "c_hip", "neck"] # Joint names, see more in the 'joints_lookup.json' file
else:
        raise NotImplementedError(f"The Joint filter of the '{params['joints_activator']}' activator is not implemented.")
params['views'] = list(DATA_LOAD_LIMITER.views)
params['num_epochs'] = 250
params['batch_size'] = 32
params['learning_rate'] = 0.0005 # 5e-4 # 0.05 5e-4 5e-8
params['learning_rate_lim'] = 5.1e-6 # The network starts its breaking phase when this LR is reached
params['load_best_post_lr_step'] = False # If the best performing model should be loaded before a LR step
params['step_size'] = 1 # Used in the lr_scheduler of Torch
params['bad_val_lim_first'] = 1 # How many un-increasing validations to allow before taking the FIRST step
params['bad_val_lim'] = 1 # How many un-increasing validations to allow before taking the REMAINING steps
# Get the active number of OpenPose joints from the joint_filter. For full kinetic pose, this will be 25,
# The joint_filter will also be applied further down, in the FilterJoints() transform.
num_joints = len(params['joint_filter'])
# The number of coordinates for each of the OpenPose joints, is 2 if using x, y coords
num_joint_coords = 2
# Number of features
params['input_size'] = num_joints * num_joint_coords # 28
# Length of a sequence, the length represent the number of frames.
# The FOI dataset is captured at 50 fps
params['sequence_len'] = 100
params['simulated_len'] = 800 # A limiter to how many sequences can be created (weird param used to evaluate RNNs)
# Network / Model params
params['num_layers'] = 2 # Number of stacked RNN layers
params['hidden_size'] = 256*2 # Number of features in hidden state
params['net_type'] = 'gru'
params['bidirectional'] = False
params['max_norm'] = 1
# If the fc layer is used, the network will apply a fully connected layer to transform
# the embedding space to the dimensionality of embedding_dims
params['use_fc_layer'] = True
# Reduce the embedding space if fully connected layer is used, otherwise, the embedding space will have the same
# dimensionality as the hidden_size of the RNN network
if params['use_fc_layer']:
params['embedding_dims'] = 10
else:
params['embedding_dims'] = params['hidden_size']
# Loss settings
params['task'] = 'metric' # 'classification'/'metric'
params['loss_type'] = 'triplet' # 'single'/'triplet'/'contrastive'
params['loss_margin'] = 0.1 # The margin for certain loss functions
# PyTorch Deep metric learning specific params
params['use_musgrave'] = True # Use the PyTorch deep metric library?
params['metric_distance'] = 'lp_distance' # Which distance metric to use
# Settings for running double losses, one for subject, the other for view
params['penalise_view'] = True # Run a second loss function for the deep metric learning to penalise the view?
params['label_type'] = 'sub' # How to label each sequence of the dataset. See more in
params['class_loss_margin'] = 0.1
# Settings for the network run
params['num_repeats'] = 1 # Run a network setup multiple times? Will save the confidence score
params['should_learn'] = True # Learn/train the network?
params['should_write'] = True # Write to TensorBoard?
params['should_test_unseen_sessions'] = False # Test the unseen sessions (sess1) for sub 0 and 1
params['should_val_unseen_sessions'] = False # Val split from unseen sessions, otherwise uses seen session (sess0)
params['should_test_unseen_subjects'] = False # Test
params['should_test_unseen_views'] = True
params['num_unseen_sub'] = 3
    params['checkpoint_to_load'] = os.path.join(RUNS_INFO_PATH, 'backups/512_dim/d210601_h09m46_run1_rep1_e58_best.pth')
if params['should_learn']:
params['should_load_checkpoints'] = False
else:
params['should_load_checkpoints'] = True
return params
def multi_grid():
"""
Run multiple grid searches. Specify the grids in the grids list.
Overrides parameter(), however, not all params are cleared to work, might e.g. be problematic if a param is a list.
"""
grids = [
{
'num_repeats': 3,
'loss_margin': 0.5,
'class_loss_margin': 0.1,
'penalise_view': True,
'batch_size': 64,
'loss_type': 'triplet',
'num_epochs': 250,
'use_fc_layer': True,
'embedding_dims': 10,
'bad_val_lim_first': 5,
'bad_val_lim': 3,
'label_type': 'full'
},
]
# Loop over all grids
num_grids = len(grids)
for grid_idx, grid in enumerate(grids):
print(f"| Grid {grid_idx+1}/{num_grids}")
multi_results = grid_search(grid_idx, grid)
print(f"| ", "_____-----"*10)
def grid_search(outer_grid_idx=-1, grid=None):
multi_start = datetime.datetime.now() # Date and time of start
multi_start_time = time.time() # Time of start
# Set a grid if this function wasn't called from multi_grid()
if grid is None:
# Override any parameter in parameter()
grid = {
# 'bidirectional': [False, True],
# 'net_type': ['gru'],
# 'sequence_len': [5, 10, 15, 20, 25],
# 'hidden_size': [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024],
# 'max_norm': [0.01, 0.1, 1]
# 'num_epochs': 2
# 'loss_margin': [50, 100]
}
    # Wrap every value in a list if it isn't one already
    for key, value in grid.items():
        if not isinstance(value, list):
            grid[key] = [value]
# Create every combination from the lists in grid
all_grid_combinations = [dict(zip(grid, value)) for value in itertools.product(*grid.values())]
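    # Illustrative expansion (hypothetical values): grid = {'lr': [0.1, 0.01], 'bs': [32]}
    # yields all_grid_combinations == [{'lr': 0.1, 'bs': 32}, {'lr': 0.01, 'bs': 32}]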
num_runs = len(all_grid_combinations)
run_formatter = int(math.log10(num_runs)) + 1 # Used for printing spacing
params = parameters()
params['num_runs'] = num_runs
# Store runtime information
multi_info = {'at': str(multi_start).split('.')[0], 'duration': None, 'num_runs': num_runs, 'num_reps': params['num_repeats']}
multi_runs = dict()
multi_results = dict()
# Formatting of run_info save file name
run_name = f'd{multi_start.strftime("%y")}{multi_start.strftime("%m")}{multi_start.strftime("%d")}_h{multi_start.strftime("%H")}m{multi_start.strftime("%M")}.json'
params['run_name'] = run_name
# Run the network by firstly overriding the params with the grid.
for grid_idx, override_params in enumerate(all_grid_combinations):
if outer_grid_idx != -1:
print(f"| Grid {grid_idx+1}")
# Print the current run index and the current notable params
print(f"| Run {grid_idx+1:{run_formatter}.0f}/{num_runs}")
[print(f"| {idx+1}. {key}: {val}", end=' ') for idx, (key, val) in enumerate(override_params.items())]
print('\n')
params.update(override_params) # Override the params
params['run_idx'] = grid_idx
# Run the network num_reps times
reps_info = repeat_run(params)
# Store runtime information
multi_runs[grid_idx] = dict()
multi_runs[grid_idx] = reps_info
multi_runs[grid_idx]['notable_params'] = override_params
multi_runs[grid_idx]['params'] = params
# Store runtime information
multi_results[grid_idx] = {
'setup': override_params,
'duration': reps_info['duration'],
'accuracy': reps_info['accuracies_mean'],
'confidence_scores': reps_info['confidence_scores']
}
print('---*' * 20)
# Store runtime information
multi_info['multi_runs'] = multi_runs
multi_info['duration'] = time.time() - multi_start_time
multi_results['duration'] = multi_info['duration']
# Formatting of run_info save file name
full_info_path = os.path.join('./saves/runs', run_name)
result_info_path = os.path.join('./saves/runs', 'r_' + run_name)
# Save the results
write_to_json(multi_info, full_info_path) # Naming format: dYYMMDD_hHHmMM.json
write_to_json(multi_results, result_info_path) # Naming format: r_dYYMMDD_hHHmMM.json
# Print the results
print(f"| Total time {multi_results['duration']:.2f}")
for grid_idx, grid_result in multi_results.items():
if isinstance(grid_idx, int):
print(f"| Notable parameters -", end='')
[print(f" {param_name}: {param} ", end='|') for param_name, param in grid_result['setup'].items()]
print(f"\n| Scores:")
[print(f"| - {name}: {score:.3f} +/- {conf:.3f} ") for name, (score, conf) in grid_result['confidence_scores'].items()]
print(f"|---------")
return multi_results
def repeat_run(params: dict = None) -> dict:
"""
Run a network of the same settings a number of times. Used to get the confidence interval scores of several runs
"""
reps_start = str(datetime.datetime.now()).split('.')[0]
reps_start_time = time.time()
if params is None:
params = parameters()
repetitions = dict()
test_scores = dict()
confusion_matrices = dict()
test_accs = []
for rep_idx in range(params['num_repeats']):
params['rep_idx'] = rep_idx
# Run the network
run_info = run_network(params)
# Store runtime information
test_accs.append(run_info['test_info']['accuracy'])
confusion_matrices[rep_idx] = run_info['test_info'].pop('confusion_matrix', None)
test_scores[rep_idx] = run_info['test_info']
repetitions[rep_idx] = run_info
# Add the scores from each rep into lists, one list for each score type
scores_concat = next(iter(test_scores.values()))
for key in scores_concat:
score_list = []
for rep_scores in test_scores.values():
score_list.append(rep_scores[key])
scores_concat[key] = score_list
# Calculate the confidence interval for the different scores
confidence_scores = dict()
for score_name, score in scores_concat.items():
confidence_scores[score_name] = mean_confidence_interval(score)
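    # mean_confidence_interval() is the repo's own helper; a common implementation
    # (an assumption here, not necessarily this repo's) returns the mean together
    # with a Student-t interval half-width: h = sem(score) * t.ppf(0.975, len(score) - 1)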
# Store runtime information
reps_info = {
'at': reps_start,
'duration': time.time() - reps_start_time,
'accuracies_mean': mean(test_accs),
'accuracies': test_accs,
'repetitions': repetitions,
'scores': scores_concat,
'confidence_scores': confidence_scores,
'confusion_matrices': confusion_matrices
}
return reps_info
def generate_random_class_split(num_classes=10, test_split=3):
"""
Generates a random class split used when training on some subjects and tested on others
"""
classes = set(range(num_classes))
assert test_split < num_classes
    test_classes = set(random.sample(sorted(classes), test_split))  # sample from a sequence; sampling from a set is unsupported in Python 3.11+
learn_classes = list([x for x in classes if x not in test_classes])
test_classes = list(test_classes)
return learn_classes, test_classes
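# Example (illustrative): generate_random_class_split(num_classes=5, test_split=2)
# might return learn_classes == [0, 2, 4] and test_classes == [1, 3].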
def run_network(params: dict = None) -> dict:
"""
Run the network. Either called directly, or from repeat_run()
"""
if params is None:
params = parameters()
run_start = datetime.datetime.now() # Save Date and time of run
# Instantiate new data limiter
data_limiter = DataLimiter(
subjects=None,
sessions=[0],
views=params['views'],
)
# Transforms
composed = transforms.Compose([
NormalisePoses(low=1, high=100),
ChangePoseOrigin(),
FilterJoints(activator=params['joints_activator'], joint_filter=params['joint_filter']),
ReshapePoses(),
# AddNoise(scale=1),
ToTensor()
])
# Create save dir if they don't exist
make_save_dirs_for_net()
train_dataset = FOIKinematicPoseDataset(
data=LOADED_DATA,
json_path=JSON_PATH_SSD,
sequence_len=params['sequence_len'],
data_limiter=data_limiter,
transform=composed,
label_type=params['label_type']
)
# Create samplers, see definition for details
train_sampler, test_sampler, val_sampler = create_samplers(
dataset_len=len(train_dataset),
train_split=.70,
val_split=.15,
val_from_train=False,
shuffle=True,
# split_limit_factor=params['sequence_len']/params['simulated_len']
)
# Create DataLoaders
train_loader = DataLoader(train_dataset, params['batch_size'], sampler=train_sampler, num_workers=4)
test_loader = DataLoader(train_dataset, params['batch_size'], sampler=test_sampler, num_workers=4)
val_loader = DataLoader(train_dataset, params['batch_size'], sampler=val_sampler, num_workers=4)
comparison_loader = train_loader # Used for comparing embeddings
# This block is run when the task is to test unseen sessions
if params['should_test_unseen_sessions']:
print("| Getting unseen sessions")
unseen_sessions_limiter = DataLimiter(
subjects=[0, 1],
sessions=[1],
views=DATA_LOAD_LIMITER.views,
)
print(f"| Loading data into memory..")
load_start_time = time.time()
unseen_sessions_data = LoadData(root_dir=ROOT_DIR_SSD, data_limiter=unseen_sessions_limiter, num_workers=8)
print(f"| Loading finished in {time.time() - load_start_time:0.1f}s")
print('-' * 72)
test_dataset = FOIKinematicPoseDataset(
data=unseen_sessions_data,
json_path=JSON_PATH_SSD,
sequence_len=params['sequence_len'],
data_limiter=unseen_sessions_limiter,
transform=composed,
label_type=params['label_type']
)
_, test_sampler, temp_sampler = create_samplers(
dataset_len=len(test_dataset),
train_split=.0,
val_split=.0,
val_from_train=False,
shuffle=True,
)
test_loader = DataLoader(test_dataset, params['batch_size'], sampler=test_sampler, num_workers=4)
if params['should_val_unseen_sessions']:
val_sampler = temp_sampler
val_loader = DataLoader(test_dataset, params['batch_size'], sampler=val_sampler, num_workers=4)
# This block is run when the task is to test unseen subjects
if params['should_test_unseen_subjects']:
learn_classes, test_classes = generate_random_class_split(len(DATA_LOAD_LIMITER.subjects), params['num_unseen_sub'])
data_limiter = DataLimiter(
subjects=learn_classes,
sessions=[0],
views=params['views'],
)
test_limiter = DataLimiter(
subjects=test_classes,
sessions=[0],
views=params['views'],
)
print("| Running on unseen subjects")
print(f'| Learning classes: {data_limiter.subjects} | Testing Classes: {test_limiter.subjects}')
train_dataset = FOIKinematicPoseDataset(
data=LOADED_DATA,
json_path=JSON_PATH_SSD,
sequence_len=params['sequence_len'],
data_limiter=data_limiter,
transform=composed,
label_type=params['label_type']
)
test_dataset = FOIKinematicPoseDataset(
data=LOADED_DATA,
json_path=JSON_PATH_SSD,
sequence_len=params['sequence_len'],
data_limiter=test_limiter,
transform=composed,
label_type=params['label_type']
)
train_sampler, _, val_sampler = create_samplers(
dataset_len=len(train_dataset),
train_split=.85,
val_split=.15,
val_from_train=False,
shuffle=True,
)
comparison_sampler, test_sampler, _ = create_samplers(
dataset_len=len(test_dataset),
train_split=.7,
val_split=.0,
val_from_train=False,
shuffle=True,
)
train_loader = DataLoader(train_dataset, params['batch_size'], sampler=train_sampler, num_workers=4)
val_loader = DataLoader(train_dataset, params['batch_size'], sampler=val_sampler, num_workers=4)
test_loader = DataLoader(test_dataset, params['batch_size'], sampler=test_sampler, num_workers=4)
comparison_loader = DataLoader(test_dataset, params['batch_size'], sampler=comparison_sampler, num_workers=4)
# This block is run when the task is to test unseen views
if params['should_test_unseen_views']:
learn_views, test_view = generate_random_class_split(len(DATA_LOAD_LIMITER.views), 1)
data_limiter = DataLimiter(
subjects=data_limiter.subjects,
sessions=[0],
views=learn_views,
)
test_limiter = DataLimiter(
subjects=data_limiter.subjects,
sessions=[0],
views=test_view,
)
print("| Running on unseen subjects")
print(f'| Learning Views: {data_limiter.views} | Testing View: {test_limiter.views}')
train_dataset = FOIKinematicPoseDataset(
data=LOADED_DATA,
json_path=JSON_PATH_SSD,
sequence_len=params['sequence_len'],
data_limiter=data_limiter,
transform=composed,
label_type=params['label_type']
)
test_dataset = FOIKinematicPoseDataset(
data=LOADED_DATA,
json_path=JSON_PATH_SSD,
sequence_len=params['sequence_len'],
data_limiter=test_limiter,
transform=composed,
label_type=params['label_type']
)
train_sampler, _, val_sampler = create_samplers(
dataset_len=len(train_dataset),
train_split=.85,
val_split=.15,
val_from_train=False,
shuffle=True,
)
comparison_sampler, test_sampler, _ = create_samplers(
dataset_len=len(test_dataset),
train_split=.7,
val_split=.0,
val_from_train=False,
shuffle=True,
)
train_loader = DataLoader(train_dataset, params['batch_size'], sampler=train_sampler, num_workers=4)
val_loader = DataLoader(train_dataset, params['batch_size'], sampler=val_sampler, num_workers=4)
test_loader = DataLoader(test_dataset, params['batch_size'], sampler=test_sampler, num_workers=4)
comparison_loader = DataLoader(test_dataset, params['batch_size'], sampler=comparison_sampler, num_workers=4)
# Use cuda if possible
# TODO: Bug - Not everything is being sent to the cpu, fix in other parts of the scripts
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
cudnn.benchmark = torch.cuda.is_available()
# The recurrent neural net model, RNN, GRU or LSTM
model = GenRNNNet(
input_size=params['input_size'],
hidden_size=params['hidden_size'],
num_layers=params['num_layers'],
use_fc_layer=params['use_fc_layer'],
embedding_dims=params['embedding_dims'],
device=device,
bidirectional=params['bidirectional'],
net_type=params['net_type'],
).to(device)
# The optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=params['learning_rate'])
loss_function = None
mining_function = None
class_loss_function = None
class_mining_function = None
#reducer = None
if params['use_musgrave']:
reducer = reducers.ThresholdReducer(low=0)
if params['metric_distance'] == 'cosine_similarity':
distance = distances.CosineSimilarity()
elif params['metric_distance'] == 'lp_distance':
distance = distances.LpDistance()
else:
raise Exception("Invalid metric distance")
if params['loss_type'] == "single":
raise NotImplementedError
elif params['loss_type'] == 'contrastive':
mining_function = miners.PairMarginMiner(pos_margin=0, neg_margin=params['loss_margin'])
loss_function = losses.ContrastiveLoss(pos_margin=0, neg_margin=params['loss_margin'])
elif params['loss_type'] == "triplet":
print('Using Musgrave triplet')
mining_function = miners.TripletMarginMiner(margin=params['loss_margin'], distance=distance, type_of_triplets="semihard")
loss_function = losses.TripletMarginLoss(margin=params['loss_margin'], distance=distance, reducer=reducer)
if params['penalise_view']:
print(f"| Penalising views")
class_mining_function = miners.TripletMarginMiner(margin=params['class_loss_margin'], distance=distance, type_of_triplets="semihard")
class_loss_function = losses.TripletMarginLoss(margin=params['class_loss_margin'], distance=distance, reducer=reducer)
elif params['loss_type'] == 'n_pairs':
#loss_function = losses.NPairsLoss()
raise NotImplementedError
else:
if params['loss_type'] == "single":
loss_function = nn.CrossEntropyLoss()
elif params['loss_type'] == "triplet":
loss_function = nn.TripletMarginLoss(margin=params['loss_margin'])
else:
raise Exception("Invalid loss type when not using musgrave implementation")
# print_setup(setup=run_info, params=params)
writer = SummaryWriter(TB_RUNS_PATH) if params['should_write'] else None
start_time = time.time()
model, learn_info = learn(
train_loader=train_loader,
val_loader=val_loader,
model=model,
optimizer=optimizer,
loss_function=loss_function,
class_loss_function=class_loss_function,
mining_function=mining_function,
class_mining_function=class_mining_function,
num_epochs=params['num_epochs'],
device=device,
classes=data_limiter.subjects,
task=params['task'],
tb_writer=writer,
params=params,
)
if params['should_load_checkpoints']:
print('| Loading network checkpoints for testing..')
checkpoint = torch.load(params['checkpoint_to_load'])
model.load_state_dict(checkpoint['net'])
test_info, _ = evaluate(
train_loader=comparison_loader,
eval_loader=test_loader,
model=model,
task=params['task'],
device=device,
tb_writer=writer,
is_test=False,
embedding_dims=params['embedding_dims'],
)
# Close TensorBoard writer if it exists
if writer is not None:
writer.close()
# Dict to save all the run info. When learning and evaluating is finished, this will be saved to disk.
run_info = {
'at': str(run_start).split('.')[0],
'duration': time.time() - start_time,
'device': str(device),
'model_name': str(type(model)).split('.')[-1][:-2],
'optimizer_name': str(type(optimizer)).split('.')[-1][:-2],
'loss_function_name': str(type(loss_function)).split('.')[-1][:-2],
'transforms': [transform.split(' ')[0].split('.')[1] for transform in str(composed).split('<')[1:]],
'split': {
'tot_num_seqs': len(train_dataset), 'batch_size': params['batch_size'], 'train_split': len(train_sampler),
'val_split': len(val_sampler), 'test_split': len(test_sampler), 'num_train_batches': len(train_loader),
'num_val_batches': len(val_loader), 'num_test_batches': len(test_loader)
},
'learn_info': learn_info,
'test_info': test_info
}
print(f"| Finished testing | Accuracy: {test_info['accuracy']:.6f} | Run time: {run_info['duration'] :.2f}s\n\n")
return run_info
if __name__ == "__main__":
multi_grid()
# grid_search()
# run_network()
# result_info_path = os.path.join('./saves/runs', 'r_' + run_name)
# res = read_from_json('./saves/runs/r_d210511_h17m18.json')
# print(res)
|
[
"evaluate.evaluate"
] |
[((989, 1040), 'os.path.join', 'os.path.join', (['EXTR_PATH_SSD', '"""final_data_info.json"""'], {}), "(EXTR_PATH_SSD, 'final_data_info.json')\n", (1001, 1040), False, 'import os\n'), ((1056, 1093), 'os.path.join', 'os.path.join', (['EXTR_PATH_SSD', '"""final/"""'], {}), "(EXTR_PATH_SSD, 'final/')\n", (1068, 1093), False, 'import os\n'), ((1163, 1215), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': 'None', 'sessions': '[0]', 'views': 'None'}), '(subjects=None, sessions=[0], views=None)\n', (1174, 1215), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((1343, 1354), 'time.time', 'time.time', ([], {}), '()\n', (1352, 1354), False, 'import time\n'), ((1369, 1447), 'dataset.LoadData', 'LoadData', ([], {'root_dir': 'ROOT_DIR_SSD', 'data_limiter': 'DATA_LOAD_LIMITER', 'num_workers': '(8)'}), '(root_dir=ROOT_DIR_SSD, data_limiter=DATA_LOAD_LIMITER, num_workers=8)\n', (1377, 1447), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((6174, 6263), 'os.path.join', 'os.path.join', (['RUNS_INFO_PATH', '"""backups/512_dim/d210601_h09m46_ṛun1_rep1_e58_best.pth"""'], {}), "(RUNS_INFO_PATH,\n 'backups/512_dim/d210601_h09m46_ṛun1_rep1_e58_best.pth')\n", (6186, 6263), False, 'import os\n'), ((7393, 7416), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7414, 7416), False, 'import datetime\n'), ((7466, 7477), 'time.time', 'time.time', ([], {}), '()\n', (7475, 7477), False, 'import time\n'), ((10364, 10402), 'os.path.join', 'os.path.join', (['"""./saves/runs"""', 'run_name'], {}), "('./saves/runs', run_name)\n", (10376, 10402), False, 'import os\n'), ((10426, 10471), 'os.path.join', 'os.path.join', (['"""./saves/runs"""', "('r_' + run_name)"], {}), "('./saves/runs', 'r_' + run_name)\n", (10438, 10471), False, 'import os\n'), ((10500, 10541), 'helpers.write_to_json', 'write_to_json', (['multi_info', 'full_info_path'], {}), '(multi_info, full_info_path)\n', (10513, 10541), False, 'from helpers import write_to_json, print_setup, make_save_dirs_for_net\n'), ((10584, 10630), 'helpers.write_to_json', 'write_to_json', (['multi_results', 'result_info_path'], {}), '(multi_results, result_info_path)\n', (10597, 10630), False, 'from helpers import write_to_json, print_setup, make_save_dirs_for_net\n'), ((11501, 11512), 'time.time', 'time.time', ([], {}), '()\n', (11510, 11512), False, 'import time\n'), ((13696, 13719), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13717, 13719), False, 'import datetime\n'), ((13804, 13867), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': 'None', 'sessions': '[0]', 'views': "params['views']"}), "(subjects=None, sessions=[0], views=params['views'])\n", (13815, 13867), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((14245, 14269), 'helpers.make_save_dirs_for_net', 'make_save_dirs_for_net', ([], {}), '()\n', (14267, 14269), False, 'from helpers import write_to_json, print_setup, make_save_dirs_for_net\n'), ((14291, 14482), 'dataset.FOIKinematicPoseDataset', 'FOIKinematicPoseDataset', ([], {'data': 'LOADED_DATA', 'json_path': 'JSON_PATH_SSD', 'sequence_len': "params['sequence_len']", 'data_limiter': 'data_limiter', 'transform': 'composed', 'label_type': "params['label_type']"}), "(data=LOADED_DATA, json_path=JSON_PATH_SSD,\n sequence_len=params['sequence_len'], data_limiter=data_limiter,\n transform=composed, label_type=params['label_type'])\n", (14314, 14482), False, 'from dataset import 
FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((14911, 15000), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'train_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=train_sampler,\n num_workers=4)\n", (14921, 15000), False, 'from torch.utils.data import DataLoader\n'), ((15015, 15103), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'test_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=test_sampler,\n num_workers=4)\n", (15025, 15103), False, 'from torch.utils.data import DataLoader\n'), ((15117, 15204), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'val_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=val_sampler,\n num_workers=4)\n", (15127, 15204), False, 'from torch.utils.data import DataLoader\n'), ((21226, 21251), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21249, 21251), False, 'import torch\n'), ((23948, 23959), 'time.time', 'time.time', ([], {}), '()\n', (23957, 23959), False, 'import time\n'), ((23985, 24378), 'learn.learn', 'learn', ([], {'train_loader': 'train_loader', 'val_loader': 'val_loader', 'model': 'model', 'optimizer': 'optimizer', 'loss_function': 'loss_function', 'class_loss_function': 'class_loss_function', 'mining_function': 'mining_function', 'class_mining_function': 'class_mining_function', 'num_epochs': "params['num_epochs']", 'device': 'device', 'classes': 'data_limiter.subjects', 'task': "params['task']", 'tb_writer': 'writer', 'params': 'params'}), "(train_loader=train_loader, val_loader=val_loader, model=model,\n optimizer=optimizer, loss_function=loss_function, class_loss_function=\n class_loss_function, mining_function=mining_function,\n class_mining_function=class_mining_function, num_epochs=params[\n 'num_epochs'], device=device, classes=data_limiter.subjects, task=\n params['task'], tb_writer=writer, params=params)\n", (23990, 24378), False, 'from learn import learn\n'), ((24710, 24908), 'evaluate.evaluate', 'evaluate', ([], {'train_loader': 'comparison_loader', 'eval_loader': 'test_loader', 'model': 'model', 'task': "params['task']", 'device': 'device', 'tb_writer': 'writer', 'is_test': '(False)', 'embedding_dims': "params['embedding_dims']"}), "(train_loader=comparison_loader, eval_loader=test_loader, model=\n model, task=params['task'], device=device, tb_writer=writer, is_test=\n False, embedding_dims=params['embedding_dims'])\n", (24718, 24908), False, 'from evaluate import evaluate\n'), ((10212, 10223), 'time.time', 'time.time', ([], {}), '()\n', (10221, 10223), False, 'import time\n'), ((12609, 12640), 'helpers.result_formatter.mean_confidence_interval', 'mean_confidence_interval', (['score'], {}), '(score)\n', (12633, 12640), False, 'from helpers.result_formatter import mean_confidence_interval\n'), ((12796, 12811), 'statistics.mean', 'mean', (['test_accs'], {}), '(test_accs)\n', (12800, 12811), False, 'from statistics import mean\n'), ((13310, 13344), 'random.sample', 'random.sample', (['classes', 'test_split'], {}), '(classes, test_split)\n', (13323, 13344), False, 'import random\n'), ((15461, 15534), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': '[0, 1]', 'sessions': '[1]', 'views': 'DATA_LOAD_LIMITER.views'}), '(subjects=[0, 1], sessions=[1], views=DATA_LOAD_LIMITER.views)\n', (15472, 15534), False, 'from dataset import 
FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((15656, 15667), 'time.time', 'time.time', ([], {}), '()\n', (15665, 15667), False, 'import time\n'), ((15699, 15787), 'dataset.LoadData', 'LoadData', ([], {'root_dir': 'ROOT_DIR_SSD', 'data_limiter': 'unseen_sessions_limiter', 'num_workers': '(8)'}), '(root_dir=ROOT_DIR_SSD, data_limiter=unseen_sessions_limiter,\n num_workers=8)\n', (15707, 15787), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((15910, 16127), 'dataset.FOIKinematicPoseDataset', 'FOIKinematicPoseDataset', ([], {'data': 'unseen_sessions_data', 'json_path': 'JSON_PATH_SSD', 'sequence_len': "params['sequence_len']", 'data_limiter': 'unseen_sessions_limiter', 'transform': 'composed', 'label_type': "params['label_type']"}), "(data=unseen_sessions_data, json_path=JSON_PATH_SSD,\n sequence_len=params['sequence_len'], data_limiter=\n unseen_sessions_limiter, transform=composed, label_type=params[\n 'label_type'])\n", (15933, 16127), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((16444, 16531), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', "params['batch_size']"], {'sampler': 'test_sampler', 'num_workers': '(4)'}), "(test_dataset, params['batch_size'], sampler=test_sampler,\n num_workers=4)\n", (16454, 16531), False, 'from torch.utils.data import DataLoader\n'), ((16986, 17058), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': 'learn_classes', 'sessions': '[0]', 'views': "params['views']"}), "(subjects=learn_classes, sessions=[0], views=params['views'])\n", (16997, 17058), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((17130, 17201), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': 'test_classes', 'sessions': '[0]', 'views': "params['views']"}), "(subjects=test_classes, sessions=[0], views=params['views'])\n", (17141, 17201), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((17426, 17617), 'dataset.FOIKinematicPoseDataset', 'FOIKinematicPoseDataset', ([], {'data': 'LOADED_DATA', 'json_path': 'JSON_PATH_SSD', 'sequence_len': "params['sequence_len']", 'data_limiter': 'data_limiter', 'transform': 'composed', 'label_type': "params['label_type']"}), "(data=LOADED_DATA, json_path=JSON_PATH_SSD,\n sequence_len=params['sequence_len'], data_limiter=data_limiter,\n transform=composed, label_type=params['label_type'])\n", (17449, 17617), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((17716, 17907), 'dataset.FOIKinematicPoseDataset', 'FOIKinematicPoseDataset', ([], {'data': 'LOADED_DATA', 'json_path': 'JSON_PATH_SSD', 'sequence_len': "params['sequence_len']", 'data_limiter': 'test_limiter', 'transform': 'composed', 'label_type': "params['label_type']"}), "(data=LOADED_DATA, json_path=JSON_PATH_SSD,\n sequence_len=params['sequence_len'], data_limiter=test_limiter,\n transform=composed, label_type=params['label_type'])\n", (17739, 17907), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((18465, 18554), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'train_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=train_sampler,\n num_workers=4)\n", (18475, 18554), False, 'from torch.utils.data import DataLoader\n'), ((18572, 18659), 'torch.utils.data.DataLoader', 
'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'val_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=val_sampler,\n num_workers=4)\n", (18582, 18659), False, 'from torch.utils.data import DataLoader\n'), ((18678, 18765), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', "params['batch_size']"], {'sampler': 'test_sampler', 'num_workers': '(4)'}), "(test_dataset, params['batch_size'], sampler=test_sampler,\n num_workers=4)\n", (18688, 18765), False, 'from torch.utils.data import DataLoader\n'), ((18790, 18883), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', "params['batch_size']"], {'sampler': 'comparison_sampler', 'num_workers': '(4)'}), "(test_dataset, params['batch_size'], sampler=comparison_sampler,\n num_workers=4)\n", (18800, 18883), False, 'from torch.utils.data import DataLoader\n'), ((19105, 19181), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': 'data_limiter.subjects', 'sessions': '[0]', 'views': 'learn_views'}), '(subjects=data_limiter.subjects, sessions=[0], views=learn_views)\n', (19116, 19181), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((19253, 19327), 'dataset.DataLimiter', 'DataLimiter', ([], {'subjects': 'data_limiter.subjects', 'sessions': '[0]', 'views': 'test_view'}), '(subjects=data_limiter.subjects, sessions=[0], views=test_view)\n', (19264, 19327), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((19541, 19732), 'dataset.FOIKinematicPoseDataset', 'FOIKinematicPoseDataset', ([], {'data': 'LOADED_DATA', 'json_path': 'JSON_PATH_SSD', 'sequence_len': "params['sequence_len']", 'data_limiter': 'data_limiter', 'transform': 'composed', 'label_type': "params['label_type']"}), "(data=LOADED_DATA, json_path=JSON_PATH_SSD,\n sequence_len=params['sequence_len'], data_limiter=data_limiter,\n transform=composed, label_type=params['label_type'])\n", (19564, 19732), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((19831, 20022), 'dataset.FOIKinematicPoseDataset', 'FOIKinematicPoseDataset', ([], {'data': 'LOADED_DATA', 'json_path': 'JSON_PATH_SSD', 'sequence_len': "params['sequence_len']", 'data_limiter': 'test_limiter', 'transform': 'composed', 'label_type': "params['label_type']"}), "(data=LOADED_DATA, json_path=JSON_PATH_SSD,\n sequence_len=params['sequence_len'], data_limiter=test_limiter,\n transform=composed, label_type=params['label_type'])\n", (19854, 20022), False, 'from dataset import FOIKinematicPoseDataset, DataLimiter, LoadData, create_samplers\n'), ((20580, 20669), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'train_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=train_sampler,\n num_workers=4)\n", (20590, 20669), False, 'from torch.utils.data import DataLoader\n'), ((20687, 20774), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', "params['batch_size']"], {'sampler': 'val_sampler', 'num_workers': '(4)'}), "(train_dataset, params['batch_size'], sampler=val_sampler,\n num_workers=4)\n", (20697, 20774), False, 'from torch.utils.data import DataLoader\n'), ((20793, 20880), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', "params['batch_size']"], {'sampler': 'test_sampler', 'num_workers': '(4)'}), "(test_dataset, params['batch_size'], sampler=test_sampler,\n num_workers=4)\n", (20803, 20880), False, 'from 
torch.utils.data import DataLoader\n'), ((20905, 20998), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', "params['batch_size']"], {'sampler': 'comparison_sampler', 'num_workers': '(4)'}), "(test_dataset, params['batch_size'], sampler=comparison_sampler,\n num_workers=4)\n", (20915, 20998), False, 'from torch.utils.data import DataLoader\n'), ((21153, 21178), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21176, 21178), False, 'import torch\n'), ((21129, 21149), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (21141, 21149), False, 'import torch\n'), ((21184, 21203), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (21196, 21203), False, 'import torch\n'), ((21966, 21998), 'pytorch_metric_learning.reducers.ThresholdReducer', 'reducers.ThresholdReducer', ([], {'low': '(0)'}), '(low=0)\n', (21991, 21998), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((23866, 23893), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['TB_RUNS_PATH'], {}), '(TB_RUNS_PATH)\n', (23879, 23893), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((24600, 24640), 'torch.load', 'torch.load', (["params['checkpoint_to_load']"], {}), "(params['checkpoint_to_load'])\n", (24610, 24640), False, 'import torch\n'), ((8322, 8342), 'math.log10', 'math.log10', (['num_runs'], {}), '(num_runs)\n', (8332, 8342), False, 'import math\n'), ((12738, 12749), 'time.time', 'time.time', ([], {}), '()\n', (12747, 12749), False, 'import time\n'), ((13961, 13992), 'sequence_transforms.NormalisePoses', 'NormalisePoses', ([], {'low': '(1)', 'high': '(100)'}), '(low=1, high=100)\n', (13975, 13992), False, 'from sequence_transforms import FilterJoints, ChangePoseOrigin, ToTensor, NormalisePoses, ReshapePoses\n'), ((14002, 14020), 'sequence_transforms.ChangePoseOrigin', 'ChangePoseOrigin', ([], {}), '()\n', (14018, 14020), False, 'from sequence_transforms import FilterJoints, ChangePoseOrigin, ToTensor, NormalisePoses, ReshapePoses\n'), ((14030, 14122), 'sequence_transforms.FilterJoints', 'FilterJoints', ([], {'activator': "params['joints_activator']", 'joint_filter': "params['joint_filter']"}), "(activator=params['joints_activator'], joint_filter=params[\n 'joint_filter'])\n", (14042, 14122), False, 'from sequence_transforms import FilterJoints, ChangePoseOrigin, ToTensor, NormalisePoses, ReshapePoses\n'), ((14127, 14141), 'sequence_transforms.ReshapePoses', 'ReshapePoses', ([], {}), '()\n', (14139, 14141), False, 'from sequence_transforms import FilterJoints, ChangePoseOrigin, ToTensor, NormalisePoses, ReshapePoses\n'), ((14180, 14190), 'sequence_transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (14188, 14190), False, 'from sequence_transforms import FilterJoints, ChangePoseOrigin, ToTensor, NormalisePoses, ReshapePoses\n'), ((16642, 16728), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', "params['batch_size']"], {'sampler': 'val_sampler', 'num_workers': '(4)'}), "(test_dataset, params['batch_size'], sampler=val_sampler,\n num_workers=4)\n", (16652, 16728), False, 'from torch.utils.data import DataLoader\n'), ((21320, 21605), 'models.RNN.GenRNNNet', 'GenRNNNet', ([], {'input_size': "params['input_size']", 'hidden_size': "params['hidden_size']", 'num_layers': "params['num_layers']", 'use_fc_layer': "params['use_fc_layer']", 'embedding_dims': "params['embedding_dims']", 'device': 'device', 'bidirectional': "params['bidirectional']", 'net_type': "params['net_type']"}), 
"(input_size=params['input_size'], hidden_size=params['hidden_size'\n ], num_layers=params['num_layers'], use_fc_layer=params['use_fc_layer'],\n embedding_dims=params['embedding_dims'], device=device, bidirectional=\n params['bidirectional'], net_type=params['net_type'])\n", (21329, 21605), False, 'from models.RNN import GenRNNNet\n'), ((22084, 22112), 'pytorch_metric_learning.distances.CosineSimilarity', 'distances.CosineSimilarity', ([], {}), '()\n', (22110, 22112), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((23552, 23573), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (23571, 23573), True, 'import torch.nn as nn\n'), ((25254, 25265), 'time.time', 'time.time', ([], {}), '()\n', (25263, 25265), False, 'import time\n'), ((1479, 1490), 'time.time', 'time.time', ([], {}), '()\n', (1488, 1490), False, 'import time\n'), ((22193, 22215), 'pytorch_metric_learning.distances.LpDistance', 'distances.LpDistance', ([], {}), '()\n', (22213, 22215), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((22449, 22519), 'pytorch_metric_learning.miners.PairMarginMiner', 'miners.PairMarginMiner', ([], {'pos_margin': '(0)', 'neg_margin': "params['loss_margin']"}), "(pos_margin=0, neg_margin=params['loss_margin'])\n", (22471, 22519), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((22548, 22618), 'pytorch_metric_learning.losses.ContrastiveLoss', 'losses.ContrastiveLoss', ([], {'pos_margin': '(0)', 'neg_margin': "params['loss_margin']"}), "(pos_margin=0, neg_margin=params['loss_margin'])\n", (22570, 22618), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((23650, 23700), 'torch.nn.TripletMarginLoss', 'nn.TripletMarginLoss', ([], {'margin': "params['loss_margin']"}), "(margin=params['loss_margin'])\n", (23670, 23700), True, 'import torch.nn as nn\n'), ((11440, 11463), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11461, 11463), False, 'import datetime\n'), ((22741, 22848), 'pytorch_metric_learning.miners.TripletMarginMiner', 'miners.TripletMarginMiner', ([], {'margin': "params['loss_margin']", 'distance': 'distance', 'type_of_triplets': '"""semihard"""'}), "(margin=params['loss_margin'], distance=distance,\n type_of_triplets='semihard')\n", (22766, 22848), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((22873, 22967), 'pytorch_metric_learning.losses.TripletMarginLoss', 'losses.TripletMarginLoss', ([], {'margin': "params['loss_margin']", 'distance': 'distance', 'reducer': 'reducer'}), "(margin=params['loss_margin'], distance=distance,\n reducer=reducer)\n", (22897, 22967), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((15823, 15834), 'time.time', 'time.time', ([], {}), '()\n', (15832, 15834), False, 'import time\n'), ((23090, 23204), 'pytorch_metric_learning.miners.TripletMarginMiner', 'miners.TripletMarginMiner', ([], {'margin': "params['class_loss_margin']", 'distance': 'distance', 'type_of_triplets': '"""semihard"""'}), "(margin=params['class_loss_margin'], distance=\n distance, type_of_triplets='semihard')\n", (23115, 23204), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n'), ((23238, 23339), 'pytorch_metric_learning.losses.TripletMarginLoss', 'losses.TripletMarginLoss', ([], {'margin': "params['class_loss_margin']", 'distance': 'distance', 'reducer': 'reducer'}), "(margin=params['class_loss_margin'], 
distance=\n distance, reducer=reducer)\n", (23262, 23339), False, 'from pytorch_metric_learning import losses, miners, distances, reducers\n')]
|
# @yifan
# 2021.12.05
# Hierarchical VQ
# return label and depth
import numpy as np
import faiss
from myKMeans import myKMeans
from evaluate import MSE
class HKM():
def __init__(self, n_nodes_p_level):
self.n_nodes_p_level = n_nodes_p_level
self.depth = len(n_nodes_p_level)
self.init_cluster_centers_ = None
self.cluster_centers_ = {}
self.mapping = {}
self.TH = -1
    def fit_fast(self, init_cluster_centers_):
        # Build each coarser level by clustering the centroids of the finer
        # level below it, instead of re-clustering the full data as fit() does.
self.init_cluster_centers_ = init_cluster_centers_
self.cluster_centers_['L-'+str(self.depth-1)] = self.init_cluster_centers_
for i in range(self.depth-2, -1, -1):
km = myKMeans(n_clusters=self.n_nodes_p_level[i])
km.fit(self.cluster_centers_['L-'+str(i+1)])
l = km.predict(self.cluster_centers_['L-'+str(i+1)]).reshape(-1)
self.cluster_centers_['L-'+str(i)] = km.cluster_centers_
mp = {}
for j in range(len(l)):
if l[j] in mp.keys():
mp[l[j]].append(j)
else:
mp[l[j]] = [j]
self.mapping['L-'+str(i)+'~L-'+str(i+1)] = mp
return self
    def fit(self, X):
        # Cluster the full data once per level, then record the parent->children
        # centroid mapping between adjacent levels in self.mapping.
for i in range(self.depth):
km = myKMeans(n_clusters=self.n_nodes_p_level[i])
km.fit(X)
self.cluster_centers_['L-'+str(i)] = km.cluster_centers_
for i in range(1, self.depth):
l = self.Cpredict(self.cluster_centers_['L-'+str(i)], self.cluster_centers_['L-'+str(i-1)]).reshape(-1)
mp = {}
for j in range(len(l)):
if l[j] in mp.keys():
mp[l[j]].append(j)
else:
mp[l[j]] = [j]
self.mapping['L-'+str(i-1)+'~L-'+str(i)] = mp
return self
    def Cpredict(self, X, cent):
        # Nearest-centroid assignment with a brute-force L2 faiss index.
index = faiss.IndexFlatL2(cent.shape[1])
index.add(cent)
D, I = index.search(X, 1)
return I
def predict(self, X, TH):
self.TH = TH
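        # Rate-distortion heuristic: descend levels while the distortion gain per
        # extra bit, a / b = dMSE / dbpp, stays at or above TH; a vector is assigned
        # to the first level where the gain drops below TH.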
        S = list(X.shape)
S[-1] = -1
X = X.reshape(-1, X.shape[-1])
        depth, label = np.zeros(X.shape[0]) - 1, np.zeros(X.shape[0]) - 1  # -1 marks "not yet assigned"
tmp0 = self.Cpredict(X, self.cluster_centers_['L-'+str(0)]).reshape(-1)
iX0 = self.cluster_centers_['L-'+str(0)][tmp0]
n0 = self.cluster_centers_['L-'+str(0)].shape[0]
for i in range(1, self.depth):
tmp = self.Cpredict(X, self.cluster_centers_['L-'+str(i)]).reshape(-1)
iX = self.cluster_centers_['L-'+str(i)][tmp]
n = self.cluster_centers_['L-'+str(i)].shape[0]
for j in range(X.shape[0]):
if label[j] < 0:
a, b = MSE(X[j], iX0[j]) - MSE(X[j], iX[j]), np.log2(n) - np.log2(n0)
#print(' dMSE=%3.4f, dbpp=%3.4f, dMSE/dbpp=%3.4f'%(a,b,a/b))
if a / b < self.TH:
label[j] = tmp[j]
depth[j] = i
tmp0, iX0, n0 = tmp, iX, n
        # Anything still unassigned falls back to the deepest level searched.
        for j in range(X.shape[0]):
            if label[j] < 0 or depth[j] < 0:
                label[j] = tmp[j]
                depth[j] = i
return label.reshape(S).astype('int16'), depth.reshape(S)
def inverse_predict(self, label, depth):
        S = list(label.shape)
S[-1] = -1
label, depth = label.reshape(-1), depth.reshape(-1)
iX = np.zeros((label.shape[0], self.cluster_centers_['L-'+str(0)].shape[-1]))
for i in range(self.depth):
idx = (depth == i)
iX[idx] = self.cluster_centers_['L-'+str(i)][label[idx]]
return iX.reshape(S)
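# Minimal usage sketch (hypothetical data; faiss expects float32 inputs):
#   X = np.random.randn(10000, 16).astype('float32')
#   hkm = HKM(n_nodes_p_level=[16, 64, 256]).fit(X)
#   label, depth = hkm.predict(X, TH=0.5)    # per-vector codeword id and level
#   Xhat = hkm.inverse_predict(label, depth)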
|
[
"evaluate.MSE"
] |
[((1879, 1911), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['cent.shape[1]'], {}), '(cent.shape[1])\n', (1896, 1911), False, 'import faiss\n'), ((674, 718), 'myKMeans.myKMeans', 'myKMeans', ([], {'n_clusters': 'self.n_nodes_p_level[i]'}), '(n_clusters=self.n_nodes_p_level[i])\n', (682, 718), False, 'from myKMeans import myKMeans\n'), ((1270, 1314), 'myKMeans.myKMeans', 'myKMeans', ([], {'n_clusters': 'self.n_nodes_p_level[i]'}), '(n_clusters=self.n_nodes_p_level[i])\n', (1278, 1314), False, 'from myKMeans import myKMeans\n'), ((2166, 2186), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (2174, 2186), True, 'import numpy as np\n'), ((2190, 2210), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (2198, 2210), True, 'import numpy as np\n'), ((2744, 2761), 'evaluate.MSE', 'MSE', (['X[j]', 'iX0[j]'], {}), '(X[j], iX0[j])\n', (2747, 2761), False, 'from evaluate import MSE\n'), ((2764, 2780), 'evaluate.MSE', 'MSE', (['X[j]', 'iX[j]'], {}), '(X[j], iX[j])\n', (2767, 2780), False, 'from evaluate import MSE\n'), ((2782, 2792), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (2789, 2792), True, 'import numpy as np\n'), ((2796, 2807), 'numpy.log2', 'np.log2', (['n0'], {}), '(n0)\n', (2803, 2807), True, 'import numpy as np\n')]
|
from flask import Flask
from flask import render_template
from flask import request
from flask import send_from_directory
from time import process_time
from game import Game
import evaluate
app = Flask(__name__)
game = Game()
@app.route("/")
def main():
global game
game = Game()
game.depth = 4
game.timeout = 60
return render_template('index.html')
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path)
@app.route('/move/', methods=['POST'])
def move():
data = request.json
move = game.board.parse_san(data['san'])
game.move(move)
t0 = process_time()
best_move = game.go()
t1 = process_time() - t0
san = game.board.san(best_move)
game.move(best_move)
print("Time:\t\t" + str(round(t1, 3)))
print("Positions:\t" + str(game.count))
print("Positions/s:\t" + str(round(game.count/t1)))
print("Score:\t\t" + str(evaluate.evaluate(game)))
print("Board:\n" + str(game.board))
game.count = 0
return san
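# Example client call (values are illustrative), assuming Flask's default port:
#   curl -X POST http://localhost:5000/move/ \
#        -H 'Content-Type: application/json' \
#        -d '{"san": "e4"}'
# The response body is the engine's reply move in SAN notation.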
|
[
"evaluate.evaluate"
] |
[((160, 175), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (165, 175), False, 'from flask import Flask\n'), ((184, 190), 'game.Game', 'Game', ([], {}), '()\n', (188, 190), False, 'from game import Game\n'), ((243, 249), 'game.Game', 'Game', ([], {}), '()\n', (247, 249), False, 'from game import Game\n'), ((297, 326), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (312, 326), False, 'from flask import render_template\n'), ((574, 588), 'time.process_time', 'process_time', ([], {}), '()\n', (586, 588), False, 'from time import process_time\n'), ((620, 634), 'time.process_time', 'process_time', ([], {}), '()\n', (632, 634), False, 'from time import process_time\n'), ((863, 886), 'evaluate.evaluate', 'evaluate.evaluate', (['game'], {}), '(game)\n', (880, 886), False, 'import evaluate\n')]
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pickle
from utilities import (create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission)
from data_generator import DataGeneratorMT as DataGenerator, TestDataGeneratorMT as TestDataGenerator
from models_mtl import (Cnn_9layers_AvgPooling, Cnn_9layers_AvgPooling2, Cnn_9layers_AvgPooling_Emb, Cnn_9layers_AvgPooling_GCNEmb, ResNet_V101_AvgPooling, ResNet_V101_AvgPooling_GCNEmb)
from losses import binary_cross_entropy
from evaluate import Evaluator, StatisticsContainer
from pytorch_utils import move_data_to_gpu, forward
import config
def get_model(classes_num, model_type, args):
Model = eval(model_type)
if model_type.endswith("GCNEmb"):
with open(args.graph_dir, "rb") as f:
graph, class_indices = pickle.load(f)
model = Model(classes_num, graph, class_indices)
else:
model = Model(classes_num)
return model
def train(args):
'''Training. Model will be saved after several iterations.
Args:
dataset_dir: string, directory of dataset
workspace: string, directory of workspace
        train_source: 'curated' | 'noisy' | 'curated_and_noisy'
segment_seconds: float, duration of audio recordings to be padded or split
hop_seconds: float, hop seconds between segments
pad_type: 'constant' | 'repeat'
holdout_fold: '1', '2', '3', '4' | 'none', set `none` for training
on all data without validation
model_type: string, e.g. 'Cnn_9layers_AvgPooling'
batch_size: int
cuda: bool
mini_data: bool, set True for debugging on a small part of data
'''
    # Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
train_source = args.train_source
segment_seconds = args.segment_seconds
hop_seconds = args.hop_seconds
pad_type = args.pad_type
holdout_fold = args.holdout_fold
model_type = args.model_type
if not model_type.endswith("GCNEmb"):
model_marker = model_type
else:
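        # Derive a short graph tag: drop the directory prefix and a fixed-length
        # (8-character) file extension from the graph path.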
graph_marker = args.graph_dir[args.graph_dir.rfind("/")+1:-8]
model_marker = f"{model_type}_{graph_marker}"
model_marker = "mtl_"+model_marker
batch_size = args.batch_size
cuda = args.cuda and torch.cuda.is_available()
mini_data = args.mini_data
filename = args.filename
mel_bins = config.mel_bins
classes_num = config.classes_num
frames_per_second = config.frames_per_second
max_iteration = 500 # Number of mini-batches to evaluate on training data
reduce_lr = False
# Paths
if mini_data:
prefix = 'minidata_'
else:
prefix = ''
curated_feature_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_curated.h5')
noisy_feature_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_noisy.h5')
curated_cross_validation_path = os.path.join(workspace,
'cross_validation_metadata', 'train_curated_cross_validation.csv')
noisy_cross_validation_path = os.path.join(workspace,
'cross_validation_metadata', 'train_noisy_cross_validation.csv')
scalar_path = os.path.join(workspace, 'scalars',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_noisy.h5')
checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'.format(
segment_seconds, hop_seconds, pad_type), 'holdout_fold={}'.format(holdout_fold),
model_marker)
create_folder(checkpoints_dir)
validate_statistics_path = os.path.join(workspace, 'statistics', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'.format(
segment_seconds, hop_seconds, pad_type), 'holdout_fold={}'.format(holdout_fold),
model_marker, 'validate_statistics.pickle')
create_folder(os.path.dirname(validate_statistics_path))
logs_dir = os.path.join(workspace, 'logs', filename, args.mode,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'.format(
segment_seconds, hop_seconds, pad_type), 'holdout_fold={}'.format(holdout_fold),
model_marker)
create_logging(logs_dir, 'w')
logging.info(args)
# Load scalar
scalar = load_scalar(scalar_path)
# Model
model = get_model(classes_num, model_type, args)
if cuda:
model.cuda()
if model_type.endswith("GCNEmb"):
model.graph_encoder.graph = model.graph_encoder.graph.to(model.dummy_param.device)
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
eps=1e-08, weight_decay=0., amsgrad=True)
# Data generator
data_generator = DataGenerator(
curated_feature_hdf5_path=curated_feature_hdf5_path,
noisy_feature_hdf5_path=noisy_feature_hdf5_path,
curated_cross_validation_path=curated_cross_validation_path,
noisy_cross_validation_path=noisy_cross_validation_path,
train_source=train_source,
holdout_fold=holdout_fold,
segment_seconds=segment_seconds,
hop_seconds=hop_seconds,
pad_type=pad_type,
scalar=scalar,
batch_size=batch_size)
# Evaluator
evaluator = Evaluator(
model=model,
data_generator=data_generator,
cuda=cuda, mtl=True)
# Statistics
validate_statistics_container = StatisticsContainer(validate_statistics_path)
train_bgn_time = time.time()
iteration = 0
# Train on mini batches
for batch_data_dict in data_generator.generate_train():
# Evaluate
if iteration % 500 == 0:
logging.info('------------------------------------')
logging.info('Iteration: {}'.format(iteration))
train_fin_time = time.time()
# Evaluate on partial of train data
logging.info('Train statistics:')
for target_source in ['curated', 'noisy']:
validate_curated_statistics = evaluator.evaluate(
data_type='train',
target_source=target_source,
max_iteration=max_iteration,
verbose=False)
# Evaluate on holdout validation data
if holdout_fold != 'none':
logging.info('Validate statistics:')
for target_source in ['curated', 'noisy']:
validate_curated_statistics = evaluator.evaluate(
data_type='validate',
target_source=target_source,
max_iteration=None,
verbose=False)
validate_statistics_container.append(
iteration, target_source, validate_curated_statistics)
validate_statistics_container.dump()
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'Train time: {:.3f} s, validate time: {:.3f} s'
''.format(train_time, validate_time))
train_bgn_time = time.time()
# Save model
if iteration % 1000 == 0 and iteration > 0:
checkpoint = {
'iteration': iteration,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
# Reduce learning rate
if reduce_lr and iteration % 200 == 0 and iteration > 0:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.9
# Move data to GPU
for key in batch_data_dict.keys():
if key in ['feature', 'mask', 'target', 'source']:
batch_data_dict[key] = move_data_to_gpu(
batch_data_dict[key], cuda)
# Train
model.train()
batch_output = model(batch_data_dict['feature'], batch_data_dict['source'])
# loss
loss = binary_cross_entropy(batch_output, batch_data_dict['target'])
# Backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Stop learning
if iteration == args.max_iters:
break
iteration += 1
def inference_validation(args):
'''Inference and calculate metrics on validation data.
Args:
dataset_dir: string, directory of dataset
workspace: string, directory of workspace
        train_source: 'curated' | 'noisy' | 'curated_and_noisy'
segment_seconds: float, duration of audio recordings to be padded or split
hop_seconds: float, hop seconds between segments
pad_type: 'constant' | 'repeat'
holdout_fold: '1', '2', '3', '4'
model_type: string, e.g. 'Cnn_9layers_AvgPooling'
iteration: int, load model of this iteration
batch_size: int
cuda: bool
mini_data: bool, set True for debugging on a small part of data
visualize: bool, visualize the logmel spectrogram of segments
'''
    # Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
train_source = args.train_source
segment_seconds = args.segment_seconds
hop_seconds = args.hop_seconds
pad_type = args.pad_type
holdout_fold = args.holdout_fold
model_type = args.model_type
if not model_type.endswith("GCNEmb"):
model_marker = model_type
else:
graph_marker = args.graph_dir[args.graph_dir.rfind("/")+1:-8]
model_marker = f"{model_type}_{graph_marker}"
model_marker = "mtl_"+model_marker
iteration = args.iteration
batch_size = args.batch_size
cuda = args.cuda and torch.cuda.is_available()
mini_data = args.mini_data
visualize = args.visualize
filename = args.filename
mel_bins = config.mel_bins
classes_num = config.classes_num
frames_per_second = config.frames_per_second
# Paths
if mini_data:
prefix = 'minidata_'
else:
prefix = ''
curated_feature_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_curated.h5')
noisy_feature_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_noisy.h5')
curated_cross_validation_path = os.path.join(workspace,
'cross_validation_metadata', 'train_curated_cross_validation.csv')
noisy_cross_validation_path = os.path.join(workspace,
'cross_validation_metadata', 'train_noisy_cross_validation.csv')
scalar_path = os.path.join(workspace, 'scalars',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_noisy.h5')
checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'logmel_{}frames_{}melbins'.format(frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'
''.format(segment_seconds, hop_seconds, pad_type), 'holdout_fold={}'
''.format(holdout_fold), model_marker, '{}_iterations.pth'.format(iteration))
figs_dir = os.path.join(workspace, 'figures')
create_folder(figs_dir)
logs_dir = os.path.join(workspace, 'logs', filename, args.mode,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'
''.format(segment_seconds, hop_seconds, pad_type),
'holdout_fold={}'.format(holdout_fold), model_marker)
create_logging(logs_dir, 'w')
logging.info(args)
# Load scalar
scalar = load_scalar(scalar_path)
# Model
model = get_model(classes_num, model_type, args)
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model'])
if cuda:
model.cuda()
if model_type.endswith("GCNEmb"):
model.graph_encoder.graph = model.graph_encoder.graph.to(model.dummy_param.device)
# Data generator
data_generator = DataGenerator(
curated_feature_hdf5_path=curated_feature_hdf5_path,
noisy_feature_hdf5_path=noisy_feature_hdf5_path,
curated_cross_validation_path=curated_cross_validation_path,
noisy_cross_validation_path=noisy_cross_validation_path,
train_source=train_source,
holdout_fold=holdout_fold,
segment_seconds=segment_seconds,
hop_seconds=hop_seconds,
pad_type=pad_type,
scalar=scalar,
batch_size=batch_size)
# Evaluator
evaluator = Evaluator(
model=model,
data_generator=data_generator,
cuda=cuda, mtl=True)
# Evaluate
for target_source in ['curated', 'noisy']:
validate_curated_statistics = evaluator.evaluate(
data_type='validate',
            target_source=target_source,
max_iteration=None,
verbose=True)
# Visualize
if visualize:
save_fig_path = os.path.join(figs_dir,
'{}_logmel.png'.format(target_source))
validate_curated_statistics = evaluator.visualize(
data_type='validate',
target_source=target_source,
save_fig_path=save_fig_path,
max_iteration=None,
verbose=False)
def inference_test(args):
    '''Inference on test data and write a submission file.
Args:
dataset_dir: string, directory of dataset
workspace: string, directory of workspace
        train_source: 'curated' | 'noisy' | 'curated_and_noisy'
segment_seconds: float, duration of audio recordings to be padded or split
hop_seconds: float, hop seconds between segments
pad_type: 'constant' | 'repeat'
model_type: string, e.g. 'Cnn_9layers_AvgPooling'
iteration: int, load model of this iteration
batch_size: int
cuda: bool
mini_data: bool, set True for debugging on a small part of data
visualize: bool, visualize the logmel spectrogram of segments
'''
    # Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
train_source = args.train_source
segment_seconds = args.segment_seconds
hop_seconds = args.hop_seconds
pad_type = args.pad_type
model_type = args.model_type
if not model_type.endswith("GCNEmb"):
model_marker = model_type
else:
graph_marker = args.graph_dir[args.graph_dir.rfind("/")+1:-8]
model_marker = f"{model_type}_{graph_marker}"
model_marker = "mtl_"+model_marker
iteration = args.iteration
batch_size = args.batch_size
cuda = args.cuda and torch.cuda.is_available()
mini_data = args.mini_data
filename = args.filename
holdout_fold = 'none' # Use model trained on full data without validation
mel_bins = config.mel_bins
classes_num = config.classes_num
frames_per_second = config.frames_per_second
# Paths
if mini_data:
prefix = 'minidata_'
else:
prefix = ''
test_feature_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'test.h5')
scalar_path = os.path.join(workspace, 'scalars',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train_noisy.h5')
checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'logmel_{}frames_{}melbins'.format(frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'
''.format(segment_seconds, hop_seconds, pad_type), 'holdout_fold={}'
''.format(holdout_fold), model_marker, '{}_iterations.pth'.format(iteration))
submission_path = os.path.join(workspace, 'submissions', filename,
'logmel_{}frames_{}melbins'.format(frames_per_second, mel_bins),
'train_source={}'.format(train_source), 'segment={}s,hop={}s,pad_type={}'
''.format(segment_seconds, hop_seconds, pad_type), 'holdout_fold={}'
''.format(holdout_fold), model_marker, '{}_iterations_submission.csv'
''.format(iteration))
create_folder(os.path.dirname(submission_path))
# Load scalar
scalar = load_scalar(scalar_path)
# Model
model = get_model(classes_num, model_type, args)
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model'])
if cuda:
model.cuda()
if model_type.endswith("GCNEmb"):
model.graph_encoder.graph = model.graph_encoder.graph.to(model.dummy_param.device)
# Data generator
data_generator = TestDataGenerator(
test_feature_hdf5_path=test_feature_hdf5_path,
segment_seconds=segment_seconds,
hop_seconds=hop_seconds,
pad_type=pad_type,
scalar=scalar,
batch_size=batch_size)
generate_func = data_generator.generate_test()
# Results of segments
output_dict = forward(
model=model,
generate_func=generate_func,
cuda=cuda, mtl=True)
# Results of audio recordings
result_dict = segment_prediction_to_clip_prediction(
output_dict, average='arithmetic')
# Write submission
write_submission(result_dict, submission_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
# Train
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser_train.add_argument('--graph_dir', type=str, default="", help='Directory of graph.')
parser_train.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
parser_train.add_argument('--train_source', type=str, choices=['curated', 'noisy', 'curated_and_noisy'], required=True)
parser_train.add_argument('--segment_seconds', type=float, required=True, help='Segment duration for training.')
parser_train.add_argument('--hop_seconds', type=float, required=True, help='Hop duration between segments.')
parser_train.add_argument('--pad_type', type=str, choices=['constant', 'repeat'], required=True, help='Pad short audio recordings with constant silence or repetition.')
parser_train.add_argument('--holdout_fold', type=str, choices=['1', '2', '3', '4', 'none'], required=True, help='Set `none` for training on all data without validation.')
parser_train.add_argument('--model_type', type=str, required=True, help='E.g., Cnn_9layers_AvgPooling.')
parser_train.add_argument('--batch_size', type=int, required=True)
parser_train.add_argument('--cuda', action='store_true', default=False)
parser_train.add_argument('--max_iters', type=int, default=20000)
parser_train.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')
# Inference validation data
parser_inference_validation = subparsers.add_parser('inference_validation')
parser_inference_validation.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser_inference_validation.add_argument('--graph_dir', type=str, default="", help='Directory of graph.')
parser_inference_validation.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
parser_inference_validation.add_argument('--train_source', type=str, choices=['curated', 'noisy', 'curated_and_noisy'], required=True)
parser_inference_validation.add_argument('--segment_seconds', type=float, required=True, help='Segment duration for training.')
parser_inference_validation.add_argument('--hop_seconds', type=float, required=True, help='Hop duration between segments.')
parser_inference_validation.add_argument('--pad_type', type=str, choices=['constant', 'repeat'], required=True, help='Pad short audio recordings with constant silence or repetition.')
parser_inference_validation.add_argument('--holdout_fold', type=str, choices=['1', '2', '3', '4', 'none'], required=True, help='Set `none` for training on all data without validation.')
parser_inference_validation.add_argument('--model_type', type=str, required=True, help='E.g., Cnn_9layers_AvgPooling.')
parser_inference_validation.add_argument('--iteration', type=int, required=True, help='Load model of this iteration.')
parser_inference_validation.add_argument('--batch_size', type=int, required=True)
parser_inference_validation.add_argument('--cuda', action='store_true', default=False)
parser_inference_validation.add_argument('--visualize', action='store_true', default=False, help='Visualize log mel spectrogram of different sound classes.')
parser_inference_validation.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')
# Inference test data
parser_inference_validation = subparsers.add_parser('inference_test')
parser_inference_validation.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser_inference_validation.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
parser_inference_validation.add_argument('--train_source', type=str, choices=['curated', 'noisy', 'curated_and_noisy'], required=True)
parser_inference_validation.add_argument('--segment_seconds', type=float, required=True, help='Segment duration for training.')
parser_inference_validation.add_argument('--hop_seconds', type=float, required=True, help='Hop duration between segments.')
parser_inference_validation.add_argument('--pad_type', type=str, choices=['constant', 'repeat'], required=True, help='Pad short audio recordings with constant silence or repetition.')
parser_inference_validation.add_argument('--model_type', type=str, required=True, help='E.g., Cnn_9layers_AvgPooling.')
parser_inference_validation.add_argument('--iteration', type=int, required=True, help='Load model of this iteration.')
parser_inference_validation.add_argument('--batch_size', type=int, required=True)
parser_inference_validation.add_argument('--cuda', action='store_true', default=False)
parser_inference_validation.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')
# Parse arguments
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
elif args.mode == 'inference_validation':
inference_validation(args)
elif args.mode == 'inference_test':
inference_test(args)
else:
raise Exception('Error argument!')
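# Example training invocation (script name and argument values are placeholders),
# using the flags defined by parser_train above:
#   python <this_script>.py train --dataset_dir=$DATASET_DIR --workspace=$WORKSPACE \
#       --train_source=curated_and_noisy --segment_seconds=2.0 --hop_seconds=1.0 \
#       --pad_type=repeat --holdout_fold=1 --model_type=Cnn_9layers_AvgPooling \
#       --batch_size=32 --cuda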
|
[
"evaluate.StatisticsContainer",
"evaluate.Evaluator"
] |
[((40, 77), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../utils"""'], {}), "(sys.path[0], '../utils')\n", (52, 77), False, 'import os\n'), ((3402, 3496), 'os.path.join', 'os.path.join', (['workspace', '"""cross_validation_metadata"""', '"""train_curated_cross_validation.csv"""'], {}), "(workspace, 'cross_validation_metadata',\n 'train_curated_cross_validation.csv')\n", (3414, 3496), False, 'import os\n'), ((3537, 3629), 'os.path.join', 'os.path.join', (['workspace', '"""cross_validation_metadata"""', '"""train_noisy_cross_validation.csv"""'], {}), "(workspace, 'cross_validation_metadata',\n 'train_noisy_cross_validation.csv')\n", (3549, 3629), False, 'import os\n'), ((4179, 4209), 'utilities.create_folder', 'create_folder', (['checkpoints_dir'], {}), '(checkpoints_dir)\n', (4192, 4209), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((5032, 5061), 'utilities.create_logging', 'create_logging', (['logs_dir', '"""w"""'], {}), "(logs_dir, 'w')\n", (5046, 5061), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((5066, 5084), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (5078, 5084), False, 'import logging\n'), ((5117, 5141), 'utilities.load_scalar', 'load_scalar', (['scalar_path'], {}), '(scalar_path)\n', (5128, 5141), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((5574, 6003), 'data_generator.DataGeneratorMT', 'DataGenerator', ([], {'curated_feature_hdf5_path': 'curated_feature_hdf5_path', 'noisy_feature_hdf5_path': 'noisy_feature_hdf5_path', 'curated_cross_validation_path': 'curated_cross_validation_path', 'noisy_cross_validation_path': 'noisy_cross_validation_path', 'train_source': 'train_source', 'holdout_fold': 'holdout_fold', 'segment_seconds': 'segment_seconds', 'hop_seconds': 'hop_seconds', 'pad_type': 'pad_type', 'scalar': 'scalar', 'batch_size': 'batch_size'}), '(curated_feature_hdf5_path=curated_feature_hdf5_path,\n noisy_feature_hdf5_path=noisy_feature_hdf5_path,\n curated_cross_validation_path=curated_cross_validation_path,\n noisy_cross_validation_path=noisy_cross_validation_path, train_source=\n train_source, holdout_fold=holdout_fold, segment_seconds=\n segment_seconds, hop_seconds=hop_seconds, pad_type=pad_type, scalar=\n scalar, batch_size=batch_size)\n', (5587, 6003), True, 'from data_generator import DataGeneratorMT as DataGenerator, TestDataGeneratorMT as TestDataGenerator\n'), ((6113, 6187), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model', 'data_generator': 'data_generator', 'cuda': 'cuda', 'mtl': '(True)'}), '(model=model, data_generator=data_generator, cuda=cuda, mtl=True)\n', (6122, 6187), False, 'from evaluate import Evaluator, StatisticsContainer\n'), ((6273, 6318), 'evaluate.StatisticsContainer', 'StatisticsContainer', (['validate_statistics_path'], {}), '(validate_statistics_path)\n', (6292, 6318), False, 'from evaluate import Evaluator, StatisticsContainer\n'), ((6345, 6356), 'time.time', 'time.time', ([], {}), '()\n', (6354, 6356), False, 'import time\n'), ((11741, 11835), 'os.path.join', 'os.path.join', (['workspace', '"""cross_validation_metadata"""', '"""train_curated_cross_validation.csv"""'], {}), "(workspace, 'cross_validation_metadata',\n 'train_curated_cross_validation.csv')\n", (11753, 11835), False, 'import os\n'), ((11876, 11968), 'os.path.join', 'os.path.join', (['workspace', '"""cross_validation_metadata"""', '"""train_noisy_cross_validation.csv"""'], {}), "(workspace, 'cross_validation_metadata',\n 'train_noisy_cross_validation.csv')\n", (11888, 11968), False, 'import os\n'), ((12571, 12605), 'os.path.join', 'os.path.join', (['workspace', '"""figures"""'], {}), "(workspace, 'figures')\n", (12583, 12605), False, 'import os\n'), ((12610, 12633), 'utilities.create_folder', 'create_folder', (['figs_dir'], {}), '(figs_dir)\n', (12623, 12633), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((13004, 13033), 'utilities.create_logging', 'create_logging', (['logs_dir', '"""w"""'], {}), "(logs_dir, 'w')\n", (13018, 13033), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((13038, 13056), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (13050, 13056), False, 'import logging\n'), ((13089, 13113), 'utilities.load_scalar', 'load_scalar', (['scalar_path'], {}), '(scalar_path)\n', (13100, 13113), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((13206, 13233), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (13216, 13233), False, 'import torch\n'), ((13508, 13937), 'data_generator.DataGeneratorMT', 'DataGenerator', ([], {'curated_feature_hdf5_path': 'curated_feature_hdf5_path', 'noisy_feature_hdf5_path': 'noisy_feature_hdf5_path', 'curated_cross_validation_path': 'curated_cross_validation_path', 'noisy_cross_validation_path': 'noisy_cross_validation_path', 'train_source': 'train_source', 'holdout_fold': 'holdout_fold', 'segment_seconds': 'segment_seconds', 'hop_seconds': 'hop_seconds', 'pad_type': 'pad_type', 'scalar': 'scalar', 'batch_size': 'batch_size'}), '(curated_feature_hdf5_path=curated_feature_hdf5_path,\n noisy_feature_hdf5_path=noisy_feature_hdf5_path,\n curated_cross_validation_path=curated_cross_validation_path,\n noisy_cross_validation_path=noisy_cross_validation_path, train_source=\n train_source, holdout_fold=holdout_fold, segment_seconds=\n segment_seconds, hop_seconds=hop_seconds, pad_type=pad_type, scalar=\n scalar, batch_size=batch_size)\n', (13521, 13937), True, 'from data_generator import DataGeneratorMT as DataGenerator, TestDataGeneratorMT as TestDataGenerator\n'), ((14047, 14121), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model', 'data_generator': 'data_generator', 'cuda': 'cuda', 'mtl': '(True)'}), '(model=model, data_generator=data_generator, cuda=cuda, mtl=True)\n', (14056, 14121), False, 'from evaluate import Evaluator, StatisticsContainer\n'), ((17822, 17846), 'utilities.load_scalar', 'load_scalar', (['scalar_path'], {}), '(scalar_path)\n', (17833, 17846), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((17939, 17966), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (17949, 17966), False, 'import torch\n'), ((18241, 18429), 'data_generator.TestDataGeneratorMT', 'TestDataGenerator', ([], {'test_feature_hdf5_path': 'test_feature_hdf5_path', 'segment_seconds': 'segment_seconds', 'hop_seconds': 'hop_seconds', 'pad_type': 'pad_type', 'scalar': 'scalar', 'batch_size': 'batch_size'}), '(test_feature_hdf5_path=test_feature_hdf5_path,\n segment_seconds=segment_seconds, hop_seconds=hop_seconds, pad_type=\n pad_type, scalar=scalar, batch_size=batch_size)\n', (18258, 18429), True, 'from data_generator import DataGeneratorMT as DataGenerator, TestDataGeneratorMT as TestDataGenerator\n'), ((18584, 18654), 'pytorch_utils.forward', 'forward', ([], {'model': 'model', 'generate_func': 'generate_func', 'cuda': 'cuda', 'mtl': '(True)'}), '(model=model, generate_func=generate_func, cuda=cuda, mtl=True)\n', (18591, 18654), False, 'from pytorch_utils import move_data_to_gpu, forward\n'), ((18739, 18811), 'utilities.segment_prediction_to_clip_prediction', 'segment_prediction_to_clip_prediction', (['output_dict'], {'average': '"""arithmetic"""'}), "(output_dict, average='arithmetic')\n", (18776, 18811), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((18853, 18899), 'utilities.write_submission', 'write_submission', (['result_dict', 'submission_path'], {}), '(result_dict, submission_path)\n', (18869, 18899), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((18950, 19008), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Example of parser. """'}), "(description='Example of parser. ')\n", (18973, 19008), False, 'import argparse\n'), ((24151, 24173), 'utilities.get_filename', 'get_filename', (['__file__'], {}), '(__file__)\n', (24163, 24173), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, segment_prediction_to_clip_prediction, write_submission\n'), ((2561, 2586), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2584, 2586), False, 'import torch\n'), ((4625, 4666), 'os.path.dirname', 'os.path.dirname', (['validate_statistics_path'], {}), '(validate_statistics_path)\n', (4640, 4666), False, 'import os\n'), ((9271, 9332), 'losses.binary_cross_entropy', 'binary_cross_entropy', (['batch_output', "batch_data_dict['target']"], {}), "(batch_output, batch_data_dict['target'])\n", (9291, 9332), False, 'from losses import binary_cross_entropy\n'), ((10985, 11010), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11008, 11010), False, 'import torch\n'), ((16195, 16220), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16218, 16220), False, 'import torch\n'), ((17748, 17780), 'os.path.dirname', 'os.path.dirname', (['submission_path'], {}), '(submission_path)\n', (17763, 17780), False, 'import os\n'), ((1087, 1101), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1098, 1101), False, 'import pickle\n'), ((6541, 6593), 'logging.info', 'logging.info', (['"""------------------------------------"""'], {}), "('------------------------------------')\n", (6553, 6593), False, 'import logging\n'), ((6684, 6695), 'time.time', 'time.time', ([], {}), '()\n', (6693, 6695), False, 'import time\n'), ((6769, 6802), 'logging.info', 'logging.info', (['"""Train statistics:"""'], {}), "('Train statistics:')\n", (6781, 6802), False, 'import logging\n'), ((8140, 8151), 'time.time', 'time.time', ([], {}), '()\n', (8149, 8151), False, 'import time\n'), ((8539, 8578), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (8549, 8578), False, 'import torch\n'), ((7246, 7282), 'logging.info', 'logging.info', (['"""Validate statistics:"""'], {}), "('Validate statistics:')\n", (7258, 7282), False, 'import logging\n'), ((7936, 7947), 'time.time', 'time.time', ([], {}), '()\n', (7945, 7947), False, 'import time\n'), ((9035, 9079), 'pytorch_utils.move_data_to_gpu', 'move_data_to_gpu', (['batch_data_dict[key]', 'cuda'], {}), '(batch_data_dict[key], cuda)\n', (9051, 9079), False, 'from pytorch_utils import move_data_to_gpu, forward\n')]
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import shutil
import time
from datetime import datetime
from io import open
from itertools import product
from pathlib import Path
import submitit
import torch
import wandb
from filelock import FileLock
from evaluate import MultiWozEvaluator
from experiments.multiwoz_lstm.utils import util
from model.model import Model
os.environ["WANDB_API_KEY"] = ''
os.environ["WANDB_MODE"] = "dryrun"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_config(args):
config = util.unicode_to_utf8(
json.load(open('%s.json' % args.model_path, 'rb')))
    for key, value in vars(args).items():
try:
config[key] = value.value
except:
config[key] = value
return config
def loadModelAndData(args, num):
# Load dictionaries
with open('data/input_lang.index2word.json') as f:
input_lang_index2word = json.load(f)
with open('data/input_lang.word2index.json') as f:
input_lang_word2index = json.load(f)
with open('data/output_lang.index2word.json') as f:
output_lang_index2word = json.load(f)
with open('data/output_lang.word2index.json') as f:
output_lang_word2index = json.load(f)
# Reload existing checkpoint
model = Model(args, input_lang_index2word, output_lang_index2word,
input_lang_word2index, output_lang_word2index)
if args.load_param:
model.loadModel(iter=num)
# Load data
if os.path.exists(args.decode_output):
shutil.rmtree(args.decode_output)
os.makedirs(args.decode_output)
else:
os.makedirs(args.decode_output)
if os.path.exists(args.valid_output):
shutil.rmtree(args.valid_output)
os.makedirs(args.valid_output)
else:
os.makedirs(args.valid_output)
# Load validation file list:
with open('data/val_dials.json') as outfile:
val_dials = json.load(outfile)
# Load test file list:
with open('data/test_dials.json') as outfile:
test_dials = json.load(outfile)
return model, val_dials, test_dials
def decode(args, num=1):
model, val_dials, test_dials = loadModelAndData(args, num)
evaluator_valid = MultiWozEvaluator("valid")
evaluator_test = MultiWozEvaluator("test")
start_time = time.time()
for ii in range(1):
if ii == 0:
print(50 * '-' + 'GREEDY')
model.beam_search = False
else:
print(50 * '-' + 'BEAM')
model.beam_search = True
# VALIDATION
val_dials_gen = {}
valid_loss = 0
for name, val_file in val_dials.items():
            input_tensor, target_tensor, bs_tensor, db_tensor = [], [], [], []
input_tensor, target_tensor, bs_tensor, db_tensor = util.loadDialogue(
model,val_file, input_tensor, target_tensor, bs_tensor, db_tensor)
# create an empty matrix with padding tokens
input_tensor, input_lengths = util.padSequence(input_tensor)
target_tensor, target_lengths = util.padSequence(target_tensor)
bs_tensor = torch.tensor(bs_tensor, dtype=torch.float, device=device)
db_tensor = torch.tensor(db_tensor, dtype=torch.float, device=device)
output_words, loss_sentence = model.predict(input_tensor, input_lengths,
target_tensor, target_lengths,
db_tensor, bs_tensor)
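            # NOTE: loss accumulation is effectively disabled here; loss_sentence
            # is returned by model.predict() but never added to valid_loss.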
valid_loss += 0
val_dials_gen[name] = output_words
print('Current VALID LOSS:', valid_loss)
with open(args.valid_output + 'val_dials_gen.json', 'w') as outfile:
json.dump(val_dials_gen, outfile)
evaluator_valid.evaluateModel(val_dials_gen, val_dials, mode='valid')
# TESTING
test_dials_gen = {}
test_loss = 0
for name, test_file in test_dials.items():
            input_tensor, target_tensor, bs_tensor, db_tensor = [], [], [], []
input_tensor, target_tensor, bs_tensor, db_tensor = util.loadDialogue(
model, test_file, input_tensor, target_tensor, bs_tensor, db_tensor)
# create an empty matrix with padding tokens
input_tensor, input_lengths = util.padSequence(input_tensor)
target_tensor, target_lengths = util.padSequence(target_tensor)
bs_tensor = torch.tensor(bs_tensor, dtype=torch.float, device=device)
db_tensor = torch.tensor(db_tensor, dtype=torch.float, device=device)
output_words, loss_sentence = model.predict(input_tensor, input_lengths,
target_tensor, target_lengths,
db_tensor, bs_tensor)
test_loss += 0
test_dials_gen[name] = output_words
test_loss /= len(test_dials)
print('Current TEST LOSS:', test_loss)
with open(args.decode_output + 'test_dials_gen.json', 'w') as outfile:
json.dump(test_dials_gen, outfile)
blue_score, successes, matches = evaluator_test.evaluateModel(test_dials_gen,
test_dials,
mode='test')
lock = FileLock(os.path.join(args.model_dir + '_test_logs.txt' + '.new.lock'))
with lock:
with open(os.path.join(args.model_dir + '_test_logs.txt'), 'a') as f:
f.write(
f'| Test BLEU: {blue_score:.3f} | Test Success: {successes:.3f} | '
f'Test Matches: {matches:.3f} \n')
wandb.log({'Test BLEU': blue_score, 'Test Success': successes,
'Test Matches': matches})
print('TIME:', time.time() - start_time)
def decodeWrapper(args):
# Load config file
with open(args.model_path + '.config') as f:
add_args = json.load(f)
for k, v in add_args.items():
setattr(args, k, v)
args.mode = 'test'
args.load_param = True
args.dropout = 0.0
assert args.dropout == 0.0
# Start going through models
args.model_path = args.model_dir + 'translate.ckpt'
args.original = args.model_path
for ii in range(5, args.no_models + 1):
print(70 * '-' + 'EVALUATING EPOCH %s' % ii)
args.model_path = args.model_path + '-' + str(ii)
try:
decode(args, ii)
except:
print('cannot decode')
args.model_path = args.original
def hyper_evaluate(config):
parser = argparse.ArgumentParser(description='S2S')
parser.add_argument('--no_cuda', type=util.str2bool, nargs='?', const=True,
default=True,
help='enables CUDA training')
parser.add_argument('--no_models', type=int, default=5,
help='how many models to evaluate')
parser.add_argument('--original', type=str, default='model/model/',
help='Original path.')
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--model', type=str, default='lstm')
parser.add_argument('--use_emb', type=str, default='False')
parser.add_argument('--seed', type=int, default=config['seed'], metavar='S',
help='random seed (default: 1)')
parser.add_argument('--lr_rate', type=float, default=config['lr'])
parser.add_argument('--optdecay', type=float, default=config['decay'])
parser.add_argument('--topC', type=int, default=config['topC'])
parser.add_argument('--optim', type=str, default=config['optim'])
parser.add_argument('--beam_width', type=int, default=2,
help='Beam width used in beamsearch')
parser.add_argument('--write_n_best', type=util.str2bool, nargs='?', const=True,
default=False,
help='Write n-best list (n=beam_width)')
parser.add_argument('--model_path', type=str, default='model/model/translate.ckpt',
help='Path to a specific model checkpoint.')
parser.add_argument('--model_dir', type=str, default='model/')
parser.add_argument('--model_name', type=str, default='translate.ckpt')
parser.add_argument('--valid_output', type=str, default='model/data/val_dials/',
help='Validation Decoding output dir path')
parser.add_argument('--decode_output', type=str, default='model/data/test_dials/',
help='Decoding output dir path')
args, _ = parser.parse_known_args()
args.cuda = True
run_id = args.optim + '_' + str(args.optdecay) + '_' + str(args.topC) + '_' + str(
args.lr_rate) + '_exp_seed_{}'.format(args.seed)
wandb.init(project="critical-gradients-mutliwozlstm-test", reinit=True)
wandb.run.name = run_id
wandb.config.update(config)
print('Came here')
args.model_dir = os.path.join('Results', 'multiwoz-lstm', 'lstm', run_id, 'model')
torch.manual_seed(args.seed)
decodeWrapper(args)
myparser = argparse.ArgumentParser()
myparser.add_argument('--first_launch', action='store_true')
myparser.add_argument('--is_slurm', action='store_false')
myargs = myparser.parse_args()
best_hyperparameters = None
PARAM_GRID = list(product(
[100, 101, 102, 103, 104], # seeds
['sgd_c', 'sgdm_c', 'sgd', 'sgdm', 'adam', 'adam_c', 'rmsprop', 'rmsprop_c'],
# optimizer # lr
# ['none'], # aggr
# [1.0] # kappa
))
h_param_list = []
for param_ix in range(len(PARAM_GRID)):
params = PARAM_GRID[param_ix]
s, o = params
config = {}
config['seed'] = s
if 'sgd' in o:
config['lr'] = 0.1
else:
config['lr'] = 0.001
config['optim'] = o
if '_c' in o:
config['decay'] = 0.7
config['topC'] = 5
else:
config['decay'] = 0
config['topC'] = 0
if config not in h_param_list:
h_param_list.append(config)
print(len(h_param_list))
if myargs.is_slurm:
# run by submitit
d = datetime.today()
exp_dir = (
Path("./dumps/")
/ "projects"
/ "crit-grad"
/ "multiwoz-lstm"
/ f"{d.strftime('%Y-%m-%d')}_rand_eval_multiwoz"
)
exp_dir.mkdir(parents=True, exist_ok=True)
submitit_logdir = exp_dir / "submitit_logs"
num_gpus = 1
workers_per_gpu = 10
executor = submitit.AutoExecutor(folder=submitit_logdir)
executor.update_parameters(
timeout_min=60,
gpus_per_node=num_gpus,
slurm_additional_parameters={"account": "rrg-bengioy-ad"},
tasks_per_node=num_gpus,
cpus_per_task=workers_per_gpu,
slurm_mem="16G", # 16G
slurm_array_parallelism=50,
)
job = executor.map_array(hyper_evaluate, h_param_list)
print('Jobs submitted!')
else:
print("Don\'t provide the slurm argument")
|
[
"evaluate.MultiWozEvaluator"
] |
[((9406, 9431), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9429, 9431), False, 'import argparse\n'), ((1397, 1506), 'model.model.Model', 'Model', (['args', 'input_lang_index2word', 'output_lang_index2word', 'input_lang_word2index', 'output_lang_word2index'], {}), '(args, input_lang_index2word, output_lang_index2word,\n input_lang_word2index, output_lang_word2index)\n', (1402, 1506), False, 'from model.model import Model\n'), ((1603, 1637), 'os.path.exists', 'os.path.exists', (['args.decode_output'], {}), '(args.decode_output)\n', (1617, 1637), False, 'import os\n'), ((1779, 1812), 'os.path.exists', 'os.path.exists', (['args.valid_output'], {}), '(args.valid_output)\n', (1793, 1812), False, 'import os\n'), ((2335, 2361), 'evaluate.MultiWozEvaluator', 'MultiWozEvaluator', (['"""valid"""'], {}), "('valid')\n", (2352, 2361), False, 'from evaluate import MultiWozEvaluator\n'), ((2383, 2408), 'evaluate.MultiWozEvaluator', 'MultiWozEvaluator', (['"""test"""'], {}), "('test')\n", (2400, 2408), False, 'from evaluate import MultiWozEvaluator\n'), ((2427, 2438), 'time.time', 'time.time', ([], {}), '()\n', (2436, 2438), False, 'import time\n'), ((6902, 6944), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""S2S"""'}), "(description='S2S')\n", (6925, 6944), False, 'import argparse\n'), ((9090, 9161), 'wandb.init', 'wandb.init', ([], {'project': '"""critical-gradients-mutliwozlstm-test"""', 'reinit': '(True)'}), "(project='critical-gradients-mutliwozlstm-test', reinit=True)\n", (9100, 9161), False, 'import wandb\n'), ((9195, 9222), 'wandb.config.update', 'wandb.config.update', (['config'], {}), '(config)\n', (9214, 9222), False, 'import wandb\n'), ((9268, 9333), 'os.path.join', 'os.path.join', (['"""Results"""', '"""multiwoz-lstm"""', '"""lstm"""', 'run_id', '"""model"""'], {}), "('Results', 'multiwoz-lstm', 'lstm', run_id, 'model')\n", (9280, 9333), False, 'import os\n'), ((9339, 9367), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9356, 9367), False, 'import torch\n'), ((9631, 9747), 'itertools.product', 'product', (['[100, 101, 102, 103, 104]', "['sgd_c', 'sgdm_c', 'sgd', 'sgdm', 'adam', 'adam_c', 'rmsprop', 'rmsprop_c']"], {}), "([100, 101, 102, 103, 104], ['sgd_c', 'sgdm_c', 'sgd', 'sgdm',\n 'adam', 'adam_c', 'rmsprop', 'rmsprop_c'])\n", (9638, 9747), False, 'from itertools import product\n'), ((10407, 10423), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (10421, 10423), False, 'from datetime import datetime\n'), ((10769, 10814), 'submitit.AutoExecutor', 'submitit.AutoExecutor', ([], {'folder': 'submitit_logdir'}), '(folder=submitit_logdir)\n', (10790, 10814), False, 'import submitit\n'), ((569, 594), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (592, 594), False, 'import torch\n'), ((956, 995), 'io.open', 'open', (['"""data/input_lang.index2word.json"""'], {}), "('data/input_lang.index2word.json')\n", (960, 995), False, 'from io import open\n'), ((1034, 1046), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1043, 1046), False, 'import json\n'), ((1056, 1095), 'io.open', 'open', (['"""data/input_lang.word2index.json"""'], {}), "('data/input_lang.word2index.json')\n", (1060, 1095), False, 'from io import open\n'), ((1134, 1146), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1143, 1146), False, 'import json\n'), ((1156, 1196), 'io.open', 'open', (['"""data/output_lang.index2word.json"""'], {}), "('data/output_lang.index2word.json')\n", (1160, 1196), False, 'from io import open\n'), ((1236, 1248), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1245, 1248), False, 'import json\n'), ((1258, 1298), 'io.open', 'open', (['"""data/output_lang.word2index.json"""'], {}), "('data/output_lang.word2index.json')\n", (1262, 1298), False, 'from io import open\n'), ((1338, 1350), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1347, 1350), False, 'import json\n'), ((1647, 1680), 'shutil.rmtree', 'shutil.rmtree', (['args.decode_output'], {}), '(args.decode_output)\n', (1660, 1680), False, 'import shutil\n'), ((1689, 1720), 'os.makedirs', 'os.makedirs', (['args.decode_output'], {}), '(args.decode_output)\n', (1700, 1720), False, 'import os\n'), ((1739, 1770), 'os.makedirs', 'os.makedirs', (['args.decode_output'], {}), '(args.decode_output)\n', (1750, 1770), False, 'import os\n'), ((1822, 1854), 'shutil.rmtree', 'shutil.rmtree', (['args.valid_output'], {}), '(args.valid_output)\n', (1835, 1854), False, 'import shutil\n'), ((1863, 1893), 'os.makedirs', 'os.makedirs', (['args.valid_output'], {}), '(args.valid_output)\n', (1874, 1893), False, 'import os\n'), ((1912, 1942), 'os.makedirs', 'os.makedirs', (['args.valid_output'], {}), '(args.valid_output)\n', (1923, 1942), False, 'import os\n'), ((1986, 2013), 'io.open', 'open', (['"""data/val_dials.json"""'], {}), "('data/val_dials.json')\n", (1990, 2013), False, 'from io import open\n'), ((2046, 2064), 'json.load', 'json.load', (['outfile'], {}), '(outfile)\n', (2055, 2064), False, 'import json\n'), ((2102, 2130), 'io.open', 'open', (['"""data/test_dials.json"""'], {}), "('data/test_dials.json')\n", (2106, 2130), False, 'from io import open\n'), ((2164, 2182), 'json.load', 'json.load', (['outfile'], {}), '(outfile)\n', (2173, 2182), False, 'import json\n'), ((6185, 6218), 'io.open', 'open', (["(args.model_path + '.config')"], {}), "(args.model_path + '.config')\n", (6189, 6218), False, 'from io import open\n'), ((6244, 6256), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6253, 6256), False, 'import json\n'), ((685, 724), 'io.open', 'open', (["('%s.json' % args.model_path)", '"""rb"""'], {}), "('%s.json' % args.model_path, 'rb')\n", (689, 724), False, 'from io import open\n'), ((2951, 3040), 'experiments.multiwoz_lstm.utils.util.loadDialogue', 'util.loadDialogue', (['model', 'val_file', 'input_tensor', 'target_tensor', 'bs_tensor', 'db_tensor'], {}), '(model, val_file, input_tensor, target_tensor, bs_tensor,\n db_tensor)\n', (2968, 3040), False, 'from experiments.multiwoz_lstm.utils import util\n'), ((3152, 3182), 'experiments.multiwoz_lstm.utils.util.padSequence', 'util.padSequence', (['input_tensor'], {}), '(input_tensor)\n', (3168, 3182), False, 'from experiments.multiwoz_lstm.utils import util\n'), ((3227, 3258), 'experiments.multiwoz_lstm.utils.util.padSequence', 'util.padSequence', (['target_tensor'], {}), '(target_tensor)\n', (3243, 3258), False, 'from experiments.multiwoz_lstm.utils import util\n'), ((3283, 3340), 'torch.tensor', 'torch.tensor', (['bs_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(bs_tensor, dtype=torch.float, device=device)\n', (3295, 3340), False, 'import torch\n'), ((3365, 3422), 'torch.tensor', 'torch.tensor', (['db_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(db_tensor, dtype=torch.float, device=device)\n', (3377, 3422), False, 'import torch\n'), ((3813, 3864), 'io.open', 'open', (["(args.valid_output + 'val_dials_gen.json')", '"""w"""'], {}), "(args.valid_output + 'val_dials_gen.json', 'w')\n", (3817, 3864), False, 'from io import open\n'), ((3889, 3922), 'json.dump', 'json.dump', (['val_dials_gen', 'outfile'], {}), '(val_dials_gen, outfile)\n', (3898, 3922), False, 'import json\n'), ((4303, 4393), 'experiments.multiwoz_lstm.utils.util.loadDialogue', 'util.loadDialogue', (['model', 'test_file', 'input_tensor', 'target_tensor', 'bs_tensor', 'db_tensor'], {}), '(model, test_file, input_tensor, target_tensor, bs_tensor,\n db_tensor)\n', (4320, 4393), False, 'from experiments.multiwoz_lstm.utils import util\n'), ((4507, 4537), 'experiments.multiwoz_lstm.utils.util.padSequence', 'util.padSequence', (['input_tensor'], {}), '(input_tensor)\n', (4523, 4537), False, 'from experiments.multiwoz_lstm.utils import util\n'), ((4582, 4613), 'experiments.multiwoz_lstm.utils.util.padSequence', 'util.padSequence', (['target_tensor'], {}), '(target_tensor)\n', (4598, 4613), False, 'from experiments.multiwoz_lstm.utils import util\n'), ((4638, 4695), 'torch.tensor', 'torch.tensor', (['bs_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(bs_tensor, dtype=torch.float, device=device)\n', (4650, 4695), False, 'import torch\n'), ((4720, 4777), 'torch.tensor', 'torch.tensor', (['db_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(db_tensor, dtype=torch.float, device=device)\n', (4732, 4777), False, 'import torch\n'), ((5202, 5255), 'io.open', 'open', (["(args.decode_output + 'test_dials_gen.json')", '"""w"""'], {}), "(args.decode_output + 'test_dials_gen.json', 'w')\n", (5206, 5255), False, 'from io import open\n'), ((5280, 5314), 'json.dump', 'json.dump', (['test_dials_gen', 'outfile'], {}), '(test_dials_gen, outfile)\n', (5289, 5314), False, 'import json\n'), ((5590, 5651), 'os.path.join', 'os.path.join', (["(args.model_dir + '_test_logs.txt' + '.new.lock')"], {}), "(args.model_dir + '_test_logs.txt' + '.new.lock')\n", (5602, 5651), False, 'import os\n'), ((6100, 6111), 'time.time', 'time.time', ([], {}), '()\n', (6109, 6111), False, 'import time\n'), ((5938, 6030), 'wandb.log', 'wandb.log', (["{'Test BLEU': blue_score, 'Test Success': successes, 'Test Matches': matches}"], {}), "({'Test BLEU': blue_score, 'Test Success': successes,\n 'Test Matches': matches})\n", (5947, 6030), False, 'import wandb\n'), ((5694, 5741), 'os.path.join', 'os.path.join', (["(args.model_dir + '_test_logs.txt')"], {}), "(args.model_dir + '_test_logs.txt')\n", (5706, 5741), False, 'import os\n'), ((10452, 10468), 'pathlib.Path', 'Path', (['"""./dumps/"""'], {}), "('./dumps/')\n", (10456, 10468), False, 'from pathlib import Path\n')]
|
from evaluate import evaluate_activity_detection
from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing
from models import Waveform
from utils import load_groundtruth, load_annotation, read_csv, plot_metrics_boxplot
import numpy as np
import os
import csv
'''
Activity detection evaluation
The purpose of this script is to obtain the precision, recall, f-score and accuracy metrics of the beatbox activity detection interface
by comparing the activity detection output with the corresponding groundtruth (annotation file).
To obtain the activity detection output, we run the system simulation over one or more audio files.
Initializing the system appropriately makes it possible to bypass the interfaces that are not under test, such as the feature extraction and classification stages.
The main script (the simulation script) is designed to return an array of the detected activity (1 activity, 0 non-activity); we take this array and
compare it with the groundtruth provided by the audio's csv annotation.
'''
# Save test metrics in a csv so that plots can be generated from them later
tests_dir = 'evaluation_logs/activity_detection_evaluation'
# create root folder
if not os.path.exists(tests_dir):
os.makedirs(tests_dir)
def run_test(wav_dir, csv_dir, buffer_size, log_file):
'''
Run test function:
input:
- wav_dir: Location of the audio
- csv_dir: Location of the csv annotation
- buffer_size: Default is 512 but it can be modified to test the system on different buffer sizes
- log_file: Location of the file where all results are logged.
'''
# Load audio and its annotation
print(wav_dir)
audio = Waveform(path=wav_dir)
groundtruth_annotation = load_annotation(csv_dir)
# Init system simulation
init_pre_processing()
init_activity_detection(func_type=1)
init_feature_extraction(by_pass=True)
init_classificator(by_pass=True)
# run simulation
result = main(audio, buffer_size)
# Init groundtruth activity array
groundtruth_activity = np.zeros(len(result['ONSET_LOCATIONS']))
sample_rate = audio.sample_rate
    # Transform the annotation into the desired format (1 activity, 0 non-activity);
    # rows are taken in (onset, offset) pairs and times in seconds are scaled to sample indices
for i in range(0, len(groundtruth_annotation), 2):
sample_instant_1 = int(float(groundtruth_annotation[i][0]) * sample_rate)
sample_instant_2 = int(float(groundtruth_annotation[i + 1][0]) * sample_rate)
groundtruth_activity[sample_instant_1:sample_instant_2] = 1
# evaluate activity detection
precision, recall, f1_score, accuracy = evaluate_activity_detection(groundtruth_activity, result['ONSET_LOCATIONS'])
row = [wav_dir, precision, recall, f1_score, accuracy]
with open(log_file, 'a+', newline='') as file:
w = csv.writer(file)
w.writerow(row)
def all_dataset_test(startpath, buffer_size=512, proposal=3):
'''
all_dataset_test:
input:
- startpath: root directory of audios
- buffer_size: test the system on different buffer sizes
given a directory run test for each audio, results are stored in the log file
'''
# Create dataset_log.csv file where all the metadata will be located.
log_file = tests_dir + '/proposal_' + str(proposal) + '/activity_detection_log_' + str(buffer_size) + '.csv'
with open(log_file, 'w', newline='') as f:
# create the csv writer
writer = csv.writer(f)
# write a row to the csv file
header = ['Audio', 'Precision', 'Recall', 'F1-Score', 'Accuracy']
writer.writerow(header)
for root, _, files in os.walk(startpath):
folder = '/' + os.path.basename(root) + '/'
# #TODO: Optimize parsing csv and its wav (currently double for...)
for f in files:
if f.endswith('.wav') and os.path.isfile(startpath + folder + f.split('.')[0] + '.csv'):
wav_dir = startpath + folder + f
csv_dir = startpath + folder + f.split('.')[0] + '.csv'
run_test(wav_dir, csv_dir, buffer_size, log_file)
def generate_plots(buffer_sizes, proposal=3):
'''
Read log file and creates a boxplot
'''
for buffer_size in buffer_sizes:
evaluation_csv = read_csv(
tests_dir + '/proposal_' + str(proposal) + '/activity_detection_log_' + str(buffer_size) + '.csv')
precision = []
recall = []
f1_score = []
accuracy = []
for i in range(1, len(evaluation_csv), 1):
precision.append(evaluation_csv[i][1])
recall.append(evaluation_csv[i][2])
f1_score.append(evaluation_csv[i][3])
accuracy.append(evaluation_csv[i][4])
plot_metrics_boxplot(precision, recall, f1_score, accuracy, buffer_size)
def buffer_size_test(path, buffer_sizes):
# Run all dataset_test with different buffer size
for buffer_size in buffer_sizes:
all_dataset_test(path, buffer_size=buffer_size, proposal=3)
startpath = "../../RawDataset" # Root dir of test audios
buffer_sizes = [512] # Different buffer size of the test
buffer_size_test(startpath, buffer_sizes)
# Run tests
#all_dataset_test(startpath)
# Save plots
generate_plots(buffer_sizes, proposal=3)
|
[
"evaluate.evaluate_activity_detection"
] |
[((1249, 1274), 'os.path.exists', 'os.path.exists', (['tests_dir'], {}), '(tests_dir)\n', (1263, 1274), False, 'import os\n'), ((1280, 1302), 'os.makedirs', 'os.makedirs', (['tests_dir'], {}), '(tests_dir)\n', (1291, 1302), False, 'import os\n'), ((1746, 1768), 'models.Waveform', 'Waveform', ([], {'path': 'wav_dir'}), '(path=wav_dir)\n', (1754, 1768), False, 'from models import Waveform\n'), ((1798, 1822), 'utils.load_annotation', 'load_annotation', (['csv_dir'], {}), '(csv_dir)\n', (1813, 1822), False, 'from utils import load_groundtruth, load_annotation, read_csv, plot_metrics_boxplot\n'), ((1857, 1878), 'app.init_pre_processing', 'init_pre_processing', ([], {}), '()\n', (1876, 1878), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1883, 1919), 'app.init_activity_detection', 'init_activity_detection', ([], {'func_type': '(1)'}), '(func_type=1)\n', (1906, 1919), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1924, 1961), 'app.init_feature_extraction', 'init_feature_extraction', ([], {'by_pass': '(True)'}), '(by_pass=True)\n', (1947, 1961), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1966, 1998), 'app.init_classificator', 'init_classificator', ([], {'by_pass': '(True)'}), '(by_pass=True)\n', (1984, 1998), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((2034, 2058), 'app.main', 'main', (['audio', 'buffer_size'], {}), '(audio, buffer_size)\n', (2038, 2058), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((2651, 2727), 'evaluate.evaluate_activity_detection', 'evaluate_activity_detection', (['groundtruth_activity', "result['ONSET_LOCATIONS']"], {}), "(groundtruth_activity, result['ONSET_LOCATIONS'])\n", (2678, 2727), False, 'from evaluate import evaluate_activity_detection\n'), ((3734, 3752), 'os.walk', 'os.walk', (['startpath'], {}), '(startpath)\n', (3741, 3752), False, 'import os\n'), ((2852, 2868), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (2862, 2868), False, 'import csv\n'), ((3506, 3519), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3516, 3519), False, 'import csv\n'), ((4828, 4900), 'utils.plot_metrics_boxplot', 'plot_metrics_boxplot', (['precision', 'recall', 'f1_score', 'accuracy', 'buffer_size'], {}), '(precision, recall, f1_score, accuracy, buffer_size)\n', (4848, 4900), False, 'from utils import load_groundtruth, load_annotation, read_csv, plot_metrics_boxplot\n'), ((3777, 3799), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (3793, 3799), False, 'import os\n')]
|
import sys
import os
import keras
import random as rn
import numpy as np
import tensorflow as tf
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from evaluate import evaluate
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config = config)
K.tensorflow_backend.set_session(sess)
import pandas as pd
import math
from sklearn.utils import shuffle
import model as M
import time
from generateNegatives import getNegativeSamples
from TimePreprocessor import timestamp_processor
embedding_size = 32
batch_size = 256
learning_rate = 0.001
patience = 10
sequence_length = 5
width = 128
depth = 4
dropout_rate = 0.1
tr_dataset = pd.read_csv("movielens/train.txt",sep=',',names="user_id,item_id,rating,timestamp".split(","))
va_dataset = pd.read_csv("movielens/validation.txt",sep=',',names="user_id,item_id,rating,timestamp".split(","))
te_dataset = pd.read_csv("movielens/test.txt",sep=',',names="user_id,item_id,rating,timestamp".split(","))
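# Build each user's positive interactions (train + validation + test), sorted by timestamp.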
userSortedTimestamp = {}
for uid in tr_dataset.user_id.unique().tolist():
trPosInstance = tr_dataset.loc[tr_dataset['user_id'] == uid]
temp = va_dataset.loc[va_dataset['user_id'] == uid]
vaPosInstance = temp.loc[temp['rating'] == 1]
temp = te_dataset.loc[te_dataset['user_id'] == uid]
tePosInstance = temp.loc[temp['rating'] == 1]
posInstance = pd.concat([trPosInstance, vaPosInstance, tePosInstance], ignore_index=True)
userSortedTimestamp[uid] = posInstance.sort_values(by=['timestamp'])
tr_dataset = timestamp_processor(tr_dataset, userSortedTimestamp, sequence_length)
va_dataset = timestamp_processor(va_dataset, userSortedTimestamp, sequence_length)
te_dataset = timestamp_processor(te_dataset, userSortedTimestamp, sequence_length)
num_users = max(tr_dataset['user_id'])
num_items = max(max(tr_dataset['item_id']), max(va_dataset['item_id']), max(te_dataset['item_id']))
tr_dataset['timestamp_hour'] = (tr_dataset['timestamp'] / 3600).astype(int)
dataset = tr_dataset.groupby('user_id')
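# For each user, record the items and hour buckets they never interacted with; these act as negative-sampling pools.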
userUninteractedItems = {}
userUninteractedTimes = {}
for uid, user_data in dataset:
userItem = list(user_data['item_id'].unique())
userTime = list(user_data['timestamp_hour'].unique())
max_th = max(user_data['timestamp_hour'])
min_th = min(user_data['timestamp_hour'])
userUninteractedItems[uid] = list(set(range(1, num_items + 1)) - set(userItem))
userUninteractedTimes[uid] = list(set(range(min_th, max_th + 1)) - set(userTime))
model = M.TimelyRec([6], num_users, num_items, embedding_size, sequence_length, width, depth, dropout=dropout_rate)
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=learning_rate))
best_hr1 = 0
best_hr5 = 0
best_ndcg5 = 0
best_hr10 = 0
best_ndcg10 = 0
best_hr10_i = 0
for epoch in range(200):
print ("Epoch " + str(epoch))
print ("Generating negative samples...")
t0 = time.time()
tr_neg_item_dataset, tr_neg_time_dataset, tr_neg_itemtime_dataset = getNegativeSamples(tr_dataset, userUninteractedItems, userUninteractedTimes, num_users, num_items)
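    # Drop the stale time/sequence feature columns from the negative samples; timestamp_processor rebuilds them below.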
tr_neg_time_dataset = tr_neg_time_dataset.drop(['year', 'month', 'date','hour', 'day_of_week'], axis=1)
for i in range(sequence_length):
tr_neg_time_dataset = tr_neg_time_dataset.drop(['month' + str(i), 'date' + str(i), 'hour' + str(i), 'day_of_week' + str(i), 'timestamp' + str(i), 'item_id' + str(i)], axis=1)
tr_neg_itemtime_dataset = tr_neg_itemtime_dataset.drop(['year', 'month', 'date', 'hour', 'day_of_week'], axis=1)
for i in range(sequence_length):
tr_neg_itemtime_dataset = tr_neg_itemtime_dataset.drop(['month' + str(i), 'date' + str(i), 'hour' + str(i), 'day_of_week' + str(i), 'timestamp' + str(i), 'item_id' + str(i)], axis=1)
tr_neg_time_dataset = timestamp_processor(tr_neg_time_dataset, userSortedTimestamp, sequence_length)
tr_neg_itemtime_dataset = timestamp_processor(tr_neg_itemtime_dataset, userSortedTimestamp, sequence_length)
tr_neg_dataset = pd.concat([tr_neg_item_dataset, tr_neg_time_dataset, tr_neg_itemtime_dataset])
tr_posneg_dataset = shuffle(pd.concat([tr_dataset, tr_neg_dataset], join='inner', ignore_index=True))
print ("Training...")
t1 = time.time()
# Train
    for i in range(math.ceil(len(tr_posneg_dataset) / batch_size)):  # ceil avoids a trailing empty batch when len is divisible by batch_size
if (i + 1) * batch_size > len(tr_posneg_dataset):
tr_batch = tr_posneg_dataset.iloc[i * batch_size : ]
else:
tr_batch = tr_posneg_dataset.iloc[i * batch_size : (i + 1) * batch_size]
user_input = tr_batch.user_id
item_input = tr_batch.item_id
recent_month_inputs = []
recent_day_inputs = []
recent_date_inputs = []
recent_hour_inputs = []
recent_timestamp_inputs = []
recent_itemid_inputs = []
month_input = tr_batch.month
day_input = tr_batch.day_of_week
date_input = tr_batch.date
hour_input = tr_batch.hour
timestamp_input = tr_batch.timestamp
for j in range(sequence_length):
recent_month_inputs.append(tr_batch['month' + str(j)])
recent_day_inputs.append(tr_batch['day_of_week' + str(j)])
recent_date_inputs.append(tr_batch['date' + str(j)])
recent_hour_inputs.append(tr_batch['hour' + str(j)])
recent_timestamp_inputs.append(tr_batch['timestamp' + str(j)])
recent_itemid_inputs.append(tr_batch['item_id' + str(j)])
labels = tr_batch.rating
hist = model.fit([user_input, item_input, month_input, day_input, date_input, hour_input, timestamp_input] + [recent_month_inputs[j] for j in range(sequence_length)]+ [recent_day_inputs[j] for j in range(sequence_length)]+ [recent_date_inputs[j] for j in range(sequence_length)]+ [recent_hour_inputs[j] for j in range(sequence_length)]+ [recent_timestamp_inputs[j] for j in range(sequence_length)] + [recent_itemid_inputs[j] for j in range(sequence_length)], labels,
                batch_size=len(tr_batch), epochs=1, verbose=0, shuffle=False)
print ("Training time: " + str(round(time.time() - t1, 1)))
print('Iteration %d: loss = %.4f'
% (epoch, hist.history['loss'][0]))
print ("Evaluating...")
t2 = time.time()
# Evaluation
HR1, HR5, NDCG5, HR10, NDCG10 = evaluate(model, va_dataset, num_candidates=301, sequence_length=sequence_length)
print ("Test time: " + str(round(time.time() - t2, 1)))
print ("Val")
print ("HR@1 : " + str(round(HR1, 4)))
print ("HR@5 : " + str(round(HR5, 4)))
print ("NDCG@5 : " + str(round(NDCG5, 4)))
print ("HR@10 : " + str(round(HR10, 4)))
print ("NDCG@10: " + str(round(NDCG10, 4)))
print ("")
if HR10 > best_hr10:
best_hr1 = HR1
best_hr5 = HR5
best_ndcg5 = NDCG5
best_hr10 = HR10
best_ndcg10 = NDCG10
best_hr10_i = epoch
model.save_weights("saved_model.h5")
print ("Best HR@1 : " + str(round(best_hr1, 4)))
print ("Best HR@5 : " + str(round(best_hr5, 4)))
print ("Best NDCG@5 : " + str(round(best_ndcg5, 4)))
print ("Best HR@10 : " + str(round(best_hr10, 4)))
print ("Best NDCG@10: " + str(round(best_ndcg10, 4)))
print ('')
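    # Early stopping: quit once HR@10 has not improved for more than `patience` epochs.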
if best_hr10_i + patience < epoch:
exit(1)
|
[
"evaluate.evaluate"
] |
[((266, 282), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (280, 282), True, 'import tensorflow as tf\n'), ((330, 355), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (340, 355), True, 'import tensorflow as tf\n'), ((358, 396), 'keras.backend.tensorflow_backend.set_session', 'K.tensorflow_backend.set_session', (['sess'], {}), '(sess)\n', (390, 396), True, 'from keras import backend as K\n'), ((1594, 1663), 'TimePreprocessor.timestamp_processor', 'timestamp_processor', (['tr_dataset', 'userSortedTimestamp', 'sequence_length'], {}), '(tr_dataset, userSortedTimestamp, sequence_length)\n', (1613, 1663), False, 'from TimePreprocessor import timestamp_processor\n'), ((1677, 1746), 'TimePreprocessor.timestamp_processor', 'timestamp_processor', (['va_dataset', 'userSortedTimestamp', 'sequence_length'], {}), '(va_dataset, userSortedTimestamp, sequence_length)\n', (1696, 1746), False, 'from TimePreprocessor import timestamp_processor\n'), ((1760, 1829), 'TimePreprocessor.timestamp_processor', 'timestamp_processor', (['te_dataset', 'userSortedTimestamp', 'sequence_length'], {}), '(te_dataset, userSortedTimestamp, sequence_length)\n', (1779, 1829), False, 'from TimePreprocessor import timestamp_processor\n'), ((2555, 2666), 'model.TimelyRec', 'M.TimelyRec', (['[6]', 'num_users', 'num_items', 'embedding_size', 'sequence_length', 'width', 'depth'], {'dropout': 'dropout_rate'}), '([6], num_users, num_items, embedding_size, sequence_length,\n width, depth, dropout=dropout_rate)\n', (2566, 2666), True, 'import model as M\n'), ((1431, 1506), 'pandas.concat', 'pd.concat', (['[trPosInstance, vaPosInstance, tePosInstance]'], {'ignore_index': '(True)'}), '([trPosInstance, vaPosInstance, tePosInstance], ignore_index=True)\n', (1440, 1506), True, 'import pandas as pd\n'), ((2973, 2984), 'time.time', 'time.time', ([], {}), '()\n', (2982, 2984), False, 'import time\n'), ((3057, 3159), 'generateNegatives.getNegativeSamples', 'getNegativeSamples', (['tr_dataset', 'userUninteractedItems', 'userUninteractedTimes', 'num_users', 'num_items'], {}), '(tr_dataset, userUninteractedItems, userUninteractedTimes,\n num_users, num_items)\n', (3075, 3159), False, 'from generateNegatives import getNegativeSamples\n'), ((3858, 3936), 'TimePreprocessor.timestamp_processor', 'timestamp_processor', (['tr_neg_time_dataset', 'userSortedTimestamp', 'sequence_length'], {}), '(tr_neg_time_dataset, userSortedTimestamp, sequence_length)\n', (3877, 3936), False, 'from TimePreprocessor import timestamp_processor\n'), ((3967, 4053), 'TimePreprocessor.timestamp_processor', 'timestamp_processor', (['tr_neg_itemtime_dataset', 'userSortedTimestamp', 'sequence_length'], {}), '(tr_neg_itemtime_dataset, userSortedTimestamp,\n sequence_length)\n', (3986, 4053), False, 'from TimePreprocessor import timestamp_processor\n'), ((4071, 4149), 'pandas.concat', 'pd.concat', (['[tr_neg_item_dataset, tr_neg_time_dataset, tr_neg_itemtime_dataset]'], {}), '([tr_neg_item_dataset, tr_neg_time_dataset, tr_neg_itemtime_dataset])\n', (4080, 4149), True, 'import pandas as pd\n'), ((4296, 4307), 'time.time', 'time.time', ([], {}), '()\n', (4305, 4307), False, 'import time\n'), ((6335, 6346), 'time.time', 'time.time', ([], {}), '()\n', (6344, 6346), False, 'import time\n'), ((6400, 6485), 'evaluate.evaluate', 'evaluate', (['model', 'va_dataset'], {'num_candidates': '(301)', 'sequence_length': 'sequence_length'}), '(model, va_dataset, num_candidates=301, sequence_length=sequence_length\n )\n', (6408, 6485), False, 'from evaluate import evaluate\n'), ((2730, 2752), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2734, 2752), False, 'from keras.optimizers import Adam\n'), ((4187, 4259), 'pandas.concat', 'pd.concat', (['[tr_dataset, tr_neg_dataset]'], {'join': '"""inner"""', 'ignore_index': '(True)'}), "([tr_dataset, tr_neg_dataset], join='inner', ignore_index=True)\n", (4196, 4259), True, 'import pandas as pd\n'), ((6186, 6197), 'time.time', 'time.time', ([], {}), '()\n', (6195, 6197), False, 'import time\n'), ((6519, 6530), 'time.time', 'time.time', ([], {}), '()\n', (6528, 6530), False, 'import time\n')]
|
import glob
import logging
import os
import pickle
import random
import re
import shutil
from typing import Dict, List, Tuple
import pandas as pd
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm.notebook import tqdm, trange
from pathlib import Path
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
get_linear_schedule_with_warmup,
)
# try:
# from torch.utils.tensorboard import SummaryWriter
# except ImportError:
# from tensorboardX import SummaryWriter
import evaluate
import train
import dataset
import utils
# Configs
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# Args to allow for easy conversion of the Python script to a notebook
class Args():
def __init__(self):
self.output_dir = 'output-small-save'
self.model_type = 'gpt2'
self.model_name_or_path = 'microsoft/DialoGPT-small'
self.config_name = 'microsoft/DialoGPT-small'
self.tokenizer_name = 'microsoft/DialoGPT-small'
self.cache_dir = 'cached'
self.block_size = 512
self.do_train = False
self.do_eval = True
self.evaluate_during_training = False
self.per_gpu_train_batch_size = 4
self.per_gpu_eval_batch_size = 4
self.gradient_accumulation_steps = 1
self.learning_rate = 5e-5
self.weight_decay = 0.0
self.adam_epsilon = 1e-8
self.max_grad_norm = 1.0
self.num_train_epochs = 3
self.max_steps = -1
self.warmup_steps = 0
self.logging_steps = 1000
self.save_steps = 3500
self.save_total_limit = None
self.eval_all_checkpoints = False
self.no_cuda = False
self.overwrite_output_dir = True
self.overwrite_cache = True
self.should_continue = False
self.seed = 42
self.local_rank = -1
self.fp16 = False
self.fp16_opt_level = 'O1'
args = Args()
# Main runner
def main(path_to_train_data, path_to_validation_data):
args = Args()
df_trn, df_val = dataset.make_dataset(path_to_train_data, path_to_validation_data)
if args.should_continue:
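        # NOTE: _sorted_checkpoints is not defined in this snippet; it is assumed to be provided by the original training utilities.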
sorted_checkpoints = _sorted_checkpoints(args)
if len(sorted_checkpoints) == 0:
raise ValueError("Used --should_continue but no checkpoint was found in --output_dir.")
else:
args.model_name_or_path = sorted_checkpoints[-1]
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
and not args.should_continue
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup CUDA, GPU & distributed training
device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
utils.set_seed(args)
config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
model = AutoModelWithLMHead.from_pretrained(
args.model_name_or_path,
from_tf=False,
config=config,
cache_dir=args.cache_dir,
)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = utils.load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False)
global_step, tr_loss = train.train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
if args.do_train:
# Create output directory if needed
os.makedirs(args.output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = AutoModelWithLMHead.from_pretrained(args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = AutoModelWithLMHead.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate.evaluate(args, model, tokenizer, df_trn, df_val, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
main(path_to_train_data="data/train_data.json", path_to_validation_data="data/validate_data.json")
|
[
"evaluate.evaluate"
] |
[((974, 1001), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (991, 1001), False, 'import logging\n'), ((1033, 1066), 'transformers.MODEL_WITH_LM_HEAD_MAPPING.keys', 'MODEL_WITH_LM_HEAD_MAPPING.keys', ([], {}), '()\n', (1064, 1066), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((2585, 2650), 'dataset.make_dataset', 'dataset.make_dataset', (['path_to_train_data', 'path_to_validation_data'], {}), '(path_to_train_data, path_to_validation_data)\n', (2605, 2650), False, 'import dataset\n'), ((3439, 3459), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3451, 3459), False, 'import torch\n'), ((3478, 3503), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3501, 3503), False, 'import torch\n'), ((3558, 3753), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if args.local_rank in [-1, 0] else logging.WARN)'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else\n logging.WARN)\n", (3577, 3753), False, 'import logging\n'), ((4048, 4068), 'utils.set_seed', 'utils.set_seed', (['args'], {}), '(args)\n', (4062, 4068), False, 'import utils\n'), ((4085, 4155), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.config_name'], {'cache_dir': 'args.cache_dir'}), '(args.config_name, cache_dir=args.cache_dir)\n', (4111, 4155), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((4173, 4249), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.tokenizer_name'], {'cache_dir': 'args.cache_dir'}), '(args.tokenizer_name, cache_dir=args.cache_dir)\n', (4202, 4249), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((4263, 4383), 'transformers.AutoModelWithLMHead.from_pretrained', 'AutoModelWithLMHead.from_pretrained', (['args.model_name_or_path'], {'from_tf': '(False)', 'config': 'config', 'cache_dir': 'args.cache_dir'}), '(args.model_name_or_path, from_tf=False,\n config=config, cache_dir=args.cache_dir)\n', (4298, 4383), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((2984, 3015), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (2998, 3015), False, 'import os\n'), ((3029, 3056), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (3039, 3056), False, 'import os\n'), ((4583, 4661), 'utils.load_and_cache_examples', 'utils.load_and_cache_examples', (['args', 'tokenizer', 'df_trn', 'df_val'], {'evaluate': '(False)'}), '(args, tokenizer, df_trn, df_val, evaluate=False)\n', (4612, 4661), False, 'import utils\n'), ((4696, 4746), 'train.train', 'train.train', (['args', 'train_dataset', 'model', 'tokenizer'], {}), '(args, train_dataset, model, tokenizer)\n', (4707, 4746), False, 'import train\n'), ((5039, 5082), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (5050, 5082), False, 'import os\n'), ((5822, 5874), 'transformers.AutoModelWithLMHead.from_pretrained', 'AutoModelWithLMHead.from_pretrained', (['args.output_dir'], {}), '(args.output_dir)\n', (5857, 5874), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((5896, 5942), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.output_dir'], {}), '(args.output_dir)\n', (5925, 5942), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((5679, 5729), 'os.path.join', 'os.path.join', (['args.output_dir', '"""training_args.bin"""'], {}), "(args.output_dir, 'training_args.bin')\n", (5691, 5729), False, 'import os\n'), ((6735, 6782), 'transformers.AutoModelWithLMHead.from_pretrained', 'AutoModelWithLMHead.from_pretrained', (['checkpoint'], {}), '(checkpoint)\n', (6770, 6782), False, 'from transformers import MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup\n'), ((6840, 6912), 'evaluate.evaluate', 'evaluate.evaluate', (['args', 'model', 'tokenizer', 'df_trn', 'df_val'], {'prefix': 'prefix'}), '(args, model, tokenizer, df_trn, df_val, prefix=prefix)\n', (6857, 6912), False, 'import evaluate\n'), ((6195, 6213), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (6210, 6213), False, 'import os\n'), ((6326, 6374), 'logging.getLogger', 'logging.getLogger', (['"""transformers.modeling_utils"""'], {}), "('transformers.modeling_utils')\n", (6343, 6374), False, 'import logging\n'), ((6230, 6296), 'glob.glob', 'glob.glob', (["(args.output_dir + '/**/' + WEIGHTS_NAME)"], {'recursive': '(True)'}), "(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)\n", (6239, 6296), False, 'import glob\n')]
|
import pandas as pd
from pandas.testing import assert_frame_equal
from evaluate.report import (
PrecisionReport,
RecallReport,
Report,
DelimNotFoundError,
ReturnTypeDoesNotMatchError
)
from evaluate.classification import AlignmentAssessment
import pytest
from io import StringIO
import math
from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row
from unittest.mock import patch
class TestReport:
def test___get_report_satisfying_confidence_threshold(self):
report = Report([
pd.read_csv(StringIO(
"""id,GT_CONF
0,2
1,1
2,3
""")),
pd.read_csv(StringIO(
"""id,GT_CONF
4,3
5,1
6,2
"""))
])
actual_report = report.get_report_satisfying_confidence_threshold(2)
expected_report = Report([
pd.read_csv(StringIO(
"""id,GT_CONF
0,2
2,3
4,3
6,2
"""))])
assert actual_report==expected_report
def test___get_value_from_header_fast___field_is_in_header(self):
actual_value = Report.get_value_from_header_fast("FIELD_1=10;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_in_header_between_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_first_before_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("FIELD_1=10;DUMMY_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_last_after_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;DUMMY_2=99;FIELD_1=10;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_not_in_header(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;", "FIELD_2", int, -1, delim=";")
expected_value = -1
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_in_header___return_type_does_not_match(self):
with pytest.raises(ReturnTypeDoesNotMatchError):
Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
def test___get_value_from_header_fast___field_is_in_header___delim_is_not(self):
with pytest.raises(DelimNotFoundError):
Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim="~")
def test____create_field_from_header(self):
report = Report([
pd.read_csv(StringIO(
"""id,header
1,SEQ=ACGT;LEN=4;
2,SEQ=TG;LEN=2;
3,dummy
"""))])
report._create_field_from_header("SEQ", "header", str, "A")
report._create_field_from_header("LEN", "header", int, 1)
expected_report = Report([
pd.read_csv(StringIO(
"""id,header,SEQ,LEN
1,SEQ=ACGT;LEN=4;,ACGT,4
2,SEQ=TG;LEN=2;,TG,2
3,dummy,A,1
"""))])
assert report==expected_report
def test____create_good_eval_column(self):
report = Report([
pd.read_csv(StringIO(
"""classification
primary_correct
whatever
secondary_correct
dummy
supplementary_correct
woot
"""))])
report._create_good_eval_column()
expected_report = Report([
pd.read_csv(StringIO(
"""classification,good_eval
primary_correct,True
whatever,False
secondary_correct,True
dummy,False
supplementary_correct,True
woot,False
"""))])
assert report==expected_report
def test_getMaximumGtConf_no_gt_conf_columnRaisesKeyError(self):
report = Report([pd.DataFrame()])
with pytest.raises(KeyError):
report.get_maximum_gt_conf()
def test_getMaximumGtConf_emptyReportReturnsNaN(self):
report = Report([pd.DataFrame(data={"GT_CONF": []})])
actual = report.get_maximum_gt_conf()
assert math.isnan(actual)
def test_getMaximumGtConf_oneGTConfInReportReturnsGTConf(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5]})])
actual = report.get_maximum_gt_conf()
expected = 1.5
assert actual == expected
def test_getMaximumGtConf_threeGTConfsInReportReturnsHighest(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5, 10.5, 5.0]})])
actual = report.get_maximum_gt_conf()
expected = 10.5
assert actual == expected
def test_getMinimumGtConf_no_gt_conf_columnRaisesKeyError(self):
report = Report([pd.DataFrame()])
with pytest.raises(KeyError):
report.get_minimum_gt_conf()
def test_getMinimumGtConf_emptyReportReturnsNaN(self):
report = Report([pd.DataFrame(data={"GT_CONF": []})])
actual = report.get_minimum_gt_conf()
assert math.isnan(actual)
def test_getMinimumGtConf_oneGTConfInReportReturnsGTConf(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5]})])
actual = report.get_minimum_gt_conf()
expected = 1.5
assert actual == expected
    def test_getMinimumGtConf_threeGTConfsInReportReturnsLowest(self):
report = Report([pd.DataFrame(data={"GT_CONF": [10.5, 5.0, 0.2]})])
actual = report.get_minimum_gt_conf()
expected = 0.2
assert actual == expected
class TestPrecisionReporter:
def test_init_gtconfIsExtractedCorrectly(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
dfs = pd.DataFrame(
data=[
create_precision_report_row(0.0, gt_conf=100),
create_precision_report_row(0.0, gt_conf=100),
create_precision_report_row(0.0, gt_conf=10),
create_precision_report_row(0.0, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([dfs])
actual = report.report.GT_CONF
expected = pd.Series([100.0, 100.0, 10.0, 100.0])
assert actual.equals(expected)
def test_fromFiles_TwoFilesReturnsValidRecallReport(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3; >GT_CONF=3; unmapped
"""
contents_2 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1260;IV=[50,60);PVID=4;NB_ALL=4;ALL_ID=4;NB_DIFF_ALL_SEQ=4;ALL_SEQ_ID=4; >CHROM=GC00000578_3;SAMPLE=CFT073;POS=165;IV=[25,29);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=3;GT_CONF=3.22199; primary_incorrect
CFT073 >CHROM=1;POS=1262;IV=[60,70);PVID=5;NB_ALL=5;ALL_ID=5;NB_DIFF_ALL_SEQ=5;ALL_SEQ_ID=5; >GT_CONF=5; unmapped
CFT073 >CHROM=1;POS=1281;IV=[70,80);PVID=6;NB_ALL=6;ALL_ID=6;NB_DIFF_ALL_SEQ=6;ALL_SEQ_ID=6; >GT_CONF=6; unmapped
"""
path_1 = create_tmp_file(contents_1)
path_2 = create_tmp_file(contents_2)
contents_1_input = StringIO(contents_1)
contents_2_input = StringIO(contents_2)
dataframes = [
pd.read_csv(contents_1_input, sep="\t", keep_default_na=False),
pd.read_csv(contents_2_input, sep="\t", keep_default_na=False),
]
actual = PrecisionReport.from_files([path_1, path_2])
expected = PrecisionReport(dataframes)
path_1.unlink()
path_2.unlink()
assert actual == expected
class TestRecallReport:
def test_fromFiles_TwoFilesReturnsValidRecallReport(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3; >GT_CONF=3; unmapped
"""
contents_2 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1260;IV=[50,60);PVID=4;NB_ALL=4;ALL_ID=4;NB_DIFF_ALL_SEQ=4;ALL_SEQ_ID=4; >CHROM=GC00000578_3;SAMPLE=CFT073;POS=165;IV=[25,29);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=3;GT_CONF=3.22199; primary_incorrect
CFT073 >CHROM=1;POS=1262;IV=[60,70);PVID=5;NB_ALL=5;ALL_ID=5;NB_DIFF_ALL_SEQ=5;ALL_SEQ_ID=5; >GT_CONF=5; unmapped
CFT073 >CHROM=1;POS=1281;IV=[70,80);PVID=6;NB_ALL=6;ALL_ID=6;NB_DIFF_ALL_SEQ=6;ALL_SEQ_ID=6; >GT_CONF=6; unmapped
"""
path_1 = create_tmp_file(contents_1)
path_2 = create_tmp_file(contents_2)
contents_1_input = StringIO(contents_1)
contents_2_input = StringIO(contents_2)
dataframes = [
pd.read_csv(contents_1_input, sep="\t", keep_default_na=False),
pd.read_csv(contents_2_input, sep="\t", keep_default_na=False),
]
actual = RecallReport.from_files([path_1, path_2])
expected = RecallReport(dataframes)
path_1.unlink()
path_2.unlink()
assert actual == expected
def test_init(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30; >GT_CONF=3; unmapped
"""
contents_1_input = StringIO(contents_1)
dataframes = [pd.read_csv(contents_1_input, sep="\t", keep_default_na=False)]
report = RecallReport(dataframes)
actual_df = report.report
expected_df = pd.read_csv(StringIO(
"""sample query_probe_header ref_probe_header classification GT_CONF PVID NB_ALL ALL_ID NB_DIFF_ALL_SEQ ALL_SEQ_ID NB_OF_SAMPLES good_eval
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10; >GT_CONF=1; unmapped 1.0 1 1 1 1 1 10 False
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct 60.1133 2 2 2 2 2 20 True
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30; >GT_CONF=3; unmapped 3.0 3 3 3 3 3 30 False
"""), sep="\t")
assert actual_df.equals(expected_df)
def test_checkIfOnlyBestMappingIsKept_hasPrimaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasSecondaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasSupplementaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_ChoosesTheOneWithHighestGTConf(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=200, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=150, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=200, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasNoCorrectMapping_ChoosesTheOneWithHighestGTConf(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasPrimaryCorrectMappingWithLowGTConf_ChoosesPrimaryCorrectMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=1, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=1, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasPrimaryMapping_and_several_dfs(self):
df_1 = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
],
)
df_2 = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
],
)
df_3 = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100,
with_gt_conf=True),
],
)
report = RecallReport([df_1, df_2, df_3])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasNoCorrectMapping_ChoosesTheOneWithHighestGTConf_with_several_dfs(self):
dfs = [pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True)]),
pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True)]),
pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)]),
pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True)]),
pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True)])]
report = RecallReport(dfs)
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_simple_concatenation_with_several_dfs(self):
df_1 = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_2", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
],
)
df_2 = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_3", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_4", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
],
)
df_3 = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_5", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_6", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100,
with_gt_conf=True),
],
)
report = RecallReport([df_1, df_2, df_3], concatenate_dfs_one_by_one_keeping_only_best_mappings=False)
actual = report.report
expected = pd.DataFrame(data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_2", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_3", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_4", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_5", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100,
with_gt_conf=True),
create_recall_report_row("truth_probe_6", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100,
with_gt_conf=True),
])
assert_frame_equal(actual, expected, check_dtype=False)
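    # The tests below patch RecallReport._create_helper_columns so that the helper
    # columns precomputed in each CSV fixture are used as-is instead of being rebuilt.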
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test____get_id_to_nb_of_allele_sequences_found(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_SEQ_ID,good_eval
S1,0,2,0,True
S2,1,0,2,False
S3,2,1,1,True
S4,3,0,2,True
S5,4,1,1,False
S6,5,1,2,False
S7,6,2,1,True
S8,7,1,2,True
S1,8,2,2,True
S1,9,0,2,False
S1,10,2,3,True
S1,11,1,3,False
S1,12,2,4,False
S1,13,2,5,False
S1,14,2,6,False
S1,15,3,0,False
S1,16,3,1,False
""")
report = RecallReport([pd.read_csv(contents)], False)
actual=report._get_id_to_nb_of_allele_sequences_found()
expected=pd.read_csv(StringIO(
"""PVID,NB_OF_ALL_SEQ_ID_FOUND
0,1
1,2
2,4
3,0
"""), index_col="PVID")
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test____get_id_to_nb_of_different_allele_sequences(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,NB_DIFF_ALL_SEQ,good_eval
S1,0,2,10,True
S2,1,0,1,False
S3,2,1,3,True
S4,3,0,1,True
S5,4,1,3,False
S6,5,1,3,False
S7,6,2,10,True
S8,7,1,3,True
S1,8,2,10,True
S1,9,0,1,False
S1,10,2,10,True
S1,11,1,3,False
S1,12,2,10,False
S1,13,2,10,False
S1,14,2,10,False
S1,15,3,2,False
S1,16,3,2,False
""")
report = RecallReport([pd.read_csv(contents)], False)
actual = report._get_id_to_nb_of_different_allele_sequences()
expected = pd.read_csv(StringIO(
"""PVID,NB_DIFF_ALL_SEQ
0,1
1,3
2,10
3,2
"""), index_col="PVID")
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test____get_id_to_nb_of_samples(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,NB_OF_SAMPLES
S1,0,2,3
S2,1,0,4
S3,2,1,10
""")
report = RecallReport([pd.read_csv(contents)], False)
actual = report._get_id_to_nb_of_samples()
expected = pd.read_csv(StringIO(
"""PVID,NB_OF_SAMPLES
0,4
1,10
2,3
"""), index_col="PVID")
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test___get_proportion_of_allele_seqs_found_for_each_variant(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL
S1,1,2,0,10,True,0,0
S2,2,0,2,1,False,0,0
S3,3,1,1,3,True,0,0
S4,4,0,2,1,True,0,0
S5,5,1,1,3,False,0,0
S6,6,1,2,3,False,0,0
S7,7,2,1,10,True,0,0
S8,8,1,2,3,True,0,0
S1,9,2,2,10,True,0,0
S1,10,0,2,1,False,0,0
S1,11,2,3,10,True,0,0
S1,12,1,3,3,False,0,0
S1,13,2,4,10,False,0,0
S1,14,2,5,10,False,0,0
S1,15,2,6,10,False,0,0
S1,16,3,0,2,False,0,0
S1,17,3,1,2,False,0,0
""")
report = RecallReport([pd.read_csv(contents)], False)
# non binary
actual = report.get_proportion_of_allele_seqs_found_for_each_variant(binary=False)
expected = [1/1, 2/3, 4/10, 0/2]
assert actual == expected
# binary
actual = report.get_proportion_of_allele_seqs_found_for_each_variant(binary=True)
expected = [1, 0, 0, 0]
assert actual == expected
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test___get_proportion_of_allele_seqs_found_for_each_variant_with_nb_of_samples(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL,NB_OF_SAMPLES
S1,1,2,0,10,True,0,0,20
S2,2,0,2,1,False,0,0,1
S3,3,1,1,3,True,0,0,10
S4,4,0,2,1,True,0,0,1
S5,5,1,1,3,False,0,0,10
S6,6,1,2,3,False,0,0,10
S7,7,2,1,10,True,0,0,20
S8,8,1,2,3,True,0,0,10
S1,9,2,2,10,True,0,0,20
S1,10,0,2,1,False,0,0,1
S1,11,2,3,10,True,0,0,20
S1,12,1,3,3,False,0,0,10
S1,13,2,4,10,False,0,0,20
S1,14,2,5,10,False,0,0,20
S1,15,2,6,10,False,0,0,20
S1,16,3,0,2,False,0,0,30
S1,17,3,1,2,False,0,0,30
""")
report = RecallReport([pd.read_csv(contents)], False)
# non binary
actual = report.get_proportion_of_allele_seqs_found_for_each_variant_with_nb_of_samples(binary=False)
expected = pd.read_csv(StringIO(
"""PVID,proportion_of_allele_seqs_found,NB_OF_SAMPLES
0,1.0,1
1,0.6666666666666666,10
2,0.4,20
3,0.0,30
"""
), index_col="PVID")
assert actual.equals(expected)
# binary
actual = report.get_proportion_of_allele_seqs_found_for_each_variant_with_nb_of_samples(binary=True)
expected = pd.read_csv(StringIO(
"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES
0,1,1
1,0,10
2,0,20
3,0,30
"""
), index_col="PVID")
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test___get_proportion_of_alleles_found_for_each_variant_with_nb_of_samples(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL,NB_OF_SAMPLES
S1,1,2,0,10,True,4,5,20
S2,2,0,2,1,False,0,1,1
S3,3,1,1,3,True,5,10,10
S4,4,0,2,1,True,0,1,1
S5,5,1,1,3,False,4,10,10
S6,6,1,2,3,False,9,10,10
S7,7,2,1,10,True,3,5,20
S8,8,1,2,3,True,8,10,10
S1,9,2,2,10,True,2,5,20
S1,10,0,2,1,False,0,1,1
S1,11,2,3,10,True,1,5,20
S1,12,1,3,3,False,7,10,10
S1,13,2,4,10,True,3,5,20
S1,14,2,5,10,True,1,5,20
S1,15,2,6,10,True,2,5,20
S1,16,3,0,2,False,0,3,30
S1,17,3,1,2,False,0,3,30
""")
report = RecallReport([pd.read_csv(contents)], False)
actual = report.get_proportion_of_alleles_found_for_each_variant_with_nb_of_samples()
expected = pd.read_csv(StringIO(
"""PVID,proportion_of_alleles_found,NB_OF_SAMPLES
0,1.0,1
1,0.2,10
2,0.8,20
3,0.0,30
"""
), index_col="PVID")
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test___get_proportion_of_allele_seqs_found_for_each_variant___duplicated_evaluation_is_disregarded(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL
S1,1,0,0,5,True,0,0
S1,2,0,1,5,True,0,0
S1,3,0,0,5,True,0,0
S1,4,0,0,5,True,0,0
S1,5,0,1,5,True,0,0
""")
report = RecallReport([pd.read_csv(contents)], False)
actual = report.get_proportion_of_allele_seqs_found_for_each_variant(binary=False)
expected = [2 / 5]
assert actual == expected
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test___get_proportion_of_alleles_found_for_each_variant(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_ID,NB_ALL,good_eval,ALL_SEQ_ID,NB_DIFF_ALL_SEQ
S1,0,2,0,10,True,0,0
S2,1,0,2,1,False,0,0
S3,2,1,1,3,True,0,0
S4,3,0,2,1,True,0,0
S5,4,1,1,3,False,0,0
S6,5,1,2,3,False,0,0
S7,6,2,1,10,True,0,0
S8,7,1,2,3,True,0,0
S1,8,2,2,10,True,0,0
S1,9,0,2,1,False,0,0
S1,10,2,3,10,True,0,0
S1,11,1,3,3,False,0,0
S1,12,2,4,10,False,0,0
S1,13,2,5,10,False,0,0
S1,14,2,6,10,False,0,0
S1,15,3,0,2,False,0,0
S1,16,3,1,2,False,0,0
""")
report = RecallReport([pd.read_csv(contents)], False)
actual = report.get_proportion_of_alleles_found_for_each_variant()
expected = [1/1, 2/3, 4/10, 0/2]
assert actual == expected
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
def test___get_proportion_of_alleles_found_for_each_variant___duplicated_evaluation_is_disregarded(self, *mocks):
contents = StringIO(
"""sample,query_probe_header,PVID,ALL_ID,NB_ALL,good_eval,ALL_SEQ_ID,NB_DIFF_ALL_SEQ
S1,1,0,0,5,True,0,0
S1,2,0,1,5,True,0,0
S1,3,0,0,5,True,0,0
S1,4,0,0,5,True,0,0
S1,5,0,1,5,True,0,0
""")
report = RecallReport([pd.read_csv(contents)], False)
actual = report.get_proportion_of_alleles_found_for_each_variant()
expected = [2 / 5]
assert actual == expected
|
[
"evaluate.report.RecallReport",
"evaluate.report.PrecisionReport",
"evaluate.report.PrecisionReport.from_files",
"evaluate.report.RecallReport.from_files",
"evaluate.report.Report.get_value_from_header_fast"
] |
[((23973, 24045), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (23985, 24045), False, 'from unittest.mock import patch\n'), ((24742, 24814), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (24754, 24814), False, 'from unittest.mock import patch\n'), ((25529, 25601), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (25541, 25601), False, 'from unittest.mock import patch\n'), ((26148, 26220), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (26160, 26220), False, 'from unittest.mock import patch\n'), ((27218, 27290), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (27230, 27290), False, 'from unittest.mock import patch\n'), ((29057, 29129), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (29069, 29129), False, 'from unittest.mock import patch\n'), ((30438, 30510), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (30450, 30510), False, 'from unittest.mock import patch\n'), ((31157, 31229), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (31169, 31229), False, 'from unittest.mock import patch\n'), ((32012, 32084), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (32024, 32084), False, 'from unittest.mock import patch\n'), ((1018, 1097), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""FIELD_1=10;"""', '"""FIELD_1"""', 'int', '(-1)'], {'delim': '""";"""'}), "('FIELD_1=10;', 'FIELD_1', int, -1, delim=';')\n", (1051, 1097), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((1289, 1395), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;"""', '"""FIELD_1"""', 'int', '(-1)'], {'delim': '""";"""'}), "('DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;',\n 'FIELD_1', int, -1, delim=';')\n", (1322, 1395), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((1578, 1684), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""FIELD_1=10;DUMMY_1=asd;DUMMY_2=99;"""', '"""FIELD_1"""', 'int', '(-1)'], {'delim': '""";"""'}), "('FIELD_1=10;DUMMY_1=asd;DUMMY_2=99;',\n 'FIELD_1', int, -1, delim=';')\n", (1611, 1684), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((1865, 1971), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""DUMMY_1=asd;DUMMY_2=99;FIELD_1=10;"""', '"""FIELD_1"""', 'int', '(-1)'], {'delim': '""";"""'}), "('DUMMY_1=asd;DUMMY_2=99;FIELD_1=10;',\n 'FIELD_1', int, -1, delim=';')\n", (1898, 1971), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((2138, 2244), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;"""', '"""FIELD_2"""', 'int', '(-1)'], {'delim': '""";"""'}), "('DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;',\n 'FIELD_2', int, -1, delim=';')\n", (2171, 2244), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((4220, 4238), 'math.isnan', 'math.isnan', (['actual'], {}), '(actual)\n', (4230, 4238), False, 'import math\n'), ((5107, 5125), 'math.isnan', 'math.isnan', (['actual'], {}), '(actual)\n', (5117, 5125), False, 'import math\n'), ((6160, 6182), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[dfs]'], {}), '([dfs])\n', (6175, 6182), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((6242, 6280), 'pandas.Series', 'pd.Series', (['[100.0, 100.0, 10.0, 100.0]'], {}), '([100.0, 100.0, 10.0, 100.0])\n', (6251, 6280), True, 'import pandas as pd\n'), ((7481, 7508), 'tests.common.create_tmp_file', 'create_tmp_file', (['contents_1'], {}), '(contents_1)\n', (7496, 7508), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((7526, 7553), 'tests.common.create_tmp_file', 'create_tmp_file', (['contents_2'], {}), '(contents_2)\n', (7541, 7553), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((7582, 7602), 'io.StringIO', 'StringIO', (['contents_1'], {}), '(contents_1)\n', (7590, 7602), False, 'from io import StringIO\n'), ((7630, 7650), 'io.StringIO', 'StringIO', (['contents_2'], {}), '(contents_2)\n', (7638, 7650), False, 'from io import StringIO\n'), ((7854, 7898), 'evaluate.report.PrecisionReport.from_files', 'PrecisionReport.from_files', (['[path_1, path_2]'], {}), '([path_1, path_2])\n', (7880, 7898), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((7918, 7945), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['dataframes'], {}), '(dataframes)\n', (7933, 7945), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((9216, 9243), 'tests.common.create_tmp_file', 'create_tmp_file', (['contents_1'], {}), '(contents_1)\n', (9231, 9243), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((9261, 9288), 'tests.common.create_tmp_file', 'create_tmp_file', (['contents_2'], {}), '(contents_2)\n', (9276, 9288), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((9317, 9337), 'io.StringIO', 'StringIO', (['contents_1'], {}), '(contents_1)\n', (9325, 9337), False, 'from io import StringIO\n'), ((9365, 9385), 'io.StringIO', 'StringIO', (['contents_2'], {}), '(contents_2)\n', (9373, 9385), False, 'from io import StringIO\n'), ((9589, 9630), 'evaluate.report.RecallReport.from_files', 'RecallReport.from_files', (['[path_1, path_2]'], {}), '([path_1, path_2])\n', (9612, 9630), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((9650, 9674), 'evaluate.report.RecallReport', 'RecallReport', (['dataframes'], {}), '(dataframes)\n', (9662, 9674), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((10400, 10420), 'io.StringIO', 'StringIO', (['contents_1'], {}), '(contents_1)\n', (10408, 10420), False, 'from io import StringIO\n'), ((10524, 10548), 'evaluate.report.RecallReport', 'RecallReport', (['dataframes'], {}), '(dataframes)\n', (10536, 10548), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((12338, 12357), 'evaluate.report.RecallReport', 'RecallReport', (['[dfs]'], {}), '([dfs])\n', (12350, 12357), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((12548, 12603), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (12566, 12603), False, 'from pandas.testing import assert_frame_equal\n'), ((13542, 13561), 'evaluate.report.RecallReport', 'RecallReport', (['[dfs]'], {}), '([dfs])\n', (13554, 13561), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((13754, 13809), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (13772, 13809), False, 'from pandas.testing import assert_frame_equal\n'), ((14756, 14775), 'evaluate.report.RecallReport', 'RecallReport', (['[dfs]'], {}), '([dfs])\n', (14768, 14775), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((14972, 15027), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (14990, 15027), False, 'from pandas.testing import assert_frame_equal\n'), ((16240, 16259), 'evaluate.report.RecallReport', 'RecallReport', (['[dfs]'], {}), '([dfs])\n', (16252, 16259), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((16452, 16507), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (16470, 16507), False, 'from pandas.testing import assert_frame_equal\n'), ((17348, 17367), 'evaluate.report.RecallReport', 'RecallReport', (['[dfs]'], {}), '([dfs])\n', (17360, 17367), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((17560, 17615), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (17578, 17615), False, 'from pandas.testing import assert_frame_equal\n'), ((18598, 18617), 'evaluate.report.RecallReport', 'RecallReport', (['[dfs]'], {}), '([dfs])\n', (18610, 18617), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((18806, 18861), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (18824, 18861), False, 'from pandas.testing import assert_frame_equal\n'), ((20124, 20156), 'evaluate.report.RecallReport', 'RecallReport', (['[df_1, df_2, df_3]'], {}), '([df_1, df_2, df_3])\n', (20136, 20156), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((20347, 20402), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (20365, 20402), False, 'from pandas.testing import assert_frame_equal\n'), ((21286, 21303), 'evaluate.report.RecallReport', 'RecallReport', (['dfs'], {}), '(dfs)\n', (21298, 21303), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((21496, 21551), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (21514, 21551), False, 'from pandas.testing import assert_frame_equal\n'), ((22790, 22887), 'evaluate.report.RecallReport', 'RecallReport', (['[df_1, df_2, df_3]'], {'concatenate_dfs_one_by_one_keeping_only_best_mappings': '(False)'}), '([df_1, df_2, df_3],\n concatenate_dfs_one_by_one_keeping_only_best_mappings=False)\n', (22802, 22887), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((23910, 23965), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {'check_dtype': '(False)'}), '(actual, expected, check_dtype=False)\n', (23928, 23965), False, 'from pandas.testing import assert_frame_equal\n'), ((24135, 24468), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_SEQ_ID,good_eval\nS1,0,2,0,True\nS2,1,0,2,False\nS3,2,1,1,True\nS4,3,0,2,True\nS5,4,1,1,False\nS6,5,1,2,False\nS7,6,2,1,True\nS8,7,1,2,True\nS1,8,2,2,True\nS1,9,0,2,False\nS1,10,2,3,True\nS1,11,1,3,False\nS1,12,2,4,False\nS1,13,2,5,False\nS1,14,2,6,False\nS1,15,3,0,False\nS1,16,3,1,False\n"""'], {}), '(\n """sample,query_probe_header,PVID,ALL_SEQ_ID,good_eval\nS1,0,2,0,True\nS2,1,0,2,False\nS3,2,1,1,True\nS4,3,0,2,True\nS5,4,1,1,False\nS6,5,1,2,False\nS7,6,2,1,True\nS8,7,1,2,True\nS1,8,2,2,True\nS1,9,0,2,False\nS1,10,2,3,True\nS1,11,1,3,False\nS1,12,2,4,False\nS1,13,2,5,False\nS1,14,2,6,False\nS1,15,3,0,False\nS1,16,3,1,False\n"""\n )\n', (24143, 24468), False, 'from io import StringIO\n'), ((24908, 25253), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,NB_DIFF_ALL_SEQ,good_eval\nS1,0,2,10,True\nS2,1,0,1,False\nS3,2,1,3,True\nS4,3,0,1,True\nS5,4,1,3,False\nS6,5,1,3,False\nS7,6,2,10,True\nS8,7,1,3,True\nS1,8,2,10,True\nS1,9,0,1,False\nS1,10,2,10,True\nS1,11,1,3,False\nS1,12,2,10,False\nS1,13,2,10,False\nS1,14,2,10,False\nS1,15,3,2,False\nS1,16,3,2,False\n"""'], {}), '(\n """sample,query_probe_header,PVID,NB_DIFF_ALL_SEQ,good_eval\nS1,0,2,10,True\nS2,1,0,1,False\nS3,2,1,3,True\nS4,3,0,1,True\nS5,4,1,3,False\nS6,5,1,3,False\nS7,6,2,10,True\nS8,7,1,3,True\nS1,8,2,10,True\nS1,9,0,1,False\nS1,10,2,10,True\nS1,11,1,3,False\nS1,12,2,10,False\nS1,13,2,10,False\nS1,14,2,10,False\nS1,15,3,2,False\nS1,16,3,2,False\n"""\n )\n', (24916, 25253), False, 'from io import StringIO\n'), ((25676, 25823), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,NB_OF_SAMPLES\n                  S1,0,2,3\n                  S2,1,0,4\n                  S3,2,1,10\n                  """'], {}), '(\n    """sample,query_probe_header,PVID,NB_OF_SAMPLES\n                  S1,0,2,3\n                  S2,1,0,4\n                  S3,2,1,10\n                  """\n    )\n', (25684, 25823), False, 'from io import StringIO\n'), ((26323, 26796), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL\nS1,1,2,0,10,True,0,0\nS2,2,0,2,1,False,0,0\nS3,3,1,1,3,True,0,0\nS4,4,0,2,1,True,0,0\nS5,5,1,1,3,False,0,0\nS6,6,1,2,3,False,0,0\nS7,7,2,1,10,True,0,0\nS8,8,1,2,3,True,0,0\nS1,9,2,2,10,True,0,0\nS1,10,0,2,1,False,0,0\nS1,11,2,3,10,True,0,0\nS1,12,1,3,3,False,0,0\nS1,13,2,4,10,False,0,0\nS1,14,2,5,10,False,0,0\nS1,15,2,6,10,False,0,0\nS1,16,3,0,2,False,0,0\nS1,17,3,1,2,False,0,0\n"""'], {}), '(\n    """sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL\nS1,1,2,0,10,True,0,0\nS2,2,0,2,1,False,0,0\nS3,3,1,1,3,True,0,0\nS4,4,0,2,1,True,0,0\nS5,5,1,1,3,False,0,0\nS6,6,1,2,3,False,0,0\nS7,7,2,1,10,True,0,0\nS8,8,1,2,3,True,0,0\nS1,9,2,2,10,True,0,0\nS1,10,0,2,1,False,0,0\nS1,11,2,3,10,True,0,0\nS1,12,1,3,3,False,0,0\nS1,13,2,4,10,False,0,0\nS1,14,2,5,10,False,0,0\nS1,15,2,6,10,False,0,0\nS1,16,3,0,2,False,0,0\nS1,17,3,1,2,False,0,0\n"""\n    )\n', (26331, 26796), False, 'from io import StringIO\n'), ((27412, 28163), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL,NB_OF_SAMPLES\n                  S1,1,2,0,10,True,0,0,20\n                  S2,2,0,2,1,False,0,0,1\n                  S3,3,1,1,3,True,0,0,10\n                  S4,4,0,2,1,True,0,0,1\n                  S5,5,1,1,3,False,0,0,10\n                  S6,6,1,2,3,False,0,0,10\n                  S7,7,2,1,10,True,0,0,20\n                  S8,8,1,2,3,True,0,0,10\n                  S1,9,2,2,10,True,0,0,20\n                  S1,10,0,2,1,False,0,0,1\n                  S1,11,2,3,10,True,0,0,20\n                  S1,12,1,3,3,False,0,0,10\n                  S1,13,2,4,10,False,0,0,20\n                  S1,14,2,5,10,False,0,0,20\n                  S1,15,2,6,10,False,0,0,20\n                  S1,16,3,0,2,False,0,0,30\n                  S1,17,3,1,2,False,0,0,30\n                  """'], {}), '(\n    """sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL,NB_OF_SAMPLES\n                  S1,1,2,0,10,True,0,0,20\n                  S2,2,0,2,1,False,0,0,1\n                  S3,3,1,1,3,True,0,0,10\n                  S4,4,0,2,1,True,0,0,1\n                  S5,5,1,1,3,False,0,0,10\n                  S6,6,1,2,3,False,0,0,10\n                  S7,7,2,1,10,True,0,0,20\n                  S8,8,1,2,3,True,0,0,10\n                  S1,9,2,2,10,True,0,0,20\n                  S1,10,0,2,1,False,0,0,1\n                  S1,11,2,3,10,True,0,0,20\n                  S1,12,1,3,3,False,0,0,10\n                  S1,13,2,4,10,False,0,0,20\n                  S1,14,2,5,10,False,0,0,20\n                  S1,15,2,6,10,False,0,0,20\n                  S1,16,3,0,2,False,0,0,30\n                  S1,17,3,1,2,False,0,0,30\n                  """\n    )\n', (27420, 28163), False, 'from io import StringIO\n'), ((29247, 30000), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL,NB_OF_SAMPLES\n                  S1,1,2,0,10,True,4,5,20\n                  S2,2,0,2,1,False,0,1,1\n                  S3,3,1,1,3,True,5,10,10\n                  S4,4,0,2,1,True,0,1,1\n                  S5,5,1,1,3,False,4,10,10\n                  S6,6,1,2,3,False,9,10,10\n                  S7,7,2,1,10,True,3,5,20\n                  S8,8,1,2,3,True,8,10,10\n                  S1,9,2,2,10,True,2,5,20\n                  S1,10,0,2,1,False,0,1,1\n                  S1,11,2,3,10,True,1,5,20\n                  S1,12,1,3,3,False,7,10,10\n                  S1,13,2,4,10,True,3,5,20\n                  S1,14,2,5,10,True,1,5,20\n                  S1,15,2,6,10,True,2,5,20\n                  S1,16,3,0,2,False,0,3,30\n                  S1,17,3,1,2,False,0,3,30\n                  """'], {}), '(\n    """sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL,NB_OF_SAMPLES\n                  S1,1,2,0,10,True,4,5,20\n                  S2,2,0,2,1,False,0,1,1\n                  S3,3,1,1,3,True,5,10,10\n                  S4,4,0,2,1,True,0,1,1\n                  S5,5,1,1,3,False,4,10,10\n                  S6,6,1,2,3,False,9,10,10\n                  S7,7,2,1,10,True,3,5,20\n                  S8,8,1,2,3,True,8,10,10\n                  S1,9,2,2,10,True,2,5,20\n                  S1,10,0,2,1,False,0,1,1\n                  S1,11,2,3,10,True,1,5,20\n                  S1,12,1,3,3,False,7,10,10\n                  S1,13,2,4,10,True,3,5,20\n                  S1,14,2,5,10,True,1,5,20\n                  S1,15,2,6,10,True,2,5,20\n                  S1,16,3,0,2,False,0,3,30\n                  S1,17,3,1,2,False,0,3,30\n                  """\n    )\n', (29255, 30000), False, 'from io import StringIO\n'), ((30652, 30932), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL\n                  S1,1,0,0,5,True,0,0\n                  S1,2,0,1,5,True,0,0\n                  S1,3,0,0,5,True,0,0\n                  S1,4,0,0,5,True,0,0\n                  S1,5,0,1,5,True,0,0\n                  """'], {}), '(\n    """sample,query_probe_header,PVID,ALL_SEQ_ID,NB_DIFF_ALL_SEQ,good_eval,ALL_ID,NB_ALL\n                  S1,1,0,0,5,True,0,0\n                  S1,2,0,1,5,True,0,0\n                  S1,3,0,0,5,True,0,0\n                  S1,4,0,0,5,True,0,0\n                  S1,5,0,1,5,True,0,0\n                  """\n    )\n', (30660, 30932), False, 'from io import StringIO\n'), ((31328, 31800), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_ID,NB_ALL,good_eval,ALL_SEQ_ID,NB_DIFF_ALL_SEQ\nS1,0,2,0,10,True,0,0\nS2,1,0,2,1,False,0,0\nS3,2,1,1,3,True,0,0\nS4,3,0,2,1,True,0,0\nS5,4,1,1,3,False,0,0\nS6,5,1,2,3,False,0,0\nS7,6,2,1,10,True,0,0\nS8,7,1,2,3,True,0,0\nS1,8,2,2,10,True,0,0\nS1,9,0,2,1,False,0,0\nS1,10,2,3,10,True,0,0\nS1,11,1,3,3,False,0,0\nS1,12,2,4,10,False,0,0\nS1,13,2,5,10,False,0,0\nS1,14,2,6,10,False,0,0\nS1,15,3,0,2,False,0,0\nS1,16,3,1,2,False,0,0\n"""'], {}), '(\n    """sample,query_probe_header,PVID,ALL_ID,NB_ALL,good_eval,ALL_SEQ_ID,NB_DIFF_ALL_SEQ\nS1,0,2,0,10,True,0,0\nS2,1,0,2,1,False,0,0\nS3,2,1,1,3,True,0,0\nS4,3,0,2,1,True,0,0\nS5,4,1,1,3,False,0,0\nS6,5,1,2,3,False,0,0\nS7,6,2,1,10,True,0,0\nS8,7,1,2,3,True,0,0\nS1,8,2,2,10,True,0,0\nS1,9,0,2,1,False,0,0\nS1,10,2,3,10,True,0,0\nS1,11,1,3,3,False,0,0\nS1,12,2,4,10,False,0,0\nS1,13,2,5,10,False,0,0\nS1,14,2,6,10,False,0,0\nS1,15,3,0,2,False,0,0\nS1,16,3,1,2,False,0,0\n"""\n    )\n', (31336, 31800), False, 'from io import StringIO\n'), ((32222, 32502), 'io.StringIO', 'StringIO', (['"""sample,query_probe_header,PVID,ALL_ID,NB_ALL,good_eval,ALL_SEQ_ID,NB_DIFF_ALL_SEQ\n                  S1,1,0,0,5,True,0,0\n                  S1,2,0,1,5,True,0,0\n                  S1,3,0,0,5,True,0,0\n                  S1,4,0,0,5,True,0,0\n                  S1,5,0,1,5,True,0,0\n                  """'], {}), '(\n    """sample,query_probe_header,PVID,ALL_ID,NB_ALL,good_eval,ALL_SEQ_ID,NB_DIFF_ALL_SEQ\n                  S1,1,0,0,5,True,0,0\n                  S1,2,0,1,5,True,0,0\n                  S1,3,0,0,5,True,0,0\n                  S1,4,0,0,5,True,0,0\n                  S1,5,0,1,5,True,0,0\n                  """\n    )\n', (32230, 32502), False, 'from io import StringIO\n'), ((2426, 2468), 'pytest.raises', 'pytest.raises', (['ReturnTypeDoesNotMatchError'], {}), '(ReturnTypeDoesNotMatchError)\n', (2439, 2468), False, 'import pytest\n'), ((2482, 2589), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;"""', '"""FIELD_1"""', 'int', '(-1)'], {'delim': '""";"""'}), "('DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;',\n 'FIELD_1', int, -1, delim=';')\n", (2515, 2589), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((2685, 2718), 'pytest.raises', 'pytest.raises', (['DelimNotFoundError'], {}), '(DelimNotFoundError)\n', (2698, 2718), False, 'import pytest\n'), ((2732, 2839), 'evaluate.report.Report.get_value_from_header_fast', 'Report.get_value_from_header_fast', (['"""DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;"""', '"""FIELD_1"""', 'int', '(-1)'], {'delim': '"""~"""'}), "('DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;',\n 'FIELD_1', int, -1, delim='~')\n", (2765, 2839), False, 'from evaluate.report import PrecisionReport, RecallReport, Report, DelimNotFoundError, ReturnTypeDoesNotMatchError\n'), ((3970, 3993), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (3983, 3993), False, 'import pytest\n'), ((4857, 4880), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (4870, 4880), False, 'import pytest\n'), ((7686, 7748), 'pandas.read_csv', 'pd.read_csv', (['contents_1_input'], {'sep': '"""\t"""', 'keep_default_na': '(False)'}), "(contents_1_input, sep='\\t', keep_default_na=False)\n", (7697, 7748), True, 'import pandas as pd\n'), ((7762, 7824), 'pandas.read_csv', 'pd.read_csv', (['contents_2_input'], {'sep': '"""\t"""', 'keep_default_na': '(False)'}), "(contents_2_input, sep='\\t', keep_default_na=False)\n", (7773, 7824), True, 'import pandas as pd\n'), ((9421, 9483), 'pandas.read_csv', 'pd.read_csv', (['contents_1_input'], {'sep': '"""\t"""', 'keep_default_na': '(False)'}), "(contents_1_input, sep='\\t', keep_default_na=False)\n", (9432, 9483), True, 'import pandas as pd\n'), ((9497, 9559), 'pandas.read_csv', 'pd.read_csv', (['contents_2_input'], {'sep': '"""\t"""', 'keep_default_na': '(False)'}), "(contents_2_input, sep='\\t', keep_default_na=False)\n", (9508, 9559), True, 'import pandas as pd\n'), ((10443, 10505), 'pandas.read_csv', 'pd.read_csv', (['contents_1_input'], {'sep': '"""\t"""', 'keep_default_na': '(False)'}), "(contents_1_input, sep='\\t', keep_default_na=False)\n", (10454, 10505), True, 'import pandas as pd\n'), ((10617, 11353), 'io.StringIO', 'StringIO', (['"""sample\tquery_probe_header\tref_probe_header\tclassification\tGT_CONF\tPVID\tNB_ALL\tALL_ID\tNB_DIFF_ALL_SEQ\tALL_SEQ_ID\tNB_OF_SAMPLES\tgood_eval\nCFT073\t>CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10;\t>GT_CONF=1;\tunmapped\t1.0\t1\t1\t1\t1\t1\t10\tFalse\nCFT073\t>CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20;\t>CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133;\tprimary_correct\t60.1133\t2\t2\t2\t2\t2\t20\tTrue\nCFT073\t>CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30;\t>GT_CONF=3;\tunmapped\t3.0\t3\t3\t3\t3\t3\t30\tFalse\n"""'], {}), '(\n    """sample\tquery_probe_header\tref_probe_header\tclassification\tGT_CONF\tPVID\tNB_ALL\tALL_ID\tNB_DIFF_ALL_SEQ\tALL_SEQ_ID\tNB_OF_SAMPLES\tgood_eval\nCFT073\t>CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10;\t>GT_CONF=1;\tunmapped\t1.0\t1\t1\t1\t1\t1\t10\tFalse\nCFT073\t>CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20;\t>CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133;\tprimary_correct\t60.1133\t2\t2\t2\t2\t2\t20\tTrue\nCFT073\t>CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30;\t>GT_CONF=3;\tunmapped\t3.0\t3\t3\t3\t3\t3\t30\tFalse\n"""\n    )\n', (10625, 11353), False, 'from io import StringIO\n'), ((24615, 24675), 'io.StringIO', 'StringIO', (['"""PVID,NB_OF_ALL_SEQ_ID_FOUND\n0,1\n1,2\n2,4\n3,0\n"""'], {}), '("""PVID,NB_OF_ALL_SEQ_ID_FOUND\n0,1\n1,2\n2,4\n3,0\n""")\n', (24623, 24675), False, 'from io import StringIO\n'), ((25408, 25462), 'io.StringIO', 'StringIO', (['"""PVID,NB_DIFF_ALL_SEQ\n0,1\n1,3\n2,10\n3,2\n"""'], {}), '("""PVID,NB_DIFF_ALL_SEQ\n0,1\n1,3\n2,10\n3,2\n""")\n', (25416, 25462), False, 'from io import StringIO\n'), ((25971, 26077), 'io.StringIO', 'StringIO', (['"""PVID,NB_OF_SAMPLES\n                                 0,4\n                                 1,10\n                                 2,3\n                                 """'], {}), '(\n    """PVID,NB_OF_SAMPLES\n                                 0,4\n                                 1,10\n                                 2,3\n                                 """\n    )\n', (25979, 26077), False, 'from io import StringIO\n'), ((28392, 28579), 'io.StringIO', 'StringIO', (['"""PVID,proportion_of_allele_seqs_found,NB_OF_SAMPLES\n                0,1.0,1\n                1,0.6666666666666666,10\n                2,0.4,20\n                3,0.0,30\n                """'], {}), '(\n    """PVID,proportion_of_allele_seqs_found,NB_OF_SAMPLES\n                0,1.0,1\n                1,0.6666666666666666,10\n                2,0.4,20\n                3,0.0,30\n                """\n    )\n', (28400, 28579), False, 'from io import StringIO\n'), ((28808, 28979), 'io.StringIO', 'StringIO', (['"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES\n                0,1,1\n                1,0,10\n                2,0,20\n                3,0,30\n                """'], {}), '(\n    """PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES\n                0,1,1\n                1,0,10\n                2,0,20\n                3,0,30\n                """\n    )\n', (28816, 28979), False, 'from io import StringIO\n'), ((30192, 30360), 'io.StringIO', 'StringIO', (['"""PVID,proportion_of_alleles_found,NB_OF_SAMPLES\n                0,1.0,1\n                1,0.2,10\n                2,0.8,20\n                3,0.0,30\n                """'], {}), '(\n    """PVID,proportion_of_alleles_found,NB_OF_SAMPLES\n                0,1.0,1\n                1,0.2,10\n                2,0.8,20\n                3,0.0,30\n                """\n    )\n', (30200, 30360), False, 'from io import StringIO\n'), ((3940, 3954), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3952, 3954), True, 'import pandas as pd\n'), ((4121, 4155), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'GT_CONF': []}"}), "(data={'GT_CONF': []})\n", (4133, 4155), True, 'import pandas as pd\n'), ((4333, 4370), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'GT_CONF': [1.5]}"}), "(data={'GT_CONF': [1.5]})\n", (4345, 4370), True, 'import pandas as pd\n'), ((4575, 4623), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'GT_CONF': [1.5, 10.5, 5.0]}"}), "(data={'GT_CONF': [1.5, 10.5, 5.0]})\n", (4587, 4623), True, 'import pandas as pd\n'), ((4827, 4841), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4839, 4841), True, 'import pandas as pd\n'), ((5008, 5042), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'GT_CONF': []}"}), "(data={'GT_CONF': []})\n", (5020, 5042), True, 'import pandas as pd\n'), ((5220, 5257), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'GT_CONF': [1.5]}"}), "(data={'GT_CONF': [1.5]})\n", (5232, 5257), True, 'import pandas as pd\n'), ((5462, 5510), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'GT_CONF': [10.5, 5.0, 0.2]}"}), "(data={'GT_CONF': [10.5, 5.0, 0.2]})\n", (5474, 5510), True, 'import pandas as pd\n'), ((24491, 24512), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (24502, 24512), True, 'import pandas as pd\n'), ((25276, 25297), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (25287, 25297), True, 'import pandas as pd\n'), ((25858, 25879), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (25869, 25879), True, 'import pandas as pd\n'), ((26819, 26840), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (26830, 26840), True, 'import pandas as pd\n'), ((28198, 28219), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (28209, 28219), True, 'import pandas as pd\n'), ((30035, 30056), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (30046, 30056), True, 'import pandas as pd\n'), ((30967, 30988), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (30978, 30988), True, 'import pandas as pd\n'), ((31823, 31844), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (31834, 31844), True, 'import pandas as pd\n'), ((32537, 32558), 'pandas.read_csv', 'pd.read_csv', (['contents'], {}), '(contents)\n', (32548, 32558), True, 'import pandas as pd\n'), ((573, 612), 'io.StringIO', 'StringIO', (['"""id,GT_CONF\n0,2\n1,1\n2,3\n"""'], {}), '("""id,GT_CONF\n0,2\n1,1\n2,3\n""")\n', (581, 612), False, 'from io import StringIO\n'), ((640, 679), 'io.StringIO', 'StringIO', (['"""id,GT_CONF\n4,3\n5,1\n6,2\n"""'], {}), '("""id,GT_CONF\n4,3\n5,1\n6,2\n""")\n', (648, 679), False, 'from io import StringIO\n'), ((830, 873), 'io.StringIO', 'StringIO', (['"""id,GT_CONF\n0,2\n2,3\n4,3\n6,2\n"""'], {}), '("""id,GT_CONF\n0,2\n2,3\n4,3\n6,2\n""")\n', (838, 873), False, 'from io import StringIO\n'), ((2935, 3003), 'io.StringIO', 'StringIO', (['"""id,header\n1,SEQ=ACGT;LEN=4;\n2,SEQ=TG;LEN=2;\n3,dummy\n"""'], {}), '("""id,header\n1,SEQ=ACGT;LEN=4;\n2,SEQ=TG;LEN=2;\n3,dummy\n""")\n', (2943, 3003), False, 'from io import StringIO\n'), ((3202, 3304), 'io.StringIO', 'StringIO', (['"""id,header,SEQ,LEN\n1,SEQ=ACGT;LEN=4;,ACGT,4\n2,SEQ=TG;LEN=2;,TG,2\n3,dummy,A,1\n"""'], {}), '(\n    """id,header,SEQ,LEN\n1,SEQ=ACGT;LEN=4;,ACGT,4\n2,SEQ=TG;LEN=2;,TG,2\n3,dummy,A,1\n"""\n    )\n', (3210, 3304), False, 'from io import StringIO\n'), ((3437, 3554), 'io.StringIO', 'StringIO', (['"""classification\nprimary_correct\nwhatever\nsecondary_correct\ndummy\nsupplementary_correct\nwoot\n"""'], {}), '(\n    """classification\nprimary_correct\nwhatever\nsecondary_correct\ndummy\nsupplementary_correct\nwoot\n"""\n    )\n', (3445, 3554), False, 'from io import StringIO\n'), ((3651, 3811), 'io.StringIO', 'StringIO', (['"""classification,good_eval\nprimary_correct,True\nwhatever,False\nsecondary_correct,True\ndummy,False\nsupplementary_correct,True\nwoot,False\n"""'], {}), '(\n    """classification,good_eval\nprimary_correct,True\nwhatever,False\nsecondary_correct,True\ndummy,False\nsupplementary_correct,True\nwoot,False\n"""\n    )\n', (3659, 3811), False, 'from io import StringIO\n'), ((5854, 5899), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.0)'], {'gt_conf': '(100)'}), '(0.0, gt_conf=100)\n', (5881, 5899), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((5917, 5962), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.0)'], {'gt_conf': '(100)'}), '(0.0, gt_conf=100)\n', (5944, 5962), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((5980, 6024), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.0)'], {'gt_conf': '(10)'}), '(0.0, gt_conf=10)\n', (6007, 6024), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((6042, 6087), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.0)'], {'gt_conf': '(100)'}), '(0.0, gt_conf=100)\n', (6069, 6087), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((11535, 11642), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (11559, 11642), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((11656, 11772), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (11680, 11772), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((11785, 11902), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (11809, 11902), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((11915, 12034), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (11939, 12034), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((12047, 12170), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (12071, 12170), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((12183, 12298), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (12207, 12298), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((12427, 12542), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (12451, 12542), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((12737, 12844), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (12761, 12844), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((12858, 12974), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (12882, 12974), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((12987, 13104), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (13011, 13104), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((13117, 13236), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (13141, 13236), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((13249, 13372), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (13273, 13372), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((13385, 13502), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (13409, 13502), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((13631, 13748), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (13655, 13748), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((13947, 14054), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (13971, 14054), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((14068, 14184), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (14092, 14184), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((14197, 14314), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (14221, 14314), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((14327, 14446), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (14351, 14446), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((14459, 14582), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (14483, 14582), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((14595, 14716), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (14619, 14716), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((14845, 14966), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (14869, 14966), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15172, 15279), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (15196, 15279), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15293, 15409), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (15317, 15409), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15422, 15539), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (15446, 15539), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15552, 15671), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (15576, 15671), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15684, 15807), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (15708, 15807), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15820, 15935), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (15844, 15935), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((15948, 16065), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_CORRECT'], {'gt_conf': '(200)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_CORRECT, gt_conf=200, with_gt_conf=True)\n", (15972, 16065), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((16078, 16199), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_CORRECT'], {'gt_conf': '(150)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_CORRECT, gt_conf=150, with_gt_conf=True)\n", (16102, 16199), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((16329, 16446), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_CORRECT'], {'gt_conf': '(200)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_CORRECT, gt_conf=200, with_gt_conf=True)\n", (16353, 16446), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((16673, 16780), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (16697, 16780), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((16794, 16910), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(140)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True)\n", (16818, 16910), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((16923, 17040), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(150)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)\n", (16947, 17040), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((17053, 17172), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(110)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True)\n", (17077, 17172), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((17185, 17308), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(120)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True)\n", (17209, 17308), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((17437, 17554), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(150)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)\n", (17461, 17554), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((17797, 17904), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (17821, 17904), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((17918, 18034), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(140)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True)\n", (17942, 18034), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((18047, 18164), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(150)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)\n", (18071, 18164), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((18177, 18296), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(110)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True)\n", (18201, 18296), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((18309, 18432), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(120)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True)\n", (18333, 18432), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((18445, 18558), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(1)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=1, with_gt_conf=True)\n", (18469, 18558), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((18687, 18800), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(1)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=1, with_gt_conf=True)\n", (18711, 18800), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((19011, 19118), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (19035, 19118), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((19132, 19248), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (19156, 19248), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((19334, 19451), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (19358, 19451), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((19505, 19624), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (19529, 19624), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((19751, 19874), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (19775, 19874), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((19928, 20043), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (19952, 20043), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((20226, 20341), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (20250, 20341), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((21373, 21490), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(150)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)\n", (21397, 21490), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((21677, 21784), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (21701, 21784), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((21798, 21914), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_2"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_2', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (21822, 21914), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((22000, 22117), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_3"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_3', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (22024, 22117), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((22171, 22290), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_4"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_4', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (22195, 22290), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((22417, 22540), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_5"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_5', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (22441, 22540), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((22594, 22709), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_6"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_6', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (22618, 22709), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((22966, 23073), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (22990, 23073), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((23083, 23199), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_2"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_2', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True)\n", (23107, 23199), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((23245, 23362), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_3"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_3', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (23269, 23362), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((23408, 23527), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_4"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_4', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (23432, 23527), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((23573, 23696), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_5"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_5', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True)\n", (23597, 23696), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((23742, 23857), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_6"""', 'AlignmentAssessment.PRIMARY_CORRECT'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_6', AlignmentAssessment.\n    PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)\n", (23766, 23857), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((20555, 20662), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.UNMAPPED'], {'gt_conf': '(100)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.UNMAPPED,\n    gt_conf=100, with_gt_conf=True)\n", (20579, 20662), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((20696, 20812), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PARTIALLY_MAPPED'], {'gt_conf': '(140)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True)\n", (20720, 20812), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((20845, 20962), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.PRIMARY_INCORRECT'], {'gt_conf': '(150)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)\n", (20869, 20962), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((20995, 21114), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SECONDARY_INCORRECT'], {'gt_conf': '(110)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True)\n", (21019, 21114), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n'), ((21147, 21270), 'tests.common.create_recall_report_row', 'create_recall_report_row', (['"""truth_probe_1"""', 'AlignmentAssessment.SUPPLEMENTARY_INCORRECT'], {'gt_conf': '(120)', 'with_gt_conf': '(True)'}), "('truth_probe_1', AlignmentAssessment.\n    SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True)\n", (21171, 21270), False, 'from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row\n')]
|
#!/usr/bin/env python
import math
import config
import configspark
import ml_parse
import evaluate
sc = configspark.SPARK_CONTEXT
def clean():
config.clean_path(config.ML_MODEL)
def main():
clean()
ratings_train_text = sc.textFile(config.ML_RATINGS_TRAIN)
ratings_train = (
ratings_train_text
.map(ml_parse.parse_line)
.map(ml_parse.rating_convert))
ratings_validation_text = sc.textFile(config.ML_RATINGS_VALIDATION)
ratings_validation = (
ratings_validation_text
.map(ml_parse.parse_line)
.map(ml_parse.rating_convert))
ratings_train.cache()
ratings_validation.cache()
best_result = evaluate.evaluate(ratings_train, ratings_validation,
config.ML_RESULTS_FILE)
with open(config.ML_BEST_PARAMS_FILE, "w") as outfile:
outfile.write("%s,%s\n" % ("rank", "lambda"))
outfile.write("%s,%s" % (
best_result.get("rank"), best_result.get("lambda")))
best_model = best_result.get("model")
best_model.save(sc, config.ML_MODEL)
sc.stop()
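# Hedged sketch of the kind of search evaluate.evaluate could perform (its
# source is not shown here; only the "rank"/"lambda"/"model" keys used above
# are known). It trains ALS models over a small grid and keeps the one with
# the lowest validation RMSE.
def _grid_search_sketch(train_rdd, validation_rdd, ranks=(8, 12), lambdas=(0.01, 0.1)):
    from pyspark.mllib.recommendation import ALS
    validation_kv = validation_rdd.map(lambda r: ((r.user, r.product), r.rating))
    best = {"rmse": float("inf")}
    for rank in ranks:
        for lmbda in lambdas:
            model = ALS.train(train_rdd, rank, iterations=10, lambda_=lmbda)
            predictions = (
                model.predictAll(validation_rdd.map(lambda r: (r.user, r.product)))
                .map(lambda r: ((r.user, r.product), r.rating)))
            mse = (validation_kv.join(predictions).values()
                   .map(lambda pair: (pair[0] - pair[1]) ** 2).mean())
            rmse = math.sqrt(mse)
            if rmse < best["rmse"]:
                best = {"rmse": rmse, "rank": rank,
                        "lambda": lmbda, "model": model}
    return best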
if __name__ == "__main__":
main()
|
[
"evaluate.evaluate"
] |
[((151, 185), 'config.clean_path', 'config.clean_path', (['config.ML_MODEL'], {}), '(config.ML_MODEL)\n', (168, 185), False, 'import config\n'), ((679, 755), 'evaluate.evaluate', 'evaluate.evaluate', (['ratings_train', 'ratings_validation', 'config.ML_RESULTS_FILE'], {}), '(ratings_train, ratings_validation, config.ML_RESULTS_FILE)\n', (696, 755), False, 'import evaluate\n')]
|
""" Evaluate trained models and print errors on test and dev datasets """
import logging
import os
from os.path import basename
from evaluate import evaluate
import config
def run_evaluate():
"""
Run evaluation on 2 models:
* baseline
* baseline + lexical
    Save evaluation and error analysis data to the reports directory.
"""
checkpoints = [
config.BSL_MODEL_CKPT,
config.LEX_MODEL_CKPT,
]
datasets = [
config.DEV_FEATURES,
config.TEST_CIRCLE_FEATURES,
config.TEST_CARDBOARD_FEATURES,
]
reports_dir = config.REPORTS_DIR
logging.info(f"Reports directory: {reports_dir}")
os.makedirs(reports_dir, exist_ok=True)
for ckpt in checkpoints:
for dataset in datasets:
lex = "_lex" in ckpt
error_analysis_file = f'{reports_dir}/{basename(dataset)}.{"lex." if lex else ""}error_analysis.txt'
metrics_file = (
f'{reports_dir}/{basename(dataset)}.{"lex." if lex else ""}metrics.txt'
)
logging.info(
f"\nModel: {ckpt}\nDataset: {dataset}\n-> Error Analysis: {error_analysis_file}\n-> Metrics: {metrics_file}"
)
evaluate(
ckpt=ckpt,
dataset_file=dataset,
error_analysis_fname=error_analysis_file,
classification_metrics_fname=metrics_file,
)
if __name__ == "__main__":
logging.basicConfig(level=config.LOG_LEVEL, format="%(asctime)s: %(message)s")
run_evaluate()
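# Example of the report paths produced above for a hypothetical dataset file
# "dev.json" evaluated with the lexical checkpoint ("_lex" in its path):
#   <reports_dir>/dev.json.lex.error_analysis.txt
#   <reports_dir>/dev.json.lex.metrics.txt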
|
[
"evaluate.evaluate"
] |
[((617, 666), 'logging.info', 'logging.info', (['f"""Reports directory: {reports_dir}"""'], {}), "(f'Reports directory: {reports_dir}')\n", (629, 666), False, 'import logging\n'), ((671, 710), 'os.makedirs', 'os.makedirs', (['reports_dir'], {'exist_ok': '(True)'}), '(reports_dir, exist_ok=True)\n', (682, 710), False, 'import os\n'), ((1468, 1546), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'config.LOG_LEVEL', 'format': '"""%(asctime)s: %(message)s"""'}), "(level=config.LOG_LEVEL, format='%(asctime)s: %(message)s')\n", (1487, 1546), False, 'import logging\n'), ((1063, 1195), 'logging.info', 'logging.info', (['f"""\nModel: {ckpt}\nDataset: {dataset}\n-> Error Analysis: {error_analysis_file}\n-> Metrics: {metrics_file}"""'], {}), '(\n f"""\nModel: {ckpt}\nDataset: {dataset}\n-> Error Analysis: {error_analysis_file}\n-> Metrics: {metrics_file}"""\n )\n', (1075, 1195), False, 'import logging\n'), ((1229, 1360), 'evaluate.evaluate', 'evaluate', ([], {'ckpt': 'ckpt', 'dataset_file': 'dataset', 'error_analysis_fname': 'error_analysis_file', 'classification_metrics_fname': 'metrics_file'}), '(ckpt=ckpt, dataset_file=dataset, error_analysis_fname=\n error_analysis_file, classification_metrics_fname=metrics_file)\n', (1237, 1360), False, 'from evaluate import evaluate\n'), ((858, 875), 'os.path.basename', 'basename', (['dataset'], {}), '(dataset)\n', (866, 875), False, 'from os.path import basename\n'), ((982, 999), 'os.path.basename', 'basename', (['dataset'], {}), '(dataset)\n', (990, 999), False, 'from os.path import basename\n')]
|
import argparse, torch, gc, os, random, json
from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature
from model import MCEncoder
from lcmodel import LCEncoder
from tqdm import tqdm
from evaluate import evaluate, evaluate_test
import numpy as np
import os
def main(frequency, batch_size, epoch_num, verbose, MODE, lc, directory, loss):
mode = MODE
if lc:
Encoder = LCEncoder
else:
Encoder = MCEncoder
    os.makedirs(directory, exist_ok=True)  # save json here
word2index, index2word, word2vec, index2each, label_size_each, data_idx_each = load_data(frequency)
(label_size, label_lexname_size, label_rootaffix_size, label_sememe_size) = label_size_each
(data_train_idx, data_dev_idx, data_test_500_seen_idx, data_test_500_unseen_idx, data_defi_c_idx, data_desc_c_idx) = data_idx_each
(index2sememe, index2lexname, index2rootaffix) = index2each
index2word = np.array(index2word)
test_dataset = MyDataset(data_test_500_seen_idx + data_test_500_unseen_idx + data_desc_c_idx)
valid_dataset = MyDataset(data_dev_idx)
train_dataset = MyDataset(data_train_idx + data_defi_c_idx)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=my_collate_fn)
valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, collate_fn=my_collate_fn)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=my_collate_fn)
print('DataLoader prepared. Batch_size [%d]'%batch_size)
print('Train dataset: ', len(train_dataset))
print('Valid dataset: ', len(valid_dataset))
print('Test dataset: ', len(test_dataset))
data_all_idx = data_train_idx + data_dev_idx + data_test_500_seen_idx + data_test_500_unseen_idx + data_defi_c_idx
sememe_num = len(index2sememe)
    wd2sem = word2feature(data_all_idx, label_size, sememe_num, 'sememes')  # label_size, not len(word2index): we only use the target words' features
wd_sems = label_multihot(wd2sem, sememe_num)
wd_sems = torch.from_numpy(np.array(wd_sems)).to(device) #torch.from_numpy(np.array(wd_sems[:label_size])).to(device)
lexname_num = len(index2lexname)
wd2lex = word2feature(data_all_idx, label_size, lexname_num, 'lexnames')
wd_lex = label_multihot(wd2lex, lexname_num)
wd_lex = torch.from_numpy(np.array(wd_lex)).to(device)
rootaffix_num = len(index2rootaffix)
wd2ra = word2feature(data_all_idx, label_size, rootaffix_num, 'root_affix')
wd_ra = label_multihot(wd2ra, rootaffix_num)
wd_ra = torch.from_numpy(np.array(wd_ra)).to(device)
mask_s = mask_noFeature(label_size, wd2sem, sememe_num)
mask_l = mask_noFeature(label_size, wd2lex, lexname_num)
mask_r = mask_noFeature(label_size, wd2ra, rootaffix_num)
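    # Hedged sketch of what word2feature/label_multihot plausibly produce (the
    # data module is not shown): per-word feature-index lists turned into a
    # dense 0/1 matrix of shape [label_size, feature_num].
    def _label_multihot_sketch(word_features, feature_num):
        multihot = np.zeros((len(word_features), feature_num), dtype=np.float32)
        for word_idx, feature_indices in enumerate(word_features):
            multihot[word_idx, feature_indices] = 1.0
        return multihot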
model = Encoder(vocab_size=len(word2index),
embed_dim=word2vec.shape[1],
hidden_dim=300,
layers=1,
class_num=label_size,
sememe_num=sememe_num,
lexname_num=lexname_num,
rootaffix_num=rootaffix_num,
loss=loss,
mode=MODE)
model.embedding.weight.data = torch.from_numpy(word2vec)
# model.target_embedding.weight.data = torch.from_numpy(word2vec) # target embedding!!
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001) # Adam
best_valid_accu = 0
DEF_UPDATE = True
for epoch in range(epoch_num):
print('epoch: ', epoch)
model.train()
train_loss = 0
label_list = list()
pred_list = list()
for words_t, definition_words_t in tqdm(train_dataloader, total=len(train_dataloader), disable=verbose):
optimizer.zero_grad()
# print ("definitions: {} \n {}".format( definition_words_t.size(), definition_words_t[0]))
# print ("words: {} \n {}".format( words_t.size(), words_t))
# print ("words sems: {} \n {}".format( wd_sems.size(), wd_sems))
# print ("words lex: {} \n {}".format( wd_lex.size(), wd_lex))
loss, _, indices = model('train', x=definition_words_t, w=words_t, ws=wd_sems, wl=wd_lex, wr=wd_ra, msk_s=mask_s, msk_l=mask_l, msk_r=mask_r, mode=MODE)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
# target!
model.update_target()
predicted = indices[:, :100].detach().cpu().numpy().tolist()
train_loss += loss.item()
label_list.extend(words_t.detach().cpu().numpy())
pred_list.extend(predicted)
train_accu_1, train_accu_10, train_accu_100 = evaluate(label_list, pred_list)
del label_list
del pred_list
gc.collect()
print('train_loss: ', train_loss/len(train_dataset))
print('train_accu(1/10/100): %.2f %.2F %.2f'%(train_accu_1, train_accu_10, train_accu_100))
model.eval()
with torch.no_grad():
valid_loss = 0
label_list = []
pred_list = []
for words_t, definition_words_t in tqdm(valid_dataloader, disable=verbose):
loss, _, indices = model('train', x=definition_words_t, w=words_t, ws=wd_sems, wl=wd_lex, wr=wd_ra, msk_s=mask_s, msk_l=mask_l, msk_r=mask_r, mode=MODE)
predicted = indices[:, :100].detach().cpu().numpy().tolist()
valid_loss += loss.item()
label_list.extend(words_t.detach().cpu().numpy())
pred_list.extend(predicted)
valid_accu_1, valid_accu_10, valid_accu_100 = evaluate(label_list, pred_list)
print('valid_loss: ', valid_loss/len(valid_dataset))
print('valid_accu(1/10/100): %.2f %.2F %.2f'%(valid_accu_1, valid_accu_10, valid_accu_100))
del label_list
del pred_list
gc.collect()
if valid_accu_10>best_valid_accu:
best_valid_accu = valid_accu_10
print('-----best_valid_accu-----')
#torch.save(model, 'saved.model')
test_loss = 0
label_list = []
pred_list = []
for words_t, definition_words_t in tqdm(test_dataloader, disable=verbose):
indices = model('test', x=definition_words_t, w=words_t, ws=wd_sems, wl=wd_lex, wr=wd_ra, msk_s=mask_s, msk_l=mask_l, msk_r=mask_r, mode=MODE)
predicted = indices[:, :1000].detach().cpu().numpy().tolist()
label_list.extend(words_t.detach().cpu().numpy())
pred_list.extend(predicted)
test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(label_list, pred_list)
print('test_accu(1/10/100): %.2f %.2F %.2f %.2f %.2f'%(test_accu_1, test_accu_10, test_accu_100, median, variance))
# if epoch>0: #5
json.dump((index2word[label_list]).tolist(), open(directory+mode+'_label_list.json', 'w'))
json.dump((index2word[np.array(pred_list)]).tolist(), open(directory+mode+'_pred_list.json', 'w'))
del label_list
del pred_list
gc.collect()
torch.save(model.state_dict(), directory+mode+"model_e{}".format(epoch))
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
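# Hedged sketch of the accuracy@k returned by the evaluate module (whose
# source is not shown): a probe counts as a hit if the gold word index
# appears among the top-k predicted candidates.
def _accuracy_at_k_sketch(labels, predictions, k):
    hits = sum(1 for gold, candidates in zip(labels, predictions)
               if gold in candidates[:k])
    return 100.0 * hits / max(len(labels), 1)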
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--frequency', type=int, default=20) # 5~25
parser.add_argument('-b', '--batch_size', type=int, default=128) # 64
parser.add_argument('-e', '--epoch_num', type=int, default=15) # 10
    parser.add_argument('-v', '--verbose', default=True, action='store_false')
parser.add_argument('-g', '--gpu', type=str, default='0')
parser.add_argument('-m', '--mode', type=str, default='m')
parser.add_argument('-sd', '--seed', type=int, default=543624)
    parser.add_argument('-lc', '--learn_channel', default=False, action='store_true')
parser.add_argument('-dr', '--save_dir', type=str, default='../runs/run_10/')
parser.add_argument('-ls', '--loss', type=str, default='ce')
args = parser.parse_args()
setup_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
main(args.frequency, args.batch_size, args.epoch_num, args.verbose, args.mode, args.learn_channel, args.save_dir, args.loss)
|
[
"evaluate.evaluate_test",
"evaluate.evaluate"
] |
[((486, 523), 'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '(True)'}), '(directory, exist_ok=True)\n', (497, 523), False, 'import os\n'), ((627, 647), 'data.load_data', 'load_data', (['frequency'], {}), '(frequency)\n', (636, 647), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((960, 980), 'numpy.array', 'np.array', (['index2word'], {}), '(index2word)\n', (968, 980), True, 'import numpy as np\n'), ((1000, 1078), 'data.MyDataset', 'MyDataset', (['(data_test_500_seen_idx + data_test_500_unseen_idx + data_desc_c_idx)'], {}), '(data_test_500_seen_idx + data_test_500_unseen_idx + data_desc_c_idx)\n', (1009, 1078), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((1099, 1122), 'data.MyDataset', 'MyDataset', (['data_dev_idx'], {}), '(data_dev_idx)\n', (1108, 1122), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((1143, 1186), 'data.MyDataset', 'MyDataset', (['(data_train_idx + data_defi_c_idx)'], {}), '(data_train_idx + data_defi_c_idx)\n', (1152, 1186), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((1211, 1321), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'collate_fn': 'my_collate_fn'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, collate_fn=my_collate_fn)\n', (1238, 1321), False, 'import argparse, torch, gc, os, random, json\n'), ((1340, 1450), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'collate_fn': 'my_collate_fn'}), '(valid_dataset, batch_size=batch_size, shuffle=\n True, collate_fn=my_collate_fn)\n', (1367, 1450), False, 'import argparse, torch, gc, os, random, json\n'), ((1468, 1578), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'collate_fn': 'my_collate_fn'}), '(test_dataset, batch_size=batch_size, shuffle=\n False, collate_fn=my_collate_fn)\n', (1495, 1578), False, 'import argparse, torch, gc, os, random, json\n'), ((1957, 2018), 'data.word2feature', 'word2feature', (['data_all_idx', 'label_size', 'sememe_num', '"""sememes"""'], {}), "(data_all_idx, label_size, sememe_num, 'sememes')\n", (1969, 2018), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2102, 2136), 'data.label_multihot', 'label_multihot', (['wd2sem', 'sememe_num'], {}), '(wd2sem, sememe_num)\n', (2116, 2136), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2309, 2372), 'data.word2feature', 'word2feature', (['data_all_idx', 'label_size', 'lexname_num', '"""lexnames"""'], {}), "(data_all_idx, label_size, lexname_num, 'lexnames')\n", (2321, 2372), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2387, 2422), 'data.label_multihot', 'label_multihot', (['wd2lex', 'lexname_num'], {}), '(wd2lex, lexname_num)\n', (2401, 2422), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2535, 2602), 'data.word2feature', 'word2feature', (['data_all_idx', 'label_size', 'rootaffix_num', 
'"""root_affix"""'], {}), "(data_all_idx, label_size, rootaffix_num, 'root_affix')\n", (2547, 2602), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2616, 2652), 'data.label_multihot', 'label_multihot', (['wd2ra', 'rootaffix_num'], {}), '(wd2ra, rootaffix_num)\n', (2630, 2652), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2723, 2769), 'data.mask_noFeature', 'mask_noFeature', (['label_size', 'wd2sem', 'sememe_num'], {}), '(label_size, wd2sem, sememe_num)\n', (2737, 2769), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2783, 2830), 'data.mask_noFeature', 'mask_noFeature', (['label_size', 'wd2lex', 'lexname_num'], {}), '(label_size, wd2lex, lexname_num)\n', (2797, 2830), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((2844, 2892), 'data.mask_noFeature', 'mask_noFeature', (['label_size', 'wd2ra', 'rootaffix_num'], {}), '(label_size, wd2ra, rootaffix_num)\n', (2858, 2892), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2feature, label_multihot, mask_noFeature\n'), ((3344, 3370), 'torch.from_numpy', 'torch.from_numpy', (['word2vec'], {}), '(word2vec)\n', (3360, 3370), False, 'import argparse, torch, gc, os, random, json\n'), ((7551, 7574), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7568, 7574), False, 'import argparse, torch, gc, os, random, json\n'), ((7579, 7611), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (7605, 7611), False, 'import argparse, torch, gc, os, random, json\n'), ((7616, 7636), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7630, 7636), True, 'import numpy as np\n'), ((7641, 7658), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7652, 7658), False, 'import argparse, torch, gc, os, random, json\n'), ((7746, 7771), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7769, 7771), False, 'import argparse, torch, gc, os, random, json\n'), ((4856, 4887), 'evaluate.evaluate', 'evaluate', (['label_list', 'pred_list'], {}), '(label_list, pred_list)\n', (4864, 4887), False, 'from evaluate import evaluate, evaluate_test\n'), ((4941, 4953), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4951, 4953), False, 'import argparse, torch, gc, os, random, json\n'), ((5149, 5164), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5162, 5164), False, 'import argparse, torch, gc, os, random, json\n'), ((5295, 5334), 'tqdm.tqdm', 'tqdm', (['valid_dataloader'], {'disable': 'verbose'}), '(valid_dataloader, disable=verbose)\n', (5299, 5334), False, 'from tqdm import tqdm\n'), ((5792, 5823), 'evaluate.evaluate', 'evaluate', (['label_list', 'pred_list'], {}), '(label_list, pred_list)\n', (5800, 5823), False, 'from evaluate import evaluate, evaluate_test\n'), ((6058, 6070), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6068, 6070), False, 'import argparse, torch, gc, os, random, json\n'), ((2168, 2185), 'numpy.array', 'np.array', (['wd_sems'], {}), '(wd_sems)\n', (2176, 2185), True, 'import numpy as np\n'), ((2453, 2469), 'numpy.array', 'np.array', (['wd_lex'], {}), '(wd_lex)\n', (2461, 2469), True, 'import numpy as np\n'), ((2682, 2697), 'numpy.array', 'np.array', (['wd_ra'], {}), '(wd_ra)\n', (2690, 2697), True, 'import numpy as np\n'), ((6423, 6461), 
'tqdm.tqdm', 'tqdm', (['test_dataloader'], {'disable': 'verbose'}), '(test_dataloader, disable=verbose)\n', (6427, 6461), False, 'from tqdm import tqdm\n'), ((6903, 6939), 'evaluate.evaluate_test', 'evaluate_test', (['label_list', 'pred_list'], {}), '(label_list, pred_list)\n', (6916, 6939), False, 'from evaluate import evaluate, evaluate_test\n'), ((7421, 7433), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7431, 7433), False, 'import argparse, torch, gc, os, random, json\n'), ((7250, 7269), 'numpy.array', 'np.array', (['pred_list'], {}), '(pred_list)\n', (7258, 7269), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
from pathlib import Path
import argparse
import listing
# Version of the format in which I save the models, cases, etc. If I change
# the format in the future I can just bump this string: a new folder will be
# created and the old data will be left, ignored, in the old folder.
DATA_VERSION = "v5"
CASES_PATH = Path(f"./cases/{DATA_VERSION}/")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
def train(args):
        # Import here because it loads TensorFlow, and I don't want that to
        # happen in the other subcommands because it is slow
import train
train.train(CASES_PATH)
parser_train = subparsers.add_parser(
"train",
help=("Train case. It will create the model defined in the source "
"code, train it, and save everything in the \"cases\" folder: "
"the model, a json description, etc.")
)
parser_train.set_defaults(func=train)
def eval_(args):
        # Import here because it loads TensorFlow, and I don't want that to
        # happen in the other subcommands because it is slow
import evaluate
evaluate.evaluate(
args.id, CASES_PATH,
args.plot_history, args.history_ignore,
args.examples
)
parser_eval = subparsers.add_parser(
"eval",
help=("Evaluate case. It will load the given model, evaluate it, "
"show plots, etc.")
)
parser_eval.add_argument("id",
type=str,
help="ID of case to evaluate",
)
parser_eval.add_argument("--plot-history", "-p",
action="store_true",
help="Plot history",
)
parser_eval.add_argument("--history-ignore", "-i",
nargs="+",
type=str,
default=[],
help="Metrics to ignore when showing history",
)
parser_eval.add_argument("--examples", "-e",
action="store_true",
help="Plot examples",
)
parser_eval.set_defaults(func=eval_)
def list_(args):
listing.listing(CASES_PATH, args.filter, args.print_layers, args.verbose)
parser_eval = subparsers.add_parser(
"list",
help=("List cases. It searchs all the saved cases/models and lists "
"them. Allows to filter results and select information to "
"show."))
parser_eval.add_argument("--verbose", "-v",
action="store_true",
help="Show lots of information about each case",
)
parser_eval.add_argument("--print-layers", "-l",
action="store_true",
help="Show list of layers for each case. To be used with -v",
)
parser_eval.add_argument("--filter", "-f",
type=str,
nargs=2,
help="Filter field of case description",
)
parser_eval.set_defaults(func=list_)
args = parser.parse_args()
# Call function corresponding to the selected subparser
args.func(args)
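# Example invocations (assuming this script is saved as main.py):
#   ./main.py train
#   ./main.py eval <case-id> --plot-history --examples
#   ./main.py list --verbose --print-layers --filter <field> <value>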
|
[
"evaluate.evaluate"
] |
[((326, 358), 'pathlib.Path', 'Path', (['f"""./cases/{DATA_VERSION}/"""'], {}), "(f'./cases/{DATA_VERSION}/')\n", (330, 358), False, 'from pathlib import Path\n'), ((401, 426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (424, 426), False, 'import argparse\n'), ((636, 659), 'train.train', 'train.train', (['CASES_PATH'], {}), '(CASES_PATH)\n', (647, 659), False, 'import train\n'), ((1166, 1264), 'evaluate.evaluate', 'evaluate.evaluate', (['args.id', 'CASES_PATH', 'args.plot_history', 'args.history_ignore', 'args.examples'], {}), '(args.id, CASES_PATH, args.plot_history, args.\n history_ignore, args.examples)\n', (1183, 1264), False, 'import evaluate\n'), ((2140, 2213), 'listing.listing', 'listing.listing', (['CASES_PATH', 'args.filter', 'args.print_layers', 'args.verbose'], {}), '(CASES_PATH, args.filter, args.print_layers, args.verbose)\n', (2155, 2213), False, 'import listing\n')]
|
# Entry point for a script to attempt vocal and music separation using Tensorflow
# Accompanying thesis: Vocal and Audio separation using deep learning for Riga Technical University.
# Author: <NAME> <<EMAIL>>, Student ID: 161RDB280
import logging
import argparse
import os
import sys
from dataset import Dataset
from model import Model
from song import Song
from config import prepare_config
from evaluate import Evaluator
# Set up - Load config, arguments and set up logging
config = prepare_config('config.ini')
if config.get("logging", "logtype") == "file":
logging.basicConfig(filename=config.get('logging', 'logfile'), level=logging.getLevelName(config.get('logging', 'loglevel')), filemode='a', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
elif config.get("logging", "logtype") == "console":
logging.basicConfig(level=logging.getLevelName(config.get('logging', 'loglevel')), format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
logging.addLevelName(55, "Hello!")
logging.addLevelName(56, "Goodbye!")
parser = argparse.ArgumentParser(description="Neural network for vocal and music splitting")
parser.add_argument("--mode", default="train", type=str, help="Mode in which the script is run (train/separate/evaluate).")
parser.add_argument("--weights", default="network.weights", type=str, help="File containing the weights to be used with the neural network. Will be created if it doesn't exist. Required for separation. Default is network.weights.")
parser.add_argument("--datadir", default="data", type=str, help="Directory in which the training data is located in. Default is data. (requires --mode=train)")
parser.add_argument("--validationdir", default="data-valid", type=str, help="Directory in which the validation data is located in. Default is data-valid. (requires --mode=train)")
parser.add_argument("--evaluationdir", default="evaluate", type=str, help="Directory in which separated data and the originals are located in. Default is evaluate. (requires --mode=evaluate)")
parser.add_argument("--epochs", default=1, type=int, help="How many times will the network go over the data. default - 1. (requires --mode=train)")
parser.add_argument("--file", default="mixture.wav", type=str, help="Name of the file from which to extract vocals. (requires --mode=separate)")
parser.add_argument("--output", default="vocals.wav", type=str, help="Name of the file to which the vocals will be written to. (requires --mode=separate)")
parser.add_argument("--dump_data", default="false", type=str, help="If set to true, dumps raw data for everything. Takes up a lot of space, but can be potentially useful for comparing results. (requires --mode=separate)")
parser.add_argument("--save_accompaniment", default="false", type=str, help="If set to true, the accompaniment will also be saved as a separate file (requires --mode=separate)")
args = parser.parse_args()
logging.log(55, 'Script started.')
if args.mode == "train":
logging.info("Preparing to train a model...")
dataset = Dataset(logging, config)
dataset.load(args.datadir)
dataset.get_data_for_cnn()
dataset.get_labels_for_cnn()
validation_set = Dataset(logging, config)
validation_set.load(args.validationdir)
validation_set.get_data_for_cnn()
validation_set.get_labels_for_cnn()
model = Model(logging, config, dataset, validation_set)
model.build(output_summary=True)
if os.path.isfile(args.weights):
logging.info("Found existing weights, loading them...")
model.load(args.weights)
model.train(args.epochs, save_log=config.getboolean("model", "save_history"), log_name=config.get("model", "history_filename"))
logging.info("Saving weights...")
model.save(args.weights)
elif args.mode == "separate":
logging.info("Preparing to separate vocals from instrumentals...")
mixture = Song(logging, "a mixture", config)
mixture.load_file(args.file)
mixture.compute_stft(keep_spectrogram=True)
    dump_data = args.dump_data.lower() in ("yes", "true", "y", "t", "1")
    save_accompaniment = args.save_accompaniment.lower() in ("yes", "true", "y", "t", "1")
if dump_data is True:
mixture.dump_amplitude("original")
mixture.dump_spectrogram("original")
model = Model(logging, config)
model.build()
if os.path.isfile(args.weights):
model.load(args.weights)
else:
logging.critical("Couldn't find a weights file.")
sys.exit(11)
if dump_data is True:
model.isolate(mixture, args.output, save_accompaniment=save_accompaniment, save_original_mask=True, save_original_probabilities=True)
mixture.dump_spectrogram("processed")
else:
model.isolate(mixture, args.output)
elif args.mode == "evaluate":
logging.info("Preparing to evaluate the effectiveness of an output")
evaluator = Evaluator(logging, config)
evaluator.load_data(args.evaluationdir)
evaluator.prepare_data()
sdr, sir, sar = evaluator.calculate_metrics()
evaluator.print_metrics(sdr, sir, sar)
else:
logging.critical("Invalid action - %s", args.mode)
sys.exit(12)
logging.log(56, "Script finished!")
|
[
"evaluate.Evaluator"
] |
[((488, 516), 'config.prepare_config', 'prepare_config', (['"""config.ini"""'], {}), "('config.ini')\n", (502, 516), False, 'from config import prepare_config\n'), ((1009, 1043), 'logging.addLevelName', 'logging.addLevelName', (['(55)', '"""Hello!"""'], {}), "(55, 'Hello!')\n", (1029, 1043), False, 'import logging\n'), ((1044, 1080), 'logging.addLevelName', 'logging.addLevelName', (['(56)', '"""Goodbye!"""'], {}), "(56, 'Goodbye!')\n", (1064, 1080), False, 'import logging\n'), ((1091, 1179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Neural network for vocal and music splitting"""'}), "(description=\n 'Neural network for vocal and music splitting')\n", (1114, 1179), False, 'import argparse\n'), ((2941, 2975), 'logging.log', 'logging.log', (['(55)', '"""Script started."""'], {}), "(55, 'Script started.')\n", (2952, 2975), False, 'import logging\n'), ((5200, 5235), 'logging.log', 'logging.log', (['(56)', '"""Script finished!"""'], {}), "(56, 'Script finished!')\n", (5211, 5235), False, 'import logging\n'), ((3005, 3050), 'logging.info', 'logging.info', (['"""Preparing to train a model..."""'], {}), "('Preparing to train a model...')\n", (3017, 3050), False, 'import logging\n'), ((3065, 3089), 'dataset.Dataset', 'Dataset', (['logging', 'config'], {}), '(logging, config)\n', (3072, 3089), False, 'from dataset import Dataset\n'), ((3206, 3230), 'dataset.Dataset', 'Dataset', (['logging', 'config'], {}), '(logging, config)\n', (3213, 3230), False, 'from dataset import Dataset\n'), ((3365, 3412), 'model.Model', 'Model', (['logging', 'config', 'dataset', 'validation_set'], {}), '(logging, config, dataset, validation_set)\n', (3370, 3412), False, 'from model import Model\n'), ((3457, 3485), 'os.path.isfile', 'os.path.isfile', (['args.weights'], {}), '(args.weights)\n', (3471, 3485), False, 'import os\n'), ((3720, 3753), 'logging.info', 'logging.info', (['"""Saving weights..."""'], {}), "('Saving weights...')\n", (3732, 3753), False, 'import logging\n'), ((3495, 3550), 'logging.info', 'logging.info', (['"""Found existing weights, loading them..."""'], {}), "('Found existing weights, loading them...')\n", (3507, 3550), False, 'import logging\n'), ((3817, 3883), 'logging.info', 'logging.info', (['"""Preparing to separate vocals from instrumentals..."""'], {}), "('Preparing to separate vocals from instrumentals...')\n", (3829, 3883), False, 'import logging\n'), ((3898, 3932), 'song.Song', 'Song', (['logging', '"""a mixture"""', 'config'], {}), "(logging, 'a mixture', config)\n", (3902, 3932), False, 'from song import Song\n'), ((4342, 4364), 'model.Model', 'Model', (['logging', 'config'], {}), '(logging, config)\n', (4347, 4364), False, 'from model import Model\n'), ((4390, 4418), 'os.path.isfile', 'os.path.isfile', (['args.weights'], {}), '(args.weights)\n', (4404, 4418), False, 'import os\n'), ((4471, 4520), 'logging.critical', 'logging.critical', (['"""Couldn\'t find a weights file."""'], {}), '("Couldn\'t find a weights file.")\n', (4487, 4520), False, 'import logging\n'), ((4529, 4541), 'sys.exit', 'sys.exit', (['(11)'], {}), '(11)\n', (4537, 4541), False, 'import sys\n'), ((4844, 4912), 'logging.info', 'logging.info', (['"""Preparing to evaluate the effectiveness of an output"""'], {}), "('Preparing to evaluate the effectiveness of an output')\n", (4856, 4912), False, 'import logging\n'), ((4929, 4955), 'evaluate.Evaluator', 'Evaluator', (['logging', 'config'], {}), '(logging, config)\n', (4938, 4955), False, 'from evaluate import Evaluator\n'), ((5132, 5182), 
'logging.critical', 'logging.critical', (['"""Invalid action - %s"""', 'args.mode'], {}), "('Invalid action - %s', args.mode)\n", (5148, 5182), False, 'import logging\n'), ((5187, 5199), 'sys.exit', 'sys.exit', (['(12)'], {}), '(12)\n', (5195, 5199), False, 'import sys\n')]
|
import os
import glob
import numpy as np
import caffe
import evaluate
import config
caffe.set_mode_gpu()
caffe.set_device(0)
model_weights = os.path.join(config.LRCN_MODELS_DIR, 'RGB_lstm_model_iter_30000.caffemodel')
h5Files = glob.glob(os.path.join(config.DATASET_DIR, 'extracted_features_lstm_RGB/*.h5'))
model_lstm = 'models/deploy_lstm.prototxt'
model_fstm = 'models/deploy_fstm.prototxt'
net_lstm = caffe.Net(model_lstm, model_weights, caffe.TEST)
net = caffe.Net(model_fstm, model_weights, caffe.TEST)
d_out = 256
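# The fused LSTM layer stores the parameters of its four gates stacked along
# the first axis in d_out-sized blocks; the slice 3*d_out:4*d_out below copies
# one of those gate blocks (which one depends on Caffe's internal gate
# ordering) into the plain fully-connected layer of the FSTM deploy model.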
net.params['lstm-fc'][0].data[...] = net_lstm.params['lstm1'][0].data[3*d_out:4*d_out,:]
net.params['lstm-fc'][1].data[...] = net_lstm.params['lstm1'][1].data[3*d_out:4*d_out]
evaluate.evaluate(net, h5Files)
|
[
"evaluate.evaluate"
] |
[((86, 106), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (104, 106), False, 'import caffe\n'), ((107, 126), 'caffe.set_device', 'caffe.set_device', (['(0)'], {}), '(0)\n', (123, 126), False, 'import caffe\n'), ((144, 220), 'os.path.join', 'os.path.join', (['config.LRCN_MODELS_DIR', '"""RGB_lstm_model_iter_30000.caffemodel"""'], {}), "(config.LRCN_MODELS_DIR, 'RGB_lstm_model_iter_30000.caffemodel')\n", (156, 220), False, 'import os\n'), ((409, 457), 'caffe.Net', 'caffe.Net', (['model_lstm', 'model_weights', 'caffe.TEST'], {}), '(model_lstm, model_weights, caffe.TEST)\n', (418, 457), False, 'import caffe\n'), ((465, 513), 'caffe.Net', 'caffe.Net', (['model_fstm', 'model_weights', 'caffe.TEST'], {}), '(model_fstm, model_weights, caffe.TEST)\n', (474, 513), False, 'import caffe\n'), ((704, 735), 'evaluate.evaluate', 'evaluate.evaluate', (['net', 'h5Files'], {}), '(net, h5Files)\n', (721, 735), False, 'import evaluate\n'), ((241, 309), 'os.path.join', 'os.path.join', (['config.DATASET_DIR', '"""extracted_features_lstm_RGB/*.h5"""'], {}), "(config.DATASET_DIR, 'extracted_features_lstm_RGB/*.h5')\n", (253, 309), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import sys
log_level_index = sys.argv.index('--log_level') + 1 if '--log_level' in sys.argv else 0
os.environ['TF_CPP_MIN_LOG_LEVEL'] = sys.argv[log_level_index] if log_level_index > 0 and log_level_index < len(sys.argv) else '3'
import evaluate
import numpy as np
import progressbar
import shutil
import tempfile
import tensorflow as tf
import traceback
from ds_ctcdecoder import ctc_beam_search_decoder, Scorer
from six.moves import zip, range
#from tensorflow.contrib.lite.python import tflite_convert
from tensorflow.python.tools import freeze_graph
from util.audio import audiofile_to_input_vector
from util.config import Config, initialize_globals
from util.coordinator import TrainingCoordinator
from util.feeding import DataSet, ModelFeeder
from util.flags import create_flags, FLAGS
from util.logging import log_info, log_error, log_debug, log_warn
from util.preprocess import preprocess
from util.text import Alphabet
# Graph Creation
# ==============
def variable_on_worker_level(name, shape, initializer):
r'''
Next we concern ourselves with graph creation.
However, before we do so we must introduce a utility function ``variable_on_worker_level()``
used to create a variable in CPU memory.
'''
# Use the /cpu:0 device on worker_device for scoped operations
if len(FLAGS.ps_hosts) == 0:
device = Config.worker_device
else:
device = tf.train.replica_device_setter(worker_device=Config.worker_device, cluster=Config.cluster)
with tf.device(device):
        # Create or get the appropriate variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
def BiRNN(batch_x, seq_length, dropout, reuse=False, batch_size=None, n_steps=-1, previous_state=None, tflite=False):
r'''
That done, we will define the learned variables, the weights and biases,
within the method ``BiRNN()`` which also constructs the neural network.
The variables named ``hn``, where ``n`` is an integer, hold the learned weight variables.
The variables named ``bn``, where ``n`` is an integer, hold the learned bias variables.
In particular, the first variable ``h1`` holds the learned weight matrix that
converts an input vector of dimension ``n_input + 2*n_input*n_context``
to a vector of dimension ``n_hidden_1``.
Similarly, the second variable ``h2`` holds the weight matrix converting
an input vector of dimension ``n_hidden_1`` to one of dimension ``n_hidden_2``.
The variables ``h3``, ``h5``, and ``h6`` are similar.
Likewise, the biases, ``b1``, ``b2``..., hold the biases for the various layers.
'''
layers = {}
# Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
if not batch_size:
batch_size = tf.shape(batch_x)[0]
# Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
# This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
# Permute n_steps and batch_size
batch_x = tf.transpose(batch_x, [1, 0, 2, 3])
# Reshape to prepare input for first layer
batch_x = tf.reshape(batch_x, [-1, Config.n_input + 2*Config.n_input*Config.n_context]) # (n_steps*batch_size, n_input + 2*n_input*n_context)
layers['input_reshaped'] = batch_x
# The next three blocks will pass `batch_x` through three hidden layers with
# clipped RELU activation and dropout.
# 1st layer
b1 = variable_on_worker_level('b1', [Config.n_hidden_1], tf.zeros_initializer())
h1 = variable_on_worker_level('h1', [Config.n_input + 2*Config.n_input*Config.n_context, Config.n_hidden_1], tf.contrib.layers.xavier_initializer())
layer_1 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), FLAGS.relu_clip)
layer_1 = tf.nn.dropout(layer_1, (1.0 - dropout[0]))
layers['layer_1'] = layer_1
# 2nd layer
b2 = variable_on_worker_level('b2', [Config.n_hidden_2], tf.zeros_initializer())
h2 = variable_on_worker_level('h2', [Config.n_hidden_1, Config.n_hidden_2], tf.contrib.layers.xavier_initializer())
layer_2 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_1, h2), b2)), FLAGS.relu_clip)
layer_2 = tf.nn.dropout(layer_2, (1.0 - dropout[1]))
layers['layer_2'] = layer_2
# 3rd layer
b3 = variable_on_worker_level('b3', [Config.n_hidden_3], tf.zeros_initializer())
h3 = variable_on_worker_level('h3', [Config.n_hidden_2, Config.n_hidden_3], tf.contrib.layers.xavier_initializer())
layer_3 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_2, h3), b3)), FLAGS.relu_clip)
layer_3 = tf.nn.dropout(layer_3, (1.0 - dropout[2]))
layers['layer_3'] = layer_3
# Now we create the forward and backward LSTM units.
# Both of which have inputs of length `n_cell_dim` and bias `1.0` for the forget gate of the LSTM.
# Forward direction cell:
if not tflite:
fw_cell = tf.contrib.rnn.LSTMBlockFusedCell(Config.n_cell_dim, reuse=reuse)
layers['fw_cell'] = fw_cell
else:
fw_cell = tf.nn.rnn_cell.LSTMCell(Config.n_cell_dim, reuse=reuse)
# `layer_3` is now reshaped into `[n_steps, batch_size, 2*n_cell_dim]`,
# as the LSTM RNN expects its input to be of shape `[max_time, batch_size, input_size]`.
layer_3 = tf.reshape(layer_3, [n_steps, batch_size, Config.n_hidden_3])
if tflite:
# Generated StridedSlice, not supported by NNAPI
#n_layer_3 = []
#for l in range(layer_3.shape[0]):
# n_layer_3.append(layer_3[l])
#layer_3 = n_layer_3
# Unstack/Unpack is not supported by NNAPI
layer_3 = tf.unstack(layer_3, n_steps)
    # We parametrize the RNN implementation, as the training and inference
    # graphs need to do different things here.
if not tflite:
output, output_state = fw_cell(inputs=layer_3, dtype=tf.float32, sequence_length=seq_length, initial_state=previous_state)
else:
output, output_state = tf.nn.static_rnn(fw_cell, layer_3, previous_state, tf.float32)
output = tf.concat(output, 0)
# Reshape output from a tensor of shape [n_steps, batch_size, n_cell_dim]
# to a tensor of shape [n_steps*batch_size, n_cell_dim]
output = tf.reshape(output, [-1, Config.n_cell_dim])
layers['rnn_output'] = output
layers['rnn_output_state'] = output_state
# Now we feed `output` to the fifth hidden layer with clipped RELU activation and dropout
b5 = variable_on_worker_level('b5', [Config.n_hidden_5], tf.zeros_initializer())
h5 = variable_on_worker_level('h5', [Config.n_cell_dim, Config.n_hidden_5], tf.contrib.layers.xavier_initializer())
layer_5 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(output, h5), b5)), FLAGS.relu_clip)
layer_5 = tf.nn.dropout(layer_5, (1.0 - dropout[5]))
layers['layer_5'] = layer_5
# Now we apply the weight matrix `h6` and bias `b6` to the output of `layer_5`
# creating `n_classes` dimensional vectors, the logits.
b6 = variable_on_worker_level('b6', [Config.n_hidden_6], tf.zeros_initializer())
h6 = variable_on_worker_level('h6', [Config.n_hidden_5, Config.n_hidden_6], tf.contrib.layers.xavier_initializer())
layer_6 = tf.add(tf.matmul(layer_5, h6), b6)
layers['layer_6'] = layer_6
# Finally we reshape layer_6 from a tensor of shape [n_steps*batch_size, n_hidden_6]
# to the slightly more useful shape [n_steps, batch_size, n_hidden_6].
# Note, that this differs from the input in that it is time-major.
layer_6 = tf.reshape(layer_6, [n_steps, batch_size, Config.n_hidden_6], name="raw_logits")
layers['raw_logits'] = layer_6
# Output shape: [n_steps, batch_size, n_hidden_6]
return layer_6, layers
# Accuracy and Loss
# =================
# In accord with 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# the loss function used by our network should be the CTC loss function
# (http://www.cs.toronto.edu/~graves/preprint.pdf).
# Conveniently, this loss function is implemented in TensorFlow.
# Thus, we can simply make use of this implementation to define our loss.
def calculate_mean_edit_distance_and_loss(model_feeder, tower, dropout, reuse):
r'''
    This routine computes the CTC loss for one tower's mini-batch and
    returns the batch's average loss. (Despite the name, decoding and the
    edit-distance computation are handled elsewhere.)
'''
# Obtain the next batch of data
batch_x, batch_seq_len, batch_y = model_feeder.next_batch(tower)
# Calculate the logits of the batch using BiRNN
logits, _ = BiRNN(batch_x, batch_seq_len, dropout, reuse)
# Compute the CTC loss using TensorFlow's `ctc_loss`
total_loss = tf.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)
# Calculate the average loss across the batch
avg_loss = tf.reduce_mean(total_loss)
# Finally we return the average loss
return avg_loss
# Adam Optimization
# =================
# In contrast to 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# in which 'Nesterov's Accelerated Gradient Descent'
# (www.cs.toronto.edu/~fritz/absps/momentum.pdf) was used,
# we will use the Adam method for optimization (http://arxiv.org/abs/1412.6980),
# because, generally, it requires less fine-tuning.
def create_optimizer():
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon)
return optimizer
# Towers
# ======
# In order to properly make use of multiple GPU's, one must introduce new abstractions,
# not present when using a single GPU, that facilitate the multi-GPU use case.
# In particular, one must introduce a means to isolate the inference and gradient
# calculations on the various GPU's.
# The abstraction we introduce for this purpose is called a 'tower'.
# A tower is specified by two properties:
# * **Scope** - A scope, as provided by `tf.name_scope()`,
# is a means to isolate the operations within a tower.
# For example, all operations within 'tower 0' could have their name prefixed with `tower_0/`.
# * **Device** - A hardware device, as provided by `tf.device()`,
# on which all operations within the tower execute.
# For example, all operations of 'tower 0' could execute on the first GPU `tf.device('/gpu:0')`.
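# Tiny standalone illustration of the scope/device pairing described above
# (a sketch, not used by the training code below): every op created inside
# the two contexts is placed on the tower's device and named 'tower_i/...'.
def _tower_scope_example(devices=('/gpu:0', '/gpu:1')):
    losses = []
    for i, device in enumerate(devices):
        with tf.device(device):
            with tf.name_scope('tower_%d' % i):
                losses.append(tf.constant(0.0, name='loss'))  # -> tower_i/loss
    return losses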
def get_tower_results(model_feeder, optimizer, dropout_rates):
r'''
    With this preliminary step out of the way, we can, for each GPU, introduce a
    tower for whose batch we calculate and return the optimization gradients
and the average loss across towers.
'''
# To calculate the mean of the losses
tower_avg_losses = []
# Tower gradients to return
tower_gradients = []
with tf.variable_scope(tf.get_variable_scope()):
# Loop over available_devices
for i in range(len(Config.available_devices)):
# Execute operations of tower i on device i
if len(FLAGS.ps_hosts) == 0:
device = Config.available_devices[i]
else:
device = tf.train.replica_device_setter(worker_device=Config.available_devices[i], cluster=Config.cluster)
with tf.device(device):
# Create a scope for all operations of tower i
with tf.name_scope('tower_%d' % i) as scope:
# Calculate the avg_loss and mean_edit_distance and retrieve the decoded
# batch along with the original batch's labels (Y) of this tower
avg_loss = calculate_mean_edit_distance_and_loss(model_feeder, i, dropout_rates, reuse=i>0)
# Allow for variables to be re-used by the next tower
tf.get_variable_scope().reuse_variables()
# Retain tower's avg losses
tower_avg_losses.append(avg_loss)
# Compute gradients for model parameters using tower's mini-batch
gradients = optimizer.compute_gradients(avg_loss)
# Retain tower's gradients
tower_gradients.append(gradients)
avg_loss_across_towers = tf.reduce_mean(tower_avg_losses, 0)
tf.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries'])
# Return gradients and the average loss
return tower_gradients, avg_loss_across_towers
def average_gradients(tower_gradients):
r'''
A routine for computing each variable's average of the gradients obtained from the GPUs.
Note also that this code acts as a synchronization point as it requires all
GPUs to be finished with their mini-batch before it can run to completion.
'''
# List of average gradients to return to the caller
average_grads = []
# Run this on cpu_device to conserve GPU memory
with tf.device(Config.cpu_device):
# Loop over gradient/variable pairs from all towers
for grad_and_vars in zip(*tower_gradients):
# Introduce grads to store the gradients for the current variable
grads = []
# Loop over the gradients for the current variable
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Create a gradient/variable tuple for the current variable with its average gradient
grad_and_var = (grad, grad_and_vars[0][1])
# Add the current tuple to average_grads
average_grads.append(grad_and_var)
# Return result to caller
return average_grads
# Logging
# =======
def log_variable(variable, gradient=None):
r'''
We introduce a function for logging a tensor variable's current state.
It logs scalar values for the mean, standard deviation, minimum and maximum.
Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
'''
name = variable.name
mean = tf.reduce_mean(variable)
tf.summary.scalar(name='%s/mean' % name, tensor=mean)
tf.summary.scalar(name='%s/sttdev' % name, tensor=tf.sqrt(tf.reduce_mean(tf.square(variable - mean))))
tf.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(variable))
tf.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(variable))
tf.summary.histogram(name=name, values=variable)
if gradient is not None:
if isinstance(gradient, tf.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
tf.summary.histogram(name='%s/gradients' % name, values=grad_values)
def log_grads_and_vars(grads_and_vars):
r'''
Let's also introduce a helper function for logging collections of gradient/variable tuples.
'''
for gradient, variable in grads_and_vars:
log_variable(variable, gradient=gradient)
# Helpers
# =======
def send_token_to_ps(session, kill=False):
# Sending our token (the task_index as a debug opportunity) to each parameter server.
    # Kill-switch tokens are negative and offset by 1 so that task_index 0
    # still yields a distinct (negative) value.
token = -FLAGS.task_index-1 if kill else FLAGS.task_index
kind = 'kill switch' if kill else 'stop'
for index, enqueue in enumerate(Config.done_enqueues):
log_debug('Sending %s token to ps %d...' % (kind, index))
session.run(enqueue, feed_dict={ Config.token_placeholder: token })
log_debug('Sent %s token to ps %d.' % (kind, index))
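    # e.g. task_index 0 sends token 0 to stop or token -1 to kill; task_index 2
    # sends 2 or -3. A receiver can decode this as: kill if token < 0, with the
    # sender's index recoverable as -token - 1.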
def train(server=None):
r'''
Trains the network on a given server of a cluster.
If no server provided, it performs single process training.
'''
# Initializing and starting the training coordinator
coord = TrainingCoordinator(Config.is_chief)
coord.start()
# Create a variable to hold the global_step.
# It will automagically get incremented by the optimizer.
global_step = tf.Variable(0, trainable=False, name='global_step')
dropout_rates = [tf.placeholder(tf.float32, name='dropout_{}'.format(i)) for i in range(6)]
# Reading training set
train_data = preprocess(FLAGS.train_files.split(','),
FLAGS.train_batch_size,
Config.n_input,
Config.n_context,
Config.alphabet,
hdf5_cache_path=FLAGS.train_cached_features_path)
train_set = DataSet(train_data,
FLAGS.train_batch_size,
limit=FLAGS.limit_train,
next_index=lambda i: coord.get_next_index('train'))
# Reading validation set
dev_data = preprocess(FLAGS.dev_files.split(','),
FLAGS.dev_batch_size,
Config.n_input,
Config.n_context,
Config.alphabet,
hdf5_cache_path=FLAGS.dev_cached_features_path)
dev_set = DataSet(dev_data,
FLAGS.dev_batch_size,
limit=FLAGS.limit_dev,
next_index=lambda i: coord.get_next_index('dev'))
# Combining all sets to a multi set model feeder
model_feeder = ModelFeeder(train_set,
dev_set,
Config.n_input,
Config.n_context,
Config.alphabet,
tower_feeder_count=len(Config.available_devices))
# Create the optimizer
optimizer = create_optimizer()
# Synchronous distributed training is facilitated by a special proxy-optimizer
if not server is None:
optimizer = tf.train.SyncReplicasOptimizer(optimizer,
replicas_to_aggregate=FLAGS.replicas_to_agg,
total_num_replicas=FLAGS.replicas)
# Get the data_set specific graph end-points
gradients, loss = get_tower_results(model_feeder, optimizer, dropout_rates)
# Average tower gradients across GPUs
avg_tower_gradients = average_gradients(gradients)
# Add summaries of all variables and gradients to log
log_grads_and_vars(avg_tower_gradients)
# Op to merge all summaries for the summary hook
merge_all_summaries_op = tf.summary.merge_all()
# These are saved on every step
step_summaries_op = tf.summary.merge_all('step_summaries')
step_summary_writers = {
'train': tf.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'train'), max_queue=120),
'dev': tf.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'dev'), max_queue=120)
}
# Apply gradients to modify the model
apply_gradient_op = optimizer.apply_gradients(avg_tower_gradients, global_step=global_step)
    if FLAGS.early_stop and FLAGS.validation_step <= 0:
log_warn('Parameter --validation_step needs to be >0 for early stopping to work')
class CoordHook(tf.train.SessionRunHook):
r'''
Embedded coordination hook-class that will use variables of the
surrounding Python context.
'''
def after_create_session(self, session, coord):
log_debug('Starting queue runners...')
model_feeder.start_queue_threads(session, coord)
log_debug('Queue runners started.')
def end(self, session):
# Closing the data_set queues
log_debug('Closing queues...')
model_feeder.close_queues(session)
log_debug('Queues closed.')
# Telling the ps that we are done
send_token_to_ps(session)
# Collecting the hooks
hooks = [CoordHook()]
# Hook to handle initialization and queues for sync replicas.
if not server is None:
hooks.append(optimizer.make_session_run_hook(Config.is_chief))
# Hook to save TensorBoard summaries
if FLAGS.summary_secs > 0:
hooks.append(tf.train.SummarySaverHook(save_secs=FLAGS.summary_secs, output_dir=FLAGS.summary_dir, summary_op=merge_all_summaries_op))
    # Hook with the number of checkpoint files to keep in checkpoint_dir
if FLAGS.train and FLAGS.max_to_keep > 0:
saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
hooks.append(tf.train.CheckpointSaverHook(checkpoint_dir=FLAGS.checkpoint_dir, save_secs=FLAGS.checkpoint_secs, saver=saver))
no_dropout_feed_dict = {
dropout_rates[0]: 0.,
dropout_rates[1]: 0.,
dropout_rates[2]: 0.,
dropout_rates[3]: 0.,
dropout_rates[4]: 0.,
dropout_rates[5]: 0.,
}
# Progress Bar
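    # (update_progressbar keeps its state -- current set name, job index,
    # progressbar instance -- in attributes on the function object itself,
    # a lightweight alternative to a class or module-level globals.)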
def update_progressbar(set_name):
if not hasattr(update_progressbar, 'current_set_name'):
update_progressbar.current_set_name = None
if (update_progressbar.current_set_name != set_name or
update_progressbar.current_job_index == update_progressbar.total_jobs):
# finish prev pbar if it exists
if hasattr(update_progressbar, 'pbar') and update_progressbar.pbar:
update_progressbar.pbar.finish()
update_progressbar.total_jobs = None
update_progressbar.current_job_index = 0
current_epoch = coord._epoch-1
if set_name == "train":
log_info('Training epoch %i...' % current_epoch)
update_progressbar.total_jobs = coord._num_jobs_train
else:
log_info('Validating epoch %i...' % current_epoch)
update_progressbar.total_jobs = coord._num_jobs_dev
# recreate pbar
update_progressbar.pbar = progressbar.ProgressBar(max_value=update_progressbar.total_jobs,
redirect_stdout=True).start()
update_progressbar.current_set_name = set_name
if update_progressbar.pbar:
update_progressbar.pbar.update(update_progressbar.current_job_index+1, force=True)
update_progressbar.current_job_index += 1
# Initialize update_progressbar()'s child fields to safe values
update_progressbar.pbar = None
# The MonitoredTrainingSession takes care of session initialization,
# restoring from a checkpoint, saving to a checkpoint, and closing when done
# or an error occurs.
try:
with tf.train.MonitoredTrainingSession(master='' if server is None else server.target,
is_chief=Config.is_chief,
hooks=hooks,
checkpoint_dir=FLAGS.checkpoint_dir,
save_checkpoint_secs=None, # already taken care of by a hook
log_step_count_steps=0, # disable logging of steps/s to avoid TF warning in validation sets
config=Config.session_config) as session:
tf.get_default_graph().finalize()
try:
if Config.is_chief:
# Retrieving global_step from the (potentially restored) model
model_feeder.set_data_set(no_dropout_feed_dict, model_feeder.train)
step = session.run(global_step, feed_dict=no_dropout_feed_dict)
coord.start_coordination(model_feeder, step)
# Get the first job
job = coord.get_job()
while job and not session.should_stop():
log_debug('Computing %s...' % job)
is_train = job.set_name == 'train'
# The feed_dict (mainly for switching between queues)
if is_train:
feed_dict = {
dropout_rates[0]: FLAGS.dropout_rate,
dropout_rates[1]: FLAGS.dropout_rate2,
dropout_rates[2]: FLAGS.dropout_rate3,
dropout_rates[3]: FLAGS.dropout_rate4,
dropout_rates[4]: FLAGS.dropout_rate5,
dropout_rates[5]: FLAGS.dropout_rate6,
}
else:
feed_dict = no_dropout_feed_dict
# Sets the current data_set for the respective placeholder in feed_dict
model_feeder.set_data_set(feed_dict, getattr(model_feeder, job.set_name))
# Initialize loss aggregator
total_loss = 0.0
# Setting the training operation in case of training requested
train_op = apply_gradient_op if is_train else []
# So far the only extra parameter is the feed_dict
extra_params = { 'feed_dict': feed_dict }
step_summary_writer = step_summary_writers.get(job.set_name)
# Loop over the batches
for job_step in range(job.steps):
if session.should_stop():
break
log_debug('Starting batch...')
# Compute the batch
_, current_step, batch_loss, step_summary = session.run([train_op, global_step, loss, step_summaries_op], **extra_params)
# Log step summaries
step_summary_writer.add_summary(step_summary, current_step)
                        # The next log_debug call is useful when debugging race conditions / distributed TF
log_debug('Finished batch step %d.' % current_step)
# Add batch to loss
total_loss += batch_loss
# Gathering job results
job.loss = total_loss / job.steps
# Display progressbar
if FLAGS.show_progressbar:
update_progressbar(job.set_name)
# Send the current job to coordinator and receive the next one
log_debug('Sending %s...' % job)
job = coord.next_job(job)
if update_progressbar.pbar:
update_progressbar.pbar.finish()
except Exception as e:
log_error(str(e))
traceback.print_exc()
# Calling all hook's end() methods to end blocking calls
for hook in hooks:
hook.end(session)
# Only chief has a SyncReplicasOptimizer queue runner that needs to be stopped for unblocking process exit.
# A rather graceful way to do this is by stopping the ps.
# Only one party can send it w/o failing.
if Config.is_chief:
send_token_to_ps(session, kill=True)
sys.exit(1)
log_debug('Session closed.')
except tf.errors.InvalidArgumentError as e:
log_error(str(e))
log_error('The checkpoint in {0} does not match the shapes of the model.'
' Did you change alphabet.txt or the --n_hidden parameter'
' between train runs using the same checkpoint dir? Try moving'
' or removing the contents of {0}.'.format(FLAGS.checkpoint_dir))
sys.exit(1)
# Stopping the coordinator
coord.stop()
def test():
# Reading test set
test_data = preprocess(FLAGS.test_files.split(','),
FLAGS.test_batch_size,
Config.n_input,
Config.n_context,
Config.alphabet,
hdf5_cache_path=FLAGS.test_cached_features_path)
graph = create_inference_graph(batch_size=FLAGS.test_batch_size, n_steps=-1)
evaluate.evaluate(test_data, graph)
def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
# Input tensor will be of shape [batch_size, n_steps, 2*n_context+1, n_input]
input_tensor = tf.placeholder(tf.float32, [batch_size, n_steps if n_steps > 0 else None, 2*Config.n_context+1, Config.n_input], name='input_node')
seq_length = tf.placeholder(tf.int32, [batch_size], name='input_lengths')
if not tflite:
previous_state_c = variable_on_worker_level('previous_state_c', [batch_size, Config.n_cell_dim], initializer=None)
previous_state_h = variable_on_worker_level('previous_state_h', [batch_size, Config.n_cell_dim], initializer=None)
else:
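        # Note: the TF Lite graph cannot host variables, so the recurrent state
        # is fed in and read back out through plain tensors instead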
previous_state_c = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c')
previous_state_h = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h')
previous_state = tf.contrib.rnn.LSTMStateTuple(previous_state_c, previous_state_h)
no_dropout = [0.0] * 6
logits, layers = BiRNN(batch_x=input_tensor,
seq_length=seq_length if FLAGS.use_seq_length else None,
dropout=no_dropout,
batch_size=batch_size,
n_steps=n_steps,
previous_state=previous_state,
tflite=tflite)
    # The TF Lite runtime requires input tensors of rank 1, 2 or 4; by default
    # we get rank 3, the middle dimension being batch_size, which is forced to
    # one in the inference graph, so remove that dimension
if tflite:
logits = tf.squeeze(logits, [1])
# Apply softmax for CTC decoder
logits = tf.nn.softmax(logits)
new_state_c, new_state_h = layers['rnn_output_state']
# Initial zero state
if not tflite:
zero_state = tf.zeros([batch_size, Config.n_cell_dim], tf.float32)
initialize_c = tf.assign(previous_state_c, zero_state)
initialize_h = tf.assign(previous_state_h, zero_state)
initialize_state = tf.group(initialize_c, initialize_h, name='initialize_state')
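        # Route logits through the state-update assigns so every session.run of
        # the outputs also persists the new RNN state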
with tf.control_dependencies([tf.assign(previous_state_c, new_state_c), tf.assign(previous_state_h, new_state_h)]):
logits = tf.identity(logits, name='logits')
return (
{
'input': input_tensor,
'input_lengths': seq_length,
},
{
'outputs': logits,
'initialize_state': initialize_state,
},
layers
)
else:
logits = tf.identity(logits, name='logits')
new_state_c = tf.identity(new_state_c, name='new_state_c')
new_state_h = tf.identity(new_state_h, name='new_state_h')
return (
{
'input': input_tensor,
'previous_state_c': previous_state_c,
'previous_state_h': previous_state_h,
},
{
'outputs': logits,
'new_state_c': new_state_c,
'new_state_h': new_state_h,
},
layers
)
def export():
r'''
Restores the trained variables into a simpler graph that will be exported for serving.
'''
log_info('Exporting the model...')
with tf.device('/cpu:0'):
from tensorflow.python.framework.ops import Tensor, Operation
tf.reset_default_graph()
session = tf.Session(config=Config.session_config)
inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=FLAGS.n_steps, tflite=FLAGS.export_tflite)
input_names = ",".join(tensor.op.name for tensor in inputs.values())
output_names_tensors = [ tensor.op.name for tensor in outputs.values() if isinstance(tensor, Tensor) ]
output_names_ops = [ tensor.name for tensor in outputs.values() if isinstance(tensor, Operation) ]
output_names = ",".join(output_names_tensors + output_names_ops)
input_shapes = ":".join(",".join(map(str, tensor.shape)) for tensor in inputs.values())
if not FLAGS.export_tflite:
mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')}
else:
# Create a saver using variables from the above newly created graph
def fixup(name):
if name.startswith('rnn/lstm_cell/'):
return name.replace('rnn/lstm_cell/', 'lstm_fused_cell/')
return name
mapping = {fixup(v.op.name): v for v in tf.global_variables()}
saver = tf.train.Saver(mapping)
# Restore variables from training checkpoint
checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
checkpoint_path = checkpoint.model_checkpoint_path
output_filename = 'output_graph.pb'
if FLAGS.remove_export:
if os.path.isdir(FLAGS.export_dir):
log_info('Removing old export')
shutil.rmtree(FLAGS.export_dir)
try:
output_graph_path = os.path.join(FLAGS.export_dir, output_filename)
if not os.path.isdir(FLAGS.export_dir):
os.makedirs(FLAGS.export_dir)
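        # Freeze the checkpointed variables into the exported GraphDef; the
        # persistent state variables are blacklisted for the non-TFLite export below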
def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None):
freeze_graph.freeze_graph_with_def_protos(
input_graph_def=session.graph_def,
input_saver_def=saver.as_saver_def(),
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name=None,
filename_tensor_name=None,
output_graph=output_file,
clear_devices=False,
variable_names_blacklist=variables_blacklist,
initializer_nodes='')
if not FLAGS.export_tflite:
do_graph_freeze(output_file=output_graph_path, output_node_names=output_names, variables_blacklist='previous_state_c,previous_state_h')
else:
temp_fd, temp_freeze = tempfile.mkstemp(dir=FLAGS.export_dir)
os.close(temp_fd)
do_graph_freeze(output_file=temp_freeze, output_node_names=output_names, variables_blacklist='')
output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
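                # Minimal stand-in for the FLAGS object that tflite_convert._convert_model expects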
class TFLiteFlags():
def __init__(self):
self.graph_def_file = temp_freeze
self.inference_type = 'FLOAT'
self.input_arrays = input_names
self.input_shapes = input_shapes
self.output_arrays = output_names
self.output_file = output_tflite_path
self.output_format = 'TFLITE'
default_empty = [
'inference_input_type',
'mean_values',
'default_ranges_min', 'default_ranges_max',
'drop_control_dependency',
'reorder_across_fake_quant',
'change_concat_input_ranges',
'allow_custom_ops',
'converter_mode',
'post_training_quantize',
'dump_graphviz_dir',
'dump_graphviz_video'
]
for e in default_empty:
self.__dict__[e] = None
flags = TFLiteFlags()
tflite_convert._convert_model(flags)
os.unlink(temp_freeze)
log_info('Exported model for TF Lite engine as {}'.format(os.path.basename(output_tflite_path)))
log_info('Models exported at %s' % (FLAGS.export_dir))
except RuntimeError as e:
log_error(str(e))
def do_single_file_inference(input_file_path):
with tf.Session(config=Config.session_config) as session:
inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=-1)
# Create a saver using variables from the above newly created graph
mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')}
saver = tf.train.Saver(mapping)
# Restore variables from training checkpoint
# TODO: This restores the most recent checkpoint, but if we use validation to counteract
# over-fitting, we may want to restore an earlier checkpoint.
checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if not checkpoint:
log_error('Checkpoint directory ({}) does not contain a valid checkpoint state.'.format(FLAGS.checkpoint_dir))
exit(1)
checkpoint_path = checkpoint.model_checkpoint_path
saver.restore(session, checkpoint_path)
session.run(outputs['initialize_state'])
features = audiofile_to_input_vector(input_file_path, Config.n_input, Config.n_context)
num_strides = len(features) - (Config.n_context * 2)
# Create a view into the array with overlapping strides of size
# numcontext (past) + 1 (present) + numcontext (future)
window_size = 2*Config.n_context+1
features = np.lib.stride_tricks.as_strided(
features,
(num_strides, window_size, Config.n_input),
(features.strides[0], features.strides[0], features.strides[1]),
writeable=False)
logits = session.run(outputs['outputs'], feed_dict = {
inputs['input']: [features],
inputs['input_lengths']: [num_strides],
})
logits = np.squeeze(logits)
scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta,
FLAGS.lm_binary_path, FLAGS.lm_trie_path,
Config.alphabet)
decoded = ctc_beam_search_decoder(logits, Config.alphabet, FLAGS.beam_width, scorer=scorer)
# Print highest probability result
print(decoded[0][1])
def main(_):
initialize_globals()
if FLAGS.train or FLAGS.test:
if len(FLAGS.worker_hosts) == 0:
# Only one local task: this process (default case - no cluster)
with tf.Graph().as_default():
tf.set_random_seed(FLAGS.random_seed)
train()
# Now do a final test epoch
if FLAGS.test:
with tf.Graph().as_default():
test()
log_debug('Done.')
else:
# Create and start a server for the local task.
server = tf.train.Server(Config.cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
if FLAGS.job_name == 'ps':
# We are a parameter server and therefore we just wait for all workers to finish
# by waiting for their stop tokens.
with tf.Session(server.target) as session:
for worker in FLAGS.worker_hosts:
log_debug('Waiting for stop token...')
token = session.run(Config.done_dequeues[FLAGS.task_index])
if token < 0:
log_debug('Got a kill switch token from worker %i.' % abs(token + 1))
break
log_debug('Got a stop token from worker %i.' % token)
log_debug('Session closed.')
if FLAGS.test:
test()
elif FLAGS.job_name == 'worker':
# We are a worker and therefore we have to do some work.
# Assigns ops to the local worker by default.
with tf.device(tf.train.replica_device_setter(
worker_device=Config.worker_device,
cluster=Config.cluster)):
# Do the training
train(server)
log_debug('Server stopped.')
# Are we the main process?
if Config.is_chief:
# Doing solo/post-processing work just on the main process...
# Exporting the model
if FLAGS.export_dir:
export()
if len(FLAGS.one_shot_infer):
do_single_file_inference(FLAGS.one_shot_infer)
if __name__ == '__main__' :
create_flags()
tf.app.run(main)
|
[
"evaluate.evaluate"
] |
[((3177, 3212), 'tensorflow.transpose', 'tf.transpose', (['batch_x', '[1, 0, 2, 3]'], {}), '(batch_x, [1, 0, 2, 3])\n', (3189, 3212), True, 'import tensorflow as tf\n'), ((3274, 3360), 'tensorflow.reshape', 'tf.reshape', (['batch_x', '[-1, Config.n_input + 2 * Config.n_input * Config.n_context]'], {}), '(batch_x, [-1, Config.n_input + 2 * Config.n_input * Config.\n n_context])\n', (3284, 3360), True, 'import tensorflow as tf\n'), ((3929, 3969), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_1', '(1.0 - dropout[0])'], {}), '(layer_1, 1.0 - dropout[0])\n', (3942, 3969), True, 'import tensorflow as tf\n'), ((4330, 4370), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_2', '(1.0 - dropout[1])'], {}), '(layer_2, 1.0 - dropout[1])\n', (4343, 4370), True, 'import tensorflow as tf\n'), ((4731, 4771), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_3', '(1.0 - dropout[2])'], {}), '(layer_3, 1.0 - dropout[2])\n', (4744, 4771), True, 'import tensorflow as tf\n'), ((5405, 5466), 'tensorflow.reshape', 'tf.reshape', (['layer_3', '[n_steps, batch_size, Config.n_hidden_3]'], {}), '(layer_3, [n_steps, batch_size, Config.n_hidden_3])\n', (5415, 5466), True, 'import tensorflow as tf\n'), ((6341, 6384), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, Config.n_cell_dim]'], {}), '(output, [-1, Config.n_cell_dim])\n', (6351, 6384), True, 'import tensorflow as tf\n'), ((6868, 6908), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_5', '(1.0 - dropout[5])'], {}), '(layer_5, 1.0 - dropout[5])\n', (6881, 6908), True, 'import tensorflow as tf\n'), ((7623, 7708), 'tensorflow.reshape', 'tf.reshape', (['layer_6', '[n_steps, batch_size, Config.n_hidden_6]'], {'name': '"""raw_logits"""'}), "(layer_6, [n_steps, batch_size, Config.n_hidden_6], name='raw_logits'\n )\n", (7633, 7708), True, 'import tensorflow as tf\n'), ((8848, 8924), 'tensorflow.nn.ctc_loss', 'tf.nn.ctc_loss', ([], {'labels': 'batch_y', 'inputs': 'logits', 'sequence_length': 'batch_seq_len'}), '(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)\n', (8862, 8924), True, 'import tensorflow as tf\n'), ((8991, 9017), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_loss'], {}), '(total_loss)\n', (9005, 9017), True, 'import tensorflow as tf\n'), ((9517, 9639), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'FLAGS.learning_rate', 'beta1': 'FLAGS.beta1', 'beta2': 'FLAGS.beta2', 'epsilon': 'FLAGS.epsilon'}), '(learning_rate=FLAGS.learning_rate, beta1=FLAGS.beta1,\n beta2=FLAGS.beta2, epsilon=FLAGS.epsilon)\n', (9539, 9639), True, 'import tensorflow as tf\n'), ((12438, 12473), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_avg_losses', '(0)'], {}), '(tower_avg_losses, 0)\n', (12452, 12473), True, 'import tensorflow as tf\n'), ((12479, 12581), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""step_loss"""', 'tensor': 'avg_loss_across_towers', 'collections': "['step_summaries']"}), "(name='step_loss', tensor=avg_loss_across_towers,\n collections=['step_summaries'])\n", (12496, 12581), True, 'import tensorflow as tf\n'), ((14532, 14556), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['variable'], {}), '(variable)\n', (14546, 14556), True, 'import tensorflow as tf\n'), ((14561, 14614), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': "('%s/mean' % name)", 'tensor': 'mean'}), "(name='%s/mean' % name, tensor=mean)\n", (14578, 14614), True, 'import tensorflow as tf\n'), ((14886, 14934), 'tensorflow.summary.histogram', 'tf.summary.histogram', ([], {'name': 'name', 'values': 'variable'}), '(name=name, values=variable)\n', (14906, 14934), True, 'import tensorflow as tf\n'), ((16316, 16352), 'util.coordinator.TrainingCoordinator', 'TrainingCoordinator', (['Config.is_chief'], {}), '(Config.is_chief)\n', (16335, 16352), False, 'from util.coordinator import TrainingCoordinator\n'), ((16501, 16552), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (16512, 16552), True, 'import tensorflow as tf\n'), ((18933, 18955), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (18953, 18955), True, 'import tensorflow as tf\n'), ((19017, 19055), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', (['"""step_summaries"""'], {}), "('step_summaries')\n", (19037, 19055), True, 'import tensorflow as tf\n'), ((28499, 28534), 'evaluate.evaluate', 'evaluate.evaluate', (['test_data', 'graph'], {}), '(test_data, graph)\n', (28516, 28534), False, 'import evaluate\n'), ((28706, 28845), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, n_steps if n_steps > 0 else None, 2 * Config.n_context + 1,\n Config.n_input]'], {'name': '"""input_node"""'}), "(tf.float32, [batch_size, n_steps if n_steps > 0 else None, 2 *\n Config.n_context + 1, Config.n_input], name='input_node')\n", (28720, 28845), True, 'import tensorflow as tf\n'), ((28855, 28915), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size]'], {'name': '"""input_lengths"""'}), "(tf.int32, [batch_size], name='input_lengths')\n", (28869, 28915), True, 'import tensorflow as tf\n'), ((29438, 29503), 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', (['previous_state_c', 'previous_state_h'], {}), '(previous_state_c, previous_state_h)\n', (29467, 29503), True, 'import tensorflow as tf\n'), ((30216, 30237), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (30229, 30237), True, 'import tensorflow as tf\n'), ((31789, 31823), 'util.logging.log_info', 'log_info', (['"""Exporting the model..."""'], {}), "('Exporting the model...')\n", (31797, 31823), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((38757, 38777), 'util.config.initialize_globals', 'initialize_globals', ([], {}), '()\n', (38775, 38777), False, 'from util.config import Config, initialize_globals\n'), ((41013, 41027), 'util.flags.create_flags', 'create_flags', ([], {}), '()\n', (41025, 41027), False, 'from util.flags import create_flags, FLAGS\n'), ((41032, 41048), 'tensorflow.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (41042, 41048), True, 'import tensorflow as tf\n'), ((152, 181), 'sys.argv.index', 'sys.argv.index', (['"""--log_level"""'], {}), "('--log_level')\n", (166, 181), False, 'import sys\n'), ((1521, 1616), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': 'Config.worker_device', 'cluster': 'Config.cluster'}), '(worker_device=Config.worker_device, cluster=\n Config.cluster)\n', (1551, 1616), True, 'import tensorflow as tf\n'), ((1622, 1639), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (1631, 1639), True, 'import tensorflow as tf\n'), ((1696, 1760), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': 'name', 'shape': 'shape', 'initializer': 'initializer'}), '(name=name, shape=shape, initializer=initializer)\n', (1711, 1760), True, 'import tensorflow as tf\n'), ((3648, 3670), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (3668, 3670), True, 'import tensorflow as tf\n'), ((3785, 3823), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (3821, 3823), True, 'import tensorflow as tf\n'), ((4082, 4104), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4102, 4104), True, 'import tensorflow as tf\n'), ((4186, 4224), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4222, 4224), True, 'import tensorflow as tf\n'), ((4483, 4505), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4503, 4505), True, 'import tensorflow as tf\n'), ((4587, 4625), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4623, 4625), True, 'import tensorflow as tf\n'), ((5035, 5100), 'tensorflow.contrib.rnn.LSTMBlockFusedCell', 'tf.contrib.rnn.LSTMBlockFusedCell', (['Config.n_cell_dim'], {'reuse': 'reuse'}), '(Config.n_cell_dim, reuse=reuse)\n', (5068, 5100), True, 'import tensorflow as tf\n'), ((5165, 5220), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['Config.n_cell_dim'], {'reuse': 'reuse'}), '(Config.n_cell_dim, reuse=reuse)\n', (5188, 5220), True, 'import tensorflow as tf\n'), ((5747, 5775), 'tensorflow.unstack', 'tf.unstack', (['layer_3', 'n_steps'], {}), '(layer_3, n_steps)\n', (5757, 5775), True, 'import tensorflow as tf\n'), ((6088, 6150), 'tensorflow.nn.static_rnn', 'tf.nn.static_rnn', (['fw_cell', 'layer_3', 'previous_state', 'tf.float32'], {}), '(fw_cell, layer_3, previous_state, tf.float32)\n', (6104, 6150), True, 'import tensorflow as tf\n'), ((6168, 6188), 'tensorflow.concat', 'tf.concat', (['output', '(0)'], {}), '(output, 0)\n', (6177, 6188), True, 'import tensorflow as tf\n'), ((6621, 6643), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6641, 6643), True, 'import tensorflow as tf\n'), ((6725, 6763), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (6761, 6763), True, 'import tensorflow as tf\n'), ((7148, 7170), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (7168, 7170), True, 'import tensorflow as tf\n'), ((7252, 7290), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (7288, 7290), True, 'import tensorflow as tf\n'), ((7313, 7335), 'tensorflow.matmul', 'tf.matmul', (['layer_5', 'h6'], {}), '(layer_5, h6)\n', (7322, 7335), True, 'import tensorflow as tf\n'), ((13126, 13154), 'tensorflow.device', 'tf.device', (['Config.cpu_device'], {}), '(Config.cpu_device)\n', (13135, 13154), True, 'import tensorflow as tf\n'), ((13245, 13266), 'six.moves.zip', 'zip', (['*tower_gradients'], {}), '(*tower_gradients)\n', (13248, 13266), False, 'from six.moves import zip, range\n'), ((15889, 15946), 'util.logging.log_debug', 'log_debug', (["('Sending %s token to ps %d...' % (kind, index))"], {}), "('Sending %s token to ps %d...' % (kind, index))\n", (15898, 15946), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((16031, 16083), 'util.logging.log_debug', 'log_debug', (["('Sent %s token to ps %d.' % (kind, index))"], {}), "('Sent %s token to ps %d.' % (kind, index))\n", (16040, 16083), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((16706, 16734), 'util.flags.FLAGS.train_files.split', 'FLAGS.train_files.split', (['""","""'], {}), "(',')\n", (16729, 16734), False, 'from util.flags import create_flags, FLAGS\n'), ((17267, 17293), 'util.flags.FLAGS.dev_files.split', 'FLAGS.dev_files.split', (['""","""'], {}), "(',')\n", (17288, 17293), False, 'from util.flags import create_flags, FLAGS\n'), ((18295, 18421), 'tensorflow.train.SyncReplicasOptimizer', 'tf.train.SyncReplicasOptimizer', (['optimizer'], {'replicas_to_aggregate': 'FLAGS.replicas_to_agg', 'total_num_replicas': 'FLAGS.replicas'}), '(optimizer, replicas_to_aggregate=FLAGS.\n replicas_to_agg, total_num_replicas=FLAGS.replicas)\n', (18325, 18421), True, 'import tensorflow as tf\n'), ((19497, 19583), 'util.logging.log_warn', 'log_warn', (['"""Parameter --validation_step needs to be >0 for early stopping to work"""'], {}), "(\n 'Parameter --validation_step needs to be >0 for early stopping to work')\n", (19505, 19583), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((20831, 20876), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'FLAGS.max_to_keep'}), '(max_to_keep=FLAGS.max_to_keep)\n', (20845, 20876), True, 'import tensorflow as tf\n'), ((27564, 27592), 'util.logging.log_debug', 'log_debug', (['"""Session closed."""'], {}), "('Session closed.')\n", (27573, 27592), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((28126, 28153), 'util.flags.FLAGS.test_files.split', 'FLAGS.test_files.split', (['""","""'], {}), "(',')\n", (28148, 28153), False, 'from util.flags import create_flags, FLAGS\n'), ((29219, 29308), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, Config.n_cell_dim]'], {'name': '"""previous_state_c"""'}), "(tf.float32, [batch_size, Config.n_cell_dim], name=\n 'previous_state_c')\n", (29233, 29308), True, 'import tensorflow as tf\n'), ((29331, 29420), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, Config.n_cell_dim]'], {'name': '"""previous_state_h"""'}), "(tf.float32, [batch_size, Config.n_cell_dim], name=\n 'previous_state_h')\n", (29345, 29420), True, 'import tensorflow as tf\n'), ((30142, 30165), 'tensorflow.squeeze', 'tf.squeeze', (['logits', '[1]'], {}), '(logits, [1])\n', (30152, 30165), True, 'import tensorflow as tf\n'), ((30363, 30416), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, Config.n_cell_dim]', 'tf.float32'], {}), '([batch_size, Config.n_cell_dim], tf.float32)\n', (30371, 30416), True, 'import tensorflow as tf\n'), ((30440, 30479), 'tensorflow.assign', 'tf.assign', (['previous_state_c', 'zero_state'], {}), '(previous_state_c, zero_state)\n', (30449, 30479), True, 'import tensorflow as tf\n'), ((30503, 30542), 'tensorflow.assign', 'tf.assign', (['previous_state_h', 'zero_state'], {}), '(previous_state_h, zero_state)\n', (30512, 30542), True, 'import tensorflow as tf\n'), ((30570, 30631), 'tensorflow.group', 'tf.group', (['initialize_c', 'initialize_h'], {'name': '"""initialize_state"""'}), "(initialize_c, initialize_h, name='initialize_state')\n", (30578, 30631), True, 'import tensorflow as tf\n'), ((31117, 31151), 'tensorflow.identity', 'tf.identity', (['logits'], {'name': '"""logits"""'}), "(logits, name='logits')\n", (31128, 31151), True, 'import tensorflow as tf\n'), ((31174, 31218), 'tensorflow.identity', 'tf.identity', (['new_state_c'], {'name': '"""new_state_c"""'}), "(new_state_c, name='new_state_c')\n", (31185, 31218), True, 'import tensorflow as tf\n'), ((31241, 31285), 'tensorflow.identity', 'tf.identity', (['new_state_h'], {'name': '"""new_state_h"""'}), "(new_state_h, name='new_state_h')\n", (31252, 31285), True, 'import tensorflow as tf\n'), ((31833, 31852), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (31842, 31852), True, 'import tensorflow as tf\n'), ((31933, 31957), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (31955, 31957), True, 'import tensorflow as tf\n'), ((31976, 32016), 'tensorflow.Session', 'tf.Session', ([], {'config': 'Config.session_config'}), '(config=Config.session_config)\n', (31986, 32016), True, 'import tensorflow as tf\n'), ((33127, 33150), 'tensorflow.train.Saver', 'tf.train.Saver', (['mapping'], {}), '(mapping)\n', (33141, 33150), True, 'import tensorflow as tf\n'), ((33226, 33277), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (33255, 33277), True, 'import tensorflow as tf\n'), ((36637, 36677), 'tensorflow.Session', 'tf.Session', ([], {'config': 'Config.session_config'}), '(config=Config.session_config)\n', (36647, 36677), True, 'import tensorflow as tf\n'), ((36972, 36995), 'tensorflow.train.Saver', 'tf.train.Saver', (['mapping'], {}), '(mapping)\n', (36986, 36995), True, 'import tensorflow as tf\n'), ((37244, 37295), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (37273, 37295), True, 'import tensorflow as tf\n'), ((37644, 37720), 'util.audio.audiofile_to_input_vector', 'audiofile_to_input_vector', (['input_file_path', 'Config.n_input', 'Config.n_context'], {}), '(input_file_path, Config.n_input, Config.n_context)\n', (37669, 37720), False, 'from util.audio import audiofile_to_input_vector\n'), ((37981, 38158), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['features', '(num_strides, window_size, Config.n_input)', '(features.strides[0], features.strides[0], features.strides[1])'], {'writeable': '(False)'}), '(features, (num_strides, window_size, Config\n .n_input), (features.strides[0], features.strides[0], features.strides[\n 1]), writeable=False)\n', (38012, 38158), True, 'import numpy as np\n'), ((38384, 38402), 'numpy.squeeze', 'np.squeeze', (['logits'], {}), '(logits)\n', (38394, 38402), True, 'import numpy as np\n'), ((38421, 38522), 'ds_ctcdecoder.Scorer', 'Scorer', (['FLAGS.lm_alpha', 'FLAGS.lm_beta', 'FLAGS.lm_binary_path', 'FLAGS.lm_trie_path', 'Config.alphabet'], {}), '(FLAGS.lm_alpha, FLAGS.lm_beta, FLAGS.lm_binary_path, FLAGS.\n lm_trie_path, Config.alphabet)\n', (38427, 38522), False, 'from ds_ctcdecoder import ctc_beam_search_decoder, Scorer\n'), ((38584, 38670), 'ds_ctcdecoder.ctc_beam_search_decoder', 'ctc_beam_search_decoder', (['logits', 'Config.alphabet', 'FLAGS.beam_width'], {'scorer': 'scorer'}), '(logits, Config.alphabet, FLAGS.beam_width, scorer=\n scorer)\n', (38607, 38670), False, 'from ds_ctcdecoder import ctc_beam_search_decoder, Scorer\n'), ((2892, 2909), 'tensorflow.shape', 'tf.shape', (['batch_x'], {}), '(batch_x)\n', (2900, 2909), True, 'import tensorflow as tf\n'), ((11048, 11071), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (11069, 11071), True, 'import tensorflow as tf\n'), ((13789, 13808), 'tensorflow.concat', 'tf.concat', (['grads', '(0)'], {}), '(grads, 0)\n', (13798, 13808), True, 'import tensorflow as tf\n'), ((13828, 13851), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (13842, 13851), True, 'import tensorflow as tf\n'), ((14778, 14801), 'tensorflow.reduce_max', 'tf.reduce_max', (['variable'], {}), '(variable)\n', (14791, 14801), True, 'import tensorflow as tf\n'), ((14857, 14880), 'tensorflow.reduce_min', 'tf.reduce_min', (['variable'], {}), '(variable)\n', (14870, 14880), True, 'import tensorflow as tf\n'), ((15154, 15222), 'tensorflow.summary.histogram', 'tf.summary.histogram', ([], {'name': "('%s/gradients' % name)", 'values': 'grad_values'}), "(name='%s/gradients' % name, values=grad_values)\n", (15174, 15222), True, 'import tensorflow as tf\n'), ((16640, 16648), 'six.moves.range', 'range', (['(6)'], {}), '(6)\n', (16645, 16648), False, 'from six.moves import zip, range\n'), ((19125, 19165), 'os.path.join', 'os.path.join', (['FLAGS.summary_dir', '"""train"""'], {}), "(FLAGS.summary_dir, 'train')\n", (19137, 19165), False, 'import os\n'), ((19220, 19258), 'os.path.join', 'os.path.join', (['FLAGS.summary_dir', '"""dev"""'], {}), "(FLAGS.summary_dir, 'dev')\n", (19232, 19258), False, 'import os\n'), ((19827, 19865), 'util.logging.log_debug', 'log_debug', (['"""Starting queue runners..."""'], {}), "('Starting queue runners...')\n", (19836, 19865), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((19939, 19974), 'util.logging.log_debug', 'log_debug', (['"""Queue runners started."""'], {}), "('Queue runners started.')\n", (19948, 19974), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((20062, 20092), 'util.logging.log_debug', 'log_debug', (['"""Closing queues..."""'], {}), "('Closing queues...')\n", (20071, 20092), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((20152, 20179), 'util.logging.log_debug', 'log_debug', (['"""Queues closed."""'], {}), "('Queues closed.')\n", (20161, 20179), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((20578, 20703), 'tensorflow.train.SummarySaverHook', 'tf.train.SummarySaverHook', ([], {'save_secs': 'FLAGS.summary_secs', 'output_dir': 'FLAGS.summary_dir', 'summary_op': 'merge_all_summaries_op'}), '(save_secs=FLAGS.summary_secs, output_dir=FLAGS.\n summary_dir, summary_op=merge_all_summaries_op)\n', (20603, 20703), True, 'import tensorflow as tf\n'), ((20898, 21014), 'tensorflow.train.CheckpointSaverHook', 'tf.train.CheckpointSaverHook', ([], {'checkpoint_dir': 'FLAGS.checkpoint_dir', 'save_secs': 'FLAGS.checkpoint_secs', 'saver': 'saver'}), '(checkpoint_dir=FLAGS.checkpoint_dir, save_secs\n =FLAGS.checkpoint_secs, saver=saver)\n', (20926, 21014), True, 'import tensorflow as tf\n'), ((22972, 23224), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'master': "('' if server is None else server.target)", 'is_chief': 'Config.is_chief', 'hooks': 'hooks', 'checkpoint_dir': 'FLAGS.checkpoint_dir', 'save_checkpoint_secs': 'None', 'log_step_count_steps': '(0)', 'config': 'Config.session_config'}), "(master='' if server is None else server.\n target, is_chief=Config.is_chief, hooks=hooks, checkpoint_dir=FLAGS.\n checkpoint_dir, save_checkpoint_secs=None, log_step_count_steps=0,\n config=Config.session_config)\n", (23005, 23224), True, 'import tensorflow as tf\n'), ((28001, 28012), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (28009, 28012), False, 'import sys\n'), ((30777, 30811), 'tensorflow.identity', 'tf.identity', (['logits'], {'name': '"""logits"""'}), "(logits, name='logits')\n", (30788, 30811), True, 'import tensorflow as tf\n'), ((33429, 33460), 'os.path.isdir', 'os.path.isdir', (['FLAGS.export_dir'], {}), '(FLAGS.export_dir)\n', (33442, 33460), False, 'import os\n'), ((33603, 33650), 'os.path.join', 'os.path.join', (['FLAGS.export_dir', 'output_filename'], {}), '(FLAGS.export_dir, output_filename)\n', (33615, 33650), False, 'import os\n'), ((36461, 36513), 'util.logging.log_info', 'log_info', (["('Models exported at %s' % FLAGS.export_dir)"], {}), "('Models exported at %s' % FLAGS.export_dir)\n", (36469, 36513), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((39202, 39220), 'util.logging.log_debug', 'log_debug', (['"""Done."""'], {}), "('Done.')\n", (39211, 39220), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((39316, 39406), 'tensorflow.train.Server', 'tf.train.Server', (['Config.cluster'], {'job_name': 'FLAGS.job_name', 'task_index': 'FLAGS.task_index'}), '(Config.cluster, job_name=FLAGS.job_name, task_index=FLAGS.\n task_index)\n', (39331, 39406), True, 'import tensorflow as tf\n'), ((40655, 40683), 'util.logging.log_debug', 'log_debug', (['"""Server stopped."""'], {}), "('Server stopped.')\n", (40664, 40683), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((3868, 3890), 'tensorflow.matmul', 'tf.matmul', (['batch_x', 'h1'], {}), '(batch_x, h1)\n', (3877, 3890), True, 'import tensorflow as tf\n'), ((4269, 4291), 'tensorflow.matmul', 'tf.matmul', (['layer_1', 'h2'], {}), '(layer_1, h2)\n', (4278, 4291), True, 'import tensorflow as tf\n'), ((4670, 4692), 'tensorflow.matmul', 'tf.matmul', (['layer_2', 'h3'], {}), '(layer_2, h3)\n', (4679, 4692), True, 'import tensorflow as tf\n'), ((6808, 6829), 'tensorflow.matmul', 'tf.matmul', (['output', 'h5'], {}), '(output, h5)\n', (6817, 6829), True, 'import tensorflow as tf\n'), ((11360, 11461), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': 'Config.available_devices[i]', 'cluster': 'Config.cluster'}), '(worker_device=Config.available_devices[i],\n cluster=Config.cluster)\n', (11390, 11461), True, 'import tensorflow as tf\n'), ((11475, 11492), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (11484, 11492), True, 'import tensorflow as tf\n'), ((13576, 13596), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (13590, 13596), True, 'import tensorflow as tf\n'), ((21926, 21974), 'util.logging.log_info', 'log_info', (["('Training epoch %i...' % current_epoch)"], {}), "('Training epoch %i...' % current_epoch)\n", (21934, 21974), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((22079, 22129), 'util.logging.log_info', 'log_info', (["('Validating epoch %i...' % current_epoch)"], {}), "('Validating epoch %i...' % current_epoch)\n", (22087, 22129), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((33478, 33509), 'util.logging.log_info', 'log_info', (['"""Removing old export"""'], {}), "('Removing old export')\n", (33486, 33509), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((33526, 33557), 'shutil.rmtree', 'shutil.rmtree', (['FLAGS.export_dir'], {}), '(FLAGS.export_dir)\n', (33539, 33557), False, 'import shutil\n'), ((33671, 33702), 'os.path.isdir', 'os.path.isdir', (['FLAGS.export_dir'], {}), '(FLAGS.export_dir)\n', (33684, 33702), False, 'import os\n'), ((33720, 33749), 'os.makedirs', 'os.makedirs', (['FLAGS.export_dir'], {}), '(FLAGS.export_dir)\n', (33731, 33749), False, 'import os\n'), ((34669, 34707), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': 'FLAGS.export_dir'}), '(dir=FLAGS.export_dir)\n', (34685, 34707), False, 'import tempfile\n'), ((34724, 34741), 'os.close', 'os.close', (['temp_fd'], {}), '(temp_fd)\n', (34732, 34741), False, 'import os\n'), ((36312, 36334), 'os.unlink', 'os.unlink', (['temp_freeze'], {}), '(temp_freeze)\n', (36321, 36334), False, 'import os\n'), ((36886, 36907), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (36905, 36907), True, 'import tensorflow as tf\n'), ((38988, 39025), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (39006, 39025), True, 'import tensorflow as tf\n'), ((40114, 40142), 'util.logging.log_debug', 'log_debug', (['"""Session closed."""'], {}), "('Session closed.')\n", (40123, 40142), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((11578, 11607), 'tensorflow.name_scope', 'tf.name_scope', (["('tower_%d' % i)"], {}), "('tower_%d' % i)\n", (11591, 11607), True, 'import tensorflow as tf\n'), ((14694, 14720), 'tensorflow.square', 'tf.square', (['(variable - mean)'], {}), '(variable - mean)\n', (14703, 14720), True, 'import tensorflow as tf\n'), ((22265, 22355), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'update_progressbar.total_jobs', 'redirect_stdout': '(True)'}), '(max_value=update_progressbar.total_jobs,\n redirect_stdout=True)\n', (22288, 22355), False, 'import progressbar\n'), ((23619, 23641), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (23639, 23641), True, 'import tensorflow as tf\n'), ((24180, 24214), 'util.logging.log_debug', 'log_debug', (["('Computing %s...' % job)"], {}), "('Computing %s...' % job)\n", (24189, 24214), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((25651, 25667), 'six.moves.range', 'range', (['job.steps'], {}), '(job.steps)\n', (25656, 25667), False, 'from six.moves import zip, range\n'), ((26747, 26779), 'util.logging.log_debug', 'log_debug', (["('Sending %s...' % job)"], {}), "('Sending %s...' % job)\n", (26756, 26779), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((27010, 27031), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (27029, 27031), False, 'import traceback\n'), ((27543, 27554), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (27551, 27554), False, 'import sys\n'), ((30670, 30710), 'tensorflow.assign', 'tf.assign', (['previous_state_c', 'new_state_c'], {}), '(previous_state_c, new_state_c)\n', (30679, 30710), True, 'import tensorflow as tf\n'), ((30712, 30752), 'tensorflow.assign', 'tf.assign', (['previous_state_h', 'new_state_h'], {}), '(previous_state_h, new_state_h)\n', (30721, 30752), True, 'import tensorflow as tf\n'), ((32681, 32702), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (32700, 32702), True, 'import tensorflow as tf\n'), ((33087, 33108), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (33106, 33108), True, 'import tensorflow as tf\n'), ((39611, 39636), 'tensorflow.Session', 'tf.Session', (['server.target'], {}), '(server.target)\n', (39621, 39636), True, 'import tensorflow as tf\n'), ((25778, 25808), 'util.logging.log_debug', 'log_debug', (['"""Starting batch..."""'], {}), "('Starting batch...')\n", (25787, 25808), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((26251, 26302), 'util.logging.log_debug', 'log_debug', (["('Finished batch step %d.' % current_step)"], {}), "('Finished batch step %d.' % current_step)\n", (26260, 26302), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((36409, 36445), 'os.path.basename', 'os.path.basename', (['output_tflite_path'], {}), '(output_tflite_path)\n', (36425, 36445), False, 'import os\n'), ((38947, 38957), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (38955, 38957), True, 'import tensorflow as tf\n'), ((39727, 39765), 'util.logging.log_debug', 'log_debug', (['"""Waiting for stop token..."""'], {}), "('Waiting for stop token...')\n", (39736, 39765), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((40044, 40097), 'util.logging.log_debug', 'log_debug', (["('Got a stop token from worker %i.' % token)"], {}), "('Got a stop token from worker %i.' % token)\n", (40053, 40097), False, 'from util.logging import log_info, log_error, log_debug, log_warn\n'), ((12003, 12026), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (12024, 12026), True, 'import tensorflow as tf\n'), ((39138, 39148), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (39146, 39148), True, 'import tensorflow as tf\n'), ((40413, 40508), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': 'Config.worker_device', 'cluster': 'Config.cluster'}), '(worker_device=Config.worker_device, cluster=\n Config.cluster)\n', (40443, 40508), True, 'import tensorflow as tf\n')]
|
import misc, model, eval, evaluate
import numpy as np
import torch, time, random
from torch import nn
import TD_RvNN
device = 'cuda'
tree_file = 'resource/data.TD_RvNN.vol_5000.txt'
label_file = 'resource/Twitter15_label_All.txt'
train_file = 'nfold/RNNtrainSet_Twitter152_tree.txt'
test_file = 'nfold/RNNtestSet_Twitter152_tree.txt'
vocab_size = 5000
embed_size = 512
hidden_size = 100
num_class = 4
epoches = 600
lr = 0.005
# lr = 1
# tree_train, word_train, index_train, parent_num_train, y_train, tree_test, word_test, index_test, parent_num_test, y_test = TD_RvNN.loadData(label_file, tree_file, train_file, test_file)
tree_train, word_train, index_train, parent_num_train, y_train, tree_test, word_test, index_test, parent_num_test, y_test = TD_RvNN.loadData()
# print("train no:", len(tree_train), len(word_train), len(index_train),len(parent_num_train), len(y_train))
# print("test no:", len(tree_test), len(word_test), len(index_test), len(parent_num_test), len(y_test))
# print("dim1 for 0:", len(tree_train[0]), len(word_train[0]), len(index_train[0]))
# print("case 0:", tree_train[0][0], word_train[0][0], index_train[0][0], parent_num_train[0])
model = model.RvNN(
vocab_size=vocab_size,
embed_size=embed_size,
hidden_size=hidden_size,
num_class=num_class
).to(device)
loss_func = nn.MSELoss(reduction='sum')
model_optimizer = torch.optim.SGD(
# params=filter(lambda p: p.requires_grad, model.parameters()),
params=model.parameters(),
momentum=0.9,
lr=lr
)
losses_5, losses = [], []
num_examples_seen = 0
for epoch in range(epoches):
t_s = time.time()
train_idx_list = [_i for _i in range(len(y_train))]
# random.shuffle(train_idx_list)
for train_idx in train_idx_list:
# pred = model.forward(tree_train[train_idx+1], word_train[train_idx+1], index_train[train_idx+1])
pred = model.forward(tree_train[train_idx], word_train[train_idx], index_train[train_idx])
target = torch.FloatTensor(y_train[train_idx]).to(device)
loss = loss_func(pred, target)
model_optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
model_optimizer.step()
losses.append(loss.data.cpu().numpy())
num_examples_seen += 1
print('epoch={}: loss={:.6f}, takes {:.2f}s'.format(epoch, np.mean(losses), time.time()-t_s))
if epoch % 5 == 0:
losses_5.append((num_examples_seen, np.mean(losses)))
print('Loss after num_examples_seen={}, epoch={}, loss={:.6f}'.format(num_examples_seen, epoch, np.mean(losses)))
        # run prediction on the held-out test set
        prediction = []
        for test_idx in range(len(y_test)):
            pred = model.forward(tree_test[test_idx], word_test[test_idx], index_test[test_idx])
prediction.append(pred.unsqueeze(dim=0).cpu().data.numpy())
res = evaluate.evaluation_4class(prediction, y_test)
print('results: {}'.format(res))
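        # halve the learning rate whenever the mean loss over the latest 5-epoch window went up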
if len(losses_5) > 1 and losses_5[-1][1] > losses_5[-2][1]:
lr = lr * 0.5
print("Setting learning rate to {}".format(lr))
model_optimizer = torch.optim.SGD(
params=model.parameters(),
momentum=0.9,
lr=lr
)
losses = []
|
[
"evaluate.evaluation_4class"
] |
[((756, 774), 'TD_RvNN.loadData', 'TD_RvNN.loadData', ([], {}), '()\n', (772, 774), False, 'import TD_RvNN\n'), ((1309, 1336), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1319, 1336), False, 'from torch import nn\n'), ((1575, 1586), 'time.time', 'time.time', ([], {}), '()\n', (1584, 1586), False, 'import torch, time, random\n'), ((1176, 1283), 'model.RvNN', 'model.RvNN', ([], {'vocab_size': 'vocab_size', 'embed_size': 'embed_size', 'hidden_size': 'hidden_size', 'num_class': 'num_class'}), '(vocab_size=vocab_size, embed_size=embed_size, hidden_size=\n hidden_size, num_class=num_class)\n', (1186, 1283), False, 'import misc, model, eval, evaluate\n'), ((1446, 1464), 'model.parameters', 'model.parameters', ([], {}), '()\n', (1462, 1464), False, 'import misc, model, eval, evaluate\n'), ((1818, 1906), 'model.forward', 'model.forward', (['tree_train[train_idx]', 'word_train[train_idx]', 'index_train[train_idx]'], {}), '(tree_train[train_idx], word_train[train_idx], index_train[\n train_idx])\n', (1831, 1906), False, 'import misc, model, eval, evaluate\n'), ((2733, 2779), 'evaluate.evaluation_4class', 'evaluate.evaluation_4class', (['prediction', 'y_test'], {}), '(prediction, y_test)\n', (2759, 2779), False, 'import misc, model, eval, evaluate\n'), ((2078, 2096), 'model.parameters', 'model.parameters', ([], {}), '()\n', (2094, 2096), False, 'import misc, model, eval, evaluate\n'), ((2265, 2280), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2272, 2280), True, 'import numpy as np\n'), ((2581, 2666), 'model.forward', 'model.forward', (['tree_train[test_idx]', 'word_train[test_idx]', 'index_train[test_idx]'], {}), '(tree_train[test_idx], word_train[test_idx], index_train[test_idx]\n )\n', (2594, 2666), False, 'import misc, model, eval, evaluate\n'), ((1913, 1950), 'torch.FloatTensor', 'torch.FloatTensor', (['y_train[train_idx]'], {}), '(y_train[train_idx])\n', (1930, 1950), False, 'import torch, time, random\n'), ((2282, 2293), 'time.time', 'time.time', ([], {}), '()\n', (2291, 2293), False, 'import torch, time, random\n'), ((2359, 2374), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2366, 2374), True, 'import numpy as np\n'), ((2475, 2490), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2482, 2490), True, 'import numpy as np\n'), ((2997, 3015), 'model.parameters', 'model.parameters', ([], {}), '()\n', (3013, 3015), False, 'import misc, model, eval, evaluate\n')]
|
import os
import util
import time
import math
import torch
import logging
import evaluate
import coref_model
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
logging.basicConfig(format=format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
if __name__ == "__main__":
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if torch.cuda.is_available():
device = "cuda"
for i in range(torch.cuda.device_count()):
print("GPU {}: {}\n".format(i, torch.cuda.get_device_name(i)))
else:
device = "cpu"
print("GPU NOT AVAILABLE!!!!\n")
config = util.initialize_from_env()
train_dataloader = util.get_dataloader(config)
model = coref_model.BertForCoref(config, device).to(device)
bert_optimizer = torch.optim.AdamW([param for name, param in model.named_parameters() if 'bert' in name], lr=config['bert_learning_rate'], eps=config['adam_eps'])
task_optimizer = torch.optim.Adam([param for name, param in model.named_parameters() if 'bert' not in name], lr=config['task_learning_rate'], eps=config['adam_eps'])
print("Parameters:")
print("bert: {}\ntask: {}".format(len(bert_optimizer.param_groups[0]['params']), len(task_optimizer.param_groups[0]['params'])))
global_steps = 0
total_loss = 0.0
max_f1 = 0.0
model.train()
initial_time = time.time()
for epoch in range(config['num_epochs']):
for batch in train_dataloader:
bert_optimizer.zero_grad()
task_optimizer.zero_grad()
out, loss = model.forward(batch['input_ids'].to(device), batch['input_mask'].to(device), batch['text_len'],
batch['speaker_ids'].to(device), batch['genre'], True, batch['gold_starts'].to(device),
batch['gold_ends'].to(device), batch['cluster_ids'].to(device), batch['sentence_map'].to(device))
total_loss += loss.item()
loss.backward()
bert_optimizer.step()
task_optimizer.step()
global_steps += 1
if global_steps % config['report_frequency'] == 0:
total_time = time.time() - initial_time
steps_per_second = global_steps/total_time
avg_loss = total_loss/config['report_frequency']
logger.info("[{}] loss={:.2f}, steps/s={:.2f}".format(global_steps, avg_loss, steps_per_second))
total_loss = 0.0
if global_steps > 0 and global_steps % config['eval_frequency'] == 0:
eval_f1 = evaluate.evaluate(model, config, device)
path = config["log_dir"]+"/model.{}.pt".format(global_steps)
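                    # snapshot the model and both optimizers so training can resume from this step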
torch.save({
'eval_f1': eval_f1,
'max_f1' : max_f1,
'global_steps': global_steps,
'model': model.state_dict(),
'bert_optimizer': bert_optimizer.state_dict(),
'task_optimizer': task_optimizer.state_dict()
}, path)
if eval_f1 > max_f1:
max_f1 = eval_f1
torch.save({
'eval_f1': eval_f1,
'max_f1' : max_f1,
'global_steps': global_steps,
'model': model.state_dict(),
'bert_optimizer': bert_optimizer.state_dict(),
'task_optimizer': task_optimizer.state_dict()
}, (config["log_dir"]+"/model.max.pt"))
|
[
"evaluate.evaluate"
] |
[((174, 208), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'format'}), '(format=format)\n', (193, 208), False, 'import logging\n'), ((218, 245), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (235, 245), False, 'import logging\n'), ((357, 382), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (380, 382), False, 'import torch\n'), ((622, 648), 'util.initialize_from_env', 'util.initialize_from_env', ([], {}), '()\n', (646, 648), False, 'import util\n'), ((672, 699), 'util.get_dataloader', 'util.get_dataloader', (['config'], {}), '(config)\n', (691, 699), False, 'import util\n'), ((1361, 1372), 'time.time', 'time.time', ([], {}), '()\n', (1370, 1372), False, 'import time\n'), ((431, 456), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (454, 456), False, 'import torch\n'), ((712, 752), 'coref_model.BertForCoref', 'coref_model.BertForCoref', (['config', 'device'], {}), '(config, device)\n', (736, 752), False, 'import coref_model\n'), ((2607, 2647), 'evaluate.evaluate', 'evaluate.evaluate', (['model', 'config', 'device'], {}), '(model, config, device)\n', (2624, 2647), False, 'import evaluate\n'), ((502, 531), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['i'], {}), '(i)\n', (528, 531), False, 'import torch\n'), ((2201, 2212), 'time.time', 'time.time', ([], {}), '()\n', (2210, 2212), False, 'import time\n')]
|
import time
import numpy as np
import torch
import os
from evaluate import angle_acc
import argparse
from model import DenseNet_GCN
from loss import JSD_Loss
from dataset import Sphere_Dataset
from train import train
from torch.utils.data import DataLoader
def test(epoch, device, data_loader, model, criterion, vis, save_path, save_file_name):
"""
:param epoch : int
:param data_loader: data.DataLoader
:param model: nn.Module
:param loss: nn.Module
:param optimizer: optim
:param visdom: visdom
:return:
"""
# ------------------- load -------------------
tic = time.time()
print('Test : {}'.format(epoch))
model.load_state_dict(torch.load(os.path.join(save_path, save_file_name + '.{}.pth'.format(epoch))))
model.eval()
with torch.no_grad():
test_loss = 0
test_angle_max = 0
test_angle_exp = 0
for idx, (images, _, _, xyz, pdf, adj, rotated_points) in enumerate(data_loader):
# ------------------- cuda -------------------
images = images.to(device)
xyz = xyz.to(device)
adj = adj.to(device)
# ------------------- loss -------------------
            output = model(images, adj)  # B, num_points, 1
            output = torch.softmax(output.squeeze(-1), dim=1)  # B, num_points
loss = criterion(output, pdf)
# ------------------- eval -------------------
# gt
gt_xyz = xyz.cpu().detach().numpy()
gt_xyz = np.squeeze(gt_xyz)
# ---------------------------------- pred
# pred_exp
output_numpy = output.cpu().detach().numpy()
points = data_loader.dataset.points
points_x = points[:, 0]
points_y = points[:, 1]
points_z = points[:, 2]
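            # expected direction: probability-weighted mean of the grid points,
            # renormalized back onto the unit sphere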
exp_x = np.dot(output_numpy, points_x)
exp_y = np.dot(output_numpy, points_y)
exp_z = np.dot(output_numpy, points_z)
norm = np.sqrt(exp_x ** 2 + exp_y ** 2 + exp_z ** 2)
exp_x /= norm
exp_y /= norm
exp_z /= norm
pred_xyz_exp = np.stack([exp_x, exp_y, exp_z], axis=-1)
# pred_max
output_numpy = output.cpu().detach().numpy()
output_index = np.argmax(output_numpy, axis=1)
pred_xyz_max = data_loader.dataset.points[output_index]
# pred_xyz = spherical_to_cartesian(output_numpy[:, 0], output_numpy[:, 1])
angle_exp = angle_acc(gt_xyz, pred_xyz_exp)
angle_max = angle_acc(gt_xyz, pred_xyz_max)
# ------------------- print -------------------
test_loss += loss.item()
test_angle_exp += angle_exp
test_angle_max += angle_max
# print
if idx % 10 == 0:
print('Step: [{0}/{1}]\t'
'Loss: {test_loss:.4f}\t'
'Angle error_max: {test_angle_error_max:.4f}\t'
'Angle error_exp: {test_angle_error_exp:.4f}\t'
.format(idx, len(data_loader),
test_loss=loss.item(),
test_angle_error_max=angle_max,
test_angle_error_exp=angle_exp))
test_loss /= len(data_loader)
test_angle_max /= len(data_loader)
test_angle_exp /= len(data_loader)
# plot
if vis is not None:
vis.line(X=torch.ones((1, 3)).cpu() * epoch, # step
Y=torch.Tensor([test_loss, test_angle_max, test_angle_exp]).unsqueeze(0).cpu(),
win='test',
update='append',
opts=dict(xlabel='Epochs',
ylabel='Angle / Loss ',
title='Test results',
legend=['Loss', 'Angle_max', 'Angle_exp']))
print("Angle Error : {:.4f}".format(test_angle_exp))
print('test_time : {:.4f}s'.format(time.time() - tic))
if __name__ == '__main__':
# 1. parser
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=50, help='how many the model iterate?')
parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--data_path', type=str, default=r'D:\Data\SUN360')
parser.add_argument('--save_path', type=str, default="./saves")
parser.add_argument('--save_file_name', type=str, default="densenet_101_kappa_25")
test_opts = parser.parse_args()
print(test_opts)
# 2. device config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# 3. vis
vis = None
# 4. data loader
test_set = Sphere_Dataset(root=test_opts.data_path, split='TEST')
test_loader = DataLoader(dataset=test_set,
batch_size=test_opts.batch_size,
shuffle=True)
# 5. model
model = DenseNet_GCN().to(device)
# 6. criterion
criterion = JSD_Loss()
# 7. test
test(epoch=test_opts.epoch,
device=device,
data_loader=test_loader,
model=model,
criterion=criterion,
vis=vis,
save_path=test_opts.save_path,
save_file_name=test_opts.save_file_name)
|
[
"evaluate.angle_acc"
] |
[((610, 621), 'time.time', 'time.time', ([], {}), '()\n', (619, 621), False, 'import time\n'), ((4085, 4110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4108, 4110), False, 'import argparse\n'), ((4717, 4771), 'dataset.Sphere_Dataset', 'Sphere_Dataset', ([], {'root': 'test_opts.data_path', 'split': '"""TEST"""'}), "(root=test_opts.data_path, split='TEST')\n", (4731, 4771), False, 'from dataset import Sphere_Dataset\n'), ((4790, 4865), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': 'test_opts.batch_size', 'shuffle': '(True)'}), '(dataset=test_set, batch_size=test_opts.batch_size, shuffle=True)\n', (4800, 4865), False, 'from torch.utils.data import DataLoader\n'), ((5014, 5024), 'loss.JSD_Loss', 'JSD_Loss', ([], {}), '()\n', (5022, 5024), False, 'from loss import JSD_Loss\n'), ((791, 806), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (804, 806), False, 'import torch\n'), ((1499, 1517), 'numpy.squeeze', 'np.squeeze', (['gt_xyz'], {}), '(gt_xyz)\n', (1509, 1517), True, 'import numpy as np\n'), ((1829, 1859), 'numpy.dot', 'np.dot', (['output_numpy', 'points_x'], {}), '(output_numpy, points_x)\n', (1835, 1859), True, 'import numpy as np\n'), ((1880, 1910), 'numpy.dot', 'np.dot', (['output_numpy', 'points_y'], {}), '(output_numpy, points_y)\n', (1886, 1910), True, 'import numpy as np\n'), ((1931, 1961), 'numpy.dot', 'np.dot', (['output_numpy', 'points_z'], {}), '(output_numpy, points_z)\n', (1937, 1961), True, 'import numpy as np\n'), ((1981, 2026), 'numpy.sqrt', 'np.sqrt', (['(exp_x ** 2 + exp_y ** 2 + exp_z ** 2)'], {}), '(exp_x ** 2 + exp_y ** 2 + exp_z ** 2)\n', (1988, 2026), True, 'import numpy as np\n'), ((2132, 2172), 'numpy.stack', 'np.stack', (['[exp_x, exp_y, exp_z]'], {'axis': '(-1)'}), '([exp_x, exp_y, exp_z], axis=-1)\n', (2140, 2172), True, 'import numpy as np\n'), ((2281, 2312), 'numpy.argmax', 'np.argmax', (['output_numpy'], {'axis': '(1)'}), '(output_numpy, axis=1)\n', (2290, 2312), True, 'import numpy as np\n'), ((2494, 2525), 'evaluate.angle_acc', 'angle_acc', (['gt_xyz', 'pred_xyz_exp'], {}), '(gt_xyz, pred_xyz_exp)\n', (2503, 2525), False, 'from evaluate import angle_acc\n'), ((2550, 2581), 'evaluate.angle_acc', 'angle_acc', (['gt_xyz', 'pred_xyz_max'], {}), '(gt_xyz, pred_xyz_max)\n', (2559, 2581), False, 'from evaluate import angle_acc\n'), ((4613, 4638), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4636, 4638), False, 'import torch\n'), ((4952, 4966), 'model.DenseNet_GCN', 'DenseNet_GCN', ([], {}), '()\n', (4964, 4966), False, 'from model import DenseNet_GCN\n'), ((4006, 4017), 'time.time', 'time.time', ([], {}), '()\n', (4015, 4017), False, 'import time\n'), ((3464, 3482), 'torch.ones', 'torch.ones', (['(1, 3)'], {}), '((1, 3))\n', (3474, 3482), False, 'import torch\n'), ((3529, 3586), 'torch.Tensor', 'torch.Tensor', (['[test_loss, test_angle_max, test_angle_exp]'], {}), '([test_loss, test_angle_max, test_angle_exp])\n', (3541, 3586), False, 'import torch\n')]
|
import os
import time
import numpy as np
import torch
from torch.autograd import Variable
import torchvision.utils as vutils
from options import TestOptions
from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator
from model import StochCycleGAN, AugmentedCycleGAN
import cPickle as pkl
import math
from evaluate import eval_mse_A, eval_ubo_B
from model import log_prob_gaussian, gauss_reparametrize, log_prob_laplace, kld_std_guss
import random
def visualize_cycle(opt, real_A, visuals, name='cycle_test.png'):
size = real_A.size()
images = [img.cpu().unsqueeze(1) for img in visuals.values()]
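    # interleave the cycle stages so each row of the saved grid shows one sample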
vis_image = torch.cat(images, dim=1).view(size[0]*len(images),size[1],size[2],size[3])
save_path = os.path.join(opt.res_dir, name)
vutils.save_image(vis_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=len(images))
def visualize_multi_cycle(opt, real_B, model, name='multi_cycle_test.png'):
size = real_B.size()
images = model.generate_multi_cycle(real_B, steps=4)
images = [img.cpu().unsqueeze(1) for img in images]
vis_image = torch.cat(images, dim=1).view(size[0]*len(images),size[1],size[2],size[3])
save_path = os.path.join(opt.res_dir, name)
vutils.save_image(vis_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=len(images))
def visualize_cycle_B_multi(opt, real_B, model, name='cycle_B_multi_test.png'):
size = real_B.size()
with torch.no_grad():
multi_prior_z_B = Variable(real_B.data.new(opt.num_multi,
opt.nlatent, 1, 1).normal_(0, 1).repeat(size[0],1,1,1))
fake_A, multi_fake_B = model.generate_cycle_B_multi(real_B, multi_prior_z_B)
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_B.data.cpu().unsqueeze(1), fake_A.data.cpu().unsqueeze(1),
multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+2),size[1],size[2],size[3])
save_path = os.path.join(opt.res_dir, name)
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+2)
def visualize_multi(opt, real_A, model, name='multi_test.png'):
size = real_A.size()
# all samples in real_A share the same prior_z_B
with torch.no_grad():
multi_prior_z_B = Variable(real_A.data.new(opt.num_multi,
opt.nlatent, 1, 1).normal_(0, 1).repeat(size[0],1,1,1))
multi_fake_B = model.generate_multi(real_A.detach(), multi_prior_z_B)
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_A.data.cpu().unsqueeze(1), multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+1),size[1],size[2],size[3])
save_path = os.path.join(opt.res_dir, name)
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+1)
def visualize_inference(opt, real_A, real_B, model, name='inf_test.png'):
size = real_A.size()
real_B = real_B[:opt.num_multi]
# all samples in real_A share the same post_z_B
multi_fake_B = model.inference_multi(real_A.detach(), real_B.detach())
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_A.data.cpu().unsqueeze(1), multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+1),size[1],size[2],size[3])
vis_multi_image = torch.cat([torch.ones(1, size[1], size[2], size[3]).cpu(), real_B.data.cpu(),
vis_multi_image.cpu()], dim=0)
save_path = os.path.join(opt.res_dir, name)
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+1)
def sensitivity_to_edge_noise(opt, model, data_B, use_gpu=True):
"""This is inspired from: https://arxiv.org/pdf/1712.02950.pdf"""
res = []
for std in [0, 0.1, 0.2, 0.5, 1, 2, 3, 5]:
with torch.no_grad():
real_B = Variable(data_B)
if use_gpu:
real_B = real_B.cuda()
rec_B = model.generate_noisy_cycle(real_B, std)
s = torch.abs(real_B - rec_B).sum(3).sum(2).sum(1) / (64*64*3)
res.append(s.data.cpu().numpy().tolist())
np.save('noise_sens', res)
def train_MVGauss_B(dataset):
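    # Fit a diagonal (per-pixel) Gaussian over domain B: stream batch means in a
    # first pass, then accumulate the variance around that mean in a second pass.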
b_mean = 0
b_var = 0
n = 0
for i,batch in enumerate(dataset):
real_B = Variable(batch['B'])
real_B = real_B.cuda()
b_mean += real_B.mean(0, keepdim=True)
n += 1
print(i)
b_mean = b_mean / n
for i,batch in enumerate(dataset):
real_B = Variable(batch['B'])
real_B = real_B.cuda()
b_var += ((real_B-b_mean)**2).mean(0, keepdim=True)
print(i)
b_var = b_var / n
return b_mean, b_var
def eval_bpp_MVGauss_B(dataset, mu, logvar):
bpp = []
for batch in dataset:
real_B = Variable(batch['B'])
real_B = real_B.cuda()
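        # Uniform dequantization: pixels live on a grid of width 1/127.5 in [-1, 1],
        # so adding U(0, 1/127.5) noise turns the discrete likelihood into a density;
        # the (64*64*3)*log(127.5) term below converts that density back to the
        # original 0-255 pixel scale before reporting bits per pixel.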
dequant = Variable(torch.zeros(*real_B.size()).uniform_(0, 1./127.5).cuda())
real_B = real_B + dequant
nll = -log_prob_gaussian(real_B, mu, logvar)
nll = nll.view(real_B.size(0), -1).sum(1) + (64*64*3) * math.log(127.5)
bpp.append(nll.mean(0).data[0] / (64*64*3*math.log(2)))
return np.mean(bpp)
def compute_bpp_MVGauss_B(dataroot):
########## WARNING: THIS WILL ASSUME IMAGES OF SIZE 64 BY DEFAULT ############
trainA, trainB, devA, devB, testA, testB = load_edges2shoes(dataroot)
train_dataset = UnalignedIterator(trainA, trainB, batch_size=200)
print('#training images = %d' % len(train_dataset))
test_dataset = AlignedIterator(testA, testB, batch_size=200)
print('#test images = %d' % len(test_dataset))
mvg_mean, mvg_var = train_MVGauss_B(train_dataset)
mvg_logvar = torch.log(mvg_var + 1e-5)
bpp = eval_bpp_MVGauss_B(test_dataset, mvg_mean, mvg_logvar)
print("MVGauss BPP: %.4f" % bpp)
def train_logvar(dataset, model, epochs=1, use_gpu=True):
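    # Optimize only the Laplace observation log-variance (the generator stays
    # fixed) on training data, so the upper bound evaluated later is tighter.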
logvar_B = Variable(torch.zeros(1, 3, 64, 64).fill_(math.log(0.01)).cuda(), requires_grad=True)
iterative_opt = torch.optim.RMSprop([logvar_B], lr=1e-2)
for eidx in range(epochs):
for batch in dataset:
real_B = Variable(batch['B'])
if use_gpu:
real_B = real_B.cuda()
size = real_B.size()
dequant = Variable(torch.zeros(*real_B.size()).uniform_(0, 1./127.5).cuda())
real_B = real_B + dequant
enc_mu = Variable(torch.zeros(size[0], model.opt.nlatent).cuda())
enc_logvar = Variable(torch.zeros(size[0], model.opt.nlatent).fill_(math.log(0.01)).cuda())
fake_A = model.predict_A(real_B)
if hasattr(model, 'netE_B'):
params = model.predict_enc_params(fake_A, real_B)
enc_mu = Variable(params[0].data)
if len(params) == 2:
enc_logvar = Variable(params[1].data)
z_B = gauss_reparametrize(enc_mu, enc_logvar)
fake_B = model.predict_B(fake_A, z_B)
z_B = z_B.view(size[0], model.opt.nlatent)
log_prob = log_prob_laplace(real_B, fake_B, logvar_B)
log_prob = log_prob.view(size[0], -1).sum(1)
kld = kld_std_guss(enc_mu, enc_logvar)
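            # UBO = -log p(x|z) + KL(q(z|x) || N(0, I)): an upper bound on the NLL,
            # shifted by (64*64*3)*log(127.5) to express it on the 0-255 pixel scale.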
ubo = (-log_prob + kld) + (64*64*3) * math.log(127.5)
ubo_val_new = ubo.mean(0).data[0]
kld_val = kld.mean(0).data[0]
bpp = ubo.mean(0).data[0] / (64*64*3* math.log(2.))
print('UBO: %.4f, KLD: %.4f, BPP: %.4f' % (ubo_val_new, kld_val, bpp))
loss = ubo.mean(0)
iterative_opt.zero_grad()
loss.backward()
iterative_opt.step()
return logvar_B
def compute_train_kld(train_dataset, model):
### DEBUGGING KLD
train_kl = []
for i, batch in enumerate(train_dataset):
real_A, real_B = Variable(batch['A']), Variable(batch['B'])
real_A = real_A.cuda()
real_B = real_B.cuda()
fake_A = model.predict_A(real_B)
params = model.predict_enc_params(fake_A, real_B)
mu = params[0]
train_kl.append(kld_std_guss(mu, 0.0*mu).mean(0).data[0])
if i == 100:
break
print('train KL:',np.mean(train_kl))
def test_model():
opt = TestOptions().parse()
dataroot = opt.dataroot
# extract expr_dir from chk_path
expr_dir = os.path.dirname(opt.chk_path)
opt_path = os.path.join(expr_dir, 'opt.pkl')
# parse saved options...
opt.__dict__.update(parse_opt_file(opt_path))
opt.expr_dir = expr_dir
opt.dataroot = dataroot
# hack this for now
opt.gpu_ids = [0]
opt.seed = 12345
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
# create results directory (under expr_dir)
res_path = os.path.join(opt.expr_dir, opt.res_dir)
opt.res_dir = res_path
if not os.path.exists(res_path):
os.makedirs(res_path)
use_gpu = len(opt.gpu_ids) > 0
trainA, trainB, devA, devB, testA, testB = load_edges2shoes(opt.dataroot, opt.imgSize)
sub_size = int(len(trainA) * 0.2)
trainA = trainA[:sub_size]
trainB = trainB[:sub_size]
train_dataset = UnalignedIterator(trainA, trainB, batch_size=200)
print('#training images = %d' % len(train_dataset))
test_dataset = AlignedIterator(testA, testB, batch_size=200)
print('#test images = %d' % len(test_dataset))
dev_dataset = AlignedIterator(devA, devB, batch_size=200)
print('#dev images = %d' % len(dev_dataset))
vis_inf = False
if opt.model == 'stoch_cycle_gan':
model = StochCycleGAN(opt, testing=True)
elif opt.model == 'cycle_gan':
model = StochCycleGAN(opt, ignore_noise=True, testing=True)
elif opt.model == 'aug_cycle_gan':
model = AugmentedCycleGAN(opt, testing=True)
vis_inf = True
else:
raise NotImplementedError('Specified model is not implemented.')
model.load(opt.chk_path)
# model.eval()
# debug kl
# compute_train_kld(train_dataset, model)
if opt.metric == 'bpp':
if opt.train_logvar:
print("training logvar_B on training data...")
logvar_B = train_logvar(train_dataset, model)
else:
logvar_B = None
print("evaluating on test set...")
t = time.time()
test_ubo_B, test_bpp_B, test_kld_B = eval_ubo_B(test_dataset, model, 500,
visualize=True, vis_name='test_pred_B',
vis_path=opt.res_dir,
logvar_B=logvar_B,
verbose=True,
compute_l1=True)
print("TEST_BPP_B: %.4f, TIME: %.4f" % (test_bpp_B, time.time()-t))
elif opt.metric == 'mse':
dev_mse_A = eval_mse_A(dev_dataset, model)
test_mse_A = eval_mse_A(test_dataset, model)
print("DEV_MSE_A: %.4f, TEST_MSE_A: %.4f" % (dev_mse_A, test_mse_A))
elif opt.metric == 'visual':
opt.num_multi = 5
n_vis = 10
dev_dataset = AlignedIterator(devA, devB, batch_size=n_vis)
for i, vis_data in enumerate(dev_dataset):
with torch.no_grad():
real_A, real_B = Variable(vis_data['A']), Variable(vis_data['B'])
prior_z_B = Variable(real_A.data.new(n_vis, opt.nlatent, 1, 1).normal_(0, 1))
if use_gpu:
real_A = real_A.cuda()
real_B = real_B.cuda()
prior_z_B = prior_z_B.cuda()
visuals = model.generate_cycle(real_A, real_B, prior_z_B)
visualize_cycle(opt, real_A, visuals, name='cycle_%d.png' % i)
            exit()  # NOTE: stops after visualizing the first batch; the multi-sample visualizations below never run
# visualize generated B with different z_B
visualize_multi(opt, real_A, model, name='multi_%d.png' % i)
visualize_cycle_B_multi(opt, real_B, model, name='cycle_B_multi_%d.png' % i)
visualize_multi_cycle(opt, real_B, model, name='multi_cycle_%d.png' % i)
if vis_inf:
                # visualize generated B with different z_B inferred from real_B
visualize_inference(opt, real_A, real_B, model, name='inf_%d.png' % i)
elif opt.metric == 'noise_sens':
sensitivity_to_edge_noise(opt, model, test_dataset.next()['B'])
else:
raise NotImplementedError('wrong metric!')
def parse_opt_file(opt_path):
def parse_val(s):
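        # opt.txt stores every value as a string: recover None/bool/inf first,
        # then int vs float (a '.' in the literal forces float), else keep the string.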
if s == 'None':
return None
if s == 'True':
return True
if s == 'False':
return False
if s == 'inf':
return float('inf')
try:
f = float(s)
# special case
if '.' in s:
return f
i = int(f)
return i if i == f else f
except ValueError:
return s
opt = None
with open(opt_path) as f:
if opt_path.endswith('pkl'):
opt = pkl.load(f)
else:
opt = dict()
for line in f:
if line.startswith('-----'):
continue
k,v = line.split(':')
opt[k.strip()] = parse_val(v.strip())
return opt
if __name__ == "__main__":
# CUDA_VISIBLE_DEVICES=0 python -m ipdb test.py --chk_path checkpoints/FOLDER/latest --res_dir val_res --opt_path checkpoints/FOLDER/opt.txt
test_model()
# compute_bpp_MVGauss_B('/home/a-amalma/data/edges2shoes/')
|
[
"evaluate.eval_mse_A",
"evaluate.eval_ubo_B"
] |
[((748, 779), 'os.path.join', 'os.path.join', (['opt.res_dir', 'name'], {}), '(opt.res_dir, name)\n', (760, 779), False, 'import os\n'), ((1208, 1239), 'os.path.join', 'os.path.join', (['opt.res_dir', 'name'], {}), '(opt.res_dir, name)\n', (1220, 1239), False, 'import os\n'), ((2036, 2067), 'os.path.join', 'os.path.join', (['opt.res_dir', 'name'], {}), '(opt.res_dir, name)\n', (2048, 2067), False, 'import os\n'), ((2853, 2884), 'os.path.join', 'os.path.join', (['opt.res_dir', 'name'], {}), '(opt.res_dir, name)\n', (2865, 2884), False, 'import os\n'), ((3710, 3741), 'os.path.join', 'os.path.join', (['opt.res_dir', 'name'], {}), '(opt.res_dir, name)\n', (3722, 3741), False, 'import os\n'), ((4358, 4384), 'numpy.save', 'np.save', (['"""noise_sens"""', 'res'], {}), "('noise_sens', res)\n", (4365, 4384), True, 'import numpy as np\n'), ((5379, 5391), 'numpy.mean', 'np.mean', (['bpp'], {}), '(bpp)\n', (5386, 5391), True, 'import numpy as np\n'), ((5560, 5586), 'edges2shoes_data.load_edges2shoes', 'load_edges2shoes', (['dataroot'], {}), '(dataroot)\n', (5576, 5586), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((5607, 5656), 'edges2shoes_data.UnalignedIterator', 'UnalignedIterator', (['trainA', 'trainB'], {'batch_size': '(200)'}), '(trainA, trainB, batch_size=200)\n', (5624, 5656), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((5733, 5778), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['testA', 'testB'], {'batch_size': '(200)'}), '(testA, testB, batch_size=200)\n', (5748, 5778), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((5903, 5929), 'torch.log', 'torch.log', (['(mvg_var + 1e-05)'], {}), '(mvg_var + 1e-05)\n', (5912, 5929), False, 'import torch\n'), ((6211, 6251), 'torch.optim.RMSprop', 'torch.optim.RMSprop', (['[logvar_B]'], {'lr': '(0.01)'}), '([logvar_B], lr=0.01)\n', (6230, 6251), False, 'import torch\n'), ((8512, 8541), 'os.path.dirname', 'os.path.dirname', (['opt.chk_path'], {}), '(opt.chk_path)\n', (8527, 8541), False, 'import os\n'), ((8557, 8590), 'os.path.join', 'os.path.join', (['expr_dir', '"""opt.pkl"""'], {}), "(expr_dir, 'opt.pkl')\n", (8569, 8590), False, 'import os\n'), ((8800, 8821), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (8811, 8821), False, 'import random\n'), ((8826, 8850), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (8840, 8850), True, 'import numpy as np\n'), ((8855, 8882), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (8872, 8882), False, 'import torch\n'), ((8887, 8923), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.seed'], {}), '(opt.seed)\n', (8913, 8923), False, 'import torch\n'), ((8988, 9027), 'os.path.join', 'os.path.join', (['opt.expr_dir', 'opt.res_dir'], {}), '(opt.expr_dir, opt.res_dir)\n', (9000, 9027), False, 'import os\n'), ((9206, 9249), 'edges2shoes_data.load_edges2shoes', 'load_edges2shoes', (['opt.dataroot', 'opt.imgSize'], {}), '(opt.dataroot, opt.imgSize)\n', (9222, 9249), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((9370, 9419), 'edges2shoes_data.UnalignedIterator', 'UnalignedIterator', (['trainA', 'trainB'], {'batch_size': '(200)'}), '(trainA, trainB, batch_size=200)\n', (9387, 9419), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((9516, 9561), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['testA', 'testB'], {'batch_size': '(200)'}), '(testA, testB, batch_size=200)\n', (9531, 9561), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((9632, 9675), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['devA', 'devB'], {'batch_size': '(200)'}), '(devA, devB, batch_size=200)\n', (9647, 9675), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((1461, 1476), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1474, 1476), False, 'import torch\n'), ((2350, 2365), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2363, 2365), False, 'import torch\n'), ((4511, 4531), 'torch.autograd.Variable', 'Variable', (["batch['B']"], {}), "(batch['B'])\n", (4519, 4531), False, 'from torch.autograd import Variable\n'), ((4722, 4742), 'torch.autograd.Variable', 'Variable', (["batch['B']"], {}), "(batch['B'])\n", (4730, 4742), False, 'from torch.autograd import Variable\n'), ((5000, 5020), 'torch.autograd.Variable', 'Variable', (["batch['B']"], {}), "(batch['B'])\n", (5008, 5020), False, 'from torch.autograd import Variable\n'), ((8360, 8377), 'numpy.mean', 'np.mean', (['train_kl'], {}), '(train_kl)\n', (8367, 8377), True, 'import numpy as np\n'), ((9066, 9090), 'os.path.exists', 'os.path.exists', (['res_path'], {}), '(res_path)\n', (9080, 9090), False, 'import os\n'), ((9100, 9121), 'os.makedirs', 'os.makedirs', (['res_path'], {}), '(res_path)\n', (9111, 9121), False, 'import os\n'), ((9801, 9833), 'model.StochCycleGAN', 'StochCycleGAN', (['opt'], {'testing': '(True)'}), '(opt, testing=True)\n', (9814, 9833), False, 'from model import StochCycleGAN, AugmentedCycleGAN\n'), ((10520, 10531), 'time.time', 'time.time', ([], {}), '()\n', (10529, 10531), False, 'import time\n'), ((10577, 10729), 'evaluate.eval_ubo_B', 'eval_ubo_B', (['test_dataset', 'model', '(500)'], {'visualize': '(True)', 'vis_name': '"""test_pred_B"""', 'vis_path': 'opt.res_dir', 'logvar_B': 'logvar_B', 'verbose': '(True)', 'compute_l1': '(True)'}), "(test_dataset, model, 500, visualize=True, vis_name='test_pred_B',\n vis_path=opt.res_dir, logvar_B=logvar_B, verbose=True, compute_l1=True)\n", (10587, 10729), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((657, 681), 'torch.cat', 'torch.cat', (['images'], {'dim': '(1)'}), '(images, dim=1)\n', (666, 681), False, 'import torch\n'), ((1117, 1141), 'torch.cat', 'torch.cat', (['images'], {'dim': '(1)'}), '(images, dim=1)\n', (1126, 1141), False, 'import torch\n'), ((4067, 4082), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4080, 4082), False, 'import torch\n'), ((4105, 4121), 'torch.autograd.Variable', 'Variable', (['data_B'], {}), '(data_B)\n', (4113, 4121), False, 'from torch.autograd import Variable\n'), ((5186, 5223), 'model.log_prob_gaussian', 'log_prob_gaussian', (['real_B', 'mu', 'logvar'], {}), '(real_B, mu, logvar)\n', (5203, 5223), False, 'from model import log_prob_gaussian, gauss_reparametrize, log_prob_laplace, kld_std_guss\n'), ((6335, 6355), 'torch.autograd.Variable', 'Variable', (["batch['B']"], {}), "(batch['B'])\n", (6343, 6355), False, 'from torch.autograd import Variable\n'), ((7076, 7115), 'model.gauss_reparametrize', 'gauss_reparametrize', (['enc_mu', 'enc_logvar'], {}), '(enc_mu, enc_logvar)\n', (7095, 7115), False, 'from model import log_prob_gaussian, gauss_reparametrize, log_prob_laplace, kld_std_guss\n'), ((7244, 7286), 
'model.log_prob_laplace', 'log_prob_laplace', (['real_B', 'fake_B', 'logvar_B'], {}), '(real_B, fake_B, logvar_B)\n', (7260, 7286), False, 'from model import log_prob_gaussian, gauss_reparametrize, log_prob_laplace, kld_std_guss\n'), ((7362, 7394), 'model.kld_std_guss', 'kld_std_guss', (['enc_mu', 'enc_logvar'], {}), '(enc_mu, enc_logvar)\n', (7374, 7394), False, 'from model import log_prob_gaussian, gauss_reparametrize, log_prob_laplace, kld_std_guss\n'), ((8006, 8026), 'torch.autograd.Variable', 'Variable', (["batch['A']"], {}), "(batch['A'])\n", (8014, 8026), False, 'from torch.autograd import Variable\n'), ((8028, 8048), 'torch.autograd.Variable', 'Variable', (["batch['B']"], {}), "(batch['B'])\n", (8036, 8048), False, 'from torch.autograd import Variable\n'), ((8409, 8422), 'options.TestOptions', 'TestOptions', ([], {}), '()\n', (8420, 8422), False, 'from options import TestOptions\n'), ((9885, 9936), 'model.StochCycleGAN', 'StochCycleGAN', (['opt'], {'ignore_noise': '(True)', 'testing': '(True)'}), '(opt, ignore_noise=True, testing=True)\n', (9898, 9936), False, 'from model import StochCycleGAN, AugmentedCycleGAN\n'), ((11134, 11164), 'evaluate.eval_mse_A', 'eval_mse_A', (['dev_dataset', 'model'], {}), '(dev_dataset, model)\n', (11144, 11164), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((11186, 11217), 'evaluate.eval_mse_A', 'eval_mse_A', (['test_dataset', 'model'], {}), '(test_dataset, model)\n', (11196, 11217), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((13262, 13273), 'cPickle.load', 'pkl.load', (['f'], {}), '(f)\n', (13270, 13273), True, 'import cPickle as pkl\n'), ((5288, 5303), 'math.log', 'math.log', (['(127.5)'], {}), '(127.5)\n', (5296, 5303), False, 'import math\n'), ((6938, 6962), 'torch.autograd.Variable', 'Variable', (['params[0].data'], {}), '(params[0].data)\n', (6946, 6962), False, 'from torch.autograd import Variable\n'), ((9992, 10028), 'model.AugmentedCycleGAN', 'AugmentedCycleGAN', (['opt'], {'testing': '(True)'}), '(opt, testing=True)\n', (10009, 10028), False, 'from model import StochCycleGAN, AugmentedCycleGAN\n'), ((11396, 11441), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['devA', 'devB'], {'batch_size': 'n_vis'}), '(devA, devB, batch_size=n_vis)\n', (11411, 11441), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((3562, 3602), 'torch.ones', 'torch.ones', (['(1)', 'size[1]', 'size[2]', 'size[3]'], {}), '(1, size[1], size[2], size[3])\n', (3572, 3602), False, 'import torch\n'), ((5354, 5365), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (5362, 5365), False, 'import math\n'), ((6147, 6161), 'math.log', 'math.log', (['(0.01)'], {}), '(0.01)\n', (6155, 6161), False, 'import math\n'), ((7033, 7057), 'torch.autograd.Variable', 'Variable', (['params[1].data'], {}), '(params[1].data)\n', (7041, 7057), False, 'from torch.autograd import Variable\n'), ((7445, 7460), 'math.log', 'math.log', (['(127.5)'], {}), '(127.5)\n', (7453, 7460), False, 'import math\n'), ((7599, 7612), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (7607, 7612), False, 'import math\n'), ((6115, 6140), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(64)', '(64)'], {}), '(1, 3, 64, 64)\n', (6126, 6140), False, 'import torch\n'), ((6609, 6648), 'torch.zeros', 'torch.zeros', (['size[0]', 'model.opt.nlatent'], {}), '(size[0], model.opt.nlatent)\n', (6620, 6648), False, 'import torch\n'), ((11067, 11078), 'time.time', 'time.time', ([], {}), '()\n', (11076, 11078), False, 'import time\n'), 
((11510, 11525), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11523, 11525), False, 'import torch\n'), ((6737, 6751), 'math.log', 'math.log', (['(0.01)'], {}), '(0.01)\n', (6745, 6751), False, 'import math\n'), ((8257, 8283), 'model.kld_std_guss', 'kld_std_guss', (['mu', '(0.0 * mu)'], {}), '(mu, 0.0 * mu)\n', (8269, 8283), False, 'from model import log_prob_gaussian, gauss_reparametrize, log_prob_laplace, kld_std_guss\n'), ((11560, 11583), 'torch.autograd.Variable', 'Variable', (["vis_data['A']"], {}), "(vis_data['A'])\n", (11568, 11583), False, 'from torch.autograd import Variable\n'), ((11585, 11608), 'torch.autograd.Variable', 'Variable', (["vis_data['B']"], {}), "(vis_data['B'])\n", (11593, 11608), False, 'from torch.autograd import Variable\n'), ((6691, 6730), 'torch.zeros', 'torch.zeros', (['size[0]', 'model.opt.nlatent'], {}), '(size[0], model.opt.nlatent)\n', (6702, 6730), False, 'import torch\n'), ((4245, 4270), 'torch.abs', 'torch.abs', (['(real_B - rec_B)'], {}), '(real_B - rec_B)\n', (4254, 4270), False, 'import torch\n')]
|
from __future__ import print_function
# from network import VGGNet
from dirtorch.utils import common
import dirtorch.nets as nets
import pandas as pd
import faiss
import torch
import torchvision.transforms as transforms
import torch.nn as nn
from six.moves import cPickle
import numpy as np
import imageio
import os
import time
from PIL import Image
from evaluate import evaluate_class
from DB import Database
def load_model(path, iscuda):
checkpoint = common.load_checkpoint(path, iscuda)
net = nets.create_model(pretrained="", **checkpoint['model_options'])
net = common.switch_model_to_cuda(net, iscuda, checkpoint)
net.load_state_dict(checkpoint['state_dict'])
net.preprocess = checkpoint.get('preprocess', net.preprocess)
# if 'pca' in checkpoint:
# net.pca = checkpoint.get('pca')
return net
# use_gpu = torch.cuda.is_available()
# torch.cuda.set_device(2)
use_gpu = False
# cache dir
cache_dir = '..\\cache'
Odic_addr = 'res101_AP_GeM-oxf-dict'
Ovec_addr = 'res101_AP_GeM-oxf-vec'
Oindex_addr = 'res101_AP_GeM-oxf-indexIPQ'
Ddic_addr = 'res101_AP_GeM-database-dict'
Dvec_addr = 'res101_AP_GeM-database-vec'
Dindex_addr = 'res101_AP_GeM-database-indexIPQ'
depth = 10
isOxford = True
# LOAD_MODEL_PATH = None
# LOAD_MODEL_PATH = '../model/imagenet-caffe-vgg16-features-d369c8e.pth'
# LOAD_MODEL_PATH = '../model/imagenet-caffe-resnet101-features-10a101d.pth'
# LOAD_WHITEN_PATH = '../model/retrieval-SfM-120k-resnet101-gem-whiten-22ab0c1.pth'
CHECKPOINT = "../model/Resnet-101-AP-GeM.pt"
IMAGE_NORMALIZER = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
REMOVE_FC = False
class ModelFeat(object):
@staticmethod
def make_samples(db, mode, is_Oxford=isOxford, verbose=True):
if is_Oxford:
dic_addr = Odic_addr
            vec_addr = Ovec_addr
index_addr = Oindex_addr
else:
dic_addr = Ddic_addr
            vec_addr = Dvec_addr
index_addr = Dindex_addr
try:
dicbase = cPickle.load(open(os.path.join(cache_dir, dic_addr), "rb", True))
# print(dicbase)
vecbase = cPickle.load(open(os.path.join(cache_dir, vec_addr), "rb", True))
index = faiss.read_index(os.path.join(cache_dir, index_addr))
if verbose:
print("Using cache..., config=%s, depth=%s" % (vec_addr, depth))
        except Exception:  # cache files missing or unreadable -- rebuild below
if verbose:
print("Counting histogram..., config=%s, depth=%s" % (vec_addr, depth))
# base_model = VGGNet(load_features_path=LOAD_MODEL_PATH, requires_grad=False)
# base_model = Res101(load_features_path=LOAD_MODEL_PATH,
# use_Gem_whiten=True, load_whiten_path=LOAD_WHITEN_PATH)
# base_model =
base_model = load_model(CHECKPOINT, False)
base_model.eval()
print("load successfully!")
if REMOVE_FC:
base_model = nn.Sequential(*list(base_model.children())[:-1])
print("Remove FC")
if use_gpu:
base_model = base_model.cuda()
vecbase = []
dicbase = []
data = db.get_data()
count = 1
for d in data.itertuples():
# if count == 5:
# break
d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
img = imageio.imread(d_img, pilmode="RGB")
img = Image.fromarray(img)
img = IMAGE_NORMALIZER(img)
img = np.array(img)
img = np.expand_dims(img, axis=0)
if use_gpu:
inputs = torch.autograd.Variable(torch.from_numpy(img).cuda().float())
else:
inputs = torch.from_numpy(img)
d_hist = base_model(inputs).view(-1, )
d_hist = d_hist.data.cpu().numpy()
vecbase.append(d_hist)
dicbase.append((d_cls, d_img))
print(count)
count += 1
vecbase = np.array(vecbase).astype('float32')
print(vecbase.shape)
d = vecbase.shape[1]
dicbase = pd.DataFrame(dicbase, columns=['cls', 'img'])
if mode == 'Linear':
index = faiss.IndexFlatL2(d)
index.add(vecbase)
elif mode == 'IVFPQ':
n_list = 100
n_bits = 8
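                # IVFPQ: n_list coarse cells for the inverted file, product
                # quantization with 8 sub-vectors of n_bits bits each; nprobe=10
                # cells are scanned per query (speed/recall trade-off).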
coarse_quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFPQ(coarse_quantizer, d, n_list, 8, n_bits)
index.nprobe = 10
index.train(vecbase)
index.add(vecbase)
else:
raise ValueError("you should choose a correct retrival mode")
cPickle.dump(dicbase, open(os.path.join(cache_dir, dic_addr), "wb", True))
cPickle.dump(vecbase, open(os.path.join(cache_dir, vec_addr), "wb", True))
faiss.write_index(index, os.path.join(cache_dir, index_addr))
return index, dicbase, vecbase
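# A minimal retrieval sketch (hypothetical: assumes `query_vec` is a float32
# feature of shape (1, d) produced by the same model):
#   index, dicbase, _ = ModelFeat.make_samples(Database(), 'IVFPQ')
#   dists, ids = index.search(query_vec, depth)
#   top_hits = dicbase.iloc[ids[0]]  # (cls, img) rows of the top-`depth` matches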
if __name__ == "__main__":
# evaluate database
db = Database()
start = time.time()
APs = evaluate_class(db, f_class=ModelFeat, depth=depth)
end = time.time()
# cls_MAPs = []
# with open(os.path.join(result_dir, result_csv), 'w', encoding='UTF-8') as f:
# f.write("Vgg16-oxf-cosine result: MAP&MMAP")
# for cls, cls_APs in APs.items():
# MAP = np.mean(cls_APs)
# print("Class {}, MAP {}".format(cls, MAP))
# f.write("\nClass {}, MAP {}".format(cls, MAP))
# cls_MAPs.append(MAP)
# print("MMAP", np.mean(cls_MAPs))
# f.write("\nMMAP {}".format(np.mean(cls_MAPs)))
# print("total time:", end - start)
# f.write("\ntotal time:{0:.4f}s".format(end - start))
|
[
"evaluate.evaluate_class"
] |
[((487, 523), 'dirtorch.utils.common.load_checkpoint', 'common.load_checkpoint', (['path', 'iscuda'], {}), '(path, iscuda)\n', (509, 523), False, 'from dirtorch.utils import common\n'), ((535, 598), 'dirtorch.nets.create_model', 'nets.create_model', ([], {'pretrained': '""""""'}), "(pretrained='', **checkpoint['model_options'])\n", (552, 598), True, 'import dirtorch.nets as nets\n'), ((610, 662), 'dirtorch.utils.common.switch_model_to_cuda', 'common.switch_model_to_cuda', (['net', 'iscuda', 'checkpoint'], {}), '(net, iscuda, checkpoint)\n', (637, 662), False, 'from dirtorch.utils import common\n'), ((5430, 5440), 'DB.Database', 'Database', ([], {}), '()\n', (5438, 5440), False, 'from DB import Database\n'), ((5454, 5465), 'time.time', 'time.time', ([], {}), '()\n', (5463, 5465), False, 'import time\n'), ((5477, 5527), 'evaluate.evaluate_class', 'evaluate_class', (['db'], {'f_class': 'ModelFeat', 'depth': 'depth'}), '(db, f_class=ModelFeat, depth=depth)\n', (5491, 5527), False, 'from evaluate import evaluate_class\n'), ((5539, 5550), 'time.time', 'time.time', ([], {}), '()\n', (5548, 5550), False, 'import time\n'), ((1638, 1659), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1657, 1659), True, 'import torchvision.transforms as transforms\n'), ((1701, 1776), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1721, 1776), True, 'import torchvision.transforms as transforms\n'), ((2426, 2461), 'os.path.join', 'os.path.join', (['cache_dir', 'index_addr'], {}), '(cache_dir, index_addr)\n', (2438, 2461), False, 'import os\n'), ((4469, 4514), 'pandas.DataFrame', 'pd.DataFrame', (['dicbase'], {'columns': "['cls', 'img']"}), "(dicbase, columns=['cls', 'img'])\n", (4481, 4514), True, 'import pandas as pd\n'), ((2221, 2254), 'os.path.join', 'os.path.join', (['cache_dir', 'dic_addr'], {}), '(cache_dir, dic_addr)\n', (2233, 2254), False, 'import os\n'), ((2340, 2373), 'os.path.join', 'os.path.join', (['cache_dir', 'vec_addr'], {}), '(cache_dir, vec_addr)\n', (2352, 2373), False, 'import os\n'), ((3641, 3677), 'imageio.imread', 'imageio.imread', (['d_img'], {'pilmode': '"""RGB"""'}), "(d_img, pilmode='RGB')\n", (3655, 3677), False, 'import imageio\n'), ((3703, 3723), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3718, 3723), False, 'from PIL import Image\n'), ((3792, 3805), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3800, 3805), True, 'import numpy as np\n'), ((3831, 3858), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3845, 3858), True, 'import numpy as np\n'), ((4574, 4594), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (4591, 4594), False, 'import faiss\n'), ((5284, 5319), 'os.path.join', 'os.path.join', (['cache_dir', 'index_addr'], {}), '(cache_dir, index_addr)\n', (5296, 5319), False, 'import os\n'), ((4035, 4056), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (4051, 4056), False, 'import torch\n'), ((4342, 4359), 'numpy.array', 'np.array', (['vecbase'], {}), '(vecbase)\n', (4350, 4359), True, 'import numpy as np\n'), ((4760, 4780), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (4777, 4780), False, 'import faiss\n'), ((4806, 4862), 'faiss.IndexIVFPQ', 'faiss.IndexIVFPQ', (['coarse_quantizer', 'd', 'n_list', '(8)', 'n_bits'], {}), '(coarse_quantizer, d, n_list, 8, n_bits)\n', (4822, 4862), False, 
'import faiss\n'), ((5110, 5143), 'os.path.join', 'os.path.join', (['cache_dir', 'dic_addr'], {}), '(cache_dir, dic_addr)\n', (5122, 5143), False, 'import os\n'), ((5198, 5231), 'os.path.join', 'os.path.join', (['cache_dir', 'vec_addr'], {}), '(cache_dir, vec_addr)\n', (5210, 5231), False, 'import os\n'), ((3944, 3965), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (3960, 3965), False, 'import torch\n')]
|
from config import Arguments as args
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.train_visible_devices
import sys, random
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model import VQVC
from evaluate import evaluate
from dataset import SpeechDataset #, collate_fn
from utils.scheduler import WarmupScheduler
from utils.checkpoint import load_checkpoint, save_checkpoint
from utils.writer import Writer
from utils.vocoder import get_vocgan
from tqdm import tqdm
def train(train_data_loader, eval_data_loader, model, reconstruction_loss, vocoder, mel_stat, optimizer, scheduler, global_step, writer=None, DEVICE=None):
model.train()
while global_step < args.max_training_step:
for step, (mels, _) in tqdm(enumerate(train_data_loader), total=len(train_data_loader), unit='B', ncols=70, leave=False):
mels = mels.float().to(DEVICE)
optimizer.zero_grad()
mels_hat, commitment_loss, perplexity = model(mels.detach())
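			# VQ-VAE objective: L1 reconstruction plus the commitment term scaled
			# by args.commitment_cost (beta); perplexity tracks codebook usage.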
commitment_loss = args.commitment_cost * commitment_loss
recon_loss = reconstruction_loss(mels_hat, mels)
loss = commitment_loss + recon_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_thresh)
optimizer.step()
if global_step % args.save_checkpoint_step == 0:
save_checkpoint(checkpoint_path=args.model_checkpoint_path, model=model, optimizer=optimizer, scheduler=scheduler, global_step=global_step)
if global_step % args.eval_step == 0:
evaluate(model=model, vocoder=vocoder, eval_data_loader=eval_data_loader, criterion=reconstruction_loss, mel_stat=mel_stat, global_step=global_step, writer=writer, DEVICE=DEVICE)
model.train()
if args.log_tensorboard:
writer.add_scalars(mode="train_recon_loss", global_step=global_step, loss=recon_loss)
writer.add_scalars(mode="train_commitment_loss", global_step=global_step, loss=commitment_loss)
writer.add_scalars(mode="train_perplexity", global_step=global_step, loss=perplexity)
writer.add_scalars(mode="train_total_loss", global_step=global_step, loss=loss)
global_step += 1
scheduler.step()
def main(DEVICE):
# define model, optimizer, scheduler
model = VQVC().to(DEVICE)
recon_loss = nn.L1Loss().to(DEVICE)
vocoder = get_vocgan(ckpt_path=args.vocoder_pretrained_model_path).to(DEVICE)
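	# mel_stat presumably holds the dataset mel mean/std, used to de-normalize
	# spectrograms before vocoding in evaluate().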
mel_stat = torch.tensor(np.load(args.mel_stat_path)).to(DEVICE)
optimizer = Adam(model.parameters(), lr=args.init_lr)
scheduler = WarmupScheduler( optimizer, warmup_epochs=args.warmup_steps,
initial_lr=args.init_lr, max_lr=args.max_lr,
milestones=args.milestones, gamma=args.gamma)
global_step = load_checkpoint(checkpoint_path=args.model_checkpoint_path, model=model, optimizer=optimizer, scheduler=scheduler)
# load dataset & dataloader
train_dataset = SpeechDataset(mem_mode=args.mem_mode, meta_dir=args.prepro_meta_train, dataset_name = args.dataset_name, mel_stat_path=args.mel_stat_path, max_frame_length=args.max_frame_length)
eval_dataset = SpeechDataset(mem_mode=args.mem_mode, meta_dir=args.prepro_meta_eval, dataset_name=args.dataset_name, mel_stat_path=args.mel_stat_path, max_frame_length=args.max_frame_length)
train_data_loader = DataLoader(dataset=train_dataset, batch_size=args.train_batch_size, shuffle=True, drop_last=True, pin_memory=True, num_workers=args.n_workers)
eval_data_loader = DataLoader(dataset=eval_dataset, batch_size=args.train_batch_size, shuffle=False, pin_memory=True, drop_last=True)
# tensorboard
writer = Writer(args.model_log_path) if args.log_tensorboard else None
# train the model!
train(train_data_loader, eval_data_loader, model, recon_loss, vocoder, mel_stat, optimizer, scheduler, global_step, writer, DEVICE)
if __name__ == "__main__":
print("[LOG] Start training...")
DEVICE = torch.device("cuda" if (torch.cuda.is_available() and args.use_cuda) else "cpu")
seed = args.seed
print("[Training environment]")
print("\t\trandom_seed: ", seed)
print("\t\tuse_cuda: ", args.use_cuda)
print("\t\t{} threads are used...".format(torch.get_num_threads()))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
main(DEVICE)
|
[
"evaluate.evaluate"
] |
[((2638, 2793), 'utils.scheduler.WarmupScheduler', 'WarmupScheduler', (['optimizer'], {'warmup_epochs': 'args.warmup_steps', 'initial_lr': 'args.init_lr', 'max_lr': 'args.max_lr', 'milestones': 'args.milestones', 'gamma': 'args.gamma'}), '(optimizer, warmup_epochs=args.warmup_steps, initial_lr=args\n .init_lr, max_lr=args.max_lr, milestones=args.milestones, gamma=args.gamma)\n', (2653, 2793), False, 'from utils.scheduler import WarmupScheduler\n'), ((2821, 2939), 'utils.checkpoint.load_checkpoint', 'load_checkpoint', ([], {'checkpoint_path': 'args.model_checkpoint_path', 'model': 'model', 'optimizer': 'optimizer', 'scheduler': 'scheduler'}), '(checkpoint_path=args.model_checkpoint_path, model=model,\n optimizer=optimizer, scheduler=scheduler)\n', (2836, 2939), False, 'from utils.checkpoint import load_checkpoint, save_checkpoint\n'), ((2983, 3167), 'dataset.SpeechDataset', 'SpeechDataset', ([], {'mem_mode': 'args.mem_mode', 'meta_dir': 'args.prepro_meta_train', 'dataset_name': 'args.dataset_name', 'mel_stat_path': 'args.mel_stat_path', 'max_frame_length': 'args.max_frame_length'}), '(mem_mode=args.mem_mode, meta_dir=args.prepro_meta_train,\n dataset_name=args.dataset_name, mel_stat_path=args.mel_stat_path,\n max_frame_length=args.max_frame_length)\n', (2996, 3167), False, 'from dataset import SpeechDataset\n'), ((3178, 3361), 'dataset.SpeechDataset', 'SpeechDataset', ([], {'mem_mode': 'args.mem_mode', 'meta_dir': 'args.prepro_meta_eval', 'dataset_name': 'args.dataset_name', 'mel_stat_path': 'args.mel_stat_path', 'max_frame_length': 'args.max_frame_length'}), '(mem_mode=args.mem_mode, meta_dir=args.prepro_meta_eval,\n dataset_name=args.dataset_name, mel_stat_path=args.mel_stat_path,\n max_frame_length=args.max_frame_length)\n', (3191, 3361), False, 'from dataset import SpeechDataset\n'), ((3376, 3523), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'args.train_batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.n_workers'}), '(dataset=train_dataset, batch_size=args.train_batch_size, shuffle\n =True, drop_last=True, pin_memory=True, num_workers=args.n_workers)\n', (3386, 3523), False, 'from torch.utils.data import DataLoader\n'), ((3539, 3658), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'eval_dataset', 'batch_size': 'args.train_batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(dataset=eval_dataset, batch_size=args.train_batch_size, shuffle=\n False, pin_memory=True, drop_last=True)\n', (3549, 3658), False, 'from torch.utils.data import DataLoader\n'), ((4252, 4269), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4263, 4269), False, 'import sys, random\n'), ((4271, 4291), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4285, 4291), True, 'import numpy as np\n'), ((4293, 4316), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4310, 4316), False, 'import torch\n'), ((3680, 3707), 'utils.writer.Writer', 'Writer', (['args.model_log_path'], {}), '(args.model_log_path)\n', (3686, 3707), False, 'from utils.writer import Writer\n'), ((2368, 2374), 'model.VQVC', 'VQVC', ([], {}), '()\n', (2372, 2374), False, 'from model import VQVC\n'), ((2401, 2412), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2410, 2412), True, 'import torch.nn as nn\n'), ((2435, 2491), 'utils.vocoder.get_vocgan', 'get_vocgan', ([], {'ckpt_path': 'args.vocoder_pretrained_model_path'}), 
'(ckpt_path=args.vocoder_pretrained_model_path)\n', (2445, 2491), False, 'from utils.vocoder import get_vocgan\n'), ((4223, 4246), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (4244, 4246), False, 'import torch\n'), ((1484, 1627), 'utils.checkpoint.save_checkpoint', 'save_checkpoint', ([], {'checkpoint_path': 'args.model_checkpoint_path', 'model': 'model', 'optimizer': 'optimizer', 'scheduler': 'scheduler', 'global_step': 'global_step'}), '(checkpoint_path=args.model_checkpoint_path, model=model,\n optimizer=optimizer, scheduler=scheduler, global_step=global_step)\n', (1499, 1627), False, 'from utils.checkpoint import load_checkpoint, save_checkpoint\n'), ((1670, 1857), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'vocoder': 'vocoder', 'eval_data_loader': 'eval_data_loader', 'criterion': 'reconstruction_loss', 'mel_stat': 'mel_stat', 'global_step': 'global_step', 'writer': 'writer', 'DEVICE': 'DEVICE'}), '(model=model, vocoder=vocoder, eval_data_loader=eval_data_loader,\n criterion=reconstruction_loss, mel_stat=mel_stat, global_step=\n global_step, writer=writer, DEVICE=DEVICE)\n', (1678, 1857), False, 'from evaluate import evaluate\n'), ((2529, 2556), 'numpy.load', 'np.load', (['args.mel_stat_path'], {}), '(args.mel_stat_path)\n', (2536, 2556), True, 'import numpy as np\n'), ((3996, 4021), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4019, 4021), False, 'import torch\n')]
|
from evaluate import evaluate
from os_utils import OSUtils
from save_slot_checkpoint import save_slot
from torch import nn
from tqdm import tqdm
def train(model, train_dl, val_dl, args, optimizer, scheduler=None):
print(f"training on {len(train_dl.dataset)}, validating on {len(val_dl.dataset)} examples.")
criterion = nn.CrossEntropyLoss()
loss_tr = []
acc_tr = []
loss_val = []
acc_val = []
pbar = tqdm(range(1, args.num_epochs + 1), total=args.num_epochs, leave=False)
for i in pbar:
tr_loss = 0
tr_acc = 0
for j, (inputs, targets) in enumerate(train_dl):
model.train()
inputs, targets = inputs.to(args.device), targets.to(args.device)
predictions = model(inputs)
loss = criterion(predictions, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
tr_loss += loss.item()
tr_acc += targets.eq(predictions.argmax(dim=-1)).sum().item()
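        # normalize: accuracy over examples, loss over batches (CrossEntropyLoss
        # already averages within each batch)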
tr_acc, tr_loss = tr_acc / len(train_dl.dataset), tr_loss / len(train_dl)
val_loss, val_acc = evaluate(model, val_dl, criterion, args.device)
if scheduler is not None:
scheduler.step()
acc_tr.append(tr_acc)
loss_tr.append(tr_loss)
loss_val.append(val_loss)
acc_val.append(val_acc)
pbar.set_description(f"[{i}/{args.num_epochs}]")
pbar.set_postfix({"tr_acc": f"{tr_acc:.2%}", "val_acc": f"{val_acc:.2%}"}, refresh=False)
metrics = {
"tr_loss": loss_tr,
"tr_acc": acc_tr,
"val_acc": acc_val,
"val_loss": loss_val
}
OSUtils.save_torch_object(metrics, f"{args.path}", f"metrics_{args.k}.pt")
if i % args.check_point_every == 0:
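            # checkpoint the subnetwork: "learned" masks save the full state_dict;
            # otherwise save_slot() (presumably) snapshots the active slot weights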
if args.method == "learned":
OSUtils.save_torch_object(model.state_dict(),
f"{args.path}/checks",
f"subnet_{args.k}_{i}.pt")
else:
OSUtils.save_torch_object(save_slot(model, random=False),
f"{args.path}/checks",
f"subnet_{args.k}_{i}.pt"
)
print(f"Final results tr_acc {acc_tr[-1]:.2%} val_acc {acc_val[-1]:.2%}")
|
[
"evaluate.evaluate"
] |
[((331, 352), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (350, 352), False, 'from torch import nn\n'), ((1128, 1175), 'evaluate.evaluate', 'evaluate', (['model', 'val_dl', 'criterion', 'args.device'], {}), '(model, val_dl, criterion, args.device)\n', (1136, 1175), False, 'from evaluate import evaluate\n'), ((1692, 1766), 'os_utils.OSUtils.save_torch_object', 'OSUtils.save_torch_object', (['metrics', 'f"""{args.path}"""', 'f"""metrics_{args.k}.pt"""'], {}), "(metrics, f'{args.path}', f'metrics_{args.k}.pt')\n", (1717, 1766), False, 'from os_utils import OSUtils\n'), ((2109, 2139), 'save_slot_checkpoint.save_slot', 'save_slot', (['model'], {'random': '(False)'}), '(model, random=False)\n', (2118, 2139), False, 'from save_slot_checkpoint import save_slot\n')]
|
import os
import json
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from evaluate import evaluate
if __name__ == '__main__':
training_commands, predict_commands = [], []
seq2vec_name_to_last_layer = {"dan": 4, "gru": 4}
probing_accuracies = {}
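    # Pipeline: (1) make sure each base sentiment model exists, (2) train a probe
    # on bigram order at the base model's last layer, (3) predict on the test
    # split, (4) evaluate accuracy, then plot DAN vs GRU side by side.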
for seq2vec_name, layer in seq2vec_name_to_last_layer.items():
# Check if Base Models have been trained first.
serialization_dir = os.path.join("serialization_dirs", f"main_{seq2vec_name}_5k_with_emb")
model_files_present = all([os.path.exists(os.path.join(serialization_dir, file_name))
for file_name in ["model.ckpt.index", "config.json", "vocab.txt"]])
epochs = 8 if seq2vec_name == "dan" else 4 # gru is slow, use only 4 epochs
if not model_files_present:
print("\nYour base model hasn't been trained yet.")
print("Please train it first with the following command:")
training_command = (f"python train.py main "
f"data/imdb_sentiment_train_5k.jsonl "
f"data/imdb_sentiment_dev.jsonl "
f"--seq2vec-choice {seq2vec_name} "
f"--embedding-dim 50 "
f"--num-layers 4 "
f"--num-epochs {epochs} "
f"--suffix-name _{seq2vec_name}_5k_with_emb "
f"--pretrained-embedding-file data/glove.6B.50d.txt ")
print(training_command)
exit()
serialization_dir = os.path.join("serialization_dirs", f"probing_bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}")
model_files_present = all([os.path.exists(os.path.join(serialization_dir, file_name))
for file_name in ["model.ckpt.index", "config.json", "vocab.txt"]])
predictions_file = (f"serialization_dirs/probing_bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}/"
f"predictions_bigram_order_test.txt")
predictions_present = os.path.exists(predictions_file)
if not model_files_present:
training_command = (f"python train.py probing "
f"data/bigram_order_train.jsonl "
f"data/bigram_order_dev.jsonl "
f"--base-model-dir serialization_dirs/main_{seq2vec_name}_5k_with_emb "
f"--layer-num {layer} "
f"--num-epochs {epochs} "
f"--suffix-name _bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}")
training_commands.append(training_command)
continue
        if not predictions_present:
predict_command = (f"python predict.py "
f"serialization_dirs/probing_bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer} "
f"data/bigram_order_test.jsonl "
f"--predictions-file serialization_dirs/probing_bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}/"
f"predictions_bigram_order_test.txt")
predict_commands.append(predict_command)
continue
accuracy = evaluate("data/bigram_order_test.jsonl", predictions_file)
probing_accuracies[seq2vec_name] = accuracy
if training_commands:
print("\nPlease finish the missing model training using the following commands:")
print("\n".join(training_commands))
if predict_commands:
print("\nPlease finish the model predictions using the following commands:")
print("\n".join(predict_commands))
if training_commands or predict_commands:
print("\nCannot plot the results until all the files are present.")
exit()
# Make the plots
seq2vec_names = ["dan", "gru"]
plt.xticks(range(2), seq2vec_names)
plt.bar(range(2), [probing_accuracies["dan"], probing_accuracies["gru"]],
align='center', alpha=0.5)
plt.ylabel('Accuracy')
plt.title('BigramOrderTask: Probing Performance at Last Layer')
plt.savefig(os.path.join("plots", "probing_performance_on_bigram_order_task.png"))
|
[
"evaluate.evaluate"
] |
[((40, 63), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (54, 63), False, 'import matplotlib\n'), ((4198, 4220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4208, 4220), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4288), 'matplotlib.pyplot.title', 'plt.title', (['"""BigramOrderTask: Probing Performance at Last Layer"""'], {}), "('BigramOrderTask: Probing Performance at Last Layer')\n", (4234, 4288), True, 'import matplotlib.pyplot as plt\n'), ((441, 511), 'os.path.join', 'os.path.join', (['"""serialization_dirs"""', 'f"""main_{seq2vec_name}_5k_with_emb"""'], {}), "('serialization_dirs', f'main_{seq2vec_name}_5k_with_emb')\n", (453, 511), False, 'import os\n'), ((1639, 1749), 'os.path.join', 'os.path.join', (['"""serialization_dirs"""', 'f"""probing_bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}"""'], {}), "('serialization_dirs',\n f'probing_bigram_order_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}')\n", (1651, 1749), False, 'import os\n'), ((2158, 2190), 'os.path.exists', 'os.path.exists', (['predictions_file'], {}), '(predictions_file)\n', (2172, 2190), False, 'import os\n'), ((3416, 3474), 'evaluate.evaluate', 'evaluate', (['"""data/bigram_order_test.jsonl"""', 'predictions_file'], {}), "('data/bigram_order_test.jsonl', predictions_file)\n", (3424, 3474), False, 'from evaluate import evaluate\n'), ((4305, 4374), 'os.path.join', 'os.path.join', (['"""plots"""', '"""probing_performance_on_bigram_order_task.png"""'], {}), "('plots', 'probing_performance_on_bigram_order_task.png')\n", (4317, 4374), False, 'import os\n'), ((2839, 2871), 'os.path.exists', 'os.path.exists', (['predictions_file'], {}), '(predictions_file)\n', (2853, 2871), False, 'import os\n'), ((562, 604), 'os.path.join', 'os.path.join', (['serialization_dir', 'file_name'], {}), '(serialization_dir, file_name)\n', (574, 604), False, 'import os\n'), ((1796, 1838), 'os.path.join', 'os.path.join', (['serialization_dir', 'file_name'], {}), '(serialization_dir, file_name)\n', (1808, 1838), False, 'import os\n')]
|
"""
Baseline training script
"""
import os
import sys
import copy
import glob
import math
import json
import shutil
import argparse
import logging
from typing import Any, Dict, Optional
from types import SimpleNamespace
from itertools import cycle
from contextlib import ExitStack
from comet_ml import Experiment # must be before torch!
import torch
from apex import amp
from torch import nn
from tqdm import tqdm
from transformers import (
AdamW,
GPT2Config,
WEIGHTS_NAME,
get_linear_schedule_with_warmup,
)
from data.dataset import StoriumDataset
from data.utils import get_dataloader
from data.parallel import chunked_scattering
from evaluate import Evaluator
from experiment import initialize_experiment
from model import GPT2SegmentedModel
from utils import (
collect_tensors,
tqdm_wrap_stdout,
tqdm_unwrap_stdout,
refresh_cuda_memory,
release_cuda_memory,
)
import metrics
class Trainer:
"""
A class that encapsulates all the functionality needed to train a model
"""
def __init__(self, args: SimpleNamespace):
"""
Initialize the trainer
"""
self.args = args
self.step = 0
self.amp_initialized = False
self.dataset: StoriumDataset
self.modules: Dict[str, Any] = {}
self.experiment: Experiment
self._initialize()
self._initialize_metrics()
@property
def use_fp16(self):
"""
Whether to use fp16 training
"""
return torch.cuda.is_available() and self.args.optim.fp16
def try_init_amp(self):
"""
Due to the way NVIDIA's apex library works you can only call initialize
once. This leads to a chicken-and-egg problem, when trying to restore
a checkpoint to continue training.
That's why we track whether or not we called initialize, such that it
is safe to call this method multiple times (which can happen if we load
from a checkpoint that used automatic mixed precision training).
"""
if not self.amp_initialized and self.use_fp16:
model = self.modules["model"]
optimizer = self.modules["optimizer"]
model, optimizer = amp.initialize(
model.cuda(), optimizer, opt_level=self.args.optim.fp16_opt_level
)
self.modules["model"] = model
self.modules["optimizer"] = optimizer
self.amp_initialized = True
def _initialize_metrics(self):
"""
Initialize the metrics
"""
self.metric_store = metrics.MetricStore()
self.metric_store.add(
metrics.Metric("lr", "format_scientific", "g", max_history=1)
)
self.metric_store.add(
metrics.Metric("ppl", "format_dynamic_float", max_history=1000)
)
self.metric_store.add(
metrics.Metric("ntok", "format_int", "a", max_history=1000)
)
self.metric_store.add(metrics.Metric("oom", "format_int", "t"))
self.metric_store.add(metrics.Metric("nll", "format_float", max_history=1000))
self.experiment = initialize_experiment(
self.args, ("data", "model", "optim"), self.args.experiment_name
)
def _initialize(self):
"""
Load the dataset, model, etc
"""
cache_dir = self.args.cache_dir
model_name = self.args.model.model_name
logging.info("Loading dataset")
self.dataset = StoriumDataset("train", "gpt2", cache_dir=cache_dir)
self.dataset.load(self.args.data_dir)
# By default the config outputs "past", but that makes our chunked
# scattering (needed when batching based on tokens, rather than
# examples) fail since the huggingface/transformers package stacks the
# outputs on dim 0, which is normally the batch dimension. This leads
# to errors like:
#
# RuntimeError: Gather got an input of invalid size: got [2, 5, 12,
# 411, 64], but expected [2, 4, 12, 411, 64] (gather at
# /pytorch/torch/csrc/cuda/comm.cpp:226)
#
# During training we only care about the loss, so just disable all
# additional outputs.
config = GPT2Config.from_pretrained(model_name, cache_dir=cache_dir)
config.output_hidden_states = False
config.output_attentions = False
config.output_past = False
model = GPT2SegmentedModel.from_pretrained(
model_name, config=config, cache_dir=cache_dir
)
tokenizer = self.dataset.get_tokenizer()
model.resize_token_embeddings(len(tokenizer))
max_steps = self.args.optim.max_steps
optimizer = AdamW(model.parameters(), lr=self.args.optim.lr)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_training_steps=max_steps,
num_warmup_steps=self.args.optim.warmup_steps,
)
# Track the modules
self.modules["model"] = model
self.modules["optimizer"] = optimizer
self.modules["scheduler"] = scheduler
@property
def checkpoint_path(self):
"""
Return the current checkpoint path
"""
return os.path.join(self.args.output_dir, f"checkpoint-{self.step}")
def save(self):
"""
Save all the tracked modules
"""
# Save model checkpoint
checkpoint_path = self.checkpoint_path
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
logging.info("Saving model checkpoint to %s", checkpoint_path)
train_state: Dict[str, Any] = {"step": self.step}
if self.use_fp16:
# Need to save the automatic mixed precision state_dict
# See https://github.com/NVIDIA/apex#checkpointing
# But first ensure cuda memory is relatively contiguous because the
# call to `amp.state_dict()` seems to allocate cuda memory, which
# can fail if cuda memory is fragmented.
refresh_cuda_memory()
train_state["amp"] = amp.state_dict()
for name, module in self.modules.items():
if name == "model":
module.save_pretrained(checkpoint_path)
else:
train_state[name] = module.state_dict()
with open(
os.path.join(checkpoint_path, "train_state.pt"), "wb"
) as train_state_file:
torch.save(train_state, train_state_file)
with open(
os.path.join(checkpoint_path, "train_config.json"), "wt"
) as train_config_file:
json.dump(
self.args,
train_config_file,
indent=2,
default=lambda obj: getattr(obj, "__dict__", {}),
)
self.save_metrics()
def save_metrics(self):
"""
Method to save metrics to the current checkpoint path
"""
checkpoint_path = self.checkpoint_path
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
logging.info("Saving metrics to %s", checkpoint_path)
self.metric_store.save(os.path.join(checkpoint_path, "train_metrics.json"))
def on_new_best(self):
"""
Mark the latest checkpoint as the best
"""
new_best_checkpoint = os.path.join(
self.args.output_dir, f"checkpoint-{self.step}"
)
logging.info("New best %s", new_best_checkpoint)
best_checkpoint_path = os.path.join(self.args.output_dir, "best-checkpoint")
try:
# Remove the old best checkpoint path, otherwise it will error when
# trying to create the symlink
os.remove(best_checkpoint_path)
except FileNotFoundError:
pass
# Just use a symlink to denote the best checkpoint
os.symlink(
os.path.basename(new_best_checkpoint), best_checkpoint_path,
)
def prune_checkpoints(self) -> bool:
"""
Remove oldest checkpoints first if we are above the max checkpoints limit
"""
if self.args.max_checkpoints <= 0:
return False
checkpoints = glob.glob(os.path.join(self.args.output_dir, "checkpoint-*"))
sorted_checkpoints = sorted(
(int(os.path.basename(c).split("-")[1]), c) for c in checkpoints
)
try:
# Try to read the best checkpoint if it exists, otherwise set it to None
best_checkpoint_path: Optional[str] = os.readlink(
os.path.join(self.args.output_dir, "best-checkpoint")
)
except FileNotFoundError:
best_checkpoint_path = None
for _, checkpoint in sorted_checkpoints[: -self.args.max_checkpoints]:
if os.path.basename(checkpoint) == best_checkpoint_path:
# If the best checkpoint is about to removed, then we should
# stop early
logging.info("Not removing best checkpoint %s", checkpoint)
return False
logging.info("Removing checkpoint %s", checkpoint)
shutil.rmtree(checkpoint)
return True
def load(self, checkpoint_path: str):
"""
Load from checkpoint
"""
train_config_path = os.path.join(checkpoint_path, "train_config.json")
if not os.path.isfile(train_config_path):
raise RuntimeError(f"Cannot find train config file: {train_config_path}")
train_state_path = os.path.join(checkpoint_path, "train_state.pt")
if not os.path.isfile(train_state_path):
raise RuntimeError(f"Cannot find train state file: {train_state_path}")
model_state_path = os.path.join(checkpoint_path, WEIGHTS_NAME)
if not os.path.isfile(model_state_path):
raise RuntimeError(f"Cannot find model state file: {model_state_path}")
train_metrics_path = os.path.join(checkpoint_path, "train_metrics.json")
if not os.path.isfile(train_metrics_path):
raise RuntimeError(f"Cannot find metrics file: {train_metrics_path}")
# Must load the train config first
with open(train_config_path, "rt") as config_file:
self.args = json.load(
config_file, object_hook=lambda obj: SimpleNamespace(**obj)
)
train_state = torch.load(train_state_path)
if "amp" in train_state:
# Need to load the automatic mixed precision state_dict. Calling
# amp.load_state_dict requires initializing automatic mixed
# precision first.
#
# See https://github.com/NVIDIA/apex#checkpointing
self.try_init_amp()
# Also, for some reason, amp.load_state_dict needs to be before
# loading the rest of the state dicts, otherwise amp keeps the
# params on the cpu. Not sure why this happens, as the
# documentation seems to indicate you should call
# amp.load_state_dict last...
amp.load_state_dict(train_state["amp"])
model_state = torch.load(model_state_path)
for name, module in self.modules.items():
if name == "model":
module.load_state_dict(model_state)
else:
module.load_state_dict(train_state[name])
self.step = train_state["step"]
self.metric_store.load(train_metrics_path)
def __call__(self):
"""
Run the training!
"""
# Must be called first
self.try_init_amp()
model = self.modules["model"]
optimizer = self.modules["optimizer"]
scheduler = self.modules["scheduler"]
if self.args.optim.use_gradient_checkpointing:
model.enable_gradient_checkpointing()
model = nn.DataParallel(model)
dataloader = get_dataloader(
self.args.data,
self.dataset,
num_devices=len(model.device_ids),
shuffle=True,
)
def get_description():
return f"Train {self.metric_store}"
max_steps = self.args.optim.max_steps
accumulation_steps = self.args.optim.gradient_accumulation_steps
progress = tqdm(
unit="step",
initial=self.step,
dynamic_ncols=True,
desc=get_description(),
total=max_steps,
file=sys.stdout, # needed to make tqdm_wrap_stdout work
)
with ExitStack() as stack:
# pylint:disable=no-member
stack.enter_context(tqdm_wrap_stdout())
stack.enter_context(chunked_scattering())
stack.enter_context(self.experiment.train())
# pylint:enable=no-member
if self.args.optim.early_stopping:
# If using early stopping, must evaluate regularly to determine
                # if training should stop early, so set up an Evaluator
eval_args = copy.deepcopy(self.args)
eval_args.data.batch_size = self.args.optim.eval_batch_size
evaluator = Evaluator(eval_args)
evaluator.model = model
evaluator.load_dataset("validation")
evaluator.initialize_experiment(experiment=self.experiment)
# Make sure we are tracking validation nll
self.metric_store.add(metrics.Metric("vnll", "format_float", "g(m)"))
# And store a local variable for easy access
vnll_metric = self.metric_store["vnll"]
loss = 0
num_tokens = 0
for step, batch in enumerate(cycle(dataloader), 1):
try:
step_loss = self.compute_gradients_and_loss(batch, model, optimizer)
run_optimizer = (step % accumulation_steps) == 0
if run_optimizer:
# Run an optimization step
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
# Update loss and num tokens after running an optimization
# step, in case it results in an out of memory error
loss += step_loss
num_tokens += batch["num_tokens"]
if run_optimizer:
# Since we ran the optimizer, increment current step
self.step += 1
self.experiment.set_step(self.step)
progress.update()
# update our metrics as well
self.update_metrics(
loss / accumulation_steps,
num_tokens,
scheduler.get_lr()[0],
)
num_tokens = 0
loss = 0
# and finally check if we should save
if (
self.args.save_steps > 0
and self.step % self.args.save_steps == 0
):
# First save the current checkpoint
self.save()
# Then if we are implementing early stopping, see
# if we achieved a new best
if self.args.optim.early_stopping:
evaluator.reset_metrics()
with ExitStack() as eval_stack:
# pylint:disable=no-member
eval_stack.enter_context(tqdm_unwrap_stdout())
eval_stack.enter_context(
release_cuda_memory(
collect_tensors(optimizer.state)
)
)
# pylint:enable=no-member
vnll = evaluator()
vnll_metric.update(vnll)
# Save the updated metrics
self.save_metrics()
if vnll == vnll_metric.min:
self.on_new_best()
# Try to combat OOM errors caused by doing evaluation
# in the same loop with training. This manifests in out
# of memory errors after the first or second evaluation
# run.
refresh_cuda_memory()
if not self.prune_checkpoints():
logging.info("Stopping early")
break
if self.step >= max_steps:
logging.info("Finished training")
break
except RuntimeError as rte:
if "out of memory" in str(rte):
self.metric_store["oom"].update(1)
logging.warning(str(rte))
else:
progress.close()
raise rte
progress.set_description_str(get_description())
progress.close()
def compute_gradients_and_loss(self, batch: Dict[str, Any], model, optimizer):
"""
Compute the gradients and loss for the specified batch
"""
model.train()
loss = model(batch, loss_only=True)[0]
# If there are multiple GPUs, then this will be a vector of losses, so
# sum over the GPUs first
loss = loss.mean()
if self.args.optim.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss.item()
def update_metrics(self, loss, num_tokens, lr): # pylint:disable=invalid-name
"""
Update the metrics
"""
# Update our metrics
self.metric_store["nll"].update(loss)
self.metric_store["ntok"].update(num_tokens)
self.metric_store["ppl"].update(math.exp(loss))
self.metric_store["lr"].update(lr)
# Update the experiment logs as well
for name, metric in self.metric_store.items():
if name == "oom":
self.experiment.log_metric(name, metric.total)
else:
self.experiment.log_metric(name, metric.last_value)
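# --- Illustrative sketch (not part of the original file): the apex AMP
# pattern that try_init_amp / compute_gradients_and_loss rely on, shown in
# isolation. Assumes NVIDIA apex and a CUDA device are available; the model
# and data below are placeholders, not the real Trainer state.
def _amp_step_sketch():
    import torch
    from torch import nn
    from apex import amp
    model = nn.Linear(10, 1).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # Wrap model and optimizer once before any amp.scale_loss call; this is
    # the job try_init_amp performs for the Trainer.
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    loss = model(torch.randn(4, 10).cuda()).pow(2).mean()
    # scale_loss scales the loss before backward so fp16 gradients do not
    # underflow, then unscales them before the optimizer step.
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.step()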
def define_train_args(
sub_parsers: argparse._SubParsersAction, # pylint:disable=protected-access
):
"""
Define arguments needed for the train command
"""
parser = sub_parsers.add_parser("train", help="Train a model")
parser.add_argument(
"--track",
default=False,
const=True,
nargs="?",
help="Whether to track this experiment. If an experiment id is provided, it will track \
        the existing experiment. If a filename ending with guid is provided, it will wait \
until the file exists, then start tracking that experiment.",
)
parser.add_argument(
"--experiment-name",
type=str,
help="A name for the experiment when using comet for tracking",
)
parser.add_argument(
"--restore",
type=str,
help="Restore from the specified checkpoint before continuing training",
)
parser.add_argument(
"--save-steps",
type=int,
default=5000,
help="Save after every n number of steps",
)
parser.add_argument(
"--max-checkpoints",
type=int,
default=5,
help="The max number of checkpoints to keep",
)
model_group = parser.add_argument_group("model")
model_group.add_argument(
"--model-name",
type=str,
default="gpt2",
choices=GPT2SegmentedModel.pretrained_model_archive_map.keys(),
help="The location of the processed data",
)
data_group = parser.add_argument_group("data")
data_group.add_argument(
"--batch-size",
type=int,
default=2560, # max batch size that fits on a single 2080ti using fp16
help="The batch size to use for training",
)
data_group.add_argument(
"--batch-size-buffer",
type=int,
default=0,
help="By how many tokens to reduce the batch size on the GPU of the optimizer",
)
data_group.add_argument(
"--batch-method",
type=str,
default="token",
choices=["token", "example"],
help="Whether to batch by individual examples or by number of tokens",
)
data_group.add_argument(
"--token-bucket-granularity",
type=int,
default=3,
help="Granularity of each bucket for the token based batching method",
)
optim_group = parser.add_argument_group("optim")
optim_group.add_argument(
"--learning-rate",
dest="lr",
type=float,
default=5e-5,
help="The initial learning rate",
)
optim_group.add_argument(
"--max-steps",
type=int,
default=100000,
help="How many optimization steps to run.",
)
optim_group.add_argument(
"--warmup-steps",
type=int,
default=8000,
help="How many steps of warmup to apply.",
)
optim_group.add_argument(
"--gradient-accumulation-steps",
type=int,
default=1,
help="How many steps to accumulate gradients before doing an update",
)
optim_group.add_argument(
"--use-gradient-checkpointing",
default=False,
action="store_true",
help="Whether to use gradient checkpointing. Needed for bigger models.",
)
optim_group.add_argument(
"--fp16",
default=False,
action="store_true",
help="Whether to use 16-bit floats if available using NVIDIA apex.",
)
optim_group.add_argument(
"--fp16-opt-level",
type=str,
default="O1",
choices=[f"O{i}" for i in range(4)],
help="What optimization level to use for fp16 floats. "
"See https://nvidia.github.io/apex/amp.html#opt-levels",
)
optim_group.add_argument(
"--early-stopping",
default=False,
action="store_true",
help="Whether to use early stopping based on validation nll",
)
optim_group.add_argument(
"--eval-batch-size",
type=int,
default=9 * 1024, # Max batch size that fits on a single 2080ti
# without going oom. This is smaller than when running evaluation
# separately, since we need to account for additional training state
# and fragmentation.
help="The batch size to use for evaluation",
)
parser.set_defaults(func=perform_training)
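# Example invocation of the "train" subcommand defined above (hedged: the
# entry-point script name and the flag values below are illustrative only):
#
#   python train.py train \
#       --model-name gpt2 \
#       --batch-size 2560 --batch-method token \
#       --fp16 --early-stopping \
#       --save-steps 5000 --max-checkpoints 5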
def perform_training(args):
"""
Main entry point for training
"""
trainer = Trainer(args)
if args.restore:
trainer.load(args.restore)
trainer()
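# --- Illustrative sketch (not part of the original file): the gradient
# accumulation pattern used inside Trainer.__call__, reduced to its core.
# Note the Trainer sums raw gradients across micro-batches and only divides
# the *logged* loss by accumulation_steps; this sketch divides before
# backward() instead, which averages the gradients. Names are placeholders.
def _accumulation_sketch(batches, accumulation_steps=4):
    import torch
    from torch import nn
    model = nn.Linear(10, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    for step, batch in enumerate(batches, 1):
        loss = model(batch).pow(2).mean()
        (loss / accumulation_steps).backward()
        if step % accumulation_steps == 0:
            # One optimizer step per accumulation_steps micro-batches
            optimizer.step()
            model.zero_grad()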
|
[
"evaluate.Evaluator"
] |
[((2586, 2607), 'metrics.MetricStore', 'metrics.MetricStore', ([], {}), '()\n', (2605, 2607), False, 'import metrics\n'), ((3138, 3230), 'experiment.initialize_experiment', 'initialize_experiment', (['self.args', "('data', 'model', 'optim')", 'self.args.experiment_name'], {}), "(self.args, ('data', 'model', 'optim'), self.args.\n experiment_name)\n", (3159, 3230), False, 'from experiment import initialize_experiment\n'), ((3434, 3465), 'logging.info', 'logging.info', (['"""Loading dataset"""'], {}), "('Loading dataset')\n", (3446, 3465), False, 'import logging\n'), ((3489, 3541), 'data.dataset.StoriumDataset', 'StoriumDataset', (['"""train"""', '"""gpt2"""'], {'cache_dir': 'cache_dir'}), "('train', 'gpt2', cache_dir=cache_dir)\n", (3503, 3541), False, 'from data.dataset import StoriumDataset\n'), ((4254, 4313), 'transformers.GPT2Config.from_pretrained', 'GPT2Config.from_pretrained', (['model_name'], {'cache_dir': 'cache_dir'}), '(model_name, cache_dir=cache_dir)\n', (4280, 4313), False, 'from transformers import AdamW, GPT2Config, WEIGHTS_NAME, get_linear_schedule_with_warmup\n'), ((4451, 4538), 'model.GPT2SegmentedModel.from_pretrained', 'GPT2SegmentedModel.from_pretrained', (['model_name'], {'config': 'config', 'cache_dir': 'cache_dir'}), '(model_name, config=config, cache_dir=\n cache_dir)\n', (4485, 4538), False, 'from model import GPT2SegmentedModel\n'), ((4796, 4919), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_training_steps': 'max_steps', 'num_warmup_steps': 'self.args.optim.warmup_steps'}), '(optimizer, num_training_steps=max_steps,\n num_warmup_steps=self.args.optim.warmup_steps)\n', (4827, 4919), False, 'from transformers import AdamW, GPT2Config, WEIGHTS_NAME, get_linear_schedule_with_warmup\n'), ((5250, 5311), 'os.path.join', 'os.path.join', (['self.args.output_dir', 'f"""checkpoint-{self.step}"""'], {}), "(self.args.output_dir, f'checkpoint-{self.step}')\n", (5262, 5311), False, 'import os\n'), ((5571, 5633), 'logging.info', 'logging.info', (['"""Saving model checkpoint to %s"""', 'checkpoint_path'], {}), "('Saving model checkpoint to %s', checkpoint_path)\n", (5583, 5633), False, 'import logging\n'), ((7131, 7184), 'logging.info', 'logging.info', (['"""Saving metrics to %s"""', 'checkpoint_path'], {}), "('Saving metrics to %s', checkpoint_path)\n", (7143, 7184), False, 'import logging\n'), ((7398, 7459), 'os.path.join', 'os.path.join', (['self.args.output_dir', 'f"""checkpoint-{self.step}"""'], {}), "(self.args.output_dir, f'checkpoint-{self.step}')\n", (7410, 7459), False, 'import os\n'), ((7490, 7538), 'logging.info', 'logging.info', (['"""New best %s"""', 'new_best_checkpoint'], {}), "('New best %s', new_best_checkpoint)\n", (7502, 7538), False, 'import logging\n'), ((7570, 7623), 'os.path.join', 'os.path.join', (['self.args.output_dir', '"""best-checkpoint"""'], {}), "(self.args.output_dir, 'best-checkpoint')\n", (7582, 7623), False, 'import os\n'), ((9370, 9420), 'os.path.join', 'os.path.join', (['checkpoint_path', '"""train_config.json"""'], {}), "(checkpoint_path, 'train_config.json')\n", (9382, 9420), False, 'import os\n'), ((9585, 9632), 'os.path.join', 'os.path.join', (['checkpoint_path', '"""train_state.pt"""'], {}), "(checkpoint_path, 'train_state.pt')\n", (9597, 9632), False, 'import os\n'), ((9794, 9837), 'os.path.join', 'os.path.join', (['checkpoint_path', 'WEIGHTS_NAME'], {}), '(checkpoint_path, WEIGHTS_NAME)\n', (9806, 9837), False, 'import os\n'), ((10001, 10052), 'os.path.join', 'os.path.join', 
(['checkpoint_path', '"""train_metrics.json"""'], {}), "(checkpoint_path, 'train_metrics.json')\n", (10013, 10052), False, 'import os\n'), ((10437, 10465), 'torch.load', 'torch.load', (['train_state_path'], {}), '(train_state_path)\n', (10447, 10465), False, 'import torch\n'), ((11186, 11214), 'torch.load', 'torch.load', (['model_state_path'], {}), '(model_state_path)\n', (11196, 11214), False, 'import torch\n'), ((11905, 11927), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (11920, 11927), False, 'from torch import nn\n'), ((1508, 1533), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1531, 1533), False, 'import torch\n'), ((2651, 2712), 'metrics.Metric', 'metrics.Metric', (['"""lr"""', '"""format_scientific"""', '"""g"""'], {'max_history': '(1)'}), "('lr', 'format_scientific', 'g', max_history=1)\n", (2665, 2712), False, 'import metrics\n'), ((2766, 2829), 'metrics.Metric', 'metrics.Metric', (['"""ppl"""', '"""format_dynamic_float"""'], {'max_history': '(1000)'}), "('ppl', 'format_dynamic_float', max_history=1000)\n", (2780, 2829), False, 'import metrics\n'), ((2883, 2942), 'metrics.Metric', 'metrics.Metric', (['"""ntok"""', '"""format_int"""', '"""a"""'], {'max_history': '(1000)'}), "('ntok', 'format_int', 'a', max_history=1000)\n", (2897, 2942), False, 'import metrics\n'), ((2983, 3023), 'metrics.Metric', 'metrics.Metric', (['"""oom"""', '"""format_int"""', '"""t"""'], {}), "('oom', 'format_int', 't')\n", (2997, 3023), False, 'import metrics\n'), ((3055, 3110), 'metrics.Metric', 'metrics.Metric', (['"""nll"""', '"""format_float"""'], {'max_history': '(1000)'}), "('nll', 'format_float', max_history=1000)\n", (3069, 3110), False, 'import metrics\n'), ((5488, 5519), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (5502, 5519), False, 'import os\n'), ((5533, 5561), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {}), '(checkpoint_path)\n', (5544, 5561), False, 'import os\n'), ((6074, 6095), 'utils.refresh_cuda_memory', 'refresh_cuda_memory', ([], {}), '()\n', (6093, 6095), False, 'from utils import collect_tensors, tqdm_wrap_stdout, tqdm_unwrap_stdout, refresh_cuda_memory, release_cuda_memory\n'), ((6129, 6145), 'apex.amp.state_dict', 'amp.state_dict', ([], {}), '()\n', (6143, 6145), False, 'from apex import amp\n'), ((6488, 6529), 'torch.save', 'torch.save', (['train_state', 'train_state_file'], {}), '(train_state, train_state_file)\n', (6498, 6529), False, 'import torch\n'), ((7048, 7079), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7062, 7079), False, 'import os\n'), ((7093, 7121), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7104, 7121), False, 'import os\n'), ((7216, 7267), 'os.path.join', 'os.path.join', (['checkpoint_path', '"""train_metrics.json"""'], {}), "(checkpoint_path, 'train_metrics.json')\n", (7228, 7267), False, 'import os\n'), ((7772, 7803), 'os.remove', 'os.remove', (['best_checkpoint_path'], {}), '(best_checkpoint_path)\n', (7781, 7803), False, 'import os\n'), ((7947, 7984), 'os.path.basename', 'os.path.basename', (['new_best_checkpoint'], {}), '(new_best_checkpoint)\n', (7963, 7984), False, 'import os\n'), ((8267, 8317), 'os.path.join', 'os.path.join', (['self.args.output_dir', '"""checkpoint-*"""'], {}), "(self.args.output_dir, 'checkpoint-*')\n", (8279, 8317), False, 'import os\n'), ((9136, 9186), 'logging.info', 'logging.info', (['"""Removing checkpoint %s"""', 'checkpoint'], {}), "('Removing 
checkpoint %s', checkpoint)\n", (9148, 9186), False, 'import logging\n'), ((9199, 9224), 'shutil.rmtree', 'shutil.rmtree', (['checkpoint'], {}), '(checkpoint)\n', (9212, 9224), False, 'import shutil\n'), ((9436, 9469), 'os.path.isfile', 'os.path.isfile', (['train_config_path'], {}), '(train_config_path)\n', (9450, 9469), False, 'import os\n'), ((9648, 9680), 'os.path.isfile', 'os.path.isfile', (['train_state_path'], {}), '(train_state_path)\n', (9662, 9680), False, 'import os\n'), ((9853, 9885), 'os.path.isfile', 'os.path.isfile', (['model_state_path'], {}), '(model_state_path)\n', (9867, 9885), False, 'import os\n'), ((10068, 10102), 'os.path.isfile', 'os.path.isfile', (['train_metrics_path'], {}), '(train_metrics_path)\n', (10082, 10102), False, 'import os\n'), ((11123, 11162), 'apex.amp.load_state_dict', 'amp.load_state_dict', (["train_state['amp']"], {}), "(train_state['amp'])\n", (11142, 11162), False, 'from apex import amp\n'), ((12573, 12584), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (12582, 12584), False, 'from contextlib import ExitStack\n'), ((18518, 18532), 'math.exp', 'math.exp', (['loss'], {}), '(loss)\n', (18526, 18532), False, 'import math\n'), ((20236, 20290), 'model.GPT2SegmentedModel.pretrained_model_archive_map.keys', 'GPT2SegmentedModel.pretrained_model_archive_map.keys', ([], {}), '()\n', (20288, 20290), False, 'from model import GPT2SegmentedModel\n'), ((6391, 6438), 'os.path.join', 'os.path.join', (['checkpoint_path', '"""train_state.pt"""'], {}), "(checkpoint_path, 'train_state.pt')\n", (6403, 6438), False, 'import os\n'), ((6562, 6612), 'os.path.join', 'os.path.join', (['checkpoint_path', '"""train_config.json"""'], {}), "(checkpoint_path, 'train_config.json')\n", (6574, 6612), False, 'import os\n'), ((8621, 8674), 'os.path.join', 'os.path.join', (['self.args.output_dir', '"""best-checkpoint"""'], {}), "(self.args.output_dir, 'best-checkpoint')\n", (8633, 8674), False, 'import os\n'), ((8858, 8886), 'os.path.basename', 'os.path.basename', (['checkpoint'], {}), '(checkpoint)\n', (8874, 8886), False, 'import os\n'), ((9034, 9093), 'logging.info', 'logging.info', (['"""Not removing best checkpoint %s"""', 'checkpoint'], {}), "('Not removing best checkpoint %s', checkpoint)\n", (9046, 9093), False, 'import logging\n'), ((12666, 12684), 'utils.tqdm_wrap_stdout', 'tqdm_wrap_stdout', ([], {}), '()\n', (12682, 12684), False, 'from utils import collect_tensors, tqdm_wrap_stdout, tqdm_unwrap_stdout, refresh_cuda_memory, release_cuda_memory\n'), ((12718, 12738), 'data.parallel.chunked_scattering', 'chunked_scattering', ([], {}), '()\n', (12736, 12738), False, 'from data.parallel import chunked_scattering\n'), ((13062, 13086), 'copy.deepcopy', 'copy.deepcopy', (['self.args'], {}), '(self.args)\n', (13075, 13086), False, 'import copy\n'), ((13192, 13212), 'evaluate.Evaluator', 'Evaluator', (['eval_args'], {}), '(eval_args)\n', (13201, 13212), False, 'from evaluate import Evaluator\n'), ((13736, 13753), 'itertools.cycle', 'cycle', (['dataloader'], {}), '(dataloader)\n', (13741, 13753), False, 'from itertools import cycle\n'), ((18058, 18089), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (18072, 18089), False, 'from apex import amp\n'), ((13480, 13526), 'metrics.Metric', 'metrics.Metric', (['"""vnll"""', '"""format_float"""', '"""g(m)"""'], {}), "('vnll', 'format_float', 'g(m)')\n", (13494, 13526), False, 'import metrics\n'), ((10377, 10399), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**obj)\n', (10392, 
10399), False, 'from types import SimpleNamespace\n'), ((16879, 16900), 'utils.refresh_cuda_memory', 'refresh_cuda_memory', ([], {}), '()\n', (16898, 16900), False, 'from utils import collect_tensors, tqdm_wrap_stdout, tqdm_unwrap_stdout, refresh_cuda_memory, release_cuda_memory\n'), ((16995, 17025), 'logging.info', 'logging.info', (['"""Stopping early"""'], {}), "('Stopping early')\n", (17007, 17025), False, 'import logging\n'), ((17152, 17185), 'logging.info', 'logging.info', (['"""Finished training"""'], {}), "('Finished training')\n", (17164, 17185), False, 'import logging\n'), ((8373, 8392), 'os.path.basename', 'os.path.basename', (['c'], {}), '(c)\n', (8389, 8392), False, 'import os\n'), ((15669, 15680), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (15678, 15680), False, 'from contextlib import ExitStack\n'), ((15820, 15840), 'utils.tqdm_unwrap_stdout', 'tqdm_unwrap_stdout', ([], {}), '()\n', (15838, 15840), False, 'from utils import collect_tensors, tqdm_wrap_stdout, tqdm_unwrap_stdout, refresh_cuda_memory, release_cuda_memory\n'), ((16009, 16041), 'utils.collect_tensors', 'collect_tensors', (['optimizer.state'], {}), '(optimizer.state)\n', (16024, 16041), False, 'from utils import collect_tensors, tqdm_wrap_stdout, tqdm_unwrap_stdout, refresh_cuda_memory, release_cuda_memory\n')]
|
import pickle
import numpy
import glob
import os
import sys
from keras.callbacks import ModelCheckpoint
from keras.layers import LSTM
from kutilities.callbacks import MetricsCallback, PlottingCallback
from kutilities.helpers.data_preparation import get_labels_to_categories_map, \
get_class_weights2, onehot_to_categories
from sklearn.metrics import f1_score, precision_score, accuracy_score
from sklearn.metrics import recall_score
from keras.callbacks import TensorBoard
sys.path.insert(
0, "{}/datastories_semeval2017_task4".format(os.getcwd()))
from models.nn_models import build_attention_RNN
from utilities_nn.data_loader import get_embeddings, Task4Loader, prepare_dataset
from evaluate.evaluate import performance_analysis
numpy.random.seed(1337) # for reproducibility
# specify the word vectors file to use.
WV_CORPUS = "embedtweets.de"
WV_DIM = 200
# Flag that sets the training mode.
# - if FINAL == False, then the dataset will be split in {train, val, test}
# - if FINAL == True, then the dataset will be split in {train, val}.
# Part of the labeled data will be kept as a validation set for early stopping
FINAL = True
max_length = 50 # max text length
DATAFOLDER = "{}/data/labeled_sentiment_data/pickle_files/".format(os.getcwd())
PREPROCESS_TYP = "ekphrasis"
COPRPUSNAME = "mixed_corpus_1"
############################################################################
# PERSISTENCE
############################################################################
# if True save model checkpoints, as well as the corresponding word indices
# set PERSIST = True, in order to be able to use the trained model later
PERSIST = True
RESULT_PATH = "results_artificial_neural_network/{}/{}/".format(
PREPROCESS_TYP, COPRPUSNAME)
MODEL_FILE_NUMBER = len(
glob.glob(os.path.join(RESULT_PATH, "model_history_{}*.pickle".format(PREPROCESS_TYP)))) + 1
def best_model(): return "{}model_{}_{}.hdf5".format(
RESULT_PATH, PREPROCESS_TYP, MODEL_FILE_NUMBER)
def best_model_word_indices(): return "{}model_word_indices_{}.{}.pickle".format(DATAFOLDER, WV_CORPUS, WV_DIM)
############################################################################
# LOAD DATA
############################################################################
embeddings, word_indices = get_embeddings(corpus=WV_CORPUS, dim=WV_DIM)
if PERSIST:
if not os.path.exists(best_model_word_indices()):
pickle.dump(word_indices, open(best_model_word_indices(), 'wb'))
loader = Task4Loader(word_indices, text_lengths=max_length, loading_data=True,
datafolder=DATAFOLDER+COPRPUSNAME+"/", preprocess_typ=PREPROCESS_TYP)
if FINAL:
print("\n > running in FINAL mode!\n")
training, testing = loader.load_final() #Processing Data
else:
training, validation, testing = loader.load_train_val_test()
pickle.dump(validation, open("{}{}/validation_data_nn_{}.pickle".format(
DATAFOLDER, COPRPUSNAME, PREPROCESS_TYP), "wb"))
# training[0], training[1] = text, sentiment
pickle.dump(training, open("{}{}/training_data_nn_{}.pickle".format(
DATAFOLDER, COPRPUSNAME, PREPROCESS_TYP), "wb"))
pickle.dump(testing, open("{}{}/testing_data_nn_{}.pickle".format(
DATAFOLDER, COPRPUSNAME, PREPROCESS_TYP), "wb"))
############################################################################
# NN MODEL
############################################################################
print("Building NN Model...")
attention_model = "simple" # "simple", None
nn_model = build_attention_RNN(embeddings, classes=3, max_length=max_length, #classes = pos., neg, neutral
unit=LSTM, layers=2, cells=150,
bidirectional=True,
attention=attention_model,
noise=0.1, #0.3
final_layer=False,
dropout_final=0.1,
dropout_attention=0.1, #0.5
dropout_words=0.1,
dropout_rnn=0.1,
dropout_rnn_U=0.1,
clipnorm=1, lr=0.001, loss_l2=0.0001) # gradient clipping and learning rate
print(nn_model.summary())
############################################################################
# CALLBACKS
############################################################################
metrics = {
"f1_pn": (lambda y_test, y_pred:
f1_score(y_test, y_pred, average='macro',
labels=[class_to_cat_mapping['positive'],
class_to_cat_mapping['negative']])),
"f1_weighted": (lambda y_test, y_pred:
f1_score(y_test, y_pred, average='weighted',
labels=[class_to_cat_mapping['positive'],
class_to_cat_mapping['neutral'],
class_to_cat_mapping['negative']])),
"M_recall": (
lambda y_test, y_pred: recall_score(y_test, y_pred, average='macro')),
"M_precision": (
lambda y_test, y_pred: precision_score(y_test, y_pred,
average='macro')),
"accuracy": (
lambda y_test, y_pred: accuracy_score(y_test, y_pred))
}
classes = ['positive', 'negative', 'neutral']
class_to_cat_mapping = get_labels_to_categories_map(
classes) # {'negative': 0, 'neutral': 1, 'positive': 2}
cat_to_class_mapping = {v: k for k, v in
get_labels_to_categories_map(classes).items()} # {0: 'negative', 1: 'neutral', 2: 'positive'}
_datasets = {}
_datasets["1-train"] = training,
_datasets["2-val"] = validation if not FINAL else testing
if not FINAL:
_datasets["3-test"] = testing
metrics_callback = MetricsCallback(datasets=_datasets, metrics=metrics)
plotting = PlottingCallback(grid_ranges=(0.7, 1), height=5,
plot_name="model_{}_{}_{}".format(COPRPUSNAME, PREPROCESS_TYP, MODEL_FILE_NUMBER)) # benchmarks={"SE17": 0.681},
tensorboard = TensorBoard(log_dir='./logs/{}'.format(COPRPUSNAME))
_callbacks = []
_callbacks.append(metrics_callback)
_callbacks.append(tensorboard)
_callbacks.append(plotting)
if PERSIST:
monitor = "val_acc" # 'val.macro_recall'
mode = "max" # mode="max"
checkpointer = ModelCheckpoint(filepath=best_model(),
monitor=monitor, mode=mode,
verbose=1, save_best_only=True)
_callbacks.append(checkpointer)
############################################################################
# APPLY CLASS WEIGHTS
############################################################################
class_weights = get_class_weights2(onehot_to_categories(training[1]),
smooth_factor=0)
print("Class weights:",
{cat_to_class_mapping[c]: w for c, w in class_weights.items()})
# 50-50
epochs = 20
batch_size = 20
history = nn_model.fit(training[0], training[1],
validation_data=validation if not FINAL else testing,
epochs=epochs, batch_size=batch_size,
class_weight=class_weights, callbacks=_callbacks)
pickle.dump(history.history, open("{}model_history_{}_{}.pickle".format(
RESULT_PATH, PREPROCESS_TYP, MODEL_FILE_NUMBER), "wb"))
############################################################################
# Evaluation
############################################################################
file_name = "{}/{}/evaluation_{}_{}".format(PREPROCESS_TYP, COPRPUSNAME, PREPROCESS_TYP, MODEL_FILE_NUMBER)
file_information = "epochs = " + str(epochs) + "\nbatch_size = " + str(batch_size) + \
"\nmax textlength = " + str(max_length) + "\npreprocess-typ = " + \
PREPROCESS_TYP + "\nattention model = " + \
str(attention_model) + "\nbest model with " + mode + " " + monitor
file_information = file_information + \
"\n2 LSTM Layer\nDropout & Noise = 0.1"
performance_analysis(testing, nn_model, file_name=file_name, file_information=file_information, verbose=True, accuracy=True,
confusion_matrix=True, plotting_confusion_matrix=True, classification_report=True)
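# --- Illustrative sketch (not part of the original script): roughly what the
# get_class_weights2(onehot_to_categories(...)) call above computes. This is a
# hedged approximation of the kutilities helpers, not their actual source.
def _class_weights_sketch(onehot_labels, smooth_factor=0.0):
    import numpy as np
    from collections import Counter
    # one-hot -> categorical indices (what onehot_to_categories does)
    counts = Counter(np.argmax(onehot_labels, axis=-1).tolist())
    if smooth_factor > 0:
        # smoothing pads every count so rare classes are not over-weighted
        pad = max(counts.values()) * smooth_factor
        for key in counts:
            counts[key] += pad
    majority = max(counts.values())
    # weight each class by how much rarer it is than the majority class
    return {cls: majority / count for cls, count in counts.items()}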
|
[
"evaluate.evaluate.performance_analysis"
] |
[((742, 765), 'numpy.random.seed', 'numpy.random.seed', (['(1337)'], {}), '(1337)\n', (759, 765), False, 'import numpy\n'), ((2296, 2340), 'utilities_nn.data_loader.get_embeddings', 'get_embeddings', ([], {'corpus': 'WV_CORPUS', 'dim': 'WV_DIM'}), '(corpus=WV_CORPUS, dim=WV_DIM)\n', (2310, 2340), False, 'from utilities_nn.data_loader import get_embeddings, Task4Loader, prepare_dataset\n'), ((2491, 2638), 'utilities_nn.data_loader.Task4Loader', 'Task4Loader', (['word_indices'], {'text_lengths': 'max_length', 'loading_data': '(True)', 'datafolder': "(DATAFOLDER + COPRPUSNAME + '/')", 'preprocess_typ': 'PREPROCESS_TYP'}), "(word_indices, text_lengths=max_length, loading_data=True,\n datafolder=DATAFOLDER + COPRPUSNAME + '/', preprocess_typ=PREPROCESS_TYP)\n", (2502, 2638), False, 'from utilities_nn.data_loader import get_embeddings, Task4Loader, prepare_dataset\n'), ((3515, 3841), 'models.nn_models.build_attention_RNN', 'build_attention_RNN', (['embeddings'], {'classes': '(3)', 'max_length': 'max_length', 'unit': 'LSTM', 'layers': '(2)', 'cells': '(150)', 'bidirectional': '(True)', 'attention': 'attention_model', 'noise': '(0.1)', 'final_layer': '(False)', 'dropout_final': '(0.1)', 'dropout_attention': '(0.1)', 'dropout_words': '(0.1)', 'dropout_rnn': '(0.1)', 'dropout_rnn_U': '(0.1)', 'clipnorm': '(1)', 'lr': '(0.001)', 'loss_l2': '(0.0001)'}), '(embeddings, classes=3, max_length=max_length, unit=LSTM,\n layers=2, cells=150, bidirectional=True, attention=attention_model,\n noise=0.1, final_layer=False, dropout_final=0.1, dropout_attention=0.1,\n dropout_words=0.1, dropout_rnn=0.1, dropout_rnn_U=0.1, clipnorm=1, lr=\n 0.001, loss_l2=0.0001)\n', (3534, 3841), False, 'from models.nn_models import build_attention_RNN\n'), ((5389, 5426), 'kutilities.helpers.data_preparation.get_labels_to_categories_map', 'get_labels_to_categories_map', (['classes'], {}), '(classes)\n', (5417, 5426), False, 'from kutilities.helpers.data_preparation import get_labels_to_categories_map, get_class_weights2, onehot_to_categories\n'), ((5815, 5867), 'kutilities.callbacks.MetricsCallback', 'MetricsCallback', ([], {'datasets': '_datasets', 'metrics': 'metrics'}), '(datasets=_datasets, metrics=metrics)\n', (5830, 5867), False, 'from kutilities.callbacks import MetricsCallback, PlottingCallback\n'), ((8029, 8248), 'evaluate.evaluate.performance_analysis', 'performance_analysis', (['testing', 'nn_model'], {'file_name': 'file_name', 'file_information': 'file_information', 'verbose': '(True)', 'accuracy': '(True)', 'confusion_matrix': '(True)', 'plotting_confusion_matrix': '(True)', 'classification_report': '(True)'}), '(testing, nn_model, file_name=file_name,\n file_information=file_information, verbose=True, accuracy=True,\n confusion_matrix=True, plotting_confusion_matrix=True,\n classification_report=True)\n', (8049, 8248), False, 'from evaluate.evaluate import performance_analysis\n'), ((1255, 1266), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1264, 1266), False, 'import os\n'), ((6776, 6809), 'kutilities.helpers.data_preparation.onehot_to_categories', 'onehot_to_categories', (['training[1]'], {}), '(training[1])\n', (6796, 6809), False, 'from kutilities.helpers.data_preparation import get_labels_to_categories_map, get_class_weights2, onehot_to_categories\n'), ((544, 555), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (553, 555), False, 'import os\n'), ((4507, 4630), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': '"""macro"""', 'labels': "[class_to_cat_mapping['positive'], 
class_to_cat_mapping['negative']]"}), "(y_test, y_pred, average='macro', labels=[class_to_cat_mapping[\n 'positive'], class_to_cat_mapping['negative']])\n", (4515, 4630), False, 'from sklearn.metrics import f1_score, precision_score, accuracy_score\n'), ((4745, 4909), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': '"""weighted"""', 'labels': "[class_to_cat_mapping['positive'], class_to_cat_mapping['neutral'],\n class_to_cat_mapping['negative']]"}), "(y_test, y_pred, average='weighted', labels=[class_to_cat_mapping[\n 'positive'], class_to_cat_mapping['neutral'], class_to_cat_mapping[\n 'negative']])\n", (4753, 4909), False, 'from sklearn.metrics import f1_score, precision_score, accuracy_score\n'), ((5038, 5083), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': '"""macro"""'}), "(y_test, y_pred, average='macro')\n", (5050, 5083), False, 'from sklearn.metrics import recall_score\n'), ((5138, 5186), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': '"""macro"""'}), "(y_test, y_pred, average='macro')\n", (5153, 5186), False, 'from sklearn.metrics import f1_score, precision_score, accuracy_score\n'), ((5285, 5315), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5299, 5315), False, 'from sklearn.metrics import f1_score, precision_score, accuracy_score\n'), ((5545, 5582), 'kutilities.helpers.data_preparation.get_labels_to_categories_map', 'get_labels_to_categories_map', (['classes'], {}), '(classes)\n', (5573, 5582), False, 'from kutilities.helpers.data_preparation import get_labels_to_categories_map, get_class_weights2, onehot_to_categories\n')]
|
import argparse
import functools
import itertools
import os.path
import time
from pprint import pprint
import torch
import numpy as np
from benepar import char_lstm
from benepar import decode_chart
from benepar import nkutil
from benepar import parse_chart
import evaluate
import learning_rates
import treebanks
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = "{}h{:02}m{:02}s".format(hours, minutes, seconds)
if days > 0:
elapsed_string = "{}d{}".format(days, elapsed_string)
return elapsed_string
def make_hparams():
return nkutil.HParams(
# Data processing
max_len_train=0, # no length limit
max_len_dev=0, # no length limit
# Optimization
batch_size=32,
learning_rate=0.00005,
learning_rate_warmup_steps=160,
clip_grad_norm=0.0, # no clipping
checks_per_epoch=4,
step_decay_factor=0.5,
step_decay_patience=5,
max_consecutive_decays=3, # establishes a termination criterion
# CharLSTM
use_chars_lstm=False,
d_char_emb=64,
char_lstm_input_dropout=0.2,
# BERT and other pre-trained models
use_pretrained=False,
pretrained_model="bert-base-uncased",
# elmo
use_elmo=False,
# Partitioned transformer encoder
use_encoder=False,
d_model=1024,
num_layers=8,
num_heads=8,
d_kv=64,
d_ff=2048,
encoder_max_len=512,
# Dropout
morpho_emb_dropout=0.2,
attention_dropout=0.2,
relu_dropout=0.1,
residual_dropout=0.2,
elmo_dropout=0.5, # Note that this semi-stacks with morpho_emb_dropout!
# Output heads and losses
force_root_constituent="auto",
predict_tags=False,
d_label_hidden=256,
d_tag_hidden=256,
tag_loss_scale=5.0,
)
def run_train(args, hparams):
if args.numpy_seed is not None:
print("Setting numpy random seed to {}...".format(args.numpy_seed))
np.random.seed(args.numpy_seed)
# Make sure that pytorch is actually being initialized randomly.
# On my cluster I was getting highly correlated results from multiple
# runs, but calling reset_parameters() changed that. A brief look at the
# pytorch source code revealed that pytorch initializes its RNG by
# calling std::random_device, which according to the C++ spec is allowed
# to be deterministic.
seed_from_numpy = np.random.randint(2147483648)
print("Manual seed for pytorch:", seed_from_numpy)
torch.manual_seed(seed_from_numpy)
hparams.set_from_args(args)
print("Hyperparameters:")
hparams.print()
print()
pprint(vars(args))
print()
print("Loading training trees from {}...".format(args.train_path))
train_treebank = treebanks.load_trees(
args.train_path, args.train_path_text, args.text_processing
)
print("Loaded {:,} training examples.".format(len(train_treebank)))
if hparams.max_len_train > 0:
train_treebank = train_treebank.filter_by_length(hparams.max_len_train)
print("len after filtering {:,}".format(len(train_treebank)))
print("Loading development trees from {}...".format(args.dev_path))
dev_treebank = treebanks.load_trees(
args.dev_path, args.dev_path_text, args.text_processing
)
print("Loaded {:,} development examples.".format(len(dev_treebank)))
if hparams.max_len_dev > 0:
dev_treebank = dev_treebank.filter_by_length(hparams.max_len_dev)
print("len after filtering {:,}".format(len(dev_treebank)))
print("Constructing vocabularies...")
label_vocab = decode_chart.ChartDecoder.build_vocab(train_treebank.trees)
if hparams.use_chars_lstm:
char_vocab = char_lstm.RetokenizerForCharLSTM.build_vocab(train_treebank.sents)
else:
char_vocab = None
tag_vocab = set()
for tree in train_treebank.trees:
for _, tag in tree.pos():
tag_vocab.add(tag)
tag_vocab = ["UNK"] + sorted(tag_vocab)
tag_vocab = {label: i for i, label in enumerate(tag_vocab)}
if hparams.force_root_constituent.lower() in ("true", "yes", "1"):
hparams.force_root_constituent = True
elif hparams.force_root_constituent.lower() in ("false", "no", "0"):
hparams.force_root_constituent = False
elif hparams.force_root_constituent.lower() == "auto":
hparams.force_root_constituent = (
decode_chart.ChartDecoder.infer_force_root_constituent(train_treebank.trees)
)
print("Set hparams.force_root_constituent to", hparams.force_root_constituent)
print("Initializing model...")
parser = parse_chart.ChartParser(
tag_vocab=tag_vocab,
label_vocab=label_vocab,
char_vocab=char_vocab,
hparams=hparams,
)
if args.parallelize:
parser.parallelize()
elif torch.cuda.is_available():
parser.cuda()
else:
print("Not using CUDA!")
print("Initializing optimizer...")
trainable_parameters = [
param for param in parser.parameters() if param.requires_grad
]
optimizer = torch.optim.Adam(
trainable_parameters, lr=hparams.learning_rate, betas=(0.9, 0.98), eps=1e-9
)
scheduler = learning_rates.WarmupThenReduceLROnPlateau(
optimizer,
hparams.learning_rate_warmup_steps,
mode="max",
factor=hparams.step_decay_factor,
patience=hparams.step_decay_patience * hparams.checks_per_epoch,
verbose=True,
)
clippable_parameters = trainable_parameters
grad_clip_threshold = (
np.inf if hparams.clip_grad_norm == 0 else hparams.clip_grad_norm
)
print("Training...")
total_processed = 0
current_processed = 0
check_every = len(train_treebank) / hparams.checks_per_epoch
best_dev_fscore = -np.inf
best_dev_model_path = None
best_dev_processed = 0
start_time = time.time()
def check_dev():
nonlocal best_dev_fscore
nonlocal best_dev_model_path
nonlocal best_dev_processed
dev_start_time = time.time()
dev_predicted = parser.parse(
dev_treebank.without_gold_annotations(),
subbatch_max_tokens=args.subbatch_max_tokens,
)
dev_fscore = evaluate.evalb(args.evalb_dir, dev_treebank.trees, dev_predicted)
print(
"dev-fscore {} "
"dev-elapsed {} "
"total-elapsed {}".format(
dev_fscore,
format_elapsed(dev_start_time),
format_elapsed(start_time),
)
)
if dev_fscore.fscore > best_dev_fscore:
if best_dev_model_path is not None:
extensions = [".pt"]
for ext in extensions:
path = best_dev_model_path + ext
if os.path.exists(path):
print("Removing previous model file {}...".format(path))
os.remove(path)
best_dev_fscore = dev_fscore.fscore
best_dev_model_path = "{}_dev={:.2f}".format(
args.model_path_base, dev_fscore.fscore
)
best_dev_processed = total_processed
print("Saving new best model to {}...".format(best_dev_model_path))
torch.save(
{
"config": parser.config,
"state_dict": parser.state_dict(),
"optimizer": optimizer.state_dict(),
},
best_dev_model_path + ".pt",
)
data_loader = torch.utils.data.DataLoader(
train_treebank,
batch_size=hparams.batch_size,
shuffle=True,
collate_fn=functools.partial(
parser.encode_and_collate_subbatches,
subbatch_max_tokens=args.subbatch_max_tokens,
),
)
for epoch in itertools.count(start=1):
epoch_start_time = time.time()
for batch_num, batch in enumerate(data_loader, start=1):
optimizer.zero_grad()
parser.train()
batch_loss_value = 0.0
for subbatch_size, subbatch in batch:
loss = parser.compute_loss(subbatch)
loss_value = float(loss.data.cpu().numpy())
batch_loss_value += loss_value
if loss_value > 0:
loss.backward()
del loss
total_processed += subbatch_size
current_processed += subbatch_size
grad_norm = torch.nn.utils.clip_grad_norm_(
clippable_parameters, grad_clip_threshold
)
optimizer.step()
print(
"epoch {:,} "
"batch {:,}/{:,} "
"processed {:,} "
"batch-loss {:.4f} "
"grad-norm {:.4f} "
"epoch-elapsed {} "
"total-elapsed {}".format(
epoch,
batch_num,
int(np.ceil(len(train_treebank) / hparams.batch_size)),
total_processed,
batch_loss_value,
grad_norm,
format_elapsed(epoch_start_time),
format_elapsed(start_time),
)
)
if current_processed >= check_every:
current_processed -= check_every
check_dev()
scheduler.step(metrics=best_dev_fscore)
else:
scheduler.step()
if (total_processed - best_dev_processed) > (
(hparams.step_decay_patience + 1)
* hparams.max_consecutive_decays
* len(train_treebank)
):
print("Terminating due to lack of improvement in dev fscore.")
break
def run_test(args):
print("Loading test trees from {}...".format(args.test_path))
test_treebank = treebanks.load_trees(
args.test_path, args.test_path_text, args.text_processing
)
print("Loaded {:,} test examples.".format(len(test_treebank)))
if len(args.model_path) != 1:
raise NotImplementedError(
"Ensembling multiple parsers is not "
"implemented in this version of the code."
)
model_path = args.model_path[0]
print("Loading model from {}...".format(model_path))
parser = parse_chart.ChartParser.from_trained(model_path)
if args.no_predict_tags and parser.f_tag is not None:
print("Removing part-of-speech tagging head...")
parser.f_tag = None
if args.parallelize:
parser.parallelize()
elif torch.cuda.is_available():
parser.cuda()
print("Parsing test sentences...")
start_time = time.time()
test_predicted = parser.parse(
test_treebank.without_gold_annotations(),
subbatch_max_tokens=args.subbatch_max_tokens,
)
if args.output_path == "-":
for tree in test_predicted:
print(tree.pformat(margin=1e100))
elif args.output_path:
with open(args.output_path, "w") as outfile:
for tree in test_predicted:
outfile.write("{}\n".format(tree.pformat(margin=1e100)))
# The tree loader does some preprocessing to the trees (e.g. stripping TOP
# symbols or SPMRL morphological features). We compare with the input file
# directly to be extra careful about not corrupting the evaluation. We also
# allow specifying a separate "raw" file for the gold trees: the inputs to
# our parser have traces removed and may have predicted tags substituted,
# and we may wish to compare against the raw gold trees to make sure we
# haven't made a mistake. As far as we can tell all of these variations give
# equivalent results.
ref_gold_path = args.test_path
if args.test_path_raw is not None:
print("Comparing with raw trees from", args.test_path_raw)
ref_gold_path = args.test_path_raw
test_fscore = evaluate.evalb(
args.evalb_dir, test_treebank.trees, test_predicted, ref_gold_path=ref_gold_path
)
print(
"test-fscore {} "
"test-elapsed {}".format(
test_fscore,
format_elapsed(start_time),
)
)
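# --- Illustrative sketch (not part of the original file): evalb is an
# external C program, but the bracketing F1 it reports boils down to labeled
# span precision/recall. A toy version over (label, start, end) span sets
# (the real evalb also handles duplicate brackets and various exclusions):
def _span_f1_sketch(gold_spans, pred_spans):
    gold, pred = set(gold_spans), set(pred_spans)
    matched = len(gold & pred)
    precision = matched / len(pred) if pred else 0.0
    recall = matched / len(gold) if gold else 0.0
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)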
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
hparams = make_hparams()
subparser = subparsers.add_parser("train")
subparser.set_defaults(callback=lambda args: run_train(args, hparams))
hparams.populate_arguments(subparser)
subparser.add_argument("--numpy-seed", type=int)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--train-path", default="data/wsj/train_02-21.LDC99T42")
subparser.add_argument("--train-path-text", type=str)
subparser.add_argument("--dev-path", default="data/wsj/dev_22.LDC99T42")
subparser.add_argument("--dev-path-text", type=str)
subparser.add_argument("--text-processing", default="default")
subparser.add_argument("--subbatch-max-tokens", type=int, default=2000)
subparser.add_argument("--parallelize", action="store_true")
subparser.add_argument("--print-vocabs", action="store_true")
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
subparser.add_argument("--model-path", nargs="+", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-path", default="data/wsj/test_23.LDC99T42")
subparser.add_argument("--test-path-text", type=str)
subparser.add_argument("--test-path-raw", type=str)
subparser.add_argument("--text-processing", default="default")
subparser.add_argument("--subbatch-max-tokens", type=int, default=500)
subparser.add_argument("--parallelize", action="store_true")
subparser.add_argument("--output-path", default="")
subparser.add_argument("--no-predict-tags", action="store_true")
args = parser.parse_args()
args.callback(args)
if __name__ == "__main__":
main()
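# --- Illustrative sketch (not part of this file): the behaviour of
# learning_rates.WarmupThenReduceLROnPlateau as used in run_train, rebuilt
# from stock PyTorch pieces. The real class may differ in detail.
class _WarmupThenPlateauSketch:
    def __init__(self, optimizer, warmup_steps, **plateau_kwargs):
        self.optimizer = optimizer
        self.warmup_steps = warmup_steps
        self.steps_taken = 0
        self.base_lrs = [g["lr"] for g in optimizer.param_groups]
        self.plateau = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, **plateau_kwargs)

    def step(self, metrics=None):
        self.steps_taken += 1
        if self.steps_taken <= self.warmup_steps:
            # linear warmup from 0 up to each group's base learning rate
            scale = self.steps_taken / self.warmup_steps
            for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):
                group["lr"] = base_lr * scale
        elif metrics is not None:
            # after warmup, only react when a validation metric is supplied,
            # matching how run_train calls scheduler.step(metrics=...) on dev
            # checks and plain scheduler.step() otherwise
            self.plateau.step(metrics)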
|
[
"evaluate.evalb"
] |
[((732, 1457), 'benepar.nkutil.HParams', 'nkutil.HParams', ([], {'max_len_train': '(0)', 'max_len_dev': '(0)', 'batch_size': '(32)', 'learning_rate': '(5e-05)', 'learning_rate_warmup_steps': '(160)', 'clip_grad_norm': '(0.0)', 'checks_per_epoch': '(4)', 'step_decay_factor': '(0.5)', 'step_decay_patience': '(5)', 'max_consecutive_decays': '(3)', 'use_chars_lstm': '(False)', 'd_char_emb': '(64)', 'char_lstm_input_dropout': '(0.2)', 'use_pretrained': '(False)', 'pretrained_model': '"""bert-base-uncased"""', 'use_elmo': '(False)', 'use_encoder': '(False)', 'd_model': '(1024)', 'num_layers': '(8)', 'num_heads': '(8)', 'd_kv': '(64)', 'd_ff': '(2048)', 'encoder_max_len': '(512)', 'morpho_emb_dropout': '(0.2)', 'attention_dropout': '(0.2)', 'relu_dropout': '(0.1)', 'residual_dropout': '(0.2)', 'elmo_dropout': '(0.5)', 'force_root_constituent': '"""auto"""', 'predict_tags': '(False)', 'd_label_hidden': '(256)', 'd_tag_hidden': '(256)', 'tag_loss_scale': '(5.0)'}), "(max_len_train=0, max_len_dev=0, batch_size=32, learning_rate\n =5e-05, learning_rate_warmup_steps=160, clip_grad_norm=0.0,\n checks_per_epoch=4, step_decay_factor=0.5, step_decay_patience=5,\n max_consecutive_decays=3, use_chars_lstm=False, d_char_emb=64,\n char_lstm_input_dropout=0.2, use_pretrained=False, pretrained_model=\n 'bert-base-uncased', use_elmo=False, use_encoder=False, d_model=1024,\n num_layers=8, num_heads=8, d_kv=64, d_ff=2048, encoder_max_len=512,\n morpho_emb_dropout=0.2, attention_dropout=0.2, relu_dropout=0.1,\n residual_dropout=0.2, elmo_dropout=0.5, force_root_constituent='auto',\n predict_tags=False, d_label_hidden=256, d_tag_hidden=256,\n tag_loss_scale=5.0)\n", (746, 1457), False, 'from benepar import nkutil\n'), ((2676, 2705), 'numpy.random.randint', 'np.random.randint', (['(2147483648)'], {}), '(2147483648)\n', (2693, 2705), True, 'import numpy as np\n'), ((2765, 2799), 'torch.manual_seed', 'torch.manual_seed', (['seed_from_numpy'], {}), '(seed_from_numpy)\n', (2782, 2799), False, 'import torch\n'), ((3023, 3109), 'treebanks.load_trees', 'treebanks.load_trees', (['args.train_path', 'args.train_path_text', 'args.text_processing'], {}), '(args.train_path, args.train_path_text, args.\n text_processing)\n', (3043, 3109), False, 'import treebanks\n'), ((3475, 3552), 'treebanks.load_trees', 'treebanks.load_trees', (['args.dev_path', 'args.dev_path_text', 'args.text_processing'], {}), '(args.dev_path, args.dev_path_text, args.text_processing)\n', (3495, 3552), False, 'import treebanks\n'), ((3892, 3951), 'benepar.decode_chart.ChartDecoder.build_vocab', 'decode_chart.ChartDecoder.build_vocab', (['train_treebank.trees'], {}), '(train_treebank.trees)\n', (3929, 3951), False, 'from benepar import decode_chart\n'), ((4916, 5029), 'benepar.parse_chart.ChartParser', 'parse_chart.ChartParser', ([], {'tag_vocab': 'tag_vocab', 'label_vocab': 'label_vocab', 'char_vocab': 'char_vocab', 'hparams': 'hparams'}), '(tag_vocab=tag_vocab, label_vocab=label_vocab,\n char_vocab=char_vocab, hparams=hparams)\n', (4939, 5029), False, 'from benepar import parse_chart\n'), ((5381, 5479), 'torch.optim.Adam', 'torch.optim.Adam', (['trainable_parameters'], {'lr': 'hparams.learning_rate', 'betas': '(0.9, 0.98)', 'eps': '(1e-09)'}), '(trainable_parameters, lr=hparams.learning_rate, betas=(0.9,\n 0.98), eps=1e-09)\n', (5397, 5479), False, 'import torch\n'), ((5506, 5735), 'learning_rates.WarmupThenReduceLROnPlateau', 'learning_rates.WarmupThenReduceLROnPlateau', (['optimizer', 'hparams.learning_rate_warmup_steps'], {'mode': '"""max"""', 'factor': 
'hparams.step_decay_factor', 'patience': '(hparams.step_decay_patience * hparams.checks_per_epoch)', 'verbose': '(True)'}), "(optimizer, hparams.\n learning_rate_warmup_steps, mode='max', factor=hparams.\n step_decay_factor, patience=hparams.step_decay_patience * hparams.\n checks_per_epoch, verbose=True)\n", (5548, 5735), False, 'import learning_rates\n'), ((6180, 6191), 'time.time', 'time.time', ([], {}), '()\n', (6189, 6191), False, 'import time\n'), ((8151, 8175), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (8166, 8175), False, 'import itertools\n'), ((10215, 10294), 'treebanks.load_trees', 'treebanks.load_trees', (['args.test_path', 'args.test_path_text', 'args.text_processing'], {}), '(args.test_path, args.test_path_text, args.text_processing)\n', (10235, 10294), False, 'import treebanks\n'), ((10668, 10716), 'benepar.parse_chart.ChartParser.from_trained', 'parse_chart.ChartParser.from_trained', (['model_path'], {}), '(model_path)\n', (10704, 10716), False, 'from benepar import parse_chart\n'), ((11029, 11040), 'time.time', 'time.time', ([], {}), '()\n', (11038, 11040), False, 'import time\n'), ((12277, 12377), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'test_predicted'], {'ref_gold_path': 'ref_gold_path'}), '(args.evalb_dir, test_treebank.trees, test_predicted,\n ref_gold_path=ref_gold_path)\n', (12291, 12377), False, 'import evaluate\n'), ((12568, 12593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12591, 12593), False, 'import argparse\n'), ((2226, 2257), 'numpy.random.seed', 'np.random.seed', (['args.numpy_seed'], {}), '(args.numpy_seed)\n', (2240, 2257), True, 'import numpy as np\n'), ((4004, 4070), 'benepar.char_lstm.RetokenizerForCharLSTM.build_vocab', 'char_lstm.RetokenizerForCharLSTM.build_vocab', (['train_treebank.sents'], {}), '(train_treebank.sents)\n', (4048, 4070), False, 'from benepar import char_lstm\n'), ((5128, 5153), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5151, 5153), False, 'import torch\n'), ((6346, 6357), 'time.time', 'time.time', ([], {}), '()\n', (6355, 6357), False, 'import time\n'), ((6539, 6604), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'dev_treebank.trees', 'dev_predicted'], {}), '(args.evalb_dir, dev_treebank.trees, dev_predicted)\n', (6553, 6604), False, 'import evaluate\n'), ((8204, 8215), 'time.time', 'time.time', ([], {}), '()\n', (8213, 8215), False, 'import time\n'), ((10923, 10948), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10946, 10948), False, 'import torch\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n'), ((7990, 8096), 'functools.partial', 'functools.partial', (['parser.encode_and_collate_subbatches'], {'subbatch_max_tokens': 'args.subbatch_max_tokens'}), '(parser.encode_and_collate_subbatches, subbatch_max_tokens\n =args.subbatch_max_tokens)\n', (8007, 8096), False, 'import functools\n'), ((8810, 8883), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['clippable_parameters', 'grad_clip_threshold'], {}), '(clippable_parameters, grad_clip_threshold)\n', (8840, 8883), False, 'import torch\n'), ((4693, 4769), 'benepar.decode_chart.ChartDecoder.infer_force_root_constituent', 'decode_chart.ChartDecoder.infer_force_root_constituent', (['train_treebank.trees'], {}), '(train_treebank.trees)\n', (4747, 4769), False, 'from benepar import decode_chart\n')]
|
import chess
import random
from evaluate import evaluate
#import copy
#import os
#import psutil
from multiprocessing import Pool
count = 0
# TODO Learn about castling properly!
# TODO Eliminate 3-fold repetition! See code of main.py
# TODO Implement time constraints to avoid "Black forfeits on time"
def search(board: chess.Board, turn: bool, depth: int, alpha: int = -10000, beta: int = 10000, returnMove: bool = False, returnCount: bool = False, tree: str = ""):
    # Let's count all nested calls to search within the current move
global count
count = 0 if returnCount else count + 1
# Just return evaluation for terminal nodes
    # TODO Check for game_over ONLY if a None move was returned!
if depth == 0 or board.is_game_over():
return evaluate(board, turn)
bestMove = None
for move in board.legal_moves:
# TODO Mate in ply! Move to eval function as special heuristic?
# capturedPiece = board.piece_type_at(move.to_square)
# if capturedPiece == chess.KING:
# return 10000 - board.ply()
# if board.gives_check(move):
# score = -(10000 - board.ply())
# print("=== GIVES CHECK :", move, "|", score, "===")
board.push(move)
treeBefore = tree
tree += move.uci() + " > "
# score = -search(board, not turn, depth-1, -beta, -alpha, tree = tree)
# We should see immediate checks
if board.is_checkmate():
score = 10000 - board.ply()
#if board.ply() < 111:
# score = -(10000 - board.ply())
#else:
#score = 10000 - board.ply()
#if returnMove:
# print("=== MOVE IN IMMEDIATE CHECK :", move, "|", score, "===")
#if returnMove:
# print("=== MOVE IN CHECK ", tree, "|", score, "===")
else:
score = -search(board, not turn, depth-1, -beta, -alpha, tree = tree)
tree = treeBefore
board.pop()
if score > alpha:
#print (tree + move.uci(), "| score > alpha |", score, ">", alpha)
# TODO Should look for order of later assignments and beta check
alpha = score
bestMove = move
# Print board for "root" moves
#if returnMove:
# print("\n---------------")
# print(f"MAX", "WHITE" if board.turn else "BLACK", move, "=>", score)
# print("---------------")
# board.push(move)
# print(board)
# board.pop()
# print("---------------")
if score >= beta:
# print ("BETA |", beta, "- DEPTH |", depth-1)
if returnMove and returnCount:
return beta, bestMove, count
elif returnMove:
return beta, bestMove
else:
return beta
if returnMove and returnCount:
return alpha, bestMove, count
elif returnMove:
return alpha, bestMove
else:
return alpha
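# --- Illustrative usage sketch (not part of the original module): driving
# the alpha-beta search above from the starting position. The depth and the
# (score, move) return shape follow the flags defined on search().
if __name__ == "__main__":
    board = chess.Board()
    score, best = search(board, board.turn, depth=3, returnMove=True)
    print("best move:", best, "score:", score)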
|
[
"evaluate.evaluate"
] |
[((799, 820), 'evaluate.evaluate', 'evaluate', (['board', 'turn'], {}), '(board, turn)\n', (807, 820), False, 'from evaluate import evaluate\n')]
|
""" WORKER OF HPBANDSTER OPTIMIZER """
import numpy
import time
import ConfigSpace as CS
from hpbandster.core.worker import Worker
import os
from evaluate import evaluate
from set_up_run_folder import set_up_run_folder
class MyWorker(Worker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def compute(self, config, budget, **kwargs):
"""
        Run the Lisp model with the sampled BOLD parameters and score it.
        The budget is interpreted as the number of model runs; the loss
        reported back to the optimizer is 1 minus the correlation returned
        by evaluate().
Args:
config: dictionary containing the sampled configurations by the optimizer
budget: (float) amount of time/epochs/etc. the model can use to train
Returns:
dictionary with mandatory fields:
'loss' (scalar)
'info' (dict)
"""
runs = int(budget)
model = "pmm"
ans = 0.22
rt = -0.65
lf = 0.4
# bold_scale = 0.88
bold_scale = config["bold-scale"]
# neg_bold_scale = 1
neg_bold_scale = config["neg-bold-scale"]
# bold_exp = 4
bold_exp = config["bold-exp"]
# neg_bold_exp = 15
neg_bold_exp = config["neg-bold-exp"]
# bold_positive = 1
bold_positive = config["bold-positive"]
# bold_negative = 0
bold_negative = config["bold-negative"]
# clean run env
set_up_run_folder()
# run the model n times
os.system("ccl64 -n -l -b run_direct.lisp -- " + str(runs) + " " + str(model) + " " + str(ans) + " " + str(rt) + " " + str(lf) + " " + str(bold_scale) + " " + str(neg_bold_scale) + " " + str(bold_exp) + " " + str(neg_bold_exp) + " " + str(bold_positive) + " " + str(bold_negative))
print("model run done")
corr = 1 - evaluate(plot=False)
os.system("echo \"Loss: " + str(corr) + " with " + str(runs) + " " + str(model) + " " + str(ans) + " " + str(rt) + " " + str(lf) + " " + str(bold_scale) + " " + str(neg_bold_scale) + " " + str(bold_exp) + " " + str(neg_bold_exp) + " " + str(bold_positive) + " " + str(bold_negative) + "\" >> optimizer.log")
return({
            'loss': float(corr), # this is a mandatory field to run hyperband
'info': 1 - corr # can be used for any user-defined information - also mandatory
})
@staticmethod
def get_configspace():
config_space = CS.ConfigurationSpace()
config_space.add_hyperparameter(CS.UniformFloatHyperparameter('bold-scale', lower=0, upper=5))
config_space.add_hyperparameter(CS.UniformFloatHyperparameter('neg-bold-scale', lower=0, upper=5))
config_space.add_hyperparameter(CS.UniformIntegerHyperparameter('bold-exp', lower=1, upper=30))
config_space.add_hyperparameter(CS.UniformIntegerHyperparameter('neg-bold-exp', lower=1, upper=30))
config_space.add_hyperparameter(CS.UniformFloatHyperparameter('bold-positive', lower=0, upper=5))
config_space.add_hyperparameter(CS.UniformFloatHyperparameter('bold-negative', lower=0, upper=5))
return(config_space)
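# --- Illustrative sketch (not part of the original file): minimal local
# HpBandSter wiring for this worker, following the library's standard
# example. The run_id/host values and budgets below are placeholders.
if __name__ == "__main__":
    import hpbandster.core.nameserver as hpns
    from hpbandster.optimizers import BOHB
    NS = hpns.NameServer(run_id="pmm", host="127.0.0.1", port=None)
    NS.start()
    worker = MyWorker(run_id="pmm", nameserver="127.0.0.1")
    worker.run(background=True)
    bohb = BOHB(configspace=MyWorker.get_configspace(),
          run_id="pmm", nameserver="127.0.0.1",
          min_budget=5, max_budget=50)
    result = bohb.run(n_iterations=4)
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()
    print("incumbent:", result.get_incumbent_id())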
|
[
"evaluate.evaluate"
] |
[((1576, 1595), 'set_up_run_folder.set_up_run_folder', 'set_up_run_folder', ([], {}), '()\n', (1593, 1595), False, 'from set_up_run_folder import set_up_run_folder\n'), ((2613, 2636), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (2634, 2636), True, 'import ConfigSpace as CS\n'), ((1973, 1993), 'evaluate.evaluate', 'evaluate', ([], {'plot': '(False)'}), '(plot=False)\n', (1981, 1993), False, 'from evaluate import evaluate\n'), ((2677, 2738), 'ConfigSpace.UniformFloatHyperparameter', 'CS.UniformFloatHyperparameter', (['"""bold-scale"""'], {'lower': '(0)', 'upper': '(5)'}), "('bold-scale', lower=0, upper=5)\n", (2706, 2738), True, 'import ConfigSpace as CS\n'), ((2780, 2845), 'ConfigSpace.UniformFloatHyperparameter', 'CS.UniformFloatHyperparameter', (['"""neg-bold-scale"""'], {'lower': '(0)', 'upper': '(5)'}), "('neg-bold-scale', lower=0, upper=5)\n", (2809, 2845), True, 'import ConfigSpace as CS\n'), ((2887, 2949), 'ConfigSpace.UniformIntegerHyperparameter', 'CS.UniformIntegerHyperparameter', (['"""bold-exp"""'], {'lower': '(1)', 'upper': '(30)'}), "('bold-exp', lower=1, upper=30)\n", (2918, 2949), True, 'import ConfigSpace as CS\n'), ((2991, 3057), 'ConfigSpace.UniformIntegerHyperparameter', 'CS.UniformIntegerHyperparameter', (['"""neg-bold-exp"""'], {'lower': '(1)', 'upper': '(30)'}), "('neg-bold-exp', lower=1, upper=30)\n", (3022, 3057), True, 'import ConfigSpace as CS\n'), ((3099, 3163), 'ConfigSpace.UniformFloatHyperparameter', 'CS.UniformFloatHyperparameter', (['"""bold-positive"""'], {'lower': '(0)', 'upper': '(5)'}), "('bold-positive', lower=0, upper=5)\n", (3128, 3163), True, 'import ConfigSpace as CS\n'), ((3205, 3269), 'ConfigSpace.UniformFloatHyperparameter', 'CS.UniformFloatHyperparameter', (['"""bold-negative"""'], {'lower': '(0)', 'upper': '(5)'}), "('bold-negative', lower=0, upper=5)\n", (3234, 3269), True, 'import ConfigSpace as CS\n')]
|
import sys
import time
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from ray import tune
from ray.tune.schedulers import HyperBandScheduler
from datasets import dataset_factory
from datasets.augmentation import *
from datasets.graph import Graph
from evaluate import evaluate, _evaluate_casia_b
from losses import SupConLoss
from common import *
from utils import AverageMeter
def train(train_loader, model, criterion, optimizer, scheduler, scaler, epoch, opt):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
for idx, (points, target) in enumerate(train_loader):
data_time.update(time.time() - end)
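        # each sample arrives as two augmented views (see TwoNoiseTransform below); stack them into one batch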
points = torch.cat([points[0], points[1]], dim=0)
labels = target[0]
if torch.cuda.is_available():
points = points.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
bsz = labels.shape[0]
with torch.cuda.amp.autocast(enabled=opt.use_amp):
# compute loss
features = model(points)
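            # split the embeddings back into the two views and stack to [bsz, n_views, dim], the shape SupConLoss expects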
f1, f2 = torch.split(features, [bsz, bsz], dim=0)
features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
loss = criterion(features, labels)
# update metric
losses.update(loss.item(), bsz)
# SGD
scaler.scale(loss).backward()
scaler.step(optimizer)
scheduler.step()
scaler.update()
optimizer.zero_grad()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.log_interval == 0:
print(
f"Train: [{epoch}][{idx + 1}/{len(train_loader)}]\t"
f"BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
f"DT {data_time.val:.3f} ({data_time.avg:.3f})\t"
f"loss {losses.val:.3f} ({losses.avg:.3f})"
)
sys.stdout.flush()
return losses.avg
def main(opt):
opt = setup_environment(opt)
graph = Graph("coco")
# Dataset
transform = transforms.Compose(
[
MirrorPoses(opt.mirror_probability),
FlipSequence(opt.flip_probability),
RandomSelectSequence(opt.sequence_length),
ShuffleSequence(opt.shuffle),
PointNoise(std=opt.point_noise_std),
JointNoise(std=opt.joint_noise_std),
MultiInput(graph.connect_joint, opt.use_multi_branch),
ToTensor()
],
)
dataset_class = dataset_factory(opt.dataset)
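    # the training split wraps the pipeline in TwoNoiseTransform, so every sample yields two views for the contrastive loss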
dataset = dataset_class(
opt.train_data_path,
train=True,
sequence_length=opt.sequence_length,
transform=TwoNoiseTransform(transform),
)
dataset_valid = dataset_class(
opt.valid_data_path,
sequence_length=opt.sequence_length,
transform=transforms.Compose(
[
SelectSequenceCenter(opt.sequence_length),
MultiInput(graph.connect_joint, opt.use_multi_branch),
ToTensor()
]
),
)
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batch_size,
num_workers=opt.num_workers,
pin_memory=True,
shuffle=True,
)
val_loader = torch.utils.data.DataLoader(
dataset_valid,
batch_size=opt.batch_size_validation,
num_workers=opt.num_workers,
pin_memory=True,
)
# Model & criterion
model, model_args = get_model_resgcn(graph, opt)
criterion = SupConLoss(temperature=opt.temp)
print("# parameters: ", count_parameters(model))
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, opt.gpus)
if opt.cuda:
model.cuda()
criterion.cuda()
# Trainer
optimizer, scheduler, scaler = get_trainer(model, opt, len(train_loader))
# Load checkpoint or weights
load_checkpoint(model, optimizer, scheduler, scaler, opt)
# Tensorboard
writer = SummaryWriter(log_dir=opt.tb_path)
sample_input = torch.zeros(opt.batch_size, model_args["num_input"], model_args["num_channel"],
opt.sequence_length, graph.num_node).cuda()
writer.add_graph(model, input_to_model=sample_input)
best_acc = 0
loss = 0
for epoch in range(opt.start_epoch, opt.epochs + 1):
# train for one epoch
time1 = time.time()
loss = train(
train_loader, model, criterion, optimizer, scheduler, scaler, epoch, opt
)
time2 = time.time()
print(f"epoch {epoch}, total time {time2 - time1:.2f}")
# tensorboard logger
writer.add_scalar("loss/train", loss, epoch)
writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"], epoch)
# evaluation
result, accuracy_avg, sub_accuracies, dataframe = evaluate(
val_loader, model, opt.evaluation_fn, use_flip=True
)
writer.add_text("accuracy/validation", dataframe.to_markdown(), epoch)
writer.add_scalar("accuracy/validation", accuracy_avg, epoch)
for key, sub_accuracy in sub_accuracies.items():
writer.add_scalar(f"accuracy/validation/{key}", sub_accuracy, epoch)
print(f"epoch {epoch}, avg accuracy {accuracy_avg:.4f}")
is_best = accuracy_avg > best_acc
if is_best:
best_acc = accuracy_avg
if opt.tune:
tune.report(accuracy=accuracy_avg)
if epoch % opt.save_interval == 0 or (is_best and epoch > opt.save_best_start * opt.epochs):
save_file = os.path.join(opt.save_folder, f"ckpt_epoch_{'best' if is_best else epoch}.pth")
save_model(model, optimizer, scheduler, scaler, opt, opt.epochs, save_file)
# save the last model
save_file = os.path.join(opt.save_folder, "last.pth")
save_model(model, optimizer, scheduler, scaler, opt, opt.epochs, save_file)
log_hyperparameter(writer, opt, best_acc, loss)
print(f"best accuracy: {best_acc*100:.2f}")
def _inject_config(config):
opt_new = {k: config[k] if k in config.keys() else v for k, v in vars(opt).items()}
main(argparse.Namespace(**opt_new))
def tune_():
hyperband = HyperBandScheduler(metric="accuracy", mode="max")
analysis = tune.run(
_inject_config,
config={},
stop={"accuracy": 0.90, "training_iteration": 100},
resources_per_trial={"gpu": 1},
num_samples=10,
scheduler=hyperband
)
print("Best config: ", analysis.get_best_config(metric="accuracy", mode="max"))
df = analysis.results_df
print(df)
if __name__ == "__main__":
import datetime
opt = parse_option()
date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
opt.model_name = f"{date}_{opt.dataset}_{opt.network_name}" \
f"_lr_{opt.learning_rate}_decay_{opt.weight_decay}_bsz_{opt.batch_size}"
if opt.exp_name:
opt.model_name += "_" + opt.exp_name
opt.model_path = f"../save/{opt.dataset}_models"
opt.tb_path = f"../save/{opt.dataset}_tensorboard/{opt.model_name}"
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
opt.evaluation_fn = None
if opt.dataset == "casia-b":
opt.evaluation_fn = _evaluate_casia_b
if opt.tune:
tune_()
else:
main(opt)
|
[
"evaluate.evaluate"
] |
[((612, 626), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (624, 626), False, 'from utils import AverageMeter\n'), ((643, 657), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (655, 657), False, 'from utils import AverageMeter\n'), ((671, 685), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (683, 685), False, 'from utils import AverageMeter\n'), ((697, 708), 'time.time', 'time.time', ([], {}), '()\n', (706, 708), False, 'import time\n'), ((2190, 2203), 'datasets.graph.Graph', 'Graph', (['"""coco"""'], {}), "('coco')\n", (2195, 2203), False, 'from datasets.graph import Graph\n'), ((2685, 2713), 'datasets.dataset_factory', 'dataset_factory', (['opt.dataset'], {}), '(opt.dataset)\n', (2700, 2713), False, 'from datasets import dataset_factory\n'), ((3710, 3742), 'losses.SupConLoss', 'SupConLoss', ([], {'temperature': 'opt.temp'}), '(temperature=opt.temp)\n', (3720, 3742), False, 'from losses import SupConLoss\n'), ((4176, 4210), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'opt.tb_path'}), '(log_dir=opt.tb_path)\n', (4189, 4210), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6399, 6448), 'ray.tune.schedulers.HyperBandScheduler', 'HyperBandScheduler', ([], {'metric': '"""accuracy"""', 'mode': '"""max"""'}), "(metric='accuracy', mode='max')\n", (6417, 6448), False, 'from ray.tune.schedulers import HyperBandScheduler\n'), ((6465, 6629), 'ray.tune.run', 'tune.run', (['_inject_config'], {'config': '{}', 'stop': "{'accuracy': 0.9, 'training_iteration': 100}", 'resources_per_trial': "{'gpu': 1}", 'num_samples': '(10)', 'scheduler': 'hyperband'}), "(_inject_config, config={}, stop={'accuracy': 0.9,\n 'training_iteration': 100}, resources_per_trial={'gpu': 1}, num_samples\n =10, scheduler=hyperband)\n", (6473, 6629), False, 'from ray import tune\n'), ((1698, 1709), 'time.time', 'time.time', ([], {}), '()\n', (1707, 1709), False, 'import time\n'), ((4577, 4588), 'time.time', 'time.time', ([], {}), '()\n', (4586, 4588), False, 'import time\n'), ((4723, 4734), 'time.time', 'time.time', ([], {}), '()\n', (4732, 4734), False, 'import time\n'), ((5045, 5106), 'evaluate.evaluate', 'evaluate', (['val_loader', 'model', 'opt.evaluation_fn'], {'use_flip': '(True)'}), '(val_loader, model, opt.evaluation_fn, use_flip=True)\n', (5053, 5106), False, 'from evaluate import evaluate, _evaluate_casia_b\n'), ((2086, 2104), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2102, 2104), False, 'import sys\n'), ((5614, 5648), 'ray.tune.report', 'tune.report', ([], {'accuracy': 'accuracy_avg'}), '(accuracy=accuracy_avg)\n', (5625, 5648), False, 'from ray import tune\n'), ((6892, 6915), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6913, 6915), False, 'import datetime\n'), ((792, 803), 'time.time', 'time.time', ([], {}), '()\n', (801, 803), False, 'import time\n'), ((1665, 1676), 'time.time', 'time.time', ([], {}), '()\n', (1674, 1676), False, 'import time\n')]
|
import datetime
import os
import json
import yaml
import visdom
import torch
import numpy as np
from gamified_squad import GamifiedSquad
from gamified_newsqa import GamifiedNewsQA
from agent import Agent, HistoryScoreCache
import generic
import evaluate
def train():
time_1 = datetime.datetime.now()
with open("config.yaml") as reader:
config = yaml.safe_load(reader)
if config['general']['dataset'] == "squad":
env = GamifiedSquad(config)
else:
env = GamifiedNewsQA(config)
env.split_reset("train")
agent = Agent()
# visdom
viz = visdom.Visdom()
plt_win = None
eval_plt_win = None
plt_q_value_win = None
plt_steps_win = None
eval_plt_steps_win = None
viz_avg_correct_state_acc, viz_avg_qa_acc = [], []
viz_avg_correct_state_q_value = []
viz_eval_correct_state_acc, viz_eval_qa_acc, viz_eval_steps = [], [], []
viz_avg_steps = []
step_in_total = 0
episode_no = 0
running_avg_qa_acc = HistoryScoreCache(capacity=50)
running_avg_correct_state_acc = HistoryScoreCache(capacity=50)
running_avg_qa_loss = HistoryScoreCache(capacity=50)
running_avg_correct_state_loss = HistoryScoreCache(capacity=50)
running_avg_correct_state_q_value = HistoryScoreCache(capacity=50)
running_avg_steps = HistoryScoreCache(capacity=50)
output_dir, data_dir = ".", "."
json_file_name = agent.experiment_tag.replace(" ", "_")
best_qa_acc_so_far = 0.0
# load model from checkpoint
if agent.load_pretrained:
if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
agent.load_pretrained_model(output_dir + "/" + agent.experiment_tag + "_model.pt")
agent.update_target_net()
elif os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt")
agent.update_target_net()
    while True:
if episode_no > agent.max_episode:
break
np.random.seed(episode_no)
env.seed(episode_no)
obs, infos = env.reset()
print("====================================================================================", episode_no)
print("-- Q: %s" % (infos[0]["q"].encode('utf-8')))
print("-- A: %s" % (infos[0]["a"][0].encode('utf-8')))
agent.train()
agent.init(obs, infos)
quest_list = agent.get_game_quest_info(infos)
input_quest, input_quest_char, quest_id_list = agent.get_agent_inputs(quest_list)
tmp_replay_buffer = []
print_cmds = []
batch_size = len(obs)
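        # act randomly until learning starts, unless noisy nets already provide exploration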
act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode
for step_no in range(agent.max_nb_steps_per_episode):
# generate commands
if agent.noisy_net:
agent.reset_noise() # Draw a new set of noisy weights
commands, replay_info = agent.act(obs, infos, input_quest, input_quest_char, quest_id_list, random=act_randomly)
obs, infos = env.step(commands)
if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
agent.reset_noise() # Draw a new set of noisy weights
if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
interaction_loss, interaction_q_value = agent.update_interaction()
if interaction_loss is not None:
running_avg_correct_state_loss.push(interaction_loss)
running_avg_correct_state_q_value.push(interaction_q_value)
qa_loss = agent.update_qa()
if qa_loss is not None:
running_avg_qa_loss.push(qa_loss)
step_in_total += 1
still_running = generic.to_np(replay_info[-1])
print_cmds.append(commands[0] if still_running[0] else "--")
# force stopping
if step_no == agent.max_nb_steps_per_episode - 1:
replay_info[-1] = torch.zeros_like(replay_info[-1])
tmp_replay_buffer.append(replay_info)
if np.sum(still_running) == 0:
break
print(" / ".join(print_cmds).encode('utf-8'))
# The agent has exhausted all steps, now answer question.
chosen_head_tails = agent.answer_question_act(agent.naozi.get(), quest_list) # batch
chosen_head_tails_np = generic.to_np(chosen_head_tails)
chosen_answer_strings = generic.get_answer_strings(agent.naozi.get(), chosen_head_tails_np)
answer_strings = [item["a"] for item in infos]
qa_reward_np = generic.get_qa_reward(chosen_answer_strings, answer_strings)
correct_state_reward_np = generic.get_sufficient_info_reward(agent.naozi.get(), answer_strings)
correct_state_reward = generic.to_pt(correct_state_reward_np, enable_cuda=agent.use_cuda, type='float') # batch
# push qa experience into qa replay buffer
for b in range(batch_size): # data points in batch
is_prior = qa_reward_np[b] > agent.qa_reward_prior_threshold * agent.qa_replay_memory.avg_rewards()
# if the agent is not in the correct state, do not push it into replay buffer
if np.mean(correct_state_reward_np[b]) == 0.0:
continue
agent.qa_replay_memory.push(is_prior, qa_reward_np[b], agent.naozi.get(b), quest_list[b], answer_strings[b])
# small positive reward whenever it answers question correctly
masks_np = [generic.to_np(item[-1]) for item in tmp_replay_buffer]
command_rewards_np = []
for i in range(len(tmp_replay_buffer)):
if i == len(tmp_replay_buffer) - 1:
r = correct_state_reward * tmp_replay_buffer[i][-1]
r_np = correct_state_reward_np * masks_np[i]
else:
# give reward only at that one game step, not all
r = correct_state_reward * (tmp_replay_buffer[i][-1] - tmp_replay_buffer[i + 1][-1])
r_np = correct_state_reward_np * (masks_np[i] - masks_np[i + 1])
tmp_replay_buffer[i].append(r)
command_rewards_np.append(r_np)
command_rewards_np = np.array(command_rewards_np)
print(command_rewards_np[:, 0])
# push experience into replay buffer
for b in range(len(correct_state_reward_np)):
is_prior = np.sum(command_rewards_np, 0)[b] > 0.0
for i in range(len(tmp_replay_buffer)):
batch_description_list, batch_chosen_indices, batch_chosen_ctrlf_indices, _, batch_rewards = tmp_replay_buffer[i]
is_final = True
if masks_np[i][b] != 0:
is_final = False
agent.replay_memory.push(is_prior, batch_description_list[b], quest_list[b], batch_chosen_indices[b], batch_chosen_ctrlf_indices[b], batch_rewards[b], is_final)
if masks_np[i][b] == 0.0:
break
qa_acc = np.mean(qa_reward_np)
correct_state_acc = np.mean(correct_state_reward_np)
step_masks_np = np.sum(np.array(masks_np), 0) # batch
for i in range(len(qa_reward_np)):
# if the answer is totally wrong, we assume it used all steps
if qa_reward_np[i] == 0.0:
step_masks_np[i] = agent.max_nb_steps_per_episode
used_steps = np.mean(step_masks_np)
running_avg_qa_acc.push(qa_acc)
running_avg_correct_state_acc.push(correct_state_acc)
running_avg_steps.push(used_steps)
print_rewards = np.sum(np.mean(command_rewards_np, -1))
obs_string = agent.naozi.get(0)
print("-- OBS: %s" % (obs_string.encode('utf-8')))
print("-- PRED: %s" % (chosen_answer_strings[0].encode('utf-8')))
# finish game
agent.finish_of_episode(episode_no, batch_size)
episode_no += batch_size
time_2 = datetime.datetime.now()
print("Episode: {:3d} | time spent: {:s} | interaction loss: {:2.3f} | interaction qvalue: {:2.3f} | qa loss: {:2.3f} | rewards: {:2.3f} | qa acc: {:2.3f}/{:2.3f} | sufficient info: {:2.3f}/{:2.3f} | used steps: {:2.3f}".format(episode_no, str(time_2 - time_1).rsplit(".")[0], running_avg_correct_state_loss.get_avg(), running_avg_correct_state_q_value.get_avg(), running_avg_qa_loss.get_avg(), print_rewards, qa_acc, running_avg_qa_acc.get_avg(), correct_state_acc, running_avg_correct_state_acc.get_avg(), running_avg_steps.get_avg()))
if episode_no < agent.learn_start_from_this_episode:
continue
if agent.report_frequency == 0 or (episode_no % agent.report_frequency > (episode_no - batch_size) % agent.report_frequency):
continue
eval_qa_acc, eval_correct_state_acc, eval_used_steps = 0.0, 0.0, 0.0
# evaluate
if agent.run_eval:
eval_qa_acc, eval_correct_state_acc, eval_used_steps = evaluate.evaluate(env, agent, "valid")
env.split_reset("train")
# if run eval, then save model by eval accucacy
if agent.save_frequency > 0 and (episode_no % agent.report_frequency <= (episode_no - batch_size) % agent.report_frequency) and eval_qa_acc > best_qa_acc_so_far:
best_qa_acc_so_far = eval_qa_acc
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
# save model
elif agent.save_frequency > 0 and (episode_no % agent.report_frequency <= (episode_no - batch_size) % agent.report_frequency):
if running_avg_qa_acc.get_avg() > best_qa_acc_so_far:
best_qa_acc_so_far = running_avg_qa_acc.get_avg()
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
# plot using visdom
viz_avg_correct_state_acc.append(running_avg_correct_state_acc.get_avg())
viz_avg_qa_acc.append(running_avg_qa_acc.get_avg())
viz_avg_correct_state_q_value.append(running_avg_correct_state_q_value.get_avg())
viz_eval_correct_state_acc.append(eval_correct_state_acc)
viz_eval_qa_acc.append(eval_qa_acc)
viz_eval_steps.append(eval_used_steps)
viz_avg_steps.append(running_avg_steps.get_avg())
viz_x = np.arange(len(viz_avg_correct_state_acc)).tolist()
if plt_win is None:
plt_win = viz.line(X=viz_x, Y=viz_avg_correct_state_acc,
opts=dict(title=agent.experiment_tag + "_train"),
name="sufficient info")
viz.line(X=viz_x, Y=viz_avg_qa_acc,
opts=dict(title=agent.experiment_tag + "_train"),
win=plt_win, update='append', name="qa")
else:
viz.line(X=[len(viz_avg_correct_state_acc) - 1], Y=[viz_avg_correct_state_acc[-1]],
opts=dict(title=agent.experiment_tag + "_train"),
win=plt_win,
update='append', name="sufficient info")
viz.line(X=[len(viz_avg_qa_acc) - 1], Y=[viz_avg_qa_acc[-1]],
opts=dict(title=agent.experiment_tag + "_train"),
win=plt_win,
update='append', name="qa")
if plt_q_value_win is None:
plt_q_value_win = viz.line(X=viz_x, Y=viz_avg_correct_state_q_value,
opts=dict(title=agent.experiment_tag + "_train_q_value"),
name="sufficient info")
else:
viz.line(X=[len(viz_avg_correct_state_q_value) - 1], Y=[viz_avg_correct_state_q_value[-1]],
opts=dict(title=agent.experiment_tag + "_train_q_value"),
win=plt_q_value_win,
update='append', name="sufficient info")
if plt_steps_win is None:
plt_steps_win = viz.line(X=viz_x, Y=viz_avg_steps,
opts=dict(title=agent.experiment_tag + "_train_step"),
name="used steps")
else:
viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_avg_steps[-1]],
opts=dict(title=agent.experiment_tag + "_train_step"),
win=plt_steps_win,
update='append', name="used steps")
if eval_plt_win is None:
eval_plt_win = viz.line(X=viz_x, Y=viz_eval_correct_state_acc,
opts=dict(title=agent.experiment_tag + "_eval"),
name="sufficient info")
viz.line(X=viz_x, Y=viz_eval_qa_acc,
opts=dict(title=agent.experiment_tag + "_eval"),
win=eval_plt_win, update='append', name="qa")
else:
viz.line(X=[len(viz_eval_correct_state_acc) - 1], Y=[viz_eval_correct_state_acc[-1]],
opts=dict(title=agent.experiment_tag + "_eval"),
win=eval_plt_win,
update='append', name="sufficient info")
viz.line(X=[len(viz_eval_qa_acc) - 1], Y=[viz_eval_qa_acc[-1]],
opts=dict(title=agent.experiment_tag + "_eval"),
win=eval_plt_win,
update='append', name="qa")
if eval_plt_steps_win is None:
eval_plt_steps_win = viz.line(X=viz_x, Y=viz_eval_steps,
opts=dict(title=agent.experiment_tag + "_eval_step"),
name="used steps")
else:
viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_eval_steps[-1]],
opts=dict(title=agent.experiment_tag + "_eval_step"),
win=eval_plt_steps_win,
update='append', name="used steps")
        # write accuracies down into file
_s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
"sufficient info": str(running_avg_correct_state_acc.get_avg()),
"qa": str(running_avg_qa_acc.get_avg()),
"sufficient qvalue": str(running_avg_correct_state_q_value.get_avg()),
"eval sufficient info": str(eval_correct_state_acc),
"eval qa": str(eval_qa_acc),
"eval steps": str(eval_used_steps),
"used steps": str(running_avg_steps.get_avg())})
with open(output_dir + "/" + json_file_name + '.json', 'a+') as outfile:
outfile.write(_s + '\n')
outfile.flush()
if __name__ == '__main__':
train()
|
[
"evaluate.evaluate"
] |
[((284, 307), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (305, 307), False, 'import datetime\n'), ((565, 572), 'agent.Agent', 'Agent', ([], {}), '()\n', (570, 572), False, 'from agent import Agent, HistoryScoreCache\n'), ((597, 612), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (610, 612), False, 'import visdom\n'), ((999, 1029), 'agent.HistoryScoreCache', 'HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1016, 1029), False, 'from agent import Agent, HistoryScoreCache\n'), ((1066, 1096), 'agent.HistoryScoreCache', 'HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1083, 1096), False, 'from agent import Agent, HistoryScoreCache\n'), ((1123, 1153), 'agent.HistoryScoreCache', 'HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1140, 1153), False, 'from agent import Agent, HistoryScoreCache\n'), ((1191, 1221), 'agent.HistoryScoreCache', 'HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1208, 1221), False, 'from agent import Agent, HistoryScoreCache\n'), ((1262, 1292), 'agent.HistoryScoreCache', 'HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1279, 1292), False, 'from agent import Agent, HistoryScoreCache\n'), ((1317, 1347), 'agent.HistoryScoreCache', 'HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1334, 1347), False, 'from agent import Agent, HistoryScoreCache\n'), ((370, 392), 'yaml.safe_load', 'yaml.safe_load', (['reader'], {}), '(reader)\n', (384, 392), False, 'import yaml\n'), ((455, 476), 'gamified_squad.GamifiedSquad', 'GamifiedSquad', (['config'], {}), '(config)\n', (468, 476), False, 'from gamified_squad import GamifiedSquad\n'), ((501, 523), 'gamified_newsqa.GamifiedNewsQA', 'GamifiedNewsQA', (['config'], {}), '(config)\n', (515, 523), False, 'from gamified_newsqa import GamifiedNewsQA\n'), ((1548, 1617), 'os.path.exists', 'os.path.exists', (["(output_dir + '/' + agent.experiment_tag + '_model.pt')"], {}), "(output_dir + '/' + agent.experiment_tag + '_model.pt')\n", (1562, 1617), False, 'import os\n'), ((2038, 2064), 'numpy.random.seed', 'np.random.seed', (['episode_no'], {}), '(episode_no)\n', (2052, 2064), True, 'import numpy as np\n'), ((4506, 4538), 'generic.to_np', 'generic.to_np', (['chosen_head_tails'], {}), '(chosen_head_tails)\n', (4519, 4538), False, 'import generic\n'), ((4718, 4778), 'generic.get_qa_reward', 'generic.get_qa_reward', (['chosen_answer_strings', 'answer_strings'], {}), '(chosen_answer_strings, answer_strings)\n', (4739, 4778), False, 'import generic\n'), ((4914, 4999), 'generic.to_pt', 'generic.to_pt', (['correct_state_reward_np'], {'enable_cuda': 'agent.use_cuda', 'type': '"""float"""'}), "(correct_state_reward_np, enable_cuda=agent.use_cuda, type='float'\n )\n", (4927, 4999), False, 'import generic\n'), ((6309, 6337), 'numpy.array', 'np.array', (['command_rewards_np'], {}), '(command_rewards_np)\n', (6317, 6337), True, 'import numpy as np\n'), ((7094, 7115), 'numpy.mean', 'np.mean', (['qa_reward_np'], {}), '(qa_reward_np)\n', (7101, 7115), True, 'import numpy as np\n'), ((7144, 7176), 'numpy.mean', 'np.mean', (['correct_state_reward_np'], {}), '(correct_state_reward_np)\n', (7151, 7176), True, 'import numpy as np\n'), ((7483, 7505), 'numpy.mean', 'np.mean', (['step_masks_np'], {}), '(step_masks_np)\n', (7490, 7505), True, 'import numpy as np\n'), ((8020, 8043), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8041, 8043), False, 'import datetime\n'), ((1765, 1825), 'os.path.exists', 'os.path.exists', (["(data_dir + '/' + agent.load_from_tag + '.pt')"], {}), "(data_dir + '/' + agent.load_from_tag + '.pt')\n", (1779, 1825), False, 'import os\n'), ((3881, 3911), 'generic.to_np', 'generic.to_np', (['replay_info[-1]'], {}), '(replay_info[-1])\n', (3894, 3911), False, 'import generic\n'), ((5615, 5638), 'generic.to_np', 'generic.to_np', (['item[-1]'], {}), '(item[-1])\n', (5628, 5638), False, 'import generic\n'), ((7208, 7226), 'numpy.array', 'np.array', (['masks_np'], {}), '(masks_np)\n', (7216, 7226), True, 'import numpy as np\n'), ((7683, 7714), 'numpy.mean', 'np.mean', (['command_rewards_np', '(-1)'], {}), '(command_rewards_np, -1)\n', (7690, 7714), True, 'import numpy as np\n'), ((9019, 9057), 'evaluate.evaluate', 'evaluate.evaluate', (['env', 'agent', '"""valid"""'], {}), "(env, agent, 'valid')\n", (9036, 9057), False, 'import evaluate\n'), ((4111, 4144), 'torch.zeros_like', 'torch.zeros_like', (['replay_info[-1]'], {}), '(replay_info[-1])\n', (4127, 4144), False, 'import torch\n'), ((4210, 4231), 'numpy.sum', 'np.sum', (['still_running'], {}), '(still_running)\n', (4216, 4231), True, 'import numpy as np\n'), ((5333, 5368), 'numpy.mean', 'np.mean', (['correct_state_reward_np[b]'], {}), '(correct_state_reward_np[b])\n', (5340, 5368), True, 'import numpy as np\n'), ((6501, 6530), 'numpy.sum', 'np.sum', (['command_rewards_np', '(0)'], {}), '(command_rewards_np, 0)\n', (6507, 6530), True, 'import numpy as np\n')]
|
import os
import sys
import math
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.multiprocessing import Queue, Process
sys.path.insert(0, '../lib')
sys.path.insert(0, '../model')
from data.WiderPerson import WiderPerson
from utils import misc_utils, nms_utils
from evaluate import compute_JI, compute_APMR
def eval_all(args, config, network):
# model_path
saveDir = os.path.join('../model', args.model_dir, config.model_dir)
evalDir = os.path.join('../model', args.model_dir, config.eval_dir)
misc_utils.ensure_dir(evalDir)
model_file = os.path.join(saveDir,
'dump-{}.pth'.format(args.resume_weights))
assert os.path.exists(model_file)
# get devices
str_devices = args.devices
devices = misc_utils.device_parser(str_devices)
# load data
widerPerson = WiderPerson(config, if_train=False)
    # widerPerson.records = widerPerson.records[:10]  # (debug) evaluate on a small subset only
# multiprocessing
num_devs = len(devices)
len_dataset = len(widerPerson)
num_image = math.ceil(len_dataset / num_devs)
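    # shard the dataset evenly across devices; each worker process streams its results back through a shared queue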
result_queue = Queue(500)
procs = []
all_results = []
for i in range(num_devs):
start = i * num_image
end = min(start + num_image, len_dataset)
proc = Process(target=inference, args=(
config, network, model_file, devices[i], widerPerson, start, end, result_queue))
proc.start()
procs.append(proc)
pbar = tqdm(total=len_dataset, ncols=50)
for i in range(len_dataset):
t = result_queue.get()
all_results.append(t)
pbar.update(1)
pbar.close()
for p in procs:
p.join()
fpath = os.path.join(evalDir, 'dump-{}.json'.format(args.resume_weights))
misc_utils.save_json_lines(all_results, fpath)
# evaluation
eval_path = os.path.join(evalDir, 'eval-{}.json'.format(args.resume_weights))
eval_fid = open(eval_path,'w')
res_line, JI = compute_JI.evaluation_all(fpath, 'box')
for line in res_line:
eval_fid.write(line+'\n')
    AP, MR = compute_APMR.compute_APMR(fpath, config.eval_source, 'box', config.annotations_root)
line = 'AP:{:.4f}, MR:{:.4f}, JI:{:.4f}.'.format(AP, MR, JI)
print(line)
eval_fid.write(line+'\n')
eval_fid.close()
def inference(config, network, model_file, device, dataset, start, end, result_queue):
torch.set_default_tensor_type('torch.FloatTensor')
torch.multiprocessing.set_sharing_strategy('file_system')
# init model
net = network()
net.cuda(device)
net = net.eval()
check_point = torch.load(model_file)
net.load_state_dict(check_point['state_dict'])
# init data
    dataset.records = dataset.records[start:end]
data_iter = torch.utils.data.DataLoader(dataset=dataset, shuffle=False)
# inference
for (image, gt_boxes, im_info, ID) in data_iter:
pred_boxes = net(image.cuda(device), im_info.cuda(device))
scale = im_info[0, 2]
if config.test_nms_method == 'set_nms':
            assert pred_boxes.shape[-1] > 6, "Not an EMD network: set_nms requires multiple predictions per proposal."
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
top_k = pred_boxes.shape[-1] // 6
n = pred_boxes.shape[0]
pred_boxes = pred_boxes.reshape(-1, 6)
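            # tag each box with its source proposal index; set NMS only suppresses boxes from different proposals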
idents = np.tile(np.arange(n)[:,None], (1, top_k)).reshape(-1, 1)
pred_boxes = np.hstack((pred_boxes, idents))
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
keep = nms_utils.set_cpu_nms(pred_boxes, 0.5)
pred_boxes = pred_boxes[keep]
elif config.test_nms_method == 'normal_nms':
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
pred_boxes = pred_boxes.reshape(-1, 6)
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
keep = nms_utils.cpu_nms(pred_boxes, config.test_nms)
pred_boxes = pred_boxes[keep]
elif config.test_nms_method == 'none':
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
pred_boxes = pred_boxes.reshape(-1, 6)
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
else:
raise ValueError('Unknown NMS method.')
#if pred_boxes.shape[0] > config.detection_per_image and \
# config.test_nms_method != 'none':
# order = np.argsort(-pred_boxes[:, 4])
# order = order[:config.detection_per_image]
# pred_boxes = pred_boxes[order]
# recovery the scale
pred_boxes[:, :4] /= scale
pred_boxes[:, 2:4] -= pred_boxes[:, :2]
gt_boxes = gt_boxes[0].numpy()
gt_boxes[:, 2:4] -= gt_boxes[:, :2]
print(ID[0])
result_dict = dict(ID=ID[0], height=int(im_info[0, -3]), width=int(im_info[0, -2]),
dtboxes=boxes_dump(pred_boxes), gtboxes=boxes_dump(gt_boxes))
result_queue.put_nowait(result_dict)
def boxes_dump(boxes):
if boxes.shape[-1] == 7:
result = [{'box':[round(i, 1) for i in box[:4]],
'score':round(float(box[4]), 5),
'tag':int(box[5]),
'proposal_num':int(box[6])} for box in boxes]
elif boxes.shape[-1] == 6:
result = [{'box':[round(i, 1) for i in box[:4].tolist()],
'score':round(float(box[4]), 5),
'tag':int(box[5])} for box in boxes]
elif boxes.shape[-1] == 5:
result = [{'box':[round(i, 1) for i in box[:4]],
'tag':int(box[4])} for box in boxes]
else:
raise ValueError('Unknown box dim.')
return result
def run_test():
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', '-md', default=None, required=True, type=str)
parser.add_argument('--resume_weights', '-r', default=None, required=True, type=str)
parser.add_argument('--devices', '-d', default='0', type=str)
os.environ['NCCL_IB_DISABLE'] = '1'
args = parser.parse_args()
# import libs
model_root_dir = os.path.join('../model/', args.model_dir)
sys.path.insert(0, model_root_dir)
from config import config
from network import Network
eval_all(args, config, Network)
if __name__ == '__main__':
run_test()
|
[
"evaluate.compute_JI.evaluation_all",
"evaluate.compute_APMR.compute_APMR"
] |
[((154, 182), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../lib"""'], {}), "(0, '../lib')\n", (169, 182), False, 'import sys\n'), ((183, 213), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../model"""'], {}), "(0, '../model')\n", (198, 213), False, 'import sys\n'), ((410, 468), 'os.path.join', 'os.path.join', (['"""../model"""', 'args.model_dir', 'config.model_dir'], {}), "('../model', args.model_dir, config.model_dir)\n", (422, 468), False, 'import os\n'), ((483, 540), 'os.path.join', 'os.path.join', (['"""../model"""', 'args.model_dir', 'config.eval_dir'], {}), "('../model', args.model_dir, config.eval_dir)\n", (495, 540), False, 'import os\n'), ((545, 575), 'utils.misc_utils.ensure_dir', 'misc_utils.ensure_dir', (['evalDir'], {}), '(evalDir)\n', (566, 575), False, 'from utils import misc_utils, nms_utils\n'), ((682, 708), 'os.path.exists', 'os.path.exists', (['model_file'], {}), '(model_file)\n', (696, 708), False, 'import os\n'), ((772, 809), 'utils.misc_utils.device_parser', 'misc_utils.device_parser', (['str_devices'], {}), '(str_devices)\n', (796, 809), False, 'from utils import misc_utils, nms_utils\n'), ((844, 879), 'data.WiderPerson.WiderPerson', 'WiderPerson', (['config'], {'if_train': '(False)'}), '(config, if_train=False)\n', (855, 879), False, 'from data.WiderPerson import WiderPerson\n'), ((1031, 1064), 'math.ceil', 'math.ceil', (['(len_dataset / num_devs)'], {}), '(len_dataset / num_devs)\n', (1040, 1064), False, 'import math\n'), ((1084, 1094), 'torch.multiprocessing.Queue', 'Queue', (['(500)'], {}), '(500)\n', (1089, 1094), False, 'from torch.multiprocessing import Queue, Process\n'), ((1445, 1478), 'tqdm.tqdm', 'tqdm', ([], {'total': 'len_dataset', 'ncols': '(50)'}), '(total=len_dataset, ncols=50)\n', (1449, 1478), False, 'from tqdm import tqdm\n'), ((1732, 1778), 'utils.misc_utils.save_json_lines', 'misc_utils.save_json_lines', (['all_results', 'fpath'], {}), '(all_results, fpath)\n', (1758, 1778), False, 'from utils import misc_utils, nms_utils\n'), ((1932, 1971), 'evaluate.compute_JI.evaluation_all', 'compute_JI.evaluation_all', (['fpath', '"""box"""'], {}), "(fpath, 'box')\n", (1957, 1971), False, 'from evaluate import compute_JI, compute_APMR\n'), ((2045, 2134), 'evaluate.compute_APMR.compute_APMR', 'compute_APMR.compute_APMR', (['fpath', 'config.eval_source', '"""box"""', 'config.annotations_root'], {}), "(fpath, config.eval_source, 'box', config.\n annotations_root)\n", (2070, 2134), False, 'from evaluate import compute_JI, compute_APMR\n'), ((2356, 2406), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (2385, 2406), False, 'import torch\n'), ((2411, 2468), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (2453, 2468), False, 'import torch\n'), ((2566, 2588), 'torch.load', 'torch.load', (['model_file'], {}), '(model_file)\n', (2576, 2588), False, 'import torch\n'), ((2722, 2781), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'shuffle': '(False)'}), '(dataset=dataset, shuffle=False)\n', (2749, 2781), False, 'import torch\n'), ((5783, 5808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5806, 5808), False, 'import argparse\n'), ((6159, 6200), 'os.path.join', 'os.path.join', (['"""../model/"""', 'args.model_dir'], {}), "('../model/', args.model_dir)\n", (6171, 6200), False, 'import os\n'), ((6205, 6239), 'sys.path.insert', 'sys.path.insert', (['(0)', 'model_root_dir'], {}), '(0, model_root_dir)\n', (6220, 6239), False, 'import sys\n'), ((1256, 1372), 'torch.multiprocessing.Process', 'Process', ([], {'target': 'inference', 'args': '(config, network, model_file, devices[i], widerPerson, start, end, result_queue\n )'}), '(target=inference, args=(config, network, model_file, devices[i],\n widerPerson, start, end, result_queue))\n', (1263, 1372), False, 'from torch.multiprocessing import Queue, Process\n'), ((3396, 3427), 'numpy.hstack', 'np.hstack', (['(pred_boxes, idents)'], {}), '((pred_boxes, idents))\n', (3405, 3427), True, 'import numpy as np\n'), ((3553, 3591), 'utils.nms_utils.set_cpu_nms', 'nms_utils.set_cpu_nms', (['pred_boxes', '(0.5)'], {}), '(pred_boxes, 0.5)\n', (3574, 3591), False, 'from utils import misc_utils, nms_utils\n'), ((3937, 3983), 'utils.nms_utils.cpu_nms', 'nms_utils.cpu_nms', (['pred_boxes', 'config.test_nms'], {}), '(pred_boxes, config.test_nms)\n', (3954, 3983), False, 'from utils import misc_utils, nms_utils\n'), ((3322, 3334), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3331, 3334), True, 'import numpy as np\n')]
|
import os
import sys
import time
import random
import argparse
import shutil
import logging
import torch
import numpy as np
from allennlp.data.iterators import BasicIterator
from preprocess import build_tasks
from models import build_model
from trainer import build_trainer
from evaluate import evaluate
from util import device_mapping, query_yes_no, resume_checkpoint
def main(arguments):
parser = argparse.ArgumentParser(description='')
parser.add_argument('--cuda', help='-1 if no CUDA, else gpu id (single gpu is enough)', type=int, default=0)
parser.add_argument('--random_seed', help='random seed to use', type=int, default=111)
# Paths and logging
parser.add_argument('--log_file', help='file to log to', type=str, default='training.log')
parser.add_argument('--store_root', help='store root path', type=str, default='checkpoint')
parser.add_argument('--store_name', help='store name prefix for current experiment', type=str, default='sts')
parser.add_argument('--suffix', help='store name suffix for current experiment', type=str, default='')
parser.add_argument('--word_embs_file', help='file containing word embs', type=str, default='glove/glove.840B.300d.txt')
# Training resuming flag
parser.add_argument('--resume', help='whether to resume training', action='store_true', default=False)
# Tasks
parser.add_argument('--task', help='training and evaluation task', type=str, default='sts-b')
# Preprocessing options
parser.add_argument('--max_seq_len', help='max sequence length', type=int, default=40)
parser.add_argument('--max_word_v_size', help='max word vocab size', type=int, default=30000)
# Embedding options
parser.add_argument('--dropout_embs', help='dropout rate for embeddings', type=float, default=.2)
parser.add_argument('--d_word', help='dimension of word embeddings', type=int, default=300)
parser.add_argument('--glove', help='1 if use glove, else from scratch', type=int, default=1)
parser.add_argument('--train_words', help='1 if make word embs trainable', type=int, default=0)
# Model options
parser.add_argument('--d_hid', help='hidden dimension size', type=int, default=1500)
parser.add_argument('--n_layers_enc', help='number of RNN layers', type=int, default=2)
parser.add_argument('--n_layers_highway', help='number of highway layers', type=int, default=0)
parser.add_argument('--dropout', help='dropout rate to use in training', type=float, default=0.2)
# Training options
parser.add_argument('--batch_size', help='batch size', type=int, default=128)
parser.add_argument('--optimizer', help='optimizer to use', type=str, default='adam')
parser.add_argument('--lr', help='starting learning rate', type=float, default=1e-4)
parser.add_argument('--loss', type=str, default='mse', choices=['mse', 'l1', 'focal_l1', 'focal_mse', 'huber'])
parser.add_argument('--huber_beta', type=float, default=0.3, help='beta for huber loss')
parser.add_argument('--max_grad_norm', help='max grad norm', type=float, default=5.)
parser.add_argument('--val_interval', help='number of iterations between validation checks', type=int, default=400)
parser.add_argument('--max_vals', help='maximum number of validation checks', type=int, default=100)
parser.add_argument('--patience', help='patience for early stopping', type=int, default=10)
# imbalanced related
# LDS
parser.add_argument('--lds', action='store_true', default=False, help='whether to enable LDS')
parser.add_argument('--lds_kernel', type=str, default='gaussian',
choices=['gaussian', 'triang', 'laplace'], help='LDS kernel type')
parser.add_argument('--lds_ks', type=int, default=5, help='LDS kernel size: should be odd number')
parser.add_argument('--lds_sigma', type=float, default=2, help='LDS gaussian/laplace kernel sigma')
# FDS
parser.add_argument('--fds', action='store_true', default=False, help='whether to enable FDS')
parser.add_argument('--fds_kernel', type=str, default='gaussian',
choices=['gaussian', 'triang', 'laplace'], help='FDS kernel type')
parser.add_argument('--fds_ks', type=int, default=5, help='FDS kernel size: should be odd number')
parser.add_argument('--fds_sigma', type=float, default=2, help='FDS gaussian/laplace kernel sigma')
parser.add_argument('--start_update', type=int, default=0, help='which epoch to start FDS updating')
parser.add_argument('--start_smooth', type=int, default=1, help='which epoch to start using FDS to smooth features')
parser.add_argument('--bucket_num', type=int, default=50, help='maximum bucket considered for FDS')
parser.add_argument('--bucket_start', type=int, default=0, help='minimum(starting) bucket for FDS')
parser.add_argument('--fds_mmt', type=float, default=0.9, help='FDS momentum')
# re-weighting: SQRT_INV / INV
parser.add_argument('--reweight', type=str, default='none', choices=['none', 'sqrt_inv', 'inverse'],
help='cost-sensitive reweighting scheme')
# two-stage training: RRT
parser.add_argument('--retrain_fc', action='store_true', default=False,
help='whether to retrain last regression layer (regressor)')
parser.add_argument('--pretrained', type=str, default='', help='pretrained checkpoint file path to load backbone weights for RRT')
# evaluate only
parser.add_argument('--evaluate', action='store_true', default=False, help='evaluate only flag')
parser.add_argument('--eval_model', type=str, default='', help='the model to evaluate on; if not specified, '
'use the default best model in store_dir')
args = parser.parse_args(arguments)
os.makedirs(args.store_root, exist_ok=True)
if not args.lds and args.reweight != 'none':
args.store_name += f'_{args.reweight}'
if args.lds:
args.store_name += f'_lds_{args.lds_kernel[:3]}_{args.lds_ks}'
if args.lds_kernel in ['gaussian', 'laplace']:
args.store_name += f'_{args.lds_sigma}'
if args.fds:
args.store_name += f'_fds_{args.fds_kernel[:3]}_{args.fds_ks}'
if args.fds_kernel in ['gaussian', 'laplace']:
args.store_name += f'_{args.fds_sigma}'
args.store_name += f'_{args.start_update}_{args.start_smooth}_{args.fds_mmt}'
if args.retrain_fc:
args.store_name += f'_retrain_fc'
if args.loss == 'huber':
args.store_name += f'_{args.loss}_beta_{args.huber_beta}'
else:
args.store_name += f'_{args.loss}'
args.store_name += f'_seed_{args.random_seed}_valint_{args.val_interval}_patience_{args.patience}' \
f'_{args.optimizer}_{args.lr}_{args.batch_size}'
args.store_name += f'_{args.suffix}' if len(args.suffix) else ''
args.store_dir = os.path.join(args.store_root, args.store_name)
if not args.evaluate and not args.resume:
if os.path.exists(args.store_dir):
if query_yes_no('overwrite previous folder: {} ?'.format(args.store_dir)):
shutil.rmtree(args.store_dir)
print(args.store_dir + ' removed.\n')
else:
raise RuntimeError('Output folder {} already exists'.format(args.store_dir))
logging.info(f"===> Creating folder: {args.store_dir}")
os.makedirs(args.store_dir)
# Logistics
logging.root.handlers = []
if os.path.exists(args.store_dir):
log_file = os.path.join(args.store_dir, args.log_file)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s | %(message)s",
handlers=[
logging.FileHandler(log_file),
logging.StreamHandler()
])
else:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s | %(message)s",
handlers=[logging.StreamHandler()]
)
logging.info(args)
seed = random.randint(1, 10000) if args.random_seed < 0 else args.random_seed
random.seed(seed)
torch.manual_seed(seed)
if args.cuda >= 0:
logging.info("Using GPU %d", args.cuda)
torch.cuda.set_device(args.cuda)
torch.cuda.manual_seed_all(seed)
logging.info("Using random seed %d", seed)
# Load tasks
logging.info("Loading tasks...")
start_time = time.time()
tasks, vocab, word_embs = build_tasks(args)
logging.info('\tFinished loading tasks in %.3fs', time.time() - start_time)
# Build model
logging.info('Building model...')
start_time = time.time()
model = build_model(args, vocab, word_embs, tasks)
logging.info('\tFinished building model in %.3fs', time.time() - start_time)
# Set up trainer
iterator = BasicIterator(args.batch_size)
trainer, train_params, opt_params = build_trainer(args, model, iterator)
# Train
if tasks and not args.evaluate:
if args.retrain_fc and len(args.pretrained):
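            # RRT (two-stage training): load pretrained backbone weights, then optimize only the final regression layer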
model_path = args.pretrained
assert os.path.isfile(model_path), f"No checkpoint found at '{model_path}'"
model_state = torch.load(model_path, map_location=device_mapping(args.cuda))
trainer._model = resume_checkpoint(trainer._model, model_state, backbone_only=True)
logging.info(f'Pre-trained backbone weights loaded: {model_path}')
logging.info('Retrain last regression layer only!')
for name, param in trainer._model.named_parameters():
if "sts-b_pred_layer" not in name:
param.requires_grad = False
logging.info(f'Only optimize parameters: {[n for n, p in trainer._model.named_parameters() if p.requires_grad]}')
to_train = [(n, p) for n, p in trainer._model.named_parameters() if p.requires_grad]
else:
to_train = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
trainer.train(tasks, args.val_interval, to_train, opt_params, args.resume)
else:
logging.info("Skipping training...")
logging.info('Testing on test set...')
model_path = os.path.join(args.store_dir, "model_state_best.th") if not len(args.eval_model) else args.eval_model
assert os.path.isfile(model_path), f"No checkpoint found at '{model_path}'"
logging.info(f'Evaluating {model_path}...')
model_state = torch.load(model_path, map_location=device_mapping(args.cuda))
model = resume_checkpoint(model, model_state)
te_preds, te_labels, _ = evaluate(model, tasks, iterator, cuda_device=args.cuda, split="test")
if not len(args.eval_model):
np.savez_compressed(os.path.join(args.store_dir, f"{args.store_name}.npz"), preds=te_preds, labels=te_labels)
logging.info("Done testing.")
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"evaluate.evaluate"
] |
[((405, 444), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (428, 444), False, 'import argparse\n'), ((5828, 5871), 'os.makedirs', 'os.makedirs', (['args.store_root'], {'exist_ok': '(True)'}), '(args.store_root, exist_ok=True)\n', (5839, 5871), False, 'import os\n'), ((6929, 6975), 'os.path.join', 'os.path.join', (['args.store_root', 'args.store_name'], {}), '(args.store_root, args.store_name)\n', (6941, 6975), False, 'import os\n'), ((7519, 7549), 'os.path.exists', 'os.path.exists', (['args.store_dir'], {}), '(args.store_dir)\n', (7533, 7549), False, 'import os\n'), ((8028, 8046), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (8040, 8046), False, 'import logging\n'), ((8134, 8151), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (8145, 8151), False, 'import random\n'), ((8156, 8179), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (8173, 8179), False, 'import torch\n'), ((8337, 8379), 'logging.info', 'logging.info', (['"""Using random seed %d"""', 'seed'], {}), "('Using random seed %d', seed)\n", (8349, 8379), False, 'import logging\n'), ((8402, 8434), 'logging.info', 'logging.info', (['"""Loading tasks..."""'], {}), "('Loading tasks...')\n", (8414, 8434), False, 'import logging\n'), ((8452, 8463), 'time.time', 'time.time', ([], {}), '()\n', (8461, 8463), False, 'import time\n'), ((8494, 8511), 'preprocess.build_tasks', 'build_tasks', (['args'], {}), '(args)\n', (8505, 8511), False, 'from preprocess import build_tasks\n'), ((8615, 8648), 'logging.info', 'logging.info', (['"""Building model..."""'], {}), "('Building model...')\n", (8627, 8648), False, 'import logging\n'), ((8666, 8677), 'time.time', 'time.time', ([], {}), '()\n', (8675, 8677), False, 'import time\n'), ((8690, 8732), 'models.build_model', 'build_model', (['args', 'vocab', 'word_embs', 'tasks'], {}), '(args, vocab, word_embs, tasks)\n', (8701, 8732), False, 'from models import build_model\n'), ((8851, 8881), 'allennlp.data.iterators.BasicIterator', 'BasicIterator', (['args.batch_size'], {}), '(args.batch_size)\n', (8864, 8881), False, 'from allennlp.data.iterators import BasicIterator\n'), ((8922, 8958), 'trainer.build_trainer', 'build_trainer', (['args', 'model', 'iterator'], {}), '(args, model, iterator)\n', (8935, 8958), False, 'from trainer import build_trainer\n'), ((10152, 10190), 'logging.info', 'logging.info', (['"""Testing on test set..."""'], {}), "('Testing on test set...')\n", (10164, 10190), False, 'import logging\n'), ((10320, 10346), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (10334, 10346), False, 'import os\n'), ((10393, 10436), 'logging.info', 'logging.info', (['f"""Evaluating {model_path}..."""'], {}), "(f'Evaluating {model_path}...')\n", (10405, 10436), False, 'import logging\n'), ((10530, 10567), 'util.resume_checkpoint', 'resume_checkpoint', (['model', 'model_state'], {}), '(model, model_state)\n', (10547, 10567), False, 'from util import device_mapping, query_yes_no, resume_checkpoint\n'), ((10597, 10666), 'evaluate.evaluate', 'evaluate', (['model', 'tasks', 'iterator'], {'cuda_device': 'args.cuda', 'split': '"""test"""'}), "(model, tasks, iterator, cuda_device=args.cuda, split='test')\n", (10605, 10666), False, 'from evaluate import evaluate\n'), ((10823, 10852), 'logging.info', 'logging.info', (['"""Done testing."""'], {}), "('Done testing.')\n", (10835, 10852), False, 'import logging\n'), ((7034, 7064), 'os.path.exists', 'os.path.exists', (['args.store_dir'], {}), '(args.store_dir)\n', (7048, 7064), False, 'import os\n'), ((7372, 7427), 'logging.info', 'logging.info', (['f"""===> Creating folder: {args.store_dir}"""'], {}), "(f'===> Creating folder: {args.store_dir}')\n", (7384, 7427), False, 'import logging\n'), ((7436, 7463), 'os.makedirs', 'os.makedirs', (['args.store_dir'], {}), '(args.store_dir)\n', (7447, 7463), False, 'import os\n'), ((7570, 7613), 'os.path.join', 'os.path.join', (['args.store_dir', 'args.log_file'], {}), '(args.store_dir, args.log_file)\n', (7582, 7613), False, 'import os\n'), ((8059, 8083), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (8073, 8083), False, 'import random\n'), ((8211, 8250), 'logging.info', 'logging.info', (['"""Using GPU %d"""', 'args.cuda'], {}), "('Using GPU %d', args.cuda)\n", (8223, 8250), False, 'import logging\n'), ((8259, 8291), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.cuda'], {}), '(args.cuda)\n', (8280, 8291), False, 'import torch\n'), ((8300, 8332), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (8326, 8332), False, 'import torch\n'), ((10110, 10146), 'logging.info', 'logging.info', (['"""Skipping training..."""'], {}), "('Skipping training...')\n", (10122, 10146), False, 'import logging\n'), ((10208, 10259), 'os.path.join', 'os.path.join', (['args.store_dir', '"""model_state_best.th"""'], {}), "(args.store_dir, 'model_state_best.th')\n", (10220, 10259), False, 'import os\n'), ((8566, 8577), 'time.time', 'time.time', ([], {}), '()\n', (8575, 8577), False, 'import time\n'), ((8788, 8799), 'time.time', 'time.time', ([], {}), '()\n', (8797, 8799), False, 'import time\n'), ((9121, 9147), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (9135, 9147), False, 'import os\n'), ((9308, 9374), 'util.resume_checkpoint', 'resume_checkpoint', (['trainer._model', 'model_state'], {'backbone_only': '(True)'}), '(trainer._model, model_state, backbone_only=True)\n', (9325, 9374), False, 'from util import device_mapping, query_yes_no, resume_checkpoint\n'), ((9387, 9453), 'logging.info', 'logging.info', (['f"""Pre-trained backbone weights loaded: {model_path}"""'], {}), "(f'Pre-trained backbone weights loaded: {model_path}')\n", (9399, 9453), False, 'import logging\n'), ((9466, 9517), 'logging.info', 'logging.info', (['"""Retrain last regression layer only!"""'], {}), "('Retrain last regression layer only!')\n", (9478, 9517), False, 'import logging\n'), ((10491, 10516), 'util.device_mapping', 'device_mapping', (['args.cuda'], {}), '(args.cuda)\n', (10505, 10516), False, 'from util import device_mapping, query_yes_no, resume_checkpoint\n'), ((10728, 10782), 'os.path.join', 'os.path.join', (['args.store_dir', 'f"""{args.store_name}.npz"""'], {}), "(args.store_dir, f'{args.store_name}.npz')\n", (10740, 10782), False, 'import os\n'), ((7169, 7198), 'shutil.rmtree', 'shutil.rmtree', (['args.store_dir'], {}), '(args.store_dir)\n', (7182, 7198), False, 'import shutil\n'), ((7762, 7791), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (7781, 7791), False, 'import logging\n'), ((7809, 7832), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7830, 7832), False, 'import logging\n'), ((7989, 8012), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8010, 8012), False, 'import logging\n'), ((9252, 9277), 'util.device_mapping', 'device_mapping', (['args.cuda'], {}), '(args.cuda)\n', (9266, 9277), False, 'from util import device_mapping, query_yes_no, resume_checkpoint\n')]
|
import config
from utils import squad_evaluate, load_and_cache_examples
import glob
import json
import logging
import os
import random
from evaluate import evaluate
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import AdamW, get_linear_schedule_with_warmup, WEIGHTS_NAME
logger = logging.getLogger(__name__)
# ALL_MODELS = config.ALL_MODELS
MODEL_CLASSES = config.MODEL_CLASSES
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
args.warmup_steps = int(t_total * args.warmup_proportion)
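    # linear LR warmup over the first warmup_proportion of total steps, then linear decay to zero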
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1 and args.local_rank == -1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 1
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
try:
            # set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
# batch = tuple(t.to(args.device) for t in batch)
batch = tuple(batch[t].to(args.device) for t in batch.keys())
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"start_positions": batch[3],
"end_positions": batch[4],
}
if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]:
del inputs["token_type_ids"]
if args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
if args.version_2_with_negative:
inputs.update({"is_impossible": batch[7]})
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
outputs = model(**inputs)
            # recent transformers versions return a ModelOutput object; the loss is exposed as an attribute
            loss = outputs.loss
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
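                    # scale the loss down so gradients accumulated over N micro-batches match one full-batch step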
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0]:
examples, features, results = evaluate(args, model, tokenizer)
squad_evaluate(args, tokenizer, examples, features, results, str(global_step) + "epoch", False)
# Log metrics
# if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# # Only evaluate when single GPU otherwise metrics may not average well
# if args.local_rank == -1 and args.evaluate_during_training:
# examples, features, results = evaluate(args, model, tokenizer)
# write_evaluation(args, tokenizer, examples, features, results, prefix=str(step)+"step")
# Save model checkpoint
# if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# output_dir = os.path.join(args.output_dir, f"checkpoint_{global_step}")
# # Take care of distributed/parallel training
# model_to_save = model.module if hasattr(model, "module") else model
# model_to_save.save_pretrained(output_dir)
# tokenizer.save_pretrained(output_dir)
#
# torch.save(args, os.path.join(output_dir, "training_args.bin"))
# logger.info("Saving model checkpoint to %s", output_dir)
#
# torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
# torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
# logger.info("Saving optimizer and scheduler states to %s", output_dir)
# if args.max_steps > 0 and global_step > args.max_steps:
# epoch_iterator.close()
# break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
# if args.local_rank in [-1, 0]:
# tb_writer.close()
return global_step, tr_loss / global_step
def main(args):
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) \
            and args.do_train and not args.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir))
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
    # Added here for reproducibility
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.bert_config_file_T if args.bert_config_file_T else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
use_fast = False)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, "einsum")
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, mode="train", return_examples=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir) # , force_download=True)
    # SquadDataset is not compatible with Fast tokenizers, which have a smarter overflow handling
    # So we use use_fast=False here for now until Fast-tokenizer-compatible examples are out
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case, use_fast=False)
model.to(args.device)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if args.do_train:
logger.info("Loading checkpoints saved during training for evaluation")
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
else:
logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path)
checkpoints = [args.model_name_or_path]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint) # , force_download=True)
model.to(args.device)
# Evaluate
examples, features, results = evaluate(args, model, tokenizer, prefix=global_step)
evaluation = squad_evaluate(args, tokenizer, examples, features, results)
logger.info("***** Eval results *****")
logger.info(json.dumps(evaluation, indent=2) + '\n')
output_eval_file = os.path.join(args.output_dir, "final_eval_results.txt")
logger.info(f"Write evaluation result to {output_eval_file}...")
with open(output_eval_file, "a") as writer:
writer.write(f"Output: {json.dumps(evaluation, indent=2)}\n")
# result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
# results.update(result)
return
if __name__ == '__main__':
config.parse()
args = config.args
main(args)
|
[
"evaluate.evaluate"
] |
[((429, 456), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (446, 456), False, 'import logging\n'), ((555, 577), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (566, 577), False, 'import random\n'), ((582, 607), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (596, 607), True, 'import numpy as np\n'), ((612, 640), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (629, 640), False, 'import torch\n'), ((1006, 1093), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_dataset, sampler=train_sampler, batch_size=args.\n train_batch_size)\n', (1016, 1093), False, 'from torch.utils.data import DataLoader, RandomSampler\n'), ((1830, 1916), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'eps': 'args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.\n adam_epsilon)\n', (1835, 1916), False, 'from transformers import AdamW, get_linear_schedule_with_warmup, WEIGHTS_NAME\n'), ((1990, 2101), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'args.warmup_steps', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=args.\n warmup_steps, num_training_steps=t_total)\n', (2021, 2101), False, 'from transformers import AdamW, get_linear_schedule_with_warmup, WEIGHTS_NAME\n'), ((4152, 4191), 'os.path.exists', 'os.path.exists', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4166, 4191), False, 'import os\n'), ((10885, 11080), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if args.local_rank in [-1, 0] else logging.WARN)'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else\n logging.WARN)\n", (10904, 11080), False, 'import logging\n'), ((16879, 16893), 'config.parse', 'config.parse', ([], {}), '()\n', (16891, 16893), False, 'import config\n'), ((672, 709), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (698, 709), False, 'import torch\n'), ((890, 918), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (903, 918), False, 'from torch.utils.data import DataLoader, RandomSampler\n'), ((949, 982), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (967, 982), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((2826, 2889), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': 'args.fp16_opt_level'}), '(model, optimizer, opt_level=args.fp16_opt_level)\n', (2840, 2889), False, 'from apex import amp\n'), ((3024, 3052), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3045, 3052), False, 'import torch\n'), ((3170, 3313), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], output_device=args.local_rank, find_unused_parameters=True)\n', (3211, 3313), False, 'import torch\n'), ((5349, 5434), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""', 'disable': '(args.local_rank not in [-1, 0])'}), "(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0]\n )\n", (5353, 5434), False, 'from tqdm import tqdm, trange\n'), ((9994, 10025), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (10008, 10025), False, 'import os\n'), ((10030, 10057), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (10040, 10057), False, 'import os\n'), ((10662, 10700), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (10683, 10700), False, 'import torch\n'), ((10718, 10755), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (10730, 10755), False, 'import torch\n'), ((10764, 10816), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (10800, 10816), False, 'import torch\n'), ((11487, 11514), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (11512, 11514), False, 'import torch\n'), ((12662, 12689), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (12687, 12689), False, 'import torch\n'), ((13497, 13574), 'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'tokenizer'], {'mode': '"""train"""', 'return_examples': '(False)'}), "(args, tokenizer, mode='train', return_examples=False)\n", (13520, 13574), False, 'from utils import squad_evaluate, load_and_cache_examples\n'), ((2191, 2244), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""optimizer.pt"""'], {}), "(args.model_name_or_path, 'optimizer.pt')\n", (2203, 2244), False, 'import os\n'), ((2278, 2331), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""scheduler.pt"""'], {}), "(args.model_name_or_path, 'scheduler.pt')\n", (2290, 2331), False, 'import os\n'), ((7912, 7944), 'evaluate.evaluate', 'evaluate', (['args', 'model', 'tokenizer'], {}), '(args, model, tokenizer)\n', (7920, 7944), False, 'from evaluate import evaluate\n'), ((10531, 10556), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10554, 10556), False, 'import torch\n'), ((13243, 13291), 'apex.amp.register_half_function', 'apex.amp.register_half_function', (['torch', '"""einsum"""'], {}), "(torch, 'einsum')\n", (13274, 13291), False, 'import apex\n'), ((14434, 14484), 'os.path.join', 'os.path.join', (['args.output_dir', '"""training_args.bin"""'], {}), "(args.output_dir, 'training_args.bin')\n", (14446, 14484), False, 'import os\n'), ((16125, 16177), 'evaluate.evaluate', 'evaluate', (['args', 'model', 'tokenizer'], {'prefix': 'global_step'}), '(args, model, tokenizer, prefix=global_step)\n', (16133, 16177), False, 'from evaluate import evaluate\n'), ((16203, 16263), 'utils.squad_evaluate', 'squad_evaluate', (['args', 'tokenizer', 'examples', 'features', 'results'], {}), '(args, tokenizer, examples, features, results)\n', (16217, 16263), False, 'from utils import squad_evaluate, load_and_cache_examples\n'), ((16413, 16468), 'os.path.join', 'os.path.join', (['args.output_dir', '"""final_eval_results.txt"""'], {}), "(args.output_dir, 'final_eval_results.txt')\n", (16425, 16468), False, 'import os\n'), ((2433, 2486), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""optimizer.pt"""'], {}), "(args.model_name_or_path, 'optimizer.pt')\n", (2445, 2486), False, 'import os\n'), ((2534, 2587), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""scheduler.pt"""'], {}), "(args.model_name_or_path, 'scheduler.pt')\n", (2546, 2587), False, 'import os\n'), ((3786, 3820), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (3818, 3820), False, 'import torch\n'), ((13832, 13860), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (13858, 13860), False, 'import torch\n'), ((7184, 7215), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (7198, 7215), False, 'from apex import amp\n'), ((10428, 10453), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10451, 10453), False, 'import torch\n'), ((16340, 16372), 'json.dumps', 'json.dumps', (['evaluation'], {'indent': '(2)'}), '(evaluation, indent=2)\n', (16350, 16372), False, 'import json\n'), ((7509, 7537), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (7526, 7537), False, 'from apex import amp\n'), ((15411, 15429), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (15426, 15429), False, 'import os\n'), ((16642, 16674), 'json.dumps', 'json.dumps', (['evaluation'], {'indent': '(2)'}), '(evaluation, indent=2)\n', (16652, 16674), False, 'import json\n'), ((15466, 15532), 'glob.glob', 'glob.glob', (["(args.output_dir + '/**/' + WEIGHTS_NAME)"], {'recursive': '(True)'}), "(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)\n", (15475, 15532), False, 'import glob\n'), ((6640, 6685), 'torch.ones', 'torch.ones', (['batch[0].shape'], {'dtype': 'torch.int64'}), '(batch[0].shape, dtype=torch.int64)\n', (6650, 6685), False, 'import torch\n')]
|
"""Train the model"""
import argparse
import logging
import os
import random
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import sys
sys.path.append('')
#os.chdir("..")
import utils
from data.data_utils import fetch_data, FERDataset, DataLoader, transform_train, transform_weak, transform_infer
from evaluate import evaluate
from efficientnet_pytorch import EfficientNet
#import warnings
#warnings.filterwarnings("ignore", message=".*pthreadpool.*")
from train_utils import MarginCalibratedCELoss, ConfusionMatrix
def train(model, optimizer, loss_fn, dataloader, lr_warmUp, metrics, params):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: main metric for determining best performing model
params: (Params) hyperparameters
num_steps: (int) number of batches to train on, each of size params.batch_size
"""
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
cm = ConfusionMatrix(num_classes=8)
summ = []
loss_avg = utils.RunningAverage()
# Use tqdm for progress bar
with tqdm(total=len(dataloader), file=sys.stdout) as t:
for i, (train_batch, labels_batch) in enumerate(dataloader):
# move to GPU if available
if params.cuda:
train_batch, labels_batch = train_batch.cuda(
non_blocking=True), labels_batch.cuda(non_blocking=True)
# convert to torch Variables
train_batch, labels_batch = Variable(
train_batch), Variable(labels_batch)
# compute model output and loss
output_batch = model(train_batch)
loss = loss_fn(output_batch, labels_batch)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
if params.warm_up:
lr_warmUp.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.cpu().detach().numpy()
labels_batch = labels_batch.cpu().detach().numpy()
cm.update(output_batch,labels_batch)
# compute all metrics on this batch
#summary_batch = cm.compute()
#summary_batch['loss'] = loss.item()
#summ.append(summary_batch)
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
t.update()
# compute mean of all metrics in summary
metrics_mean = cm.compute()
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v)
for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer, scheduler, lr_warmUp,
loss_fn, metrics, params, model_dir,
restore_file=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
val_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches validation data
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
metrics: main metric for determining best performing model
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
"""
# reload weights from restore_file if specified
if restore_file is not None:
        restore_path = os.path.join(
            model_dir, restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
best_val_acc = 0.0
for epoch in range(params.num_epochs):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
train(model, optimizer, loss_fn, train_dataloader, lr_warmUp, metrics, params)
# Evaluate for one epoch on validation set
val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)
# update optimizer parameters
scheduler.step()
val_acc = val_metrics[metrics]
is_best = val_acc >= best_val_acc
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best "+metrics)
best_val_acc = val_acc
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(
model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(
model_dir, "metrics_val_last_weights.json")
utils.save_dict_to_json(val_metrics, last_json_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
"""
Saving & loading of the model.
"""
parser.add_argument('--experiment_title', default='train model on fer+')
parser.add_argument('--model_dir', default='experiments',
help="Directory containing params.json")
parser.add_argument("--save_name", type=str, default="fer")
parser.add_argument("--overwrite", action="store_true")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training") # 'best' or 'train'
"""
Training Configuration
"""
parser.add_argument("--num_epochs", type=int, default=200)
parser.add_argument(
"--num_train_iter",
type=int,
default=1000,
help="total number of training iterations",
)
parser.add_argument(
"--save_summary_steps", type=int, default=20, help="evaluation frequency"
)
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="total number of batch size of labeled data",
)
parser.add_argument(
"--eval_batch_size",
type=int,
default=128,
help="batch size of evaluation data loader (it does not affect the accuracy)",
)
"""
Optimizer configurations
"""
parser.add_argument("--opt", type=str, default="SGD")
parser.add_argument("--lr", type=float, default=0.03)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--weight_decay", type=float, default=5e-4)
parser.add_argument("--step_size", type=int, default=10)
parser.add_argument("--gamma", type=float, default=0.6)
parser.add_argument("--warm_up", action="store_true", help="one epoch warm up for learning rate")
parser.add_argument("--amp", action="store_true", help="use mixed precision training or not")
"""
Backbone Net Configurations
"""
parser.add_argument("--net", type=str, default="efficientnet-b0")
parser.add_argument("--net_from_name", type=bool, default=False)
parser.add_argument("--pretrained", action="store_true", default=False)
parser.add_argument("--depth", type=int, default=28)
parser.add_argument("--widen_factor", type=int, default=2)
parser.add_argument("--leaky_slope", type=float, default=0.1)
parser.add_argument("--dropout", type=float, default=0.0)
"""
Data Configurations
"""
parser.add_argument('--data_dir', default='data/fer2013/fer2013.csv',
help="Directory containing the dataset")
parser.add_argument("--dataset", type=str, default="fer+")
parser.add_argument("--data_sampler", type=str, default="none")
parser.add_argument("--num_workers", type=int, default=1)
args = parser.parse_args()
# Load the parameters from json file
import json
#print(json.dumps(vars(args), indent=4))
    args.model_dir = os.path.join(args.model_dir, args.save_name)
print(args.model_dir)
json_path = os.path.join(args.model_dir, 'params.json')
os.makedirs(os.path.dirname(json_path), exist_ok=True)
assert not (os.path.isfile(json_path) and not args.overwrite), "already existing json configuration file found at {} \
\n use overwrite flag if you're sure!".format(json_path)
with open(json_path,'w' if args.overwrite else 'x' ) as f:
json.dump(vars(args), f, indent=4)
params = utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
# Set the random seed for reproducible experiments
torch.manual_seed(230)
np.random.seed(0)
random.seed(0)
if params.cuda:
torch.cuda.manual_seed(230)
torch.backends.cudnn.deterministic = True
# Set the logger
utils.set_logger(os.path.join(args.model_dir, 'train.log'))
# hyper parameter settings
logging.info("Setup for training model:")
logging.info((json.dumps(vars(args), indent=4)))
# Create the input data pipeline
logging.info("Loading the datasets...")
# fetch dataloaders
data_splits ,classes = fetch_data()
trainset = FERDataset(data_splits['train'], classes=classes, transform=transform_train, transform_weak=None)
valset = FERDataset(data_splits['val'], classes=classes, transform=transform_infer)
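    # empirical class distribution (percentages, summing to 100) of the 8 FER+ classes; used as the class prior below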
p = torch.Tensor([36.3419, 26.4458, 12.5597, 12.4088, 8.6819, 0.6808, 2.2951, 0.5860])
gmean = lambda p: torch.exp(torch.log(p).mean())
sampler = None
train_shuffle = True
if params.data_sampler == 'weighted':
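        # tempered inverse of the normalized prior: rare classes (small p) get larger sampling weights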
w = (p/gmean(p))**(-1/3)
        balance_sampler = torch.utils.data.WeightedRandomSampler(
            weights=w[trainset.data_split["labels"]],
            num_samples=len(trainset.data_split["labels"]),
            replacement=True, generator=None)
        sampler = balance_sampler
train_shuffle = False
elif params.data_sampler == 'none':
train_shuffle = True
else:
raise Exception("Not Implemented Yet!")
train_dl = DataLoader(trainset, batch_size= params.batch_size,
shuffle=train_shuffle, sampler= sampler,
num_workers= params.num_workers, pin_memory= params.cuda)
val_dl = DataLoader(valset, batch_size= params.batch_size,
shuffle= False, sampler= None,
num_workers= 2, pin_memory= params.cuda)
logging.info("- done.")
# Define the model and optimizer
if "efficientnet" in params.net:
if params.pretrained:
raise Exception("Not Implemented Yet!")
else:
logging.info("Using not pretrained model "+ params.net+ " ...")
model = EfficientNet.from_name('efficientnet-b0',in_channels=trainset.in_channels,num_classes=trainset.num_classes)
else:
raise Exception("Not Implemented Error! check --net ")
model = model.cuda() if params.cuda else model
optimizer = optim.SGD(model.parameters(),
lr=params.lr,momentum=params.momentum,weight_decay=params.weight_decay, nesterov=True)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=params.step_size, gamma=params.gamma)
lr_warmUp = None
if params.warm_up:
lr_warmUp = optim.lr_scheduler.LinearLR(optimizer, start_factor=0.01,
end_factor=1.0,total_iters=len(train_dl),
last_epoch=- 1, verbose=False)
#assert False, 'forced stop!'
# fetch loss function and metrics
#loss_fn = torch.nn.CrossEntropyLoss(weight=w.cuda()**-1)
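    # prior-calibrated loss: weight = prior^(-1/2) up-weights rare classes, and the
    # per-class margin is set to -log(weight), i.e. (1/2)*log(prior)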
prior = (p/gmean(p))
weight = prior**(-1/2)
margin = -torch.log(prior**(-1/2))
    loss_fn = MarginCalibratedCELoss(weight=weight, margin=margin, label_smoothing=0.05)
    if params.cuda:
        loss_fn = loss_fn.cuda()
# maintain all metrics required in this dictionary- these are used in the training and evaluation loops
metrics = 'recall'
# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, train_dl, val_dl, optimizer, scheduler, lr_warmUp, loss_fn, metrics, params, args.model_dir,
args.restore_file)
|
[
"evaluate.evaluate"
] |
[((209, 228), 'sys.path.append', 'sys.path.append', (['""""""'], {}), "('')\n", (224, 228), False, 'import sys\n'), ((1398, 1428), 'train_utils.ConfusionMatrix', 'ConfusionMatrix', ([], {'num_classes': '(8)'}), '(num_classes=8)\n', (1413, 1428), False, 'from train_utils import MarginCalibratedCELoss, ConfusionMatrix\n'), ((1463, 1485), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1483, 1485), False, 'import utils\n'), ((3417, 3467), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (3429, 3467), False, 'import logging\n'), ((6372, 6397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6395, 6397), False, 'import argparse\n'), ((9425, 9469), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.save_name'], {}), '(args.model_dir, args.save_name)\n', (9437, 9469), False, 'import os\n'), ((9527, 9570), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (9539, 9570), False, 'import os\n'), ((9958, 9981), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (9970, 9981), False, 'import utils\n'), ((10037, 10062), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10060, 10062), False, 'import torch\n'), ((10128, 10150), 'torch.manual_seed', 'torch.manual_seed', (['(230)'], {}), '(230)\n', (10145, 10150), False, 'import torch\n'), ((10155, 10172), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (10169, 10172), True, 'import numpy as np\n'), ((10177, 10191), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (10188, 10191), False, 'import random\n'), ((10424, 10465), 'logging.info', 'logging.info', (['"""Setup for training model:"""'], {}), "('Setup for training model:')\n", (10436, 10465), False, 'import logging\n'), ((10562, 10601), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (10574, 10601), False, 'import logging\n'), ((10658, 10670), 'data.data_utils.fetch_data', 'fetch_data', ([], {}), '()\n', (10668, 10670), False, 'from data.data_utils import fetch_data, FERDataset, DataLoader, transform_train, transform_weak, transform_infer\n'), ((10686, 10787), 'data.data_utils.FERDataset', 'FERDataset', (["data_splits['train']"], {'classes': 'classes', 'transform': 'transform_train', 'transform_weak': 'None'}), "(data_splits['train'], classes=classes, transform=transform_train,\n transform_weak=None)\n", (10696, 10787), False, 'from data.data_utils import fetch_data, FERDataset, DataLoader, transform_train, transform_weak, transform_infer\n'), ((10797, 10871), 'data.data_utils.FERDataset', 'FERDataset', (["data_splits['val']"], {'classes': 'classes', 'transform': 'transform_infer'}), "(data_splits['val'], classes=classes, transform=transform_infer)\n", (10807, 10871), False, 'from data.data_utils import fetch_data, FERDataset, DataLoader, transform_train, transform_weak, transform_infer\n'), ((10885, 10971), 'torch.Tensor', 'torch.Tensor', (['[36.3419, 26.4458, 12.5597, 12.4088, 8.6819, 0.6808, 2.2951, 0.586]'], {}), '([36.3419, 26.4458, 12.5597, 12.4088, 8.6819, 0.6808, 2.2951, \n 0.586])\n', (10897, 10971), False, 'import torch\n'), ((11694, 11844), 'data.data_utils.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'params.batch_size', 'shuffle': 'train_shuffle', 'sampler': 'sampler', 'num_workers': 'params.num_workers', 'pin_memory': 'params.cuda'}), '(trainset, batch_size=params.batch_size, shuffle=train_shuffle,\n sampler=sampler, num_workers=params.num_workers, pin_memory=params.cuda)\n', (11704, 11844), False, 'from data.data_utils import fetch_data, FERDataset, DataLoader, transform_train, transform_weak, transform_infer\n'), ((11916, 12037), 'data.data_utils.DataLoader', 'DataLoader', (['valset'], {'batch_size': 'params.batch_size', 'shuffle': '(False)', 'sampler': 'None', 'num_workers': '(2)', 'pin_memory': 'params.cuda'}), '(valset, batch_size=params.batch_size, shuffle=False, sampler=\n None, num_workers=2, pin_memory=params.cuda)\n', (11926, 12037), False, 'from data.data_utils import fetch_data, FERDataset, DataLoader, transform_train, transform_weak, transform_infer\n'), ((12092, 12115), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (12104, 12115), False, 'import logging\n'), ((12823, 12912), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'params.step_size', 'gamma': 'params.gamma'}), '(optimizer, step_size=params.step_size, gamma=\n params.gamma)\n', (12848, 12912), True, 'import torch.optim as optim\n'), ((4560, 4620), 'os.path.join', 'os.path.join', (['args.model_dir', "(args.restore_file + '.pth.tar')"], {}), "(args.model_dir, args.restore_file + '.pth.tar')\n", (4572, 4620), False, 'import os\n'), ((4716, 4769), 'utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (4737, 4769), False, 'import utils\n'), ((5184, 5241), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'val_dataloader', 'metrics', 'params'], {}), '(model, loss_fn, val_dataloader, metrics, params)\n', (5192, 5241), False, 'from evaluate import evaluate\n'), ((6194, 6250), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_last_weights.json"""'], {}), "(model_dir, 'metrics_val_last_weights.json')\n", (6206, 6250), False, 'import os\n'), ((6272, 6324), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'last_json_path'], {}), '(val_metrics, last_json_path)\n', (6295, 6324), False, 'import utils\n'), ((9592, 9618), 'os.path.dirname', 'os.path.dirname', (['json_path'], {}), '(json_path)\n', (9607, 9618), False, 'import os\n'), ((10220, 10247), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(230)'], {}), '(230)\n', (10242, 10247), False, 'import torch\n'), ((10341, 10382), 'os.path.join', 'os.path.join', (['args.model_dir', '"""train.log"""'], {}), "(args.model_dir, 'train.log')\n", (10353, 10382), False, 'import os\n'), ((13410, 13438), 'torch.log', 'torch.log', (['(prior ** (-1 / 2))'], {}), '(prior ** (-1 / 2))\n', (13419, 13438), False, 'import torch\n'), ((5776, 5819), 'logging.info', 'logging.info', (["('- Found new best ' + metrics)"], {}), "('- Found new best ' + metrics)\n", (5788, 5819), False, 'import logging\n'), ((5957, 6013), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_best_weights.json"""'], {}), "(model_dir, 'metrics_val_best_weights.json')\n", (5969, 6013), False, 'import os\n'), ((6043, 6095), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'best_json_path'], {}), '(val_metrics, best_json_path)\n', (6066, 6095), False, 'import utils\n'), ((9656, 9681), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (9670, 9681), False, 'import os\n'), ((12313, 12378), 'logging.info', 'logging.info', (["('Using not pretrained model ' + params.net + ' ...')"], {}), "('Using not pretrained model ' + params.net + ' ...')\n", (12325, 12378), False, 'import logging\n'), ((12397, 12510), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {'in_channels': 'trainset.in_channels', 'num_classes': 'trainset.num_classes'}), "('efficientnet-b0', in_channels=trainset.in_channels,\n num_classes=trainset.num_classes)\n", (12419, 12510), False, 'from efficientnet_pytorch import EfficientNet\n'), ((13450, 13524), 'train_utils.MarginCalibratedCELoss', 'MarginCalibratedCELoss', ([], {'weight': 'weight', 'margin': 'margin', 'label_smoothing': '(0.05)'}), '(weight=weight, margin=margin, label_smoothing=0.05)\n', (13472, 13524), False, 'from train_utils import MarginCalibratedCELoss, ConfusionMatrix\n'), ((1935, 1956), 'torch.autograd.Variable', 'Variable', (['train_batch'], {}), '(train_batch)\n', (1943, 1956), False, 'from torch.autograd import Variable\n'), ((1975, 1997), 'torch.autograd.Variable', 'Variable', (['labels_batch'], {}), '(labels_batch)\n', (1983, 1997), False, 'from torch.autograd import Variable\n'), ((11004, 11016), 'torch.log', 'torch.log', (['p'], {}), '(p)\n', (11013, 11016), False, 'import torch\n')]
|
import numpy as np
import torch
import torch.nn as nn
import os
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from utils.opt import parse_option
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import mnist
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from trainval import train
from evaluate import validate
from utils.utils import save_checkpoint, get_optimizer
from model.LeNet import LeNet5
opt = parse_option()
if __name__ == '__main__':
# download and create datasets
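    # MNIST digits are 28x28, so resize them to the 32x32 input size LeNet-5 expects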
train_dataset = mnist.MNIST(
root='./train', download=True, train=True, transform=transforms.Compose([
transforms.Resize((32, 32)), transforms.ToTensor()]))
val_dataset = mnist.MNIST(root='./test', download=True, train=False, transform=transforms.Compose([
transforms.Resize((32, 32)), transforms.ToTensor()]))
# define the data loaders
train_loader = DataLoader(train_dataset, opt.batch_size)
val_loader = DataLoader(val_dataset, opt.batch_size)
model = LeNet5()
print(model)
optimizer = get_optimizer(opt, model)
criterion = nn.CrossEntropyLoss()
best_accuracy = 0
iter = 0
for epoch in range(opt.epoch):
# train for one epoch
iter, loss = train(train_loader, model, criterion,
optimizer, epoch, iter=iter)
# evaluate
loss, accuracy = validate(
val_loader, model, criterion, epoch)
        # higher accuracy is better; the original "<"/min comparison could never mark a checkpoint as best
        is_best = accuracy > best_accuracy
        best_accuracy = max(accuracy, best_accuracy)
# If best_eval, best_save_path
# Save latest/best weights in the model directory
save_checkpoint(
{"state_dict": model,
"epoch": epoch + 1,
"accuracy": accuracy,
"optimizer": optimizer.state_dict(),
}, is_best, opt.SAVE_DIR, 'checkpoint.pth')
print('accuracy: {:.2f}%'.format(100 * accuracy))
final_model_state_file = os.path.join(opt.SAVE_DIR, 'final_state.pth')
print('saving final model state to {}'.format(final_model_state_file))
torch.save(model.state_dict(), final_model_state_file)
print('Done!')
|
[
"evaluate.validate"
] |
[((520, 534), 'utils.opt.parse_option', 'parse_option', ([], {}), '()\n', (532, 534), False, 'from utils.opt import parse_option\n'), ((995, 1036), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', 'opt.batch_size'], {}), '(train_dataset, opt.batch_size)\n', (1005, 1036), False, 'from torch.utils.data import DataLoader\n'), ((1054, 1093), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset', 'opt.batch_size'], {}), '(val_dataset, opt.batch_size)\n', (1064, 1093), False, 'from torch.utils.data import DataLoader\n'), ((1107, 1115), 'model.LeNet.LeNet5', 'LeNet5', ([], {}), '()\n', (1113, 1115), False, 'from model.LeNet import LeNet5\n'), ((1150, 1175), 'utils.utils.get_optimizer', 'get_optimizer', (['opt', 'model'], {}), '(opt, model)\n', (1163, 1175), False, 'from utils.utils import save_checkpoint, get_optimizer\n'), ((1192, 1213), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1211, 1213), True, 'import torch.nn as nn\n'), ((2074, 2119), 'os.path.join', 'os.path.join', (['opt.SAVE_DIR', '"""final_state.pth"""'], {}), "(opt.SAVE_DIR, 'final_state.pth')\n", (2086, 2119), False, 'import os\n'), ((1341, 1407), 'trainval.train', 'train', (['train_loader', 'model', 'criterion', 'optimizer', 'epoch'], {'iter': 'iter'}), '(train_loader, model, criterion, optimizer, epoch, iter=iter)\n', (1346, 1407), False, 'from trainval import train\n'), ((1473, 1518), 'evaluate.validate', 'validate', (['val_loader', 'model', 'criterion', 'epoch'], {}), '(val_loader, model, criterion, epoch)\n', (1481, 1518), False, 'from evaluate import validate\n'), ((725, 752), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (742, 752), True, 'import torchvision.transforms as transforms\n'), ((754, 775), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (773, 775), True, 'import torchvision.transforms as transforms\n'), ((891, 918), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (908, 918), True, 'import torchvision.transforms as transforms\n'), ((920, 941), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (939, 941), True, 'import torchvision.transforms as transforms\n')]
|
import argparse
from tools import ParsingCfg
import numpy as np
from tools import InitDataDir
# ###
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# ###
if __name__ == '__main__':
parser=argparse.ArgumentParser()
parser.add_argument("cfg_path",help="config file path",type=str)
parser.add_argument("-t","--train",help="training mode",action="store_true")
parser.add_argument("-ce","--cocoevaluation",help="coco evaluation mode",action="store_true")
parser.add_argument("-e","--evaluation",help="evaluation mode",action="store_true")
parser.add_argument("-fe","--fpsevaluation",help="fps evaluation mode",action="store_true")
parser.add_argument("-p","--predict",help="prediction mode",action="store_true")
parser.add_argument("-d","--demo",help="demo mode",action="store_true")
parser.add_argument("-td","--tflitedemo",help="tflite demo mode",action="store_true")
parser.add_argument("-trcd","--trackingdemo",help="tracking demo mode",action="store_true")
parser.add_argument("-cvt","--convert",help="tf to tflite",action="store_true")
parser.add_argument("-s","--server",help="sever mode",action="store_true")
args=parser.parse_args()
mode="train"
cfg_path=args.cfg_path
if(args.train==True):mode="train"
elif(args.cocoevaluation==True):mode="cocoevaluation"
elif(args.evaluation==True):mode="evaluation"
elif(args.fpsevaluation==True):mode="fpsevaluation"
elif(args.predict==True):mode="predict"
elif(args.demo==True):mode="demo"
elif(args.tflitedemo==True):mode="tflitedemo"
elif(args.trackingdemo==True):mode="trackingdemo"
elif(args.convert==True):mode="convert"
elif(args.server==True):mode="server"
cfg_dict=ParsingCfg(cfg_path)
input_shape=list(map(lambda x:int(x),cfg_dict["input_shape"]))
out_hw_list=list(map(lambda x:[int(x[0]),int(x[1])],cfg_dict["out_hw_list"]))
heads_len=len(out_hw_list)
backbone=cfg_dict["backbone"]
fpn_filters=cfg_dict["fpn_filters"]
fpn_repeat=cfg_dict["fpn_repeat"]
l1_anchors=np.array(cfg_dict["l1_anchors"])
l2_anchors=np.array(cfg_dict["l2_anchors"])
l3_anchors=np.array(cfg_dict["l3_anchors"])
l4_anchors=np.array(cfg_dict["l4_anchors"])
l5_anchors=np.array(cfg_dict["l5_anchors"])
anchors_list=[l1_anchors*np.array(out_hw_list[0]),
l2_anchors*np.array(out_hw_list[1]),
l3_anchors*np.array(out_hw_list[2]),
l4_anchors*np.array(out_hw_list[3]),
l5_anchors*np.array(out_hw_list[4])]
    anchors_len=len(l1_anchors)
labels=cfg_dict["labels"]
labels_len=len(labels)
if(mode=="train"):
from create_model import CSLYOLO,CompileCSLYOLO
from data_generator import DataGenerator,MultiDataGenerator
from model_operation import Training
from evaluate import CallbackEvalFunction
from callbacks import Stabilizer,WeightsSaver,BestWeightsSaver
init_weight=cfg_dict.get("init_weight_path",None)
weight_save_path=cfg_dict["weight_save_path"]
best_weight_save_path=cfg_dict["best_weight_save_path"]
freeze=cfg_dict.get("freeze",False)
#Must Contains Jsons
train_dir=cfg_dict["train_dir"]
valid_dir=cfg_dict["valid_dir"]
pred_dir=cfg_dict["pred_dir"]
batch_size=int(cfg_dict["batch_size"])
step_per_epoch=int(cfg_dict["step_per_epoch"])
epochs_schedule=list(map(lambda x:int(x),cfg_dict["epochs_schedule"]))
lr_schedule=cfg_dict["lr_schedule"]
callbacks_schedule=cfg_dict["callbacks_schedule"]
gen=MultiDataGenerator(train_dir,train_dir+"/json",input_shape[:2],out_hw_list,anchors_list,labels,batch_size=batch_size,print_bool=False)
gen.Start()
stabilizer=Stabilizer()
weight_saver=WeightsSaver(weight_save_path)
best_weight_saver=BestWeightsSaver(best_weight_save_path,CallbackEvalFunction,eval_parms=[labels,valid_dir,pred_dir])
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone,freeze)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=init_weight,lr=0.1)
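        # phased training: each phase recompiles the model with its scheduled learning rate and callback set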
for i,epochs in enumerate(epochs_schedule):
callbacks=[]
lr=lr_schedule[i]
for callback_name in callbacks_schedule[i]:
if(callback_name=="stabilizer"):callbacks.append(stabilizer)
if(callback_name=="weight_saver"):callbacks.append(weight_saver)
if(callback_name=="best_weight_saver"):callbacks.append(best_weight_saver)
model=CompileCSLYOLO(model,heads_len,whts_path=None,lr=lr,compile_type="train")
Training(model,gen.Generator(),batch_size=batch_size,epochs=epochs,step_per_epoch=step_per_epoch,callbacks=callbacks)
gen.Stop()
elif(mode=="predict"):
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from model_operation import PredictingImgs
imgs_dir=cfg_dict["imgs_dir"]
pred_dir=cfg_dict["pred_dir"]
InitDataDir(pred_dir)
weight_path=cfg_dict["weight_path"]
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
nms_type=cfg_dict["nms_type"]
drawing=cfg_dict["drawing"]
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
model=CSLYOLOHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type=nms_type)
PredictingImgs(model,labels,imgs_dir,pred_dir,drawing=drawing,printing=True)
elif(mode=="cocoevaluation"):
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from model_operation import PredictingImgs
from evaluate.cocoeval import COCOEval
imgs_dir=cfg_dict["imgs_dir"]
pred_dir=cfg_dict["pred_dir"]
annotation_path=cfg_dict["annotation_path"]
label2id_path=cfg_dict["label2id_path"]
weight_path=cfg_dict["weight_path"]
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
nms_type=cfg_dict["nms_type"]
overwrite=cfg_dict["overwrite"]
if(overwrite==True):
InitDataDir(pred_dir)
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
model=CSLYOLOHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type=nms_type)
PredictingImgs(model,labels,imgs_dir,pred_dir,drawing=False,printing=True)
COCOEval(annotation_path,pred_dir+"/json",label2id_path)
elif(mode=="evaluation"):
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from model_operation import PredictingImgs
from evaluate import Evaluation
imgs_dir=cfg_dict["imgs_dir"]
pred_dir=cfg_dict["pred_dir"]
test_dir=cfg_dict["test_dir"]
weight_path=cfg_dict["weight_path"]
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
nms_type=cfg_dict["nms_type"]
overwrite=cfg_dict["overwrite"]
if(overwrite==True):
InitDataDir(pred_dir)
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
model=CSLYOLOHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type=nms_type)
PredictingImgs(model,labels,imgs_dir,pred_dir,drawing=False,printing=True)
mean_ap=Evaluation(labels,test_dir,pred_dir)
print("mAP: "+str(mean_ap))
elif(mode=="fpsevaluation"):
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from evaluate import FramePerSecond
weight_path=cfg_dict["weight_path"]
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
nms_type=cfg_dict["nms_type"]
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
model=CSLYOLOHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type=nms_type)
fps=FramePerSecond(model,input_shape)
print("FPS: "+str(fps))
elif(mode=="demo"):
import cv2
from tools import InitLabels2bgrDict,Drawing
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from model_operation import Predicting
from camera_stream import CameraStream
import datetime
import os
weight_path=cfg_dict["weight_path"]
videos_idx=int(cfg_dict["videos_idx"])
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
InitLabels2bgrDict(labels)
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
model=CSLYOLOHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type="nms")
camera_stream=CameraStream(videos_idx,show=True,save_dir="dataset/tracking")
camera_stream.Start()
while(camera_stream.StopChecking()==False):
frame=camera_stream.GetFrame()
pred_bboxes=Predicting(model,labels,frame)
camera_stream.UpdateBboxes(pred_bboxes)
camera_stream.Stop()
elif(mode=="tflitedemo"):
import cv2
from tools import InitLabels2bgrDict,Drawing
from model_operation import Predicting
from tflite.tflite_cslyolo import TFLiteCSLYOLOBody,TFLiteCSLYOLOHead,TFLiteCSLYOLOPredicting
import datetime
tflite_path=cfg_dict["tflite_path"]
videos_idx=int(cfg_dict["videos_idx"])
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
InitLabels2bgrDict(labels)
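        # the detector body runs in the TFLite interpreter; the separate head model decodes boxes and applies the score/IoU thresholds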
interpreter=TFLiteCSLYOLOBody(tflite_path)
        tfl_head_model=TFLiteCSLYOLOHead(input_shape[:2],out_hw_list,anchors_len,labels_len,
max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres)
cap=cv2.VideoCapture(videos_idx)
if not cap.isOpened():
print("Cannot open camera.")
exit()
start_time=None
frame_count=0
while(True):
            if(start_time is None):start_time=datetime.datetime.now()
frame_count+=1
ret,frame=cap.read()
pred_bboxes=TFLiteCSLYOLOPredicting(interpreter,tfl_head_model,labels,frame)
frame=Drawing(frame,pred_bboxes)
cv2.imshow('TFLDEMO',frame)
if cv2.waitKey(1) == ord('q'):
end_time=datetime.datetime.now()
print("FPS: "+str(frame_count/(end_time-start_time).seconds))
break
cap.release()
cv2.destroyAllWindows()
elif(mode=="trackingdemo"):
import cv2
from tools import InitLabels2bgrDict,Drawing
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from model_operation import Predicting
p2_l1_anchors=np.array(cfg_dict["p2_l1_anchors"])
p2_l2_anchors=np.array(cfg_dict["p2_l2_anchors"])
p2_l3_anchors=np.array(cfg_dict["p2_l3_anchors"])
p2_l4_anchors=np.array(cfg_dict["p2_l4_anchors"])
p2_l5_anchors=np.array(cfg_dict["p2_l5_anchors"])
p2_anchors_list=[p2_l1_anchors*np.array(out_hw_list[0]),
p2_l2_anchors*np.array(out_hw_list[1]),
p2_l3_anchors*np.array(out_hw_list[2]),
p2_l4_anchors*np.array(out_hw_list[3]),
p2_l5_anchors*np.array(out_hw_list[4])]
p2_labels=cfg_dict["p2_labels"]
weight_path=cfg_dict["weight_path"]
p2_weight_path=cfg_dict["p2_weight_path"]
videos_idx=int(cfg_dict["videos_idx"])
max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
score_thres=cfg_dict["score_thres"]
iou_thres=cfg_dict["iou_thres"]
InitLabels2bgrDict(labels+p2_labels)
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
model=CSLYOLOHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type="nms")
p2_model=CSLYOLO(input_shape,p2_anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
p2_model.summary()
p2_model=CompileCSLYOLO(p2_model,heads_len,whts_path=p2_weight_path,compile_type="predict")
p2_model=CSLYOLOHead(p2_model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres,nms_type="nms")
cap=cv2.VideoCapture(videos_idx)
if not cap.isOpened():
print("Cannot open camera.")
exit()
while(True):
ret,frame=cap.read()
pred_bboxes=Predicting(model,labels,frame)
p2_pred_bboxes=Predicting(p2_model,p2_labels,frame)
frame=Drawing(frame,pred_bboxes)
frame=Drawing(frame,p2_pred_bboxes)
cv2.imshow('DEMO',frame)
if(cv2.waitKey(1)==ord('q')):
break
cap.release()
cv2.destroyAllWindows()
elif(mode=="convert"):
from create_model import CSLYOLO,CompileCSLYOLO,CSLYOLOHead
from tflite.converter import TFLiteConverter
weight_path=cfg_dict["weight_path"]
tflite_path=cfg_dict["tflite_path"]
model=CSLYOLO(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
model.summary()
model=CompileCSLYOLO(model,heads_len,whts_path=weight_path,compile_type="predict")
TFLiteConverter(model,tflite_path)
# elif(mode=="server"):
# from create_model import CSLNet,CompileCSLNet,CSLNetHead
# from model_operation import PredictingImgs
# from server import CSLYServer
# import time
# weight_path=cfg_dict["weight_path"]
# max_boxes_per_cls=int(cfg_dict["max_boxes_per_cls"])
# score_thres=cfg_dict["score_thres"]
# iou_thres=cfg_dict["iou_thres"]
# model=CSLNet(input_shape,anchors_list,labels_len,fpn_filters,fpn_repeat,backbone)
# model.summary()
# model=CompileCSLNet(model,heads_len,whts_path=weight_path)
# model=CSLNetHead(model,heads_len,labels_len,max_boxes_per_cls=max_boxes_per_cls,score_thres=score_thres,iou_thres=iou_thres)
# csly_server=CSLYServer(model,labels,host="172.16.58.3")
# csly_server.Start()
# print("Initializing Server....Done.")
# time.sleep(999999)
# csly_server.Stop()
|
[
"evaluate.cocoeval.COCOEval",
"evaluate.Evaluation",
"evaluate.FramePerSecond"
] |
[((214, 239), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (237, 239), False, 'import argparse\n'), ((1771, 1791), 'tools.ParsingCfg', 'ParsingCfg', (['cfg_path'], {}), '(cfg_path)\n', (1781, 1791), False, 'from tools import ParsingCfg\n'), ((2112, 2144), 'numpy.array', 'np.array', (["cfg_dict['l1_anchors']"], {}), "(cfg_dict['l1_anchors'])\n", (2120, 2144), True, 'import numpy as np\n'), ((2161, 2193), 'numpy.array', 'np.array', (["cfg_dict['l2_anchors']"], {}), "(cfg_dict['l2_anchors'])\n", (2169, 2193), True, 'import numpy as np\n'), ((2210, 2242), 'numpy.array', 'np.array', (["cfg_dict['l3_anchors']"], {}), "(cfg_dict['l3_anchors'])\n", (2218, 2242), True, 'import numpy as np\n'), ((2259, 2291), 'numpy.array', 'np.array', (["cfg_dict['l4_anchors']"], {}), "(cfg_dict['l4_anchors'])\n", (2267, 2291), True, 'import numpy as np\n'), ((2308, 2340), 'numpy.array', 'np.array', (["cfg_dict['l5_anchors']"], {}), "(cfg_dict['l5_anchors'])\n", (2316, 2340), True, 'import numpy as np\n'), ((3723, 3870), 'data_generator.MultiDataGenerator', 'MultiDataGenerator', (['train_dir', "(train_dir + '/json')", 'input_shape[:2]', 'out_hw_list', 'anchors_list', 'labels'], {'batch_size': 'batch_size', 'print_bool': '(False)'}), "(train_dir, train_dir + '/json', input_shape[:2],\n out_hw_list, anchors_list, labels, batch_size=batch_size, print_bool=False)\n", (3741, 3870), False, 'from data_generator import DataGenerator, MultiDataGenerator\n'), ((3901, 3913), 'callbacks.Stabilizer', 'Stabilizer', ([], {}), '()\n', (3911, 3913), False, 'from callbacks import Stabilizer, WeightsSaver, BestWeightsSaver\n'), ((3936, 3966), 'callbacks.WeightsSaver', 'WeightsSaver', (['weight_save_path'], {}), '(weight_save_path)\n', (3948, 3966), False, 'from callbacks import Stabilizer, WeightsSaver, BestWeightsSaver\n'), ((3994, 4102), 'callbacks.BestWeightsSaver', 'BestWeightsSaver', (['best_weight_save_path', 'CallbackEvalFunction'], {'eval_parms': '[labels, valid_dir, pred_dir]'}), '(best_weight_save_path, CallbackEvalFunction, eval_parms=[\n labels, valid_dir, pred_dir])\n', (4010, 4102), False, 'from callbacks import Stabilizer, WeightsSaver, BestWeightsSaver\n'), ((4111, 4204), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone', 'freeze'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone, freeze)\n', (4118, 4204), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((4235, 4298), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'init_weight', 'lr': '(0.1)'}), '(model, heads_len, whts_path=init_weight, lr=0.1)\n', (4249, 4298), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((2373, 2397), 'numpy.array', 'np.array', (['out_hw_list[0]'], {}), '(out_hw_list[0])\n', (2381, 2397), True, 'import numpy as np\n'), ((2429, 2453), 'numpy.array', 'np.array', (['out_hw_list[1]'], {}), '(out_hw_list[1])\n', (2437, 2453), True, 'import numpy as np\n'), ((2485, 2509), 'numpy.array', 'np.array', (['out_hw_list[2]'], {}), '(out_hw_list[2])\n', (2493, 2509), True, 'import numpy as np\n'), ((2541, 2565), 'numpy.array', 'np.array', (['out_hw_list[3]'], {}), '(out_hw_list[3])\n', (2549, 2565), True, 'import numpy as np\n'), ((2597, 2621), 'numpy.array', 'np.array', (['out_hw_list[4]'], {}), '(out_hw_list[4])\n', (2605, 2621), True, 'import numpy as np\n'), ((4734, 4811), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'None', 'lr': 'lr', 'compile_type': '"""train"""'}), "(model, heads_len, whts_path=None, lr=lr, compile_type='train')\n", (4748, 4811), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((5205, 5226), 'tools.InitDataDir', 'InitDataDir', (['pred_dir'], {}), '(pred_dir)\n', (5216, 5226), False, 'from tools import InitDataDir\n'), ((5511, 5596), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (5518, 5596), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((5628, 5707), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (5642, 5707), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((5720, 5872), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': 'nms_type'}), '(model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type=nms_type)\n', (5731, 5872), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((5867, 5953), 'model_operation.PredictingImgs', 'PredictingImgs', (['model', 'labels', 'imgs_dir', 'pred_dir'], {'drawing': 'drawing', 'printing': '(True)'}), '(model, labels, imgs_dir, pred_dir, drawing=drawing, printing\n =True)\n', (5881, 5953), False, 'from model_operation import PredictingImgs\n'), ((7141, 7201), 'evaluate.cocoeval.COCOEval', 'COCOEval', (['annotation_path', "(pred_dir + '/json')", 'label2id_path'], {}), "(annotation_path, pred_dir + '/json', label2id_path)\n", (7149, 7201), False, 'from evaluate.cocoeval import COCOEval\n'), ((6644, 6665), 'tools.InitDataDir', 'InitDataDir', (['pred_dir'], {}), '(pred_dir)\n', (6655, 6665), False, 'from tools import InitDataDir\n'), ((6685, 6770), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (6692, 6770), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((6810, 6889), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (6824, 6889), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((6906, 7058), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': 'nms_type'}), '(model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type=nms_type)\n', (6917, 7058), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((7057, 7136), 'model_operation.PredictingImgs', 'PredictingImgs', (['model', 'labels', 'imgs_dir', 'pred_dir'], {'drawing': '(False)', 'printing': '(True)'}), '(model, labels, imgs_dir, pred_dir, drawing=False, printing=True)\n', (7071, 7136), False, 'from model_operation import PredictingImgs\n'), ((8329, 8367), 'evaluate.Evaluation', 'Evaluation', (['labels', 'test_dir', 'pred_dir'], {}), '(labels, test_dir, pred_dir)\n', (8339, 8367), False, 'from evaluate import Evaluation\n'), ((7824, 7845), 'tools.InitDataDir', 'InitDataDir', (['pred_dir'], {}), '(pred_dir)\n', (7835, 7845), False, 'from tools import InitDataDir\n'), ((7865, 7950), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (7872, 7950), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((7990, 8069), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (8004, 8069), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((8086, 8238), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': 'nms_type'}), '(model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type=nms_type)\n', (8097, 8238), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((8237, 8316), 'model_operation.PredictingImgs', 'PredictingImgs', (['model', 'labels', 'imgs_dir', 'pred_dir'], {'drawing': '(False)', 'printing': '(True)'}), '(model, labels, imgs_dir, pred_dir, drawing=False, printing=True)\n', (8251, 8316), False, 'from model_operation import PredictingImgs\n'), ((8798, 8883), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (8805, 8883), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((8890, 8969), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (8904, 8969), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((8982, 9134), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': 'nms_type'}), '(model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type=nms_type)\n', (8993, 9134), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((9133, 9167), 'evaluate.FramePerSecond', 'FramePerSecond', (['model', 'input_shape'], {}), '(model, input_shape)\n', (9147, 9167), False, 'from evaluate import FramePerSecond\n'), ((9770, 9796), 'tools.InitLabels2bgrDict', 'InitLabels2bgrDict', (['labels'], {}), '(labels)\n', (9788, 9796), False, 'from tools import InitLabels2bgrDict, Drawing\n'), ((9814, 9899), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (9821, 9899), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((9931, 10010), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (9945, 10010), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((10023, 10172), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': '"""nms"""'}), "(model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type='nms')\n", (10034, 10172), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((10183, 10247), 'camera_stream.CameraStream', 'CameraStream', (['videos_idx'], {'show': '(True)', 'save_dir': '"""dataset/tracking"""'}), "(videos_idx, show=True, save_dir='dataset/tracking')\n", (10195, 10247), False, 'from camera_stream import CameraStream\n'), ((10399, 10431), 'model_operation.Predicting', 'Predicting', (['model', 'labels', 'frame'], {}), '(model, labels, frame)\n', (10409, 10431), False, 'from model_operation import Predicting\n'), ((11074, 11100), 'tools.InitLabels2bgrDict', 'InitLabels2bgrDict', (['labels'], {}), '(labels)\n', (11092, 11100), False, 'from tools import InitLabels2bgrDict, Drawing\n'), ((11122, 11152), 'tflite.tflite_cslyolo.TFLiteCSLYOLOBody', 'TFLiteCSLYOLOBody', (['tflite_path'], {}), '(tflite_path)\n', (11139, 11152), False, 'from tflite.tflite_cslyolo import TFLiteCSLYOLOBody, TFLiteCSLYOLOHead, TFLiteCSLYOLOPredicting\n'), ((11177, 11342), 'tflite.tflite_cslyolo.TFLiteCSLYOLOHead', 'TFLiteCSLYOLOHead', (['input_shape[:2]', 'out_hw_list', 'anchoors_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres'}), '(input_shape[:2], out_hw_list, anchoors_len, labels_len,\n max_boxes_per_cls=max_boxes_per_cls, score_thres=score_thres, iou_thres\n =iou_thres)\n', (11194, 11342), False, 'from tflite.tflite_cslyolo import TFLiteCSLYOLOBody, TFLiteCSLYOLOHead, TFLiteCSLYOLOPredicting\n'), ((11386, 11414), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videos_idx'], {}), '(videos_idx)\n', (11402, 11414), False, 'import cv2\n'), ((12117, 12140), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12138, 12140), False, 'import cv2\n'), ((11737, 11804), 'tflite.tflite_cslyolo.TFLiteCSLYOLOPredicting', 'TFLiteCSLYOLOPredicting', (['interpreter', 'tfl_head_model', 'labels', 'frame'], {}), '(interpreter, tfl_head_model, labels, frame)\n', (11760, 11804), False, 'from tflite.tflite_cslyolo import TFLiteCSLYOLOBody, TFLiteCSLYOLOHead, TFLiteCSLYOLOPredicting\n'), ((11821, 11848), 'tools.Drawing', 'Drawing', (['frame', 'pred_bboxes'], {}), '(frame, pred_bboxes)\n', (11828, 11848), False, 'from tools import InitLabels2bgrDict, Drawing\n'), ((11861, 11889), 'cv2.imshow', 'cv2.imshow', (['"""TFLDEMO"""', 'frame'], {}), "('TFLDEMO', frame)\n", (11871, 11889), False, 'import cv2\n'), ((12398, 12433), 'numpy.array', 'np.array', (["cfg_dict['p2_l1_anchors']"], {}), "(cfg_dict['p2_l1_anchors'])\n", (12406, 12433), True, 'import numpy as np\n'), ((12457, 12492), 'numpy.array', 'np.array', (["cfg_dict['p2_l2_anchors']"], {}), "(cfg_dict['p2_l2_anchors'])\n", (12465, 12492), True, 'import numpy as np\n'), ((12516, 12551), 'numpy.array', 'np.array', (["cfg_dict['p2_l3_anchors']"], {}), "(cfg_dict['p2_l3_anchors'])\n", (12524, 12551), True, 'import numpy as np\n'), ((12575, 12610), 'numpy.array', 'np.array', (["cfg_dict['p2_l4_anchors']"], {}), "(cfg_dict['p2_l4_anchors'])\n", (12583, 12610), True, 'import numpy as np\n'), ((12634, 12669), 'numpy.array', 'np.array', (["cfg_dict['p2_l5_anchors']"], {}), "(cfg_dict['p2_l5_anchors'])\n", (12642, 12669), True, 'import numpy as np\n'), ((13348, 13386), 'tools.InitLabels2bgrDict', 'InitLabels2bgrDict', (['(labels + p2_labels)'], {}), '(labels + p2_labels)\n', (13366, 13386), False, 'from tools import InitLabels2bgrDict, Drawing\n'), ((13402, 13487), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (13409, 13487), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((13519, 13598), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (13533, 13598), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((13611, 13760), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': '"""nms"""'}), "(model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type='nms')\n", (13622, 13760), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((13766, 13854), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'p2_anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, p2_anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (13773, 13854), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((13892, 13982), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['p2_model', 'heads_len'], {'whts_path': 'p2_weight_path', 'compile_type': '"""predict"""'}), "(p2_model, heads_len, whts_path=p2_weight_path, compile_type=\n 'predict')\n", (13906, 13982), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((13993, 14145), 'create_model.CSLYOLOHead', 'CSLYOLOHead', (['p2_model', 'heads_len', 'labels_len'], {'max_boxes_per_cls': 'max_boxes_per_cls', 'score_thres': 'score_thres', 'iou_thres': 'iou_thres', 'nms_type': '"""nms"""'}), "(p2_model, heads_len, labels_len, max_boxes_per_cls=\n max_boxes_per_cls, score_thres=score_thres, iou_thres=iou_thres,\n nms_type='nms')\n", (14004, 14145), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((14146, 14174), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videos_idx'], {}), '(videos_idx)\n', (14162, 14174), False, 'import cv2\n'), ((14677, 14700), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (14698, 14700), False, 'import cv2\n'), ((11626, 11649), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11647, 11649), False, 'import datetime\n'), ((11905, 11919), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11916, 11919), False, 'import cv2\n'), ((11959, 11982), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11980, 11982), False, 'import datetime\n'), ((14350, 14382), 'model_operation.Predicting', 'Predicting', (['model', 'labels', 'frame'], {}), '(model, labels, frame)\n', (14360, 14382), False, 'from model_operation import Predicting\n'), ((14409, 14447), 'model_operation.Predicting', 'Predicting', (['p2_model', 'p2_labels', 'frame'], {}), '(p2_model, p2_labels, frame)\n', (14419, 14447), False, 'from model_operation import Predicting\n'), ((14465, 14492), 'tools.Drawing', 'Drawing', (['frame', 'pred_bboxes'], {}), '(frame, pred_bboxes)\n', (14472, 14492), False, 'from tools import InitLabels2bgrDict, Drawing\n'), ((14511, 14541), 'tools.Drawing', 'Drawing', (['frame', 'p2_pred_bboxes'], {}), '(frame, p2_pred_bboxes)\n', (14518, 14541), False, 'from tools import InitLabels2bgrDict, Drawing\n'), ((14554, 14579), 'cv2.imshow', 'cv2.imshow', (['"""DEMO"""', 'frame'], {}), "('DEMO', frame)\n", (14564, 14579), False, 'import cv2\n'), ((14961, 15046), 'create_model.CSLYOLO', 'CSLYOLO', (['input_shape', 'anchors_list', 'labels_len', 'fpn_filters', 'fpn_repeat', 'backbone'], {}), '(input_shape, anchors_list, labels_len, fpn_filters, fpn_repeat,\n backbone)\n', (14968, 15046), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((15078, 15157), 'create_model.CompileCSLYOLO', 'CompileCSLYOLO', (['model', 'heads_len'], {'whts_path': 'weight_path', 'compile_type': '"""predict"""'}), "(model, heads_len, whts_path=weight_path, compile_type='predict')\n", (15092, 15157), False, 'from create_model import CSLYOLO, CompileCSLYOLO, CSLYOLOHead\n'), ((15164, 15199), 'tflite.converter.TFLiteConverter', 'TFLiteConverter', (['model', 'tflite_path'], {}), '(model, tflite_path)\n', (15179, 15199), False, 'from tflite.converter import TFLiteConverter\n'), ((12712, 12736), 'numpy.array', 'np.array', (['out_hw_list[0]'], {}), '(out_hw_list[0])\n', (12720, 12736), True, 'import numpy as np\n'), ((12778, 12802), 'numpy.array', 'np.array', (['out_hw_list[1]'], {}), '(out_hw_list[1])\n', (12786, 12802), True, 'import numpy as np\n'), ((12844, 12868), 'numpy.array', 'np.array', (['out_hw_list[2]'], {}), '(out_hw_list[2])\n', (12852, 12868), True, 'import numpy as np\n'), ((12910, 12934), 'numpy.array', 'np.array', (['out_hw_list[3]'], {}), '(out_hw_list[3])\n', (12918, 12934), True, 'import numpy as np\n'), ((12976, 13000), 'numpy.array', 'np.array', (['out_hw_list[4]'], {}), '(out_hw_list[4])\n', (12984, 13000), True, 'import numpy as np\n'), ((14595, 14609), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14606, 14609), False, 'import cv2\n')]
|
from training import train
from testing import test
from evaluate import evaluate
def random_forest(path_train,
path_test,
name_identifiers,
name_targets,
features,
delimiter,
num_cores=1):
'''
    This method performs the training, testing and evaluation of the
    random forest algorithm.
@type path_train: str
@param path_train: full path to csv (first line contains headers).
delimiter should be specified with param delimiter
@type path_test: str
@param path_test: full path to csv (first line contains headers).
delimiter should be specified with param delimiter
@type name_identifiers: str
@param name_identifiers: name of column containing identifiers
@type name_targets: str
@param name_targets: name of column containing targets
@type features: str
@param features: list of features to be used
@type delimiter: str
@param delimiter: delimiter used in csv. tested with "," and "\t"
@type num_cores: int
@param num_cores: [optional]: num of cores you want to use in training
@rtype: tuple
    @return: (classifier_output, evaluation). both are dicts. classifier_output
    maps each identifier to the classifier's output. evaluation maps each
    evaluation metric name to its float value.
'''
    # train the random forest on the training csv
identifiers_training,target_training,rf = train(path_train,
name_identifiers,
name_targets,
features,
output_path_model=None,
cores=num_cores,
the_delimiter=delimiter)
    # apply the trained random forest to the test csv
identifiers_test,target,prediction = test(path_test,
name_identifiers,
name_targets,
features,
loaded_rf_model=rf,
path_rf_model=None,
the_delimiter=delimiter)
    # evaluate the predictions against the gold targets
classifier_output,evaluation = evaluate(target, prediction, identifiers_test)
    return classifier_output, evaluation
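
# Illustrative usage sketch (added for clarity -- the CSV paths, column
# names and feature list below are hypothetical, not taken from the source):
if __name__ == '__main__':
    classifier_output, evaluation = random_forest(path_train='train.csv',
                                                  path_test='test.csv',
                                                  name_identifiers='id',
                                                  name_targets='label',
                                                  features=['feat_a', 'feat_b'],
                                                  delimiter=',',
                                                  num_cores=2)
    print(evaluation)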
|
[
"evaluate.evaluate"
] |
[((1489, 1618), 'training.train', 'train', (['path_train', 'name_identifiers', 'name_targets', 'features'], {'output_path_model': 'None', 'cores': 'num_cores', 'the_delimiter': 'delimiter'}), '(path_train, name_identifiers, name_targets, features,\n output_path_model=None, cores=num_cores, the_delimiter=delimiter)\n', (1494, 1618), False, 'from training import train\n'), ((1991, 2118), 'testing.test', 'test', (['path_test', 'name_identifiers', 'name_targets', 'features'], {'loaded_rf_model': 'rf', 'path_rf_model': 'None', 'the_delimiter': 'delimiter'}), '(path_test, name_identifiers, name_targets, features, loaded_rf_model=\n rf, path_rf_model=None, the_delimiter=delimiter)\n', (1995, 2118), False, 'from testing import test\n'), ((2462, 2508), 'evaluate.evaluate', 'evaluate', (['target', 'prediction', 'identifiers_test'], {}), '(target, prediction, identifiers_test)\n', (2470, 2508), False, 'from evaluate import evaluate\n')]
|
import numpy as np
from scipy import sparse
import torch
import time
from tqdm import tqdm
from evaluate import eval_func, euclidean_dist
def calculate_V(initial_rank, all_feature_len, dis_i_qg, i, k1):
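    # Sparse k-reciprocal neighbourhood encoding (this structure matches the
    # standard k-reciprocal re-ranking scheme of Zhong et al., CVPR 2017 --
    # an attribution inferred from the code, not stated in the source):
    # mutual k1-NN candidates are expanded with each candidate's half-size
    # reciprocal set, then weighted by a Gaussian of the distance.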
# dis_i_qg = euclidean_dist(torch.tensor([all_feature[i].numpy()]), all_feature).numpy()
forward_k_neigh_index = initial_rank[i, :k1 + 1]
# print(forward_k_neigh_index)
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2.)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2.)) + 1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
# print(k_reciprocal_expansion_index)
weight = np.exp(-dis_i_qg[k_reciprocal_expansion_index])
# print(weight)
    V = np.zeros(all_feature_len).astype(np.float32)
V[k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)
return V, k_reciprocal_expansion_index, weight
def re_ranking_batch(all_feature, q_num, k1, k2, lambda_value, len_slice=1000):
    # compute pairwise distances over the (q+g) x (q+g) set for the initial ranking
initial_rank = np.zeros((len(all_feature), k1+1)).astype(np.int32)
original_dist = np.zeros((q_num, len(all_feature)))
s_time = time.time()
n_iter = len(all_feature) // len_slice + int(len(all_feature) % len_slice > 0)
with tqdm(total=n_iter) as pbar:
for i in range(n_iter):
dis_i_qg = euclidean_dist(all_feature[i*len_slice:(i+1)*len_slice], all_feature).data.cpu().numpy()
initial_i_rank = np.argpartition(dis_i_qg, range(1, k1 + 1), ).astype(np.int32)[:, :k1 + 1]
initial_rank[i*len_slice:(i+1)*len_slice] = initial_i_rank
pbar.update(1)
# print(initial_rank[0])
end_time = time.time()
print("rank time : %s" % (end_time-s_time))
all_V = []
s_time = time.time()
n_iter = len(all_feature) // len_slice + int(len(all_feature) % len_slice > 0)
with tqdm(total=n_iter) as pbar:
for i in range(n_iter):
dis_i_qg = euclidean_dist(all_feature[i * len_slice:(i + 1) * len_slice], all_feature).data.cpu().numpy()
for ks in range(dis_i_qg.shape[0]):
r_k = i*len_slice+ks
dis_i_qg[ks] = np.power(dis_i_qg[ks], 2).astype(np.float32)
dis_i_qg[ks] = 1. * dis_i_qg[ks] / np.max(dis_i_qg[ks])
if r_k < q_num:
original_dist[r_k] = dis_i_qg[ks]
V ,k_reciprocal_expansion_index, weight = calculate_V(initial_rank, len(all_feature), dis_i_qg[ks], r_k, k1)
# if r_k == 0:
# print(k_reciprocal_expansion_index)
# print(weight)
# print(dis_i_qg[ks])
all_V.append(sparse.csr_matrix(V))
pbar.update(1)
all_V = sparse.vstack(all_V)
# print(all_V.getrow(0).toarray())
end_time = time.time()
print("calculate V time : %s" % (end_time - s_time))
# print(all_V.todense()[0])
all_V_qe = []
s_time = time.time()
for i in range(len(all_feature)):
temp_V = np.zeros((k2, len(all_feature)))
for l, row_index in enumerate(initial_rank[i, :k2]):
temp_V[l, :] = all_V.getrow(row_index).toarray()[0]
V_qe = np.mean(temp_V, axis=0)
all_V_qe.append(sparse.csr_matrix(V_qe))
all_V_qe = sparse.vstack(all_V_qe)
# print(all_V_qe.todense()[0])
del all_V
end_time = time.time()
print("calculate V_qe time : %s" % (end_time - s_time))
invIndex = []
for i in range(len(all_feature)):
invIndex.append(np.where(all_V_qe.getcol(i).toarray().transpose()[0] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
for i in range(q_num):
temp_min = np.zeros(shape=[1, len(all_feature)], dtype=np.float32)
indNonZero = np.where(all_V_qe.getrow(i).toarray()[0] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
# print(indImages)
for j in range(len(indNonZero)):
# print(indNonZero[j])
c = all_V_qe.getrow(i).getcol(indNonZero[j]).toarray()[0, 0]
# print(c)
# print(indImages[j])
t_min = np.zeros((indImages[j].shape[0]))
for kk in range(indImages[j].shape[0]):
temp_d = all_V_qe.getrow(indImages[j][kk]).getcol(indNonZero[j]).toarray()[0, 0]
t_min[kk] = np.minimum(c, temp_d)
# print(t_min)
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + t_min
# temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
# V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
# print(jaccard_dist[0])
# print(original_dist[0])
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del all_V_qe
del jaccard_dist
final_dist = final_dist[:q_num, q_num:]
return final_dist
def re_ranking_batch_gpu(all_feature, q_num, k1, k2, lambda_value, len_slice=1000):
    # compute pairwise distances over the (q+g) x (q+g) set for the initial ranking
initial_rank = np.zeros((len(all_feature), k1+1)).astype(np.int32)
original_dist = np.zeros((q_num, len(all_feature)))
gpu_features = all_feature.cuda()
s_time = time.time()
n_iter = len(all_feature) // len_slice + int(len(all_feature) % len_slice > 0)
with tqdm(total=n_iter) as pbar:
for i in range(n_iter):
dis_i_qg = euclidean_dist(gpu_features[i*len_slice:(i+1)*len_slice], gpu_features).data.cpu().numpy()
initial_i_rank = np.argpartition(dis_i_qg, range(1, k1 + 1), ).astype(np.int32)[:, :k1 + 1]
initial_rank[i*len_slice:(i+1)*len_slice] = initial_i_rank
pbar.update(1)
# print(initial_rank[0])
end_time = time.time()
print("rank time : %s" % (end_time-s_time))
all_V = []
s_time = time.time()
n_iter = len(all_feature) // len_slice + int(len(all_feature) % len_slice > 0)
with tqdm(total=n_iter) as pbar:
for i in range(n_iter):
dis_i_qg = euclidean_dist(gpu_features[i * len_slice:(i + 1) * len_slice], gpu_features).data.cpu().numpy()
for ks in range(dis_i_qg.shape[0]):
r_k = i*len_slice+ks
dis_i_qg[ks] = np.power(dis_i_qg[ks], 2).astype(np.float32)
dis_i_qg[ks] = 1. * dis_i_qg[ks] / np.max(dis_i_qg[ks])
if r_k < q_num:
original_dist[r_k] = dis_i_qg[ks]
V ,k_reciprocal_expansion_index, weight = calculate_V(initial_rank, len(all_feature), dis_i_qg[ks], r_k, k1)
# if r_k == 0:
# print(k_reciprocal_expansion_index)
# print(weight)
# print(dis_i_qg[ks])
all_V.append(sparse.csr_matrix(V))
pbar.update(1)
all_V = sparse.vstack(all_V)
# print(all_V.getrow(0).toarray())
end_time = time.time()
print("calculate V time : %s" % (end_time - s_time))
# print(all_V.todense()[0])
all_V_qe = []
s_time = time.time()
for i in range(len(all_feature)):
temp_V = np.zeros((k2, len(all_feature)))
for l, row_index in enumerate(initial_rank[i, :k2]):
temp_V[l, :] = all_V.getrow(row_index).toarray()[0]
V_qe = np.mean(temp_V, axis=0)
all_V_qe.append(sparse.csr_matrix(V_qe))
all_V_qe = sparse.vstack(all_V_qe)
# print(all_V_qe.todense()[0])
del all_V
end_time = time.time()
print("calculate V_qe time : %s" % (end_time - s_time))
invIndex = []
for i in range(len(all_feature)):
invIndex.append(np.where(all_V_qe.getcol(i).toarray().transpose()[0] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
with tqdm(total=q_num) as pbar:
for i in range(q_num):
temp_min = np.zeros(shape=[1, len(all_feature)], dtype=np.float32)
indNonZero = np.where(all_V_qe.getrow(i).toarray()[0] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
# print(indImages)
for j in range(len(indNonZero)):
# print(indNonZero[j])
c = all_V_qe.getrow(i).getcol(indNonZero[j]).toarray()[0, 0]
# print(c)
# print(indImages[j])
t_min = np.zeros((indImages[j].shape[0]))
for kk in range(indImages[j].shape[0]):
temp_d = all_V_qe.getrow(indImages[j][kk]).getcol(indNonZero[j]).toarray()[0, 0]
t_min[kk] = np.minimum(c, temp_d)
# print(t_min)
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + t_min
# temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
# V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
pbar.update(1)
# print(jaccard_dist[0])
# print(original_dist[0])
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del all_V_qe
del jaccard_dist
final_dist = final_dist[:q_num, q_num:]
return final_dist
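
# Minimal usage sketch (illustrative assumptions: the feature sizes, k1/k2
# and lambda_value below are made up, and the local `evaluate` module that
# provides euclidean_dist must be importable). Queries are stacked before
# the gallery in the feature matrix.
if __name__ == '__main__':
    q_num, g_num, dim = 4, 16, 32
    all_feats = torch.cat([torch.randn(q_num, dim), torch.randn(g_num, dim)])
    # k1/k2 set the k-reciprocal neighbourhood sizes; lambda_value blends
    # the Jaccard distance with the original euclidean distance.
    dist = re_ranking_batch(all_feats, q_num, k1=5, k2=3, lambda_value=0.3,
                            len_slice=8)
    print(dist.shape)  # expected: (4, 16)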
|
[
"evaluate.euclidean_dist"
] |
[((1426, 1465), 'numpy.unique', 'np.unique', (['k_reciprocal_expansion_index'], {}), '(k_reciprocal_expansion_index)\n', (1435, 1465), True, 'import numpy as np\n'), ((1521, 1568), 'numpy.exp', 'np.exp', (['(-dis_i_qg[k_reciprocal_expansion_index])'], {}), '(-dis_i_qg[k_reciprocal_expansion_index])\n', (1527, 1568), True, 'import numpy as np\n'), ((2016, 2027), 'time.time', 'time.time', ([], {}), '()\n', (2025, 2027), False, 'import time\n'), ((2541, 2552), 'time.time', 'time.time', ([], {}), '()\n', (2550, 2552), False, 'import time\n'), ((2631, 2642), 'time.time', 'time.time', ([], {}), '()\n', (2640, 2642), False, 'import time\n'), ((3619, 3639), 'scipy.sparse.vstack', 'sparse.vstack', (['all_V'], {}), '(all_V)\n', (3632, 3639), False, 'from scipy import sparse\n'), ((3694, 3705), 'time.time', 'time.time', ([], {}), '()\n', (3703, 3705), False, 'import time\n'), ((3827, 3838), 'time.time', 'time.time', ([], {}), '()\n', (3836, 3838), False, 'import time\n'), ((4157, 4180), 'scipy.sparse.vstack', 'sparse.vstack', (['all_V_qe'], {}), '(all_V_qe)\n', (4170, 4180), False, 'from scipy import sparse\n'), ((4245, 4256), 'time.time', 'time.time', ([], {}), '()\n', (4254, 4256), False, 'import time\n'), ((4480, 4526), 'numpy.zeros_like', 'np.zeros_like', (['original_dist'], {'dtype': 'np.float32'}), '(original_dist, dtype=np.float32)\n', (4493, 4526), True, 'import numpy as np\n'), ((6205, 6216), 'time.time', 'time.time', ([], {}), '()\n', (6214, 6216), False, 'import time\n'), ((6732, 6743), 'time.time', 'time.time', ([], {}), '()\n', (6741, 6743), False, 'import time\n'), ((6822, 6833), 'time.time', 'time.time', ([], {}), '()\n', (6831, 6833), False, 'import time\n'), ((7812, 7832), 'scipy.sparse.vstack', 'sparse.vstack', (['all_V'], {}), '(all_V)\n', (7825, 7832), False, 'from scipy import sparse\n'), ((7887, 7898), 'time.time', 'time.time', ([], {}), '()\n', (7896, 7898), False, 'import time\n'), ((8020, 8031), 'time.time', 'time.time', ([], {}), '()\n', (8029, 8031), False, 'import time\n'), ((8350, 8373), 'scipy.sparse.vstack', 'sparse.vstack', (['all_V_qe'], {}), '(all_V_qe)\n', (8363, 8373), False, 'from scipy import sparse\n'), ((8438, 8449), 'time.time', 'time.time', ([], {}), '()\n', (8447, 8449), False, 'import time\n'), ((8673, 8719), 'numpy.zeros_like', 'np.zeros_like', (['original_dist'], {'dtype': 'np.float32'}), '(original_dist, dtype=np.float32)\n', (8686, 8719), True, 'import numpy as np\n'), ((475, 512), 'numpy.where', 'np.where', (['(backward_k_neigh_index == i)'], {}), '(backward_k_neigh_index == i)\n', (483, 512), True, 'import numpy as np\n'), ((1697, 1711), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (1703, 1711), True, 'import numpy as np\n'), ((2122, 2140), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_iter'}), '(total=n_iter)\n', (2126, 2140), False, 'from tqdm import tqdm\n'), ((2738, 2756), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_iter'}), '(total=n_iter)\n', (2742, 2756), False, 'from tqdm import tqdm\n'), ((4069, 4092), 'numpy.mean', 'np.mean', (['temp_V'], {'axis': '(0)'}), '(temp_V, axis=0)\n', (4076, 4092), True, 'import numpy as np\n'), ((6311, 6329), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_iter'}), '(total=n_iter)\n', (6315, 6329), False, 'from tqdm import tqdm\n'), ((6929, 6947), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_iter'}), '(total=n_iter)\n', (6933, 6947), False, 'from tqdm import tqdm\n'), ((8262, 8285), 'numpy.mean', 'np.mean', (['temp_V'], {'axis': '(0)'}), '(temp_V, axis=0)\n', (8269, 8285), True, 'import numpy as np\n'), ((8730, 8747), 'tqdm.tqdm', 'tqdm', ([], {'total': 'q_num'}), '(total=q_num)\n', (8734, 8747), False, 'from tqdm import tqdm\n'), ((989, 1044), 'numpy.where', 'np.where', (['(candidate_backward_k_neigh_index == candidate)'], {}), '(candidate_backward_k_neigh_index == candidate)\n', (997, 1044), True, 'import numpy as np\n'), ((1320, 1389), 'numpy.append', 'np.append', (['k_reciprocal_expansion_index', 'candidate_k_reciprocal_index'], {}), '(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n', (1329, 1389), True, 'import numpy as np\n'), ((1597, 1622), 'numpy.zeros', 'np.zeros', (['all_feature_len'], {}), '(all_feature_len)\n', (1605, 1622), True, 'import numpy as np\n'), ((4117, 4140), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['V_qe'], {}), '(V_qe)\n', (4134, 4140), False, 'from scipy import sparse\n'), ((5038, 5069), 'numpy.zeros', 'np.zeros', (['indImages[j].shape[0]'], {}), '(indImages[j].shape[0])\n', (5046, 5069), True, 'import numpy as np\n'), ((8310, 8333), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['V_qe'], {}), '(V_qe)\n', (8327, 8333), False, 'from scipy import sparse\n'), ((1148, 1212), 'numpy.intersect1d', 'np.intersect1d', (['candidate_k_reciprocal_index', 'k_reciprocal_index'], {}), '(candidate_k_reciprocal_index, k_reciprocal_index)\n', (1162, 1212), True, 'import numpy as np\n'), ((5249, 5270), 'numpy.minimum', 'np.minimum', (['c', 'temp_d'], {}), '(c, temp_d)\n', (5259, 5270), True, 'import numpy as np\n'), ((9315, 9346), 'numpy.zeros', 'np.zeros', (['indImages[j].shape[0]'], {}), '(indImages[j].shape[0])\n', (9323, 9346), True, 'import numpy as np\n'), ((3128, 3148), 'numpy.max', 'np.max', (['dis_i_qg[ks]'], {}), '(dis_i_qg[ks])\n', (3134, 3148), True, 'import numpy as np\n'), ((3556, 3576), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['V'], {}), '(V)\n', (3573, 3576), False, 'from scipy import sparse\n'), ((7321, 7341), 'numpy.max', 'np.max', (['dis_i_qg[ks]'], {}), '(dis_i_qg[ks])\n', (7327, 7341), True, 'import numpy as np\n'), ((7749, 7769), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['V'], {}), '(V)\n', (7766, 7769), False, 'from scipy import sparse\n'), ((9538, 9559), 'numpy.minimum', 'np.minimum', (['c', 'temp_d'], {}), '(c, temp_d)\n', (9548, 9559), True, 'import numpy as np\n'), ((3032, 3057), 'numpy.power', 'np.power', (['dis_i_qg[ks]', '(2)'], {}), '(dis_i_qg[ks], 2)\n', (3040, 3057), True, 'import numpy as np\n'), ((7225, 7250), 'numpy.power', 'np.power', (['dis_i_qg[ks]', '(2)'], {}), '(dis_i_qg[ks], 2)\n', (7233, 7250), True, 'import numpy as np\n'), ((779, 798), 'numpy.around', 'np.around', (['(k1 / 2.0)'], {}), '(k1 / 2.0)\n', (788, 798), True, 'import numpy as np\n'), ((941, 960), 'numpy.around', 'np.around', (['(k1 / 2.0)'], {}), '(k1 / 2.0)\n', (950, 960), True, 'import numpy as np\n'), ((2205, 2280), 'evaluate.euclidean_dist', 'euclidean_dist', (['all_feature[i * len_slice:(i + 1) * len_slice]', 'all_feature'], {}), '(all_feature[i * len_slice:(i + 1) * len_slice], all_feature)\n', (2219, 2280), False, 'from evaluate import eval_func, euclidean_dist\n'), ((2821, 2896), 'evaluate.euclidean_dist', 'euclidean_dist', (['all_feature[i * len_slice:(i + 1) * len_slice]', 'all_feature'], {}), '(all_feature[i * len_slice:(i + 1) * len_slice], all_feature)\n', (2835, 2896), False, 'from evaluate import eval_func, euclidean_dist\n'), ((6394, 6471), 'evaluate.euclidean_dist', 'euclidean_dist', (['gpu_features[i * len_slice:(i + 1) * len_slice]', 'gpu_features'], {}), '(gpu_features[i * len_slice:(i + 1) * len_slice], gpu_features)\n', (6408, 6471), False, 'from evaluate import eval_func, euclidean_dist\n'), ((7012, 7089), 'evaluate.euclidean_dist', 'euclidean_dist', (['gpu_features[i * len_slice:(i + 1) * len_slice]', 'gpu_features'], {}), '(gpu_features[i * len_slice:(i + 1) * len_slice], gpu_features)\n', (7026, 7089), False, 'from evaluate import eval_func, euclidean_dist\n')]
|
#%% -*- coding: utf-8 -*-
# Plotting
import matplotlib
matplotlib.use('agg')
import os
import time
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Internal imports
from utils.data import load_dataset, get_external_sounds
from models.vae.ae import AE, RegressionAE, DisentanglingAE
from models.vae.vae import VAE
from models.vae.wae import WAE
from models.vae.vae_flow import VAEFlow
from models.loss import multinomial_loss, multinomial_mse_loss
from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle
from evaluate import evaluate_model
# Define arguments
parser = argparse.ArgumentParser()
# Data arguments
parser.add_argument('--path', type=str, default='', help='Path to the dataset')
parser.add_argument('--test_sounds', type=str, default='', help='Path to test sounds')
parser.add_argument('--output', type=str, default='outputs', help='Path to output directory')
parser.add_argument('--dataset', type=str, default='32par', help='Name of the dataset')
parser.add_argument('--data', type=str, default='mel', help='Type of data to train on')
parser.add_argument('--train_type', type=str, default='fixed', help='Fixed or random data split')
parser.add_argument('--nbworkers', type=int, default=0, help='Number of workers for parallel import')
# Model arguments
parser.add_argument('--model', type=str, default='vae', help='Type of model (MLP, CNN, AE, VAE, WAE)')
parser.add_argument('--loss', type=str, default='mse', help='Loss for parameter regression')
parser.add_argument('--rec_loss', type=str, default='mse', help='Reconstruction loss')
parser.add_argument('--n_classes', type=int, default=61, help='Classes for multinoulli loss')
parser.add_argument('--n_hidden', type=int, default=1024, help='Number of hidden units')
parser.add_argument('--n_layers', type=int, default=4, help='Number of computing layers')
# CNN parameters
parser.add_argument('--channels', type=int, default=64, help='Number of channels in convolution')
parser.add_argument('--kernel', type=int, default=5, help='Size of convolution kernel')
parser.add_argument('--dilation', type=int, default=3, help='Dilation factor of convolution')
# AE-specific parameters
parser.add_argument('--layers', type=str, default='gated_cnn', help='Type of layers in the model')
parser.add_argument('--encoder_dims', type=int, default=64, help='Number of encoder output dimensions')
parser.add_argument('--latent_dims', type=int, default=0, help='Number of latent dimensions')
parser.add_argument('--warm_latent', type=int, default=50, help='Warmup epochs for latent')
parser.add_argument('--start_regress', type=int, default=100, help='Epoch to start regression')
parser.add_argument('--warm_regress', type=int, default=100, help='Warmup epochs for regression')
parser.add_argument('--beta_factor', type=int, default=1, help='Beta factor in VAE')
# Two-step training parameters
parser.add_argument('--ref_model', type=str, default='', help='Reference model')
# Flow specific parameters
parser.add_argument('--flow', type=str, default='iaf', help='Type of flow to use')
parser.add_argument('--flow_length', type=int, default=16, help='Number of flow transforms')
# Regression parameters
parser.add_argument('--regressor', type=str, default='mlp', help='Type of regressor')
parser.add_argument('--reg_layers', type=int, default=3, help='Number of regression layers')
parser.add_argument('--reg_hiddens', type=int, default=256, help='Number of units in regressor')
parser.add_argument('--reg_flow', type=str, default='maf', help='Type of flow in regressor')
parser.add_argument('--reg_factor', type=float, default=1e3, help='Regression loss weight')
# Optimization arguments
parser.add_argument('--k_run', type=int, default=0, help='ID of runs (k-folds)')
parser.add_argument('--early_stop', type=int, default=60, help='Early stopping')
parser.add_argument('--plot_interval', type=int, default=100, help='Interval of plotting frequency')
parser.add_argument('--batch_size', type=int, default=64, help='Size of the batch')
parser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train on')
parser.add_argument('--eval', type=int, default=100, help='Frequency of full evalution')
parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
# Semantic arguments
parser.add_argument('--semantic_dim', type=int, default=-1, help='Using semantic dimension')
parser.add_argument('--dis_layers', type=int, default=8, help='Number of disentangling layers')
parser.add_argument('--disentangling', type=str, default='density', help='Type of disentangling approach')
parser.add_argument('--start_disentangle',type=int, default=100, help='Epoch to start disentangling')
parser.add_argument('--warm_disentangle',type=int, default=25, help='Warmup on disentanglement')
# Evaluation parameters
parser.add_argument('--batch_evals', type=int, default=16, help='Number of batch to evaluate')
parser.add_argument('--batch_out', type=int, default=3, help='Number of batch to synthesize')
parser.add_argument('--check_exists', type=int, default=0, help='Check if model exists')
parser.add_argument('--time_limit', type=int, default=0, help='Maximum time to train (in minutes)')
# CUDA arguments
parser.add_argument('--device', type=str, default='cpu', help='Device for CUDA')
args = parser.parse_args()
# Track start time (for HPC)
start_time = time.time()
# In case we are CPU
args.synthesize = False
# Parameter checking
if (len(args.path) == 0):
args.path = (args.device == 'cpu') and '/Users/esling/Datasets/diva_dataset' or '/fast-2/datasets/diva_dataset/'
args.test_sounds = (args.device == 'cpu') and '/Users/esling/Datasets/synth_testing' or '/fast-2/datasets/flow_synthesizer/synth_testing'
args.vocal_sounds = '/fast-2/datasets/flow_synthesizer/vocal_testing'
#args.output = (args.device == 'cpu') and 'outputs' or '/fast-1/philippe/flow_results'
if (args.device not in ['cpu']):
args.synthesize = True
if (args.device != 'cpu'):
# Enable CuDNN optimization
torch.backends.cudnn.benchmark=True
"""
###################
Basic definitions
###################
"""
# Results and checkpoint folders
if not os.path.exists('{0}'.format(args.output)):
os.makedirs('{0}'.format(args.output))
os.makedirs('{0}/audio'.format(args.output))
os.makedirs('{0}/images'.format(args.output))
os.makedirs('{0}/models'.format(args.output))
# Model save file
model_name = '{0}_{1}_{2}_{3}'.format(args.model, args.data, args.loss, str(args.latent_dims))
if (not (args.model in ['mlp', 'gated_mlp', 'cnn', 'gated_cnn', 'res_cnn'])):
model_name += '_' + args.layers
if (args.model == 'vae_flow'):
model_name += '_' + args.flow
model_name += '_' + args.regressor
if (args.regressor != 'mlp'):
model_name += '_' + args.reg_flow + '_' + str(args.reg_layers)
if (args.semantic_dim > -1):
model_name += '_' + str(args.semantic_dim) + '_' + args.disentangling
if (args.k_run > 0):
model_name += '_' + str(args.k_run)
base_dir = '{0}/'.format(args.output)
base_img = '{0}/images/{1}'.format(args.output, model_name)
base_audio = '{0}/audio/{1}'.format(args.output, model_name)
if (args.check_exists == 1):
if os.path.exists(args.output + '/models/' + model_name + '.synth.results.npy'):
print('[Found ' + args.output + '/models/' + model_name + '.synth.results.npy - Exiting.]')
        exit()
# Handling cuda
args.cuda = not args.device == 'cpu' and torch.cuda.is_available()
args.device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
print('Optimization will be on ' + str(args.device) + '.')
"""
###################
Dataset loading
###################
"""
print('[Loading dataset]')
ref_split = args.path + '/reference_split_' + args.dataset+ "_" + args.data + '.th'
if (args.train_type == 'random' or (not os.path.exists(ref_split))):
train_loader, valid_loader, test_loader, args = load_dataset(args)
if (args.train_type == 'fixed'):
torch.save([train_loader, valid_loader, test_loader], ref_split)
# Take fixed batch
fixed_data, fixed_params, fixed_meta, fixed_audio = next(iter(test_loader))
fixed_data, fixed_params, fixed_meta, fixed_audio = fixed_data.to(args.device), fixed_params.to(args.device), fixed_meta, fixed_audio
fixed_batch = (fixed_data, fixed_params, fixed_meta, fixed_audio)
else:
data = torch.load(ref_split)
train_loader, valid_loader, test_loader = data[0], data[1], data[2]
fixed_data, fixed_params, fixed_meta, fixed_audio = next(iter(test_loader))
fixed_data, fixed_params, fixed_meta, fixed_audio = fixed_data.to(args.device), fixed_params.to(args.device), fixed_meta, fixed_audio
fixed_batch = (fixed_data, fixed_params, fixed_meta, fixed_audio)
args.output_size = train_loader.dataset.output_size
args.input_size = train_loader.dataset.input_size
# Set latent dims to output dims
if (args.latent_dims == 0):
args.latent_dims = args.output_size
"""
###################
Model definition section
###################
"""
print('[Creating model]')
if (args.loss in ['multinomial']):
args.output_size *= args.n_classes
if (args.loss in ['multi_mse']):
args.output_size *= (args.n_classes + 1)
if (args.model == 'mlp'):
model = GatedMLP(np.prod(args.input_size), args.output_size, hidden_size = args.n_hidden, n_layers = args.n_layers, type_mod='normal')
elif (args.model == 'gated_mlp'):
model = GatedMLP(np.prod(args.input_size), args.output_size, hidden_size = args.n_hidden, n_layers = args.n_layers, type_mod='gated')
elif (args.model == 'cnn'):
model = GatedCNN(args.input_size, args.output_size, channels = args.channels, n_layers = 4, hidden_size = args.n_hidden, n_mlp = 3, type_mod='normal', args=args)
elif (args.model == 'gated_cnn'):
model = GatedCNN(args.input_size, args.output_size, channels = args.channels, n_layers = 4, hidden_size = args.n_hidden, n_mlp = 3, type_mod='gated', args=args)
elif (args.model == 'res_cnn'):
model = GatedCNN(args.input_size, args.output_size, channels = args.channels, n_layers = 4, hidden_size = args.n_hidden, n_mlp = 3, type_mod='residual', args=args)
elif (args.model in ['ae', 'vae', 'wae', 'vae_flow']):
# Construct reconstruction loss
if (args.rec_loss == 'mse'):
rec_loss = nn.MSELoss(reduction='sum').to(args.device)
elif (args.rec_loss == 'l1'):
rec_loss = nn.SmoothL1Loss(reduction='sum').to(args.device)
elif (args.rec_loss == 'multinomial'):
rec_loss = multinomial_loss
elif (args.rec_loss == 'multi_mse'):
rec_loss = multinomial_mse_loss
else:
raise Exception('Unknown reconstruction loss ' + args.rec_loss)
# Construct encoder and decoder
encoder, decoder = construct_encoder_decoder(args.input_size, args.encoder_dims, args.latent_dims, channels = args.channels, n_layers = args.n_layers, hidden_size = args.n_hidden, n_mlp = args.n_layers // 2, type_mod=args.layers, args=args)
# Construct specific type of AE
if (args.model == 'ae'):
model = AE(encoder, decoder, args.encoder_dims, args.latent_dims)
elif (args.model == 'vae'):
model = VAE(encoder, decoder, args.input_size, args.encoder_dims, args.latent_dims)
elif (args.model == 'wae'):
model = WAE(encoder, decoder, args.input_size, args.encoder_dims, args.latent_dims)
elif (args.model == 'vae_flow'):
# Construct the normalizing flow
flow, blocks = construct_flow(args.latent_dims, flow_type=args.flow, flow_length=args.flow_length, amortization='input')
# Construct full VAE with given flow
model = VAEFlow(encoder, decoder, flow, args.input_size, args.encoder_dims, args.latent_dims)
# Construct specific regressor
regression_model = construct_regressor(args.latent_dims, args.output_size, model=args.regressor, hidden_dims = args.reg_hiddens, n_layers=args.reg_layers, flow_type=args.reg_flow)
if (args.semantic_dim == -1):
# Final AE / Regression model
model = RegressionAE(model, args.latent_dims, args.output_size, rec_loss, regressor=regression_model, regressor_name=args.regressor)
else:
# Construct disentangling flow
disentangling = construct_disentangle(args.latent_dims, model=args.disentangling, semantic_dim=args.semantic_dim, n_layers=args.dis_layers, flow_type=args.reg_flow)
# Final AE / Disentanglement / Regression model
model = DisentanglingAE(model, args.latent_dims, args.output_size, rec_loss, regressor=regression_model, regressor_name=args.regressor, disentangling=disentangling, semantic_dim=args.semantic_dim)
else:
raise Exception('Unknown model ' + args.model)
# Send model to device
model = model.to(args.device)
# Two-step training loading procedure
if (len(args.ref_model) > 0):
print('[Loading reference ' + args.ref_model + ']')
ref_model = torch.load(args.ref_model)#, map_location=args.device)
if (args.regressor != 'mlp'):
ref_model_ae = ref_model.ae_model.to(args.device)
model.ae_model = None
model.ae_model = ref_model_ae
ref_model = None
else:
model = None
model = ref_model.to(args.device)
"""
###################
Optimizer section
###################
"""
# Optimizer model
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Learning rate scheduler
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, verbose=True, threshold=1e-7)
# Loss
if (args.loss == 'mse'):
loss = nn.MSELoss(reduction='mean').to(args.device)
elif (args.loss == 'l1'):
loss = nn.SmoothL1Loss(reduction='mean').to(args.device)
elif (args.loss == 'bce'):
loss = nn.BCELoss(reduction='mean').to(args.device)
elif (args.loss == 'multinomial'):
loss = multinomial_loss
elif (args.loss == 'multi_mse'):
loss = multinomial_mse_loss
else:
raise Exception('Unknown loss ' + args.loss)
"""
###################
Training section
###################
"""
# Monitoring quantities
losses = torch.zeros(args.epochs, 3)
if (args.epochs == 0):
losses = torch.zeros(200, 3)
best_loss = np.inf
early = 0
print('[Starting training]')
for i in range(args.epochs):
if (args.start_regress == 0):
from pympler import muppy, summary
all_objects = muppy.get_objects()
sum1 = summary.summarize(all_objects)
# Prints out a summary of the large objects
print('************ Summary at beginning of epoch ************')
summary.print_(sum1)
# Set warm-up values
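    # (beta ramps linearly from 0 to beta_factor over warm_latent epochs;
    #  gamma and delta ramp the same way once the regression and
    #  disentanglement start epochs are reached -- see the branches below)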
args.beta = args.beta_factor * (float(i) / float(max(args.warm_latent, i)))
if (i >= args.start_regress):
args.gamma = ((float(i - args.start_regress) * args.reg_factor) / float(max(args.warm_regress, i - args.start_regress)))
if (args.regressor != 'mlp'):
args.gamma *= 1e-1
else:
args.gamma = 0
if (i >= args.start_disentangle):
args.delta = ((float(i - args.start_disentangle)) / float(max(args.warm_disentangle, i - args.start_disentangle)))
else:
args.delta = 0
print('%.3f - %.3f'%(args.beta, args.gamma))
# Perform one epoch of train
losses[i, 0] = model.train_epoch(train_loader, loss, optimizer, args)
# Perform validation
losses[i, 1] = model.eval_epoch(valid_loader, loss, args)
# Learning rate scheduling
if ((not args.model in ['ae', 'vae', 'wae', 'vae_flow']) or (i >= args.start_regress)):
scheduler.step(losses[i, 1])
# Perform test evaluation
losses[i, 2] = model.eval_epoch(test_loader, loss, args)
if (args.start_regress == 1000):
losses[i, 1] = losses[i, 0]
losses[i, 2] = losses[i, 0]
# Model saving
if (losses[i, 1] < best_loss):
# Save model
best_loss = losses[i, 1]
torch.save(model, args.output + '/models/' + model_name + '.model')
early = 0
# Check for early stopping
elif (args.early_stop > 0 and i >= args.start_regress):
early += 1
if (early > args.early_stop):
print('[Model stopped early]')
break
# Periodic evaluation (or debug model)
if ((i + 1) % args.plot_interval == 0 or (args.epochs == 1)):
args.plot = 'train'
with torch.no_grad():
model.eval()
evaluate_model(model, fixed_batch, test_loader, args, train=True, name=base_img + '_batch_' + str(i))
# Time limit for HPC grid eval
if ((args.time_limit > 0) and (((time.time() - start_time) / 60.0) > args.time_limit)):
print('[Hitting time limit after ' + str((time.time() - start_time) / 60.0) + ' minutes.]')
print('[Going to evaluation mode]')
break
if (args.regressor == 'flow_kl_f'):
print(torch.cuda.memory_allocated(args.device))
print('Epoch ' + str(i))
print(losses[i])
torch.cuda.empty_cache()
"""
###################
Evaluation section
###################
"""
from evaluate import evaluate_params, evaluate_synthesis, evaluate_projection
from evaluate import evaluate_reconstruction, evaluate_latent_space
from evaluate import evaluate_meta_parameters, evaluate_semantic_parameters
from evaluate import evaluate_latent_neighborhood
args.plot = 'final'
args.model_name, args.base_img, args.base_audio = model_name, base_img, base_audio
args.base_model = args.output + '/models/' + model_name
print('[Reload best performing model]')
model = torch.load(args.output + '/models/' + model_name + '.model')
model = model.to(args.device)
print('[Performing final evaluation]')
# Memory saver
with torch.no_grad():
# Perform parameters evaluation
evaluate_params(model, test_loader, args, losses=losses)
# Synthesis engine (on GPU)
if (args.synthesize):
# Import synthesis
from synth.synthesize import create_synth
print('[Synthesis evaluation]')
# Create synth rendering system
args.engine, args.generator, args.param_defaults, args.rev_idx = create_synth(args.dataset)
# Evaluation specific to AE models
if (args.model not in ['mlp', 'gated_mlp', 'cnn', 'gated_cnn', 'res_cnn']):
# Perform reconstruction evaluation
evaluate_reconstruction(model, test_loader, args, train=False)
# Evaluate latent space
args = evaluate_latent_space(model, test_loader, args, train=False)
# Perform meta-parameter analysis
evaluate_meta_parameters(model, test_loader, args, train=False)
# Perform latent neighborhood analysis
evaluate_latent_neighborhood(model, test_loader, args, train=False)
# Perform semantic parameter analysis
evaluate_semantic_parameters(model, test_loader, args, train=False)
# Synthesis engine (on GPU)
if (args.synthesize):
# Evaluate synthesizer output
evaluate_synthesis(model, test_loader, args, train=False)
print('[Load set of testing sound (outside Diva)]')
test_sounds = get_external_sounds(args.test_sounds, test_loader.dataset, args)
# Evaluate projection
evaluate_projection(model, test_sounds, args, train=False)
print('[Evaluate vocal sketching dataset]')
test_sounds = get_external_sounds(args.vocal_sounds, test_loader.dataset, args)
# Evaluate projection
evaluate_projection(model, test_sounds, args, train=False, type_val='vocal')
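
# ----------------------------------------------------------------------------
# Invocation sketch (illustrative only: the script name, paths and flag
# values here are assumptions, not taken from the source). Everything above
# is driven by the argparse flags defined at the top of this script, e.g.:
#   python train.py --path /data/diva_dataset --dataset 32par --data mel \
#       --model vae_flow --flow iaf --latent_dims 16 --regressor mlp \
#       --epochs 200 --device cuda:0 --output outputs
# Note: with --device cpu, args.synthesize stays False, so the synthesizer
# rendering steps are skipped and only the parameter/latent evaluations run.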
|
[
"evaluate.evaluate_latent_space",
"evaluate.evaluate_semantic_parameters",
"evaluate.evaluate_projection",
"evaluate.evaluate_synthesis",
"evaluate.evaluate_latent_neighborhood",
"evaluate.evaluate_reconstruction",
"evaluate.evaluate_params",
"evaluate.evaluate_meta_parameters"
] |
[((56, 77), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (70, 77), False, 'import matplotlib\n'), ((695, 720), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (718, 720), False, 'import argparse\n'), ((6309, 6320), 'time.time', 'time.time', ([], {}), '()\n', (6318, 6320), False, 'import time\n'), ((14305, 14424), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.5)', 'patience': '(20)', 'verbose': '(True)', 'threshold': '(1e-07)'}), "(optimizer, mode='min', factor=0.5,\n patience=20, verbose=True, threshold=1e-07)\n", (14341, 14424), True, 'import torch.optim as optim\n'), ((14961, 14988), 'torch.zeros', 'torch.zeros', (['args.epochs', '(3)'], {}), '(args.epochs, 3)\n', (14972, 14988), False, 'import torch\n'), ((18346, 18406), 'torch.load', 'torch.load', (["(args.output + '/models/' + model_name + '.model')"], {}), "(args.output + '/models/' + model_name + '.model')\n", (18356, 18406), False, 'import torch\n'), ((8149, 8225), 'os.path.exists', 'os.path.exists', (["(args.output + '/models/' + model_name + '.synth.results.npy')"], {}), "(args.output + '/models/' + model_name + '.synth.results.npy')\n", (8163, 8225), False, 'import os\n'), ((8397, 8422), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8420, 8422), False, 'import torch\n'), ((8862, 8880), 'utils.data.load_dataset', 'load_dataset', (['args'], {}), '(args)\n', (8874, 8880), False, 'from utils.data import load_dataset, get_external_sounds\n'), ((9319, 9340), 'torch.load', 'torch.load', (['ref_split'], {}), '(ref_split)\n', (9329, 9340), False, 'import torch\n'), ((13814, 13840), 'torch.load', 'torch.load', (['args.ref_model'], {}), '(args.ref_model)\n', (13824, 13840), False, 'import torch\n'), ((15025, 15044), 'torch.zeros', 'torch.zeros', (['(200)', '(3)'], {}), '(200, 3)\n', (15036, 15044), False, 'import torch\n'), ((17774, 17798), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (17796, 17798), False, 'import torch\n'), ((18496, 18511), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18509, 18511), False, 'import torch\n'), ((18553, 18609), 'evaluate.evaluate_params', 'evaluate_params', (['model', 'test_loader', 'args'], {'losses': 'losses'}), '(model, test_loader, args, losses=losses)\n', (18568, 18609), False, 'from evaluate import evaluate_params, evaluate_synthesis, evaluate_projection\n'), ((8465, 8490), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8488, 8490), False, 'import torch\n'), ((8781, 8806), 'os.path.exists', 'os.path.exists', (['ref_split'], {}), '(ref_split)\n', (8795, 8806), False, 'import os\n'), ((8926, 8990), 'torch.save', 'torch.save', (['[train_loader, valid_loader, test_loader]', 'ref_split'], {}), '([train_loader, valid_loader, test_loader], ref_split)\n', (8936, 8990), False, 'import torch\n'), ((10211, 10235), 'numpy.prod', 'np.prod', (['args.input_size'], {}), '(args.input_size)\n', (10218, 10235), True, 'import numpy as np\n'), ((15231, 15250), 'pympler.muppy.get_objects', 'muppy.get_objects', ([], {}), '()\n', (15248, 15250), False, 'from pympler import muppy, summary\n'), ((15266, 15296), 'pympler.summary.summarize', 'summary.summarize', (['all_objects'], {}), '(all_objects)\n', (15283, 15296), False, 'from pympler import muppy, summary\n'), ((15430, 15450), 'pympler.summary.print_', 'summary.print_', (['sum1'], {}), '(sum1)\n', (15444, 15450), False, 'from pympler import muppy, summary\n'), ((16738, 16805), 'torch.save', 'torch.save', (['model', "(args.output + '/models/' + model_name + '.model')"], {}), "(model, args.output + '/models/' + model_name + '.model')\n", (16748, 16805), False, 'import torch\n'), ((18898, 18924), 'synth.synthesize.create_synth', 'create_synth', (['args.dataset'], {}), '(args.dataset)\n', (18910, 18924), False, 'from synth.synthesize import create_synth\n'), ((19096, 19158), 'evaluate.evaluate_reconstruction', 'evaluate_reconstruction', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (19119, 19158), False, 'from evaluate import evaluate_reconstruction, evaluate_latent_space\n'), ((19206, 19266), 'evaluate.evaluate_latent_space', 'evaluate_latent_space', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (19227, 19266), False, 'from evaluate import evaluate_reconstruction, evaluate_latent_space\n'), ((19317, 19380), 'evaluate.evaluate_meta_parameters', 'evaluate_meta_parameters', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (19341, 19380), False, 'from evaluate import evaluate_meta_parameters, evaluate_semantic_parameters\n'), ((19436, 19503), 'evaluate.evaluate_latent_neighborhood', 'evaluate_latent_neighborhood', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (19464, 19503), False, 'from evaluate import evaluate_latent_neighborhood\n'), ((19558, 19625), 'evaluate.evaluate_semantic_parameters', 'evaluate_semantic_parameters', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (19586, 19625), False, 'from evaluate import evaluate_meta_parameters, evaluate_semantic_parameters\n'), ((19730, 19787), 'evaluate.evaluate_synthesis', 'evaluate_synthesis', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (19748, 19787), False, 'from evaluate import evaluate_params, evaluate_synthesis, evaluate_projection\n'), ((19870, 19934), 'utils.data.get_external_sounds', 'get_external_sounds', (['args.test_sounds', 'test_loader.dataset', 'args'], {}), '(args.test_sounds, test_loader.dataset, args)\n', (19889, 19934), False, 'from utils.data import load_dataset, get_external_sounds\n'), ((19973, 20031), 'evaluate.evaluate_projection', 'evaluate_projection', (['model', 'test_sounds', 'args'], {'train': '(False)'}), '(model, test_sounds, args, train=False)\n', (19992, 20031), False, 'from evaluate import evaluate_params, evaluate_synthesis, evaluate_projection\n'), ((20106, 20171), 'utils.data.get_external_sounds', 'get_external_sounds', (['args.vocal_sounds', 'test_loader.dataset', 'args'], {}), '(args.vocal_sounds, test_loader.dataset, args)\n', (20125, 20171), False, 'from utils.data import load_dataset, get_external_sounds\n'), ((20210, 20286), 'evaluate.evaluate_projection', 'evaluate_projection', (['model', 'test_sounds', 'args'], {'train': '(False)', 'type_val': '"""vocal"""'}), "(model, test_sounds, args, train=False, type_val='vocal')\n", (20229, 20286), False, 'from evaluate import evaluate_params, evaluate_synthesis, evaluate_projection\n'), ((10385, 10409), 'numpy.prod', 'np.prod', (['args.input_size'], {}), '(args.input_size)\n', (10392, 10409), True, 'import numpy as np\n'), ((10542, 10696), 'models.basic.GatedCNN', 'GatedCNN', (['args.input_size', 'args.output_size'], {'channels': 'args.channels', 'n_layers': '(4)', 'hidden_size': 'args.n_hidden', 'n_mlp': '(3)', 'type_mod': '"""normal"""', 'args': 'args'}), "(args.input_size, args.output_size, channels=args.channels,\n n_layers=4, hidden_size=args.n_hidden, n_mlp=3, type_mod='normal', args\n =args)\n", (10550, 10696), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((14463, 14491), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (14473, 14491), True, 'import torch.nn as nn\n'), ((17183, 17198), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17196, 17198), False, 'import torch\n'), ((17678, 17718), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['args.device'], {}), '(args.device)\n', (17705, 17718), False, 'import torch\n'), ((10742, 10895), 'models.basic.GatedCNN', 'GatedCNN', (['args.input_size', 'args.output_size'], {'channels': 'args.channels', 'n_layers': '(4)', 'hidden_size': 'args.n_hidden', 'n_mlp': '(3)', 'type_mod': '"""gated"""', 'args': 'args'}), "(args.input_size, args.output_size, channels=args.channels,\n n_layers=4, hidden_size=args.n_hidden, n_mlp=3, type_mod='gated', args=args\n )\n", (10750, 10895), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((14545, 14578), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (14560, 14578), True, 'import torch.nn as nn\n'), ((10939, 11094), 'models.basic.GatedCNN', 'GatedCNN', (['args.input_size', 'args.output_size'], {'channels': 'args.channels', 'n_layers': '(4)', 'hidden_size': 'args.n_hidden', 'n_mlp': '(3)', 'type_mod': '"""residual"""', 'args': 'args'}), "(args.input_size, args.output_size, channels=args.channels,\n n_layers=4, hidden_size=args.n_hidden, n_mlp=3, type_mod='residual',\n args=args)\n", (10947, 11094), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((14633, 14661), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (14643, 14661), True, 'import torch.nn as nn\n'), ((17411, 17422), 'time.time', 'time.time', ([], {}), '()\n', (17420, 17422), False, 'import time\n'), ((11685, 11912), 'models.basic.construct_encoder_decoder', 'construct_encoder_decoder', (['args.input_size', 'args.encoder_dims', 'args.latent_dims'], {'channels': 'args.channels', 'n_layers': 'args.n_layers', 'hidden_size': 'args.n_hidden', 'n_mlp': '(args.n_layers // 2)', 'type_mod': 'args.layers', 'args': 'args'}), '(args.input_size, args.encoder_dims, args.\n latent_dims, channels=args.channels, n_layers=args.n_layers,\n hidden_size=args.n_hidden, n_mlp=args.n_layers // 2, type_mod=args.\n layers, args=args)\n', (11710, 11912), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((12706, 12873), 'models.basic.construct_regressor', 'construct_regressor', (['args.latent_dims', 'args.output_size'], {'model': 'args.regressor', 'hidden_dims': 'args.reg_hiddens', 'n_layers': 'args.reg_layers', 'flow_type': 'args.reg_flow'}), '(args.latent_dims, args.output_size, model=args.\n regressor, hidden_dims=args.reg_hiddens, n_layers=args.reg_layers,\n flow_type=args.reg_flow)\n', (12725, 12873), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((11988, 12045), 'models.vae.ae.AE', 'AE', (['encoder', 'decoder', 'args.encoder_dims', 'args.latent_dims'], {}), '(encoder, decoder, args.encoder_dims, args.latent_dims)\n', (11990, 12045), False, 'from models.vae.ae import AE, RegressionAE, DisentanglingAE\n'), ((12955, 13084), 'models.vae.ae.RegressionAE', 'RegressionAE', (['model', 'args.latent_dims', 'args.output_size', 'rec_loss'], {'regressor': 'regression_model', 'regressor_name': 'args.regressor'}), '(model, args.latent_dims, args.output_size, rec_loss, regressor\n =regression_model, regressor_name=args.regressor)\n', (12967, 13084), False, 'from models.vae.ae import AE, RegressionAE, DisentanglingAE\n'), ((13153, 13310), 'models.basic.construct_disentangle', 'construct_disentangle', (['args.latent_dims'], {'model': 'args.disentangling', 'semantic_dim': 'args.semantic_dim', 'n_layers': 'args.dis_layers', 'flow_type': 'args.reg_flow'}), '(args.latent_dims, model=args.disentangling,\n semantic_dim=args.semantic_dim, n_layers=args.dis_layers, flow_type=\n args.reg_flow)\n', (13174, 13310), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((13374, 13570), 'models.vae.ae.DisentanglingAE', 'DisentanglingAE', (['model', 'args.latent_dims', 'args.output_size', 'rec_loss'], {'regressor': 'regression_model', 'regressor_name': 'args.regressor', 'disentangling': 'disentangling', 'semantic_dim': 'args.semantic_dim'}), '(model, args.latent_dims, args.output_size, rec_loss,\n regressor=regression_model, regressor_name=args.regressor,\n disentangling=disentangling, semantic_dim=args.semantic_dim)\n', (13389, 13570), False, 'from models.vae.ae import AE, RegressionAE, DisentanglingAE\n'), ((12094, 12169), 'models.vae.vae.VAE', 'VAE', (['encoder', 'decoder', 'args.input_size', 'args.encoder_dims', 'args.latent_dims'], {}), '(encoder, decoder, args.input_size, args.encoder_dims, args.latent_dims)\n', (12097, 12169), False, 'from models.vae.vae import VAE\n'), ((17516, 17527), 'time.time', 'time.time', ([], {}), '()\n', (17525, 17527), False, 'import time\n'), ((11238, 11265), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (11248, 11265), True, 'import torch.nn as nn\n'), ((12218, 12293), 'models.vae.wae.WAE', 'WAE', (['encoder', 'decoder', 'args.input_size', 'args.encoder_dims', 'args.latent_dims'], {}), '(encoder, decoder, args.input_size, args.encoder_dims, args.latent_dims)\n', (12221, 12293), False, 'from models.vae.wae import WAE\n'), ((11335, 11367), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (11350, 11367), True, 'import torch.nn as nn\n'), ((12395, 12505), 'models.basic.construct_flow', 'construct_flow', (['args.latent_dims'], {'flow_type': 'args.flow', 'flow_length': 'args.flow_length', 'amortization': '"""input"""'}), "(args.latent_dims, flow_type=args.flow, flow_length=args.\n flow_length, amortization='input')\n", (12409, 12505), False, 'from models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor, construct_disentangle\n'), ((12562, 12652), 'models.vae.vae_flow.VAEFlow', 'VAEFlow', (['encoder', 'decoder', 'flow', 'args.input_size', 'args.encoder_dims', 'args.latent_dims'], {}), '(encoder, decoder, flow, args.input_size, args.encoder_dims, args.\n latent_dims)\n', (12569, 12652), False, 'from models.vae.vae_flow import VAEFlow\n')]
|
from pathlib import Path
import sys
sys.path.append(str(Path().absolute()))
import logging
log_level = "INFO"
logging.basicConfig(
filename=str(snakemake.log),
filemode="w",
level=log_level,
format="[%(asctime)s]:%(levelname)s: %(message)s",
datefmt="%d/%m/%Y %I:%M:%S %p",
)
from evaluate.calculator import PrecisionCalculator, EmptyReportError
from evaluate.report import PrecisionReport
import pandas as pd
# setup
precision_report_files_for_one_sample = (
snakemake.input.precision_report_files_for_one_sample
)
output = Path(snakemake.output.precision_file_for_one_sample)
sample = snakemake.wildcards.sample
tool = snakemake.wildcards.tool
coverage = snakemake.wildcards.coverage
coverage_threshold = snakemake.wildcards.coverage_threshold
strand_bias_threshold = snakemake.wildcards.strand_bias_threshold
gaps_threshold = snakemake.wildcards.gaps_threshold
gt_conf_percentiles = [0]
# API usage
logging.info(f"Loading report")
precision_report = PrecisionReport.from_files(precision_report_files_for_one_sample)
logging.info(f"Creating calculator")
precision_calculator = PrecisionCalculator(precision_report)
logging.info(f"Calculating precision")
precision_df = precision_calculator.get_precision_report(gt_conf_percentiles)
metadata_df = pd.DataFrame(
data={
"sample": [sample] * len(precision_df),
"tool": [tool] * len(precision_df),
"coverage": [coverage] * len(precision_df),
"coverage_threshold": [coverage_threshold] * len(precision_df),
"strand_bias_threshold": [strand_bias_threshold] * len(precision_df),
"gaps_threshold": [gaps_threshold] * len(precision_df),
}
)
output_df = pd.concat([precision_df, metadata_df], axis=1)
# output
logging.info(f"Outputting precision file")
output_df.to_csv(output, sep="\t", index=False)
logging.info(f"Done")
|
[
"evaluate.report.PrecisionReport.from_files",
"evaluate.calculator.PrecisionCalculator"
] |
[((554, 606), 'pathlib.Path', 'Path', (['snakemake.output.precision_file_for_one_sample'], {}), '(snakemake.output.precision_file_for_one_sample)\n', (558, 606), False, 'from pathlib import Path\n'), ((932, 963), 'logging.info', 'logging.info', (['f"""Loading report"""'], {}), "(f'Loading report')\n", (944, 963), False, 'import logging\n'), ((983, 1048), 'evaluate.report.PrecisionReport.from_files', 'PrecisionReport.from_files', (['precision_report_files_for_one_sample'], {}), '(precision_report_files_for_one_sample)\n', (1009, 1048), False, 'from evaluate.report import PrecisionReport\n'), ((1050, 1086), 'logging.info', 'logging.info', (['f"""Creating calculator"""'], {}), "(f'Creating calculator')\n", (1062, 1086), False, 'import logging\n'), ((1110, 1147), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['precision_report'], {}), '(precision_report)\n', (1129, 1147), False, 'from evaluate.calculator import PrecisionCalculator, EmptyReportError\n'), ((1149, 1187), 'logging.info', 'logging.info', (['f"""Calculating precision"""'], {}), "(f'Calculating precision')\n", (1161, 1187), False, 'import logging\n'), ((1684, 1730), 'pandas.concat', 'pd.concat', (['[precision_df, metadata_df]'], {'axis': '(1)'}), '([precision_df, metadata_df], axis=1)\n', (1693, 1730), True, 'import pandas as pd\n'), ((1742, 1784), 'logging.info', 'logging.info', (['f"""Outputting precision file"""'], {}), "(f'Outputting precision file')\n", (1754, 1784), False, 'import logging\n'), ((1834, 1855), 'logging.info', 'logging.info', (['f"""Done"""'], {}), "(f'Done')\n", (1846, 1855), False, 'import logging\n'), ((56, 62), 'pathlib.Path', 'Path', ([], {}), '()\n', (60, 62), False, 'from pathlib import Path\n')]
|
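The `extract_api` field in each record above is a Python-literal string: a list of tuples pairing a character span in the code with the fully qualified API name, the normalized call text, its positional and keyword arguments, and the originating import statement. As a minimal illustrative sketch (not part of the dataset's own tooling; the `record` value below is a shortened stand-in for a real field), such a record can be decoded with the standard library:
import ast

# Shortened stand-in for one extract_api value; real fields contain many entries.
record = ("[((554, 606), 'pathlib.Path', 'Path', "
          "(['snakemake.output.precision_file_for_one_sample'], {}), "
          "'(snakemake.output.precision_file_for_one_sample)\\n', "
          "(558, 606), False, 'from pathlib import Path\\n')]")

# The field is a repr of plain Python literals, so ast.literal_eval decodes it safely.
for entry in ast.literal_eval(record):
    (start, end), dotted_name = entry[0], entry[1]
    print(f"{dotted_name} spans code[{start}:{end}]")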
#!/usr/bin/env python
import sys
import os
import config
import evaluate
import ml_parse
from pyspark.mllib.recommendation import MatrixFactorizationModel, ALS
def main():
import configspark
sc = configspark.SPARK_CONTEXT
print("\nLoading MovieLens test dataset\n")
test_text = sc.textFile(config.ML_RATINGS_TEST)
ratings_test = (
test_text.map(ml_parse.parse_line).map(ml_parse.rating_convert))
if os.path.exists(config.ML_MODEL):
print("\n\nLoading existing recommendation model from %s\n\n"
% config.ML_MODEL)
model = MatrixFactorizationModel.load(sc, config.ML_MODEL)
else:
raise RuntimeError("Failed to load ALS model from %s" % config.ML_MODEL)
mse, rmse = evaluate.evaluate_model(model, ratings_test)
print("\nML ALS model performance: MSE=%0.3f RMSE=%0.3f\n" % (mse, rmse))
if __name__ == "__main__":
main()
|
[
"evaluate.evaluate_model"
] |
[((437, 468), 'os.path.exists', 'os.path.exists', (['config.ML_MODEL'], {}), '(config.ML_MODEL)\n', (451, 468), False, 'import os\n'), ((748, 792), 'evaluate.evaluate_model', 'evaluate.evaluate_model', (['model', 'ratings_test'], {}), '(model, ratings_test)\n', (771, 792), False, 'import evaluate\n'), ((589, 639), 'pyspark.mllib.recommendation.MatrixFactorizationModel.load', 'MatrixFactorizationModel.load', (['sc', 'config.ML_MODEL'], {}), '(sc, config.ML_MODEL)\n', (618, 639), False, 'from pyspark.mllib.recommendation import MatrixFactorizationModel, ALS\n')]
|
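The `evaluate.evaluate_model` helper used in the record above is not included, so its implementation is unknown here. For a pyspark.mllib ALS model, though, MSE/RMSE against held-out ratings is conventionally computed by joining predictions to the truth on the (user, product) key; a hedged sketch of what such a function might look like (assumed, not the record's actual code):
import math

def evaluate_model(model, ratings):
    # Predict for every (user, product) pair that appears in the held-out ratings.
    pairs = ratings.map(lambda r: (r.user, r.product))
    predictions = model.predictAll(pairs).map(lambda r: ((r.user, r.product), r.rating))
    # Join the true ratings with the predicted ones and average the squared error.
    truth = ratings.map(lambda r: ((r.user, r.product), r.rating))
    mse = truth.join(predictions).map(lambda kv: (kv[1][0] - kv[1][1]) ** 2).mean()
    return mse, math.sqrt(mse)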
"""Train the model"""
import argparse
import logging
from tensorboardX import SummaryWriter
import os, shutil
import numpy as np
import pandas as pd
from sklearn.utils.class_weight import compute_class_weight
import torch
import torch.optim as optim
import torchvision.models as models
from torch.autograd import Variable
from tqdm import tqdm
# from torchsummary import summary
import utils
import json
import model.net as net
import model.data_loader as data_loader
from evaluate import evaluate, evaluate_predictions
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='data', help="Directory containing the dataset")
parser.add_argument('--model-dir', default='experiments', help="Directory containing params.json")
parser.add_argument('--setting-dir', default='settings', help="Directory with different settings")
parser.add_argument('--setting', default='collider-pf', help="Directory containing setting.json: experimental setting, data generation, regression model, etc.")
parser.add_argument('--fase', default='xybn', help='fase (phase) of the training model; see manuscript for details: x, y, xy, bn, or feature')
parser.add_argument('--experiment', default='', help="Manual name for experiment for logging, will be subdir of setting")
parser.add_argument('--restore-file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training") # 'best' or 'train'
parser.add_argument('--restore-last', action='store_true', help="continue the last run")
parser.add_argument('--restore-warm', action='store_true', help="continue on the run called 'warm-start.pth'")
parser.add_argument('--use-last', action="store_true", help="use last state dict instead of 'best' (use for early stopping manually)")
parser.add_argument('--cold-start', action='store_true', help="ignore previous state dicts (weights), even if they exist")
parser.add_argument('--warm-start', dest='cold_start', action='store_false', help="start from previous state dict")
parser.add_argument('--disable-cuda', action='store_true', help="Disable Cuda")
parser.add_argument('--no-parallel', action="store_false", help="no multiple GPU", dest="parallel")
parser.add_argument('--parallel', action="store_true", help="multiple GPU", dest="parallel")
parser.add_argument('--intercept', action="store_true", help="dummy run for getting intercept baseline results")
parser.add_argument('--visdom', action='store_true', help='generate plots with visdom')
parser.add_argument('--novisdom', dest='visdom', action='store_false', help='dont plot with visdom')
parser.add_argument('--monitor-grads', action='store_true', help='keep track of mean norm of gradients')
parser.set_defaults(parallel=False, cold_start=True, use_last=False, intercept=False, restore_last=False, save_preds=False,
monitor_grads=False, restore_warm=False, visdom=False)
def train(model, optimizer, loss_fn, dataloader, metrics, params, setting, writer=None, epoch=None,
mines=None, optims_mine=None):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
num_steps: (int) number of batches to train on, each of size params.batch_size
"""
global train_tensor_keys, logdir
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
    # create storage for tensors for OLS after minibatches
ts = []
Xs = []
Xtrues = []
Ys = []
Xhats = []
Yhats = []
Zhats = []
# Use tqdm for progress bar
with tqdm(total=len(dataloader)) as progress_bar:
for i, batch in enumerate(dataloader):
summary_batch = {}
# put batch on cuda
batch = {k: v.to(params.device) for k, v in batch.items()}
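            # t-suppression warm-up: until suppress_t_epochs have passed (or when
            # covariates are disabled), the treatment indicator t is zeroed out so
            # the network first learns image features without relying on t.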
if not (setting.covar_mode and epoch > params.suppress_t_epochs):
batch["t"] = torch.zeros_like(batch['t'])
Xs.append(batch['x'].detach().cpu())
Xtrues.append(batch['x_true'].detach().cpu())
# compute model output and loss
output_batch = model(batch['image'], batch['t'].view(-1,1), epoch)
Yhats.append(output_batch['y'].detach().cpu())
# calculate loss
if args.fase == "feature":
# calculate loss for z directly, to get clear how well this can be measured
loss_fn_z = torch.nn.MSELoss()
loss_z = loss_fn_z(output_batch["y"].squeeze(), batch["z"])
loss = loss_z
summary_batch["loss_z"] = loss_z.item()
else:
loss_fn_y = torch.nn.MSELoss()
loss_y = loss_fn_y(output_batch["y"].squeeze(), batch["y"])
loss = loss_y
summary_batch["loss_y"] = loss_y.item()
# calculate other losses based on estimation of x
if params.use_mi:
mi_losses = {}
# MINE mutual information calculate bottleneck loss
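                # Alternating optimization: each MI estimator network is first
                # updated for num_mi_steps to tighten its lower bound on
                # I(bottleneck; target); the resulting bound is then reused as a
                # differentiable loss term for the main model further below.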
for (mi_name, mi_estimator), mi_optim in zip(mines.items(), optims_mine.values()):
if 'monitor' in mi_name:
bottleneck_name = mi_name.split("_")[1]
target_name = mi_name.split("_")[2]
else:
bottleneck_name = mi_name.split("_")[0]
target_name = mi_name.split("_")[1]
mi_bn = output_batch[bottleneck_name]
if "bn" in target_name:
mi_target = output_batch[target_name]
else:
mi_target = batch[target_name].view(-1,1)
for _ in range(params.num_mi_steps):
# update the MI estimator network for n steps
mi_loss = mi_estimator.lower_bound(mi_bn.detach(), mi_target.detach())
mi_optim.zero_grad()
mi_loss.backward(retain_graph=True)
mi_optim.step()
# after updating mi network, calculate MI for downward loss
mi_loss = mi_estimator.lower_bound(mi_bn, mi_target)
mi_losses[mi_name] = mi_loss
# store mutual information
summary_batch["mi_" + mi_name] = -1*mi_loss.item()
# calculate spearman rho
if mi_bn.shape[1] == 1:
summary_batch[mi_name + "_rho"] = net.spearmanrho(mi_target.detach().cpu(), mi_bn.detach().cpu())
                # calculate loss for collider x
if params.loss_x_type == 'mi':
loss_x = mi_losses['bnx_x']
elif params.loss_x_type == 'least-squares':
# if not using mutual information to make bottleneck layer close to x, directly predict x with the CNN
loss_fn_x = torch.nn.MSELoss()
loss_x = loss_fn_x(output_batch["bnx"].squeeze(), batch["x"])
else:
raise NotImplementedError(f'x loss not implemented: {params.loss_x_type}, should be in mi, least-squares')
summary_batch["loss_x"] = loss_x.item()
if not params.alpha == 1:
# possibly weigh down contribution of estimating x
loss_x *= params.alpha
summary_batch["loss_x_weighted"] = loss_x.item()
# add x loss to total loss
loss += loss_x
# add least squares regression on final layer
if params.do_least_squares:
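                # Per-batch closed-form OLS (via net.cholesky_least_squares) on the
                # bottleneck activations: regressing y on [1, Z, t] tracks the
                # treatment coefficient b_t, while regressing xhat on [1, Z]
                # measures how much of xhat is linearly predictable from Z.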
X = batch["x"].view(-1,1)
t = batch["t"].view(-1,1)
Z = output_batch["bnz"]
if Z.ndimension() == 1:
Z.unsqueeze_(1)
Xhat = output_batch["bnx"]
# add intercept
Zi = torch.cat([torch.ones_like(t), Z], 1)
# add treatment info
Zt = torch.cat([Zi, t], 1)
Y = batch["y"].view(-1,1)
# regress y on final layer, without x
betas_y = net.cholesky_least_squares(Zt, Y, intercept=False)
y_hat = Zt.matmul(betas_y).view(-1,1)
mse_y = ((Y - y_hat)**2).mean()
summary_batch["regr_b_t"] = betas_y[-1].item()
summary_batch["regr_loss_y"] = mse_y.item()
# regress x on final layer without x
betas_x = net.cholesky_least_squares(Zi, Xhat, intercept=False)
x_hat = Zi.matmul(betas_x).view(-1,1)
mse_x = ((Xhat - x_hat)**2).mean()
# store all tensors for single pass after epoch
Xhats.append(Xhat.detach().cpu())
Zhats.append(Z.detach().cpu())
ts.append(t.detach().cpu())
Ys.append(Y.detach().cpu())
summary_batch["regr_loss_x"] = mse_x.item()
# add loss_bn only after n epochs
if params.bottleneck_loss and epoch > params.bn_loss_lag_epochs:
# only add to loss when bigger than margin
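                # With the "dynamic-mean" margin, the baseline is the MSE of simply
                # predicting the batch mean of x; the penalty only fires when the
                # regressor beats that baseline, i.e. when Z leaks information about x.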
if params.bn_loss_type == "regressor-least-squares":
if params.bn_loss_margin_type == "dynamic-mean":
# for each batch, calculate loss of just using mean for predicting x
mse_x_mean = ((X - X.mean())**2).mean()
loss_bn = torch.max(torch.zeros_like(mse_x), mse_x_mean - mse_x)
elif params.bn_loss_margin_type == "fixed":
mse_diff = params.bn_loss_margin - mse_x
loss_bn = torch.max(torch.zeros_like(mse_x), mse_diff)
else:
raise NotImplementedError(f'bottleneck loss margin type not implemented: {params.bn_loss_margin_type}')
elif params.bn_loss_type == 'mi':
loss_bn = -1*mi_losses[params.bn_loss_mi]
#loss_bn = torch.max(torch.ones_like(loss_bn)*params.bn_loss_margin, loss_bn)
else:
raise NotImplementedError(f'currently not implemented bottleneck loss type: {params.bn_loss_type}')
# possibly reweigh bottleneck loss and add to total loss
summary_batch["loss_bn"] = loss_bn.item()
# note is this double?
if loss_bn > params.bn_loss_margin:
loss_bn *= params.bottleneck_loss_wt
loss += loss_bn
# perform parameter update
optimizer.zero_grad()
loss.backward()
optimizer.step()
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# if necessary, write out tensors
if params.monitor_train_tensors and (epoch % params.save_summary_steps == 0):
tensors = {}
for tensor_key in train_tensor_keys:
if tensor_key in batch.keys():
                        tensors[tensor_key] = batch[tensor_key].cpu().squeeze().numpy()  # move off GPU before converting to numpy
elif tensor_key.endswith("hat"):
tensor_key = tensor_key.split("_")[0]
if tensor_key in output_batch.keys():
tensors[tensor_key+"_hat"] = output_batch[tensor_key].detach().cpu().squeeze().numpy()
else:
assert False, f"key not found: {tensor_key}"
# print(tensors)
df = pd.DataFrame.from_dict(tensors, orient='columns')
df["epoch"] = epoch
with open(os.path.join(logdir, 'train-tensors.csv'), 'a') as f:
df[["epoch"]+train_tensor_keys].to_csv(f, header=False)
# update the average loss
loss_avg.update(loss.item())
progress_bar.set_postfix(loss='{:05.3f}'.format(loss_avg()))
progress_bar.update()
# visualize gradients
if epoch % params.save_summary_steps == 0 and args.monitor_grads:
abs_gradients = {}
for name, param in model.named_parameters():
try: # patch here, there were names / params that were 'none'
abs_gradients[name] = np.abs(param.grad.cpu().numpy()).mean()
writer.add_histogram("grad-"+name, param.grad, epoch)
writer.add_scalars("mean-abs-gradients", abs_gradients, epoch)
except:
pass
if params.use_mi:
for mi, mine in mines.items():
for name, param in mine.named_parameters():
writer.add_histogram("grad-mine-"+mi+"-"+name, param.grad, epoch)
# compute mean of all metrics in summary
metrics_mean = {metric:np.nanmean([x[metric] for x in summ]) for metric in summ[0]}
# collect tensors
Xhat = torch.cat(Xhats,0).view(-1,1)
Yhat = torch.cat(Yhats,0).view(-1,1)
Zhat = torch.cat(Zhats,0)
t = torch.cat(ts,0)
X = torch.cat(Xs,0)
    Xtrue = torch.cat(Xtrues,0)
Y = torch.cat(Ys,0)
if params.do_least_squares:
# after the minibatches, do a single OLS on the whole data
Zi = torch.cat([torch.ones_like(t), Zhat], 1)
# add treatment info
Zt = torch.cat([Zi, t], 1)
# add x for biased version
XZt = torch.cat([torch.ones_like(t), Xhat, Zhat, t], 1)
betas_y_bias = net.cholesky_least_squares(XZt, Y, intercept=False)
betas_y_causal = net.cholesky_least_squares(Zt, Y, intercept=False)
model.betas_bias = betas_y_bias
model.betas_causal = betas_y_causal
metrics_mean["regr_bias_coef_t"] = betas_y_bias.squeeze()[-1]
metrics_mean["regr_bias_coef_z"] = betas_y_bias.squeeze()[-2]
metrics_mean["regr_causal_coef_t"] = betas_y_causal.squeeze()[-1]
metrics_mean["regr_causal_coef_z"] = betas_y_causal.squeeze()[-2]
# create some plots
xx_scatter = net.make_scatter_plot(X.numpy(), Xhat.numpy(), xlabel='x', ylabel='xhat')
xtruex_scatter= net.make_scatter_plot(Xtrue.numpy(), Xhat.numpy(), xlabel='xtrue', ylabel='xhat')
xyhat_scatter = net.make_scatter_plot(X.numpy(), Yhat.numpy(), c=t.numpy(), xlabel='x', ylabel='yhat')
yy_scatter = net.make_scatter_plot(Y.numpy(), Yhat.numpy(), c=t.numpy(), xlabel='y', ylabel='yhat')
writer.add_figure('x-xhat/train', xx_scatter, epoch+1)
writer.add_figure('xtrue-xhat/train', xtruex_scatter, epoch+1)
writer.add_figure('x-yhat/train', xyhat_scatter, epoch+1)
writer.add_figure('y-yhat/train', yy_scatter, epoch+1)
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
return metrics_mean
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer, loss_fn, metrics, params, setting, args,
writer=None, logdir=None, restore_file=None,
mines=None, optims_mine=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
val_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches validation data
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
        metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
        covar_mode: (bool) whether the data-loader returns covariates / additional data
"""
if params.use_mi:
# for (mi_name, mi_estimator), mi_optim in zip(mines.items(), optims_mine.values()):
# print(mi_name)
# print(mi_estimator)
# print(mi_optim)
train_mines = mines
train_optims_mine = optims_mine
val_mines = train_mines
val_optims_mine = None
# train_mines = {k: v for k, v in mines.items() if k in ['bnx_x', 'bnz_x']}
# train_optims_mine = {k: v for k, v in optims_mine.items() if k in ['bnx_x', 'bnz_x']}
#val_mines = {k: v for k, v in mines.items() if not (k in ['bnx_x', 'bnz_x'])}
#val_optims_mine = {k: v for k, v in optims_mine.items() if not (k in ['bnx_x', 'bnz_x'])}
# if params.bn_loss_type == 'mi':
# train_mines['bnz_x'] = mines['bnz_x']
# train_optims_mine['bnz_x'] = mines['bnz_x']
else:
train_mines = train_optims_mine = val_mines = val_optims_mine = None
# setup directories for data
setting_home = setting.home
if not args.fase == "feature":
data_dir = os.path.join(setting_home, "data")
else:
if setting.mode3d:
data_dir = "data"
else:
data_dir = "slices"
covar_mode = setting.covar_mode
x_frozen = False
best_val_metric = 0.0
if "loss" in setting.metrics[0]:
best_val_metric = 1.0e6
val_preds = np.zeros((len(val_dataloader.dataset), params.num_epochs))
for epoch in range(params.num_epochs):
# Run one epoch
logging.info(f"Epoch {epoch+1}/{params.num_epochs}; setting: {args.setting}, fase {args.fase}, experiment: {args.experiment}")
# stop gradients for freezing x part of model
if (params.freeze_x and (epoch > params.bn_loss_lag_epochs)) and (not x_frozen):
x_frozen = True # flag for not doing this twice
params.alpha = 0.0 # no more loss to x
print(f"freezing layers before x")
pre_x_layers = ["encoder", "fcs", "fcx", "fcx2", "regressor_x"]
if params.bn_place == "single-regressor":
# freeze everything except for last regressor
pre_x_layers.append("fcy")
keep_grad_layers = []
for name, param in model.named_parameters():
for x_layer in pre_x_layers:
if x_layer in name:
print(f"turning off gradient for {name}")
param.requires_grad_(False)
for name, param in model.named_parameters():
if param.requires_grad:
print(f"keeping gradient for {name}")
# compute number of batches in one epoch (one full pass over the training set)
train_metrics = train(model, optimizer, loss_fn, train_dataloader, metrics, params, setting, writer, epoch,
mines=train_mines, optims_mine=train_optims_mine)
for metric_name in train_metrics.keys():
metric_vals = {'train': train_metrics[metric_name]}
writer.add_scalars(metric_name, metric_vals, epoch+1)
# for name, param in model.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch+1)
if epoch % params.save_summary_steps == 0:
# Evaluate for one epoch on validation set
valid_metrics, outtensors = evaluate(model, loss_fn, val_dataloader, metrics, params, setting, epoch, writer,
mines=val_mines, optims_mine=val_optims_mine)
valid_metrics["intercept"] = model.regressor.fc.bias.detach().cpu().numpy()
for name, module in model.regressor.named_children():
if name == "t":
valid_metrics["b_t"] = module.weight.detach().cpu().numpy()
elif name == "zt":
weights = module.weight.detach().cpu().squeeze().numpy().reshape(-1)
for i, weight in enumerate(weights):
valid_metrics["b_zt"+str(i)] = weight
else:
pass
for metric_name in valid_metrics.keys():
metric_vals = {'valid': valid_metrics[metric_name]}
writer.add_scalars(metric_name, metric_vals, epoch+1)
# create plots
val_df = val_dataloader.dataset.df
xx_scatter = net.make_scatter_plot(val_df.x.values, outtensors['xhat'], xlabel='x', ylabel='xhat')
xtruex_scatter= net.make_scatter_plot(val_df.x_true.values, outtensors['xhat'], xlabel='x', ylabel='xhat')
xyhat_scatter = net.make_scatter_plot(val_df.x.values, outtensors['predictions'], c=val_df.t, xlabel='x', ylabel='yhat')
zyhat_scatter = net.make_scatter_plot(val_df.z.values, outtensors['predictions'], c=val_df.t, xlabel='z', ylabel='yhat')
yy_scatter = net.make_scatter_plot(val_df.y.values, outtensors['predictions'], c=val_df.t, xlabel='yhat', ylabel='y')
writer.add_figure('x-xhat/valid', xx_scatter, epoch+1)
writer.add_figure('xtrue-xhat/valid', xtruex_scatter, epoch+1)
writer.add_figure('x-yhat/valid', xyhat_scatter, epoch+1)
writer.add_figure('z-yhat/valid', zyhat_scatter, epoch+1)
writer.add_figure('y-yhat/valid', yy_scatter, epoch+1)
if args.visdom and ("b_t" in valid_metrics.keys()) and ("loss_y" in valid_metrics.keys()):
# visualize loss vs ate
try:
ate_loss_plt
except NameError:
ate_loss_plt = viz.line(
X = valid_metrics["b_t"].reshape(1,1),
Y = valid_metrics["loss_y"].reshape(1,1),
opts=dict(
# legend=logdir,
xtickmin=-0.5,
xtickmax=1.5,
xtickstep=0.5,
ytickmin=0.0,
ytickmax=3.0,
ytickstep=0.5,
title="b_t vs loss",
xlabel="b_t", ylabel="loss_y"
# linecolor=np.array([255, 0, 0]).reshape(1,3)
),
name=args.experiment
)
else:
viz.line(
X = valid_metrics["b_t"].reshape(1,1),
Y = valid_metrics["loss_y"].reshape(1,1),
win=ate_loss_plt,
# opts=dict(
# linecolor=np.array([255, 0, 0]).reshape(1,3) + np.array([0, 255, 0]).reshape(1,3) * epoch>params.bn_loss_lag_epochs
# ),
name=args.experiment,
update='append'
)
# writer.add_scalars('valid', valid_metrics, epoch+1)
# writer.add_scalars('train', train_metrics, epoch+1)
# metric_dict = {m: {'train': train_metrics[m], 'valid': valid_metrics[m]} for m in train_metrics.keys()}
if params.save_preds:
# writer.add_histogram("predictions", preds)
                preds = np.squeeze(outtensors['predictions'])
                if setting.num_classes == 1:
                    val_preds[:, epoch] = preds
# write preds to file
pred_fname = os.path.join(setting.home, setting.fase+"-fase", "preds_val.csv")
with open(pred_fname, 'ab') as f:
np.savetxt(f, preds.T, newline="")
np.save(os.path.join(setting.home, setting.fase+"-fase", "preds.npy"), preds)
#if params.multi_task and params.use_mi:
# val_metric = valid_metrics["bnx_x"]
else:
val_metric = valid_metrics[setting.metrics[0]]
# val_acc = valid_metrics['accuracy']
if "loss" in str(setting.metrics[0]):
is_best = val_metric<=best_val_metric
# print("new best, old: {:.3f}, new: {:.3f}".format(best_val_metric, val_metric))
else:
is_best = val_metric>=best_val_metric
# Save weights
state_dict = model.state_dict()
optim_dict = optimizer.state_dict()
# exclude weights from layers that get changed
# exclude_layers = ["bottleneck", "bnbn", "fc2"]
# state_dict = {s: state_dict[s] for s in state_dict.keys() if s.split(".")[0] not in exclude_layers}
# optim_dict = {s: optim_dict[s] for s in optim_dict.keys() if s.split(".")[0] not in exclude_layers}
state = {
'epoch': epoch+1,
'state_dict': state_dict,
'optim_dict': optim_dict
}
if params.use_mi:
for mi_name, mine in mines.items():
state[mi_name] = mine.state_dict()
state[mi_name+"_optim"] = optims_mine[mi_name].state_dict()
utils.save_checkpoint(state,
is_best=is_best,
checkpoint=logdir)
# If best_eval, best_save_path
valid_metrics["epoch"] = epoch
if is_best:
logging.info("- Found new best {}: {:.3f}".format(setting.metrics[0], val_metric))
best_val_metric = val_metric
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(logdir, "metrics_val_best_weights.json")
utils.save_dict_to_json(valid_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(logdir, "metrics_val_last_weights.json")
utils.save_dict_to_json(valid_metrics, last_json_path)
# final evaluation
writer.export_scalars_to_json(os.path.join(logdir, "all_scalars.json"))
if args.save_preds:
np.save(os.path.join(setting.home, setting.fase + "-fase", "val_preds.npy"), val_preds)
# if setting.covar_mode or setting.num_classes == 1:
# evaluate_predictions(os.path.join(setting.home, "data"),
# os.path.join(setting.home, setting.fase + "-fase"))
if __name__ == '__main__':
# Load the parameters from json file
args = parser.parse_args()
# Load information from last setting if none provided:
last_defaults = utils.Params("last-defaults.json")
if args.setting == "":
print("using last default setting")
args.setting = last_defaults.dict["setting"]
for param, value in last_defaults.dict.items():
print("{}: {}".format(param, value))
else:
with open("last-defaults.json", "r+") as jsonFile:
defaults = json.load(jsonFile)
tmp = defaults["setting"]
defaults["setting"] = args.setting
jsonFile.seek(0) # rewind
json.dump(defaults, jsonFile)
jsonFile.truncate()
# setup visdom environment
if args.visdom:
from visdom import Visdom
viz = Visdom(env=f"lidcr_{args.setting}_{args.fase}_{args.experiment}")
# load setting (data generation, regression model etc)
setting_home = os.path.join(args.setting_dir, args.setting)
setting = utils.Params(os.path.join(setting_home, "setting.json"))
setting.home = setting_home
# when not specified in call, grab model specification from setting file
if setting.cnn_model == "":
json_path = os.path.join(args.model_dir, "t-suppression", args.experiment+".json")
else:
json_path = os.path.join(args.model_dir, setting.cnn_model, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
if not os.path.exists(os.path.join(setting.home, args.fase + "-fase")):
os.makedirs(os.path.join(setting.home, args.fase + "-fase"))
shutil.copy(json_path, os.path.join(setting_home, args.fase + "-fase", "params.json"))
params = utils.Params(json_path)
# covar_mode = setting.covar_mode
# mode3d = setting.mode3d
parallel = args.parallel
params.device = None
if not args.disable_cuda and torch.cuda.is_available():
params.device = torch.device('cuda')
params.cuda = True
# switch gpus for better use when running multiple experiments
if not args.parallel:
if os.path.exists("last-cuda-0.flag"):
torch.cuda.set_device(1)
shutil.move("last-cuda-0.flag", "last-cuda-1.flag")
else:
torch.cuda.set_device(0)
shutil.move("last-cuda-1.flag", "last-cuda-0.flag")
# last_defaults = utils.Params("last-defaults.json")
# last_cuda_id = last_defaults.cuda_id
# new_cuda_id = int((last_cuda_id + 1) % 2)
# torch.cuda.set_device(new_cuda_id)
# new_defaults = last_defaults
# new_defaults.cuda_id = new_cuda_id
# utils.save_dict_to_json(new_defaults, "last-defaults.json")
else:
        params.device = torch.device('cpu')
        params.cuda = False  # keep params.cuda defined on the CPU path
# adapt fase
setting.fase = args.fase
if args.fase == "x":
# setting.outcome = ["x"]
setting.num_classes = 1
setting.counterfactuals = False
setting.covar_mode = False
# setting.metrics = setting.metrics + ["total_loss"]
# setting.metrics = [x for x in setting.metrics if x != "total_loss"]
params.bottleneck_loss = True
params.bn_loss_lag_epochs = params.num_epochs
# setting.metrics = setting.metrics + ["bottleneck_loss"]
elif args.fase == "y":
pass
# setting.outcome = ["y"]
# setting.covar_mode = False
# setting.metrics = setting.metrics + ["bottleneck_loss"]
elif args.fase == "yt":
# setting.metrics = setting.metrics + ["bottleneck_loss"]
setting.covar_mode = True
# setting.outcome = ["y"]
# params.suppress_t_epochs = 0
# if args.cold_start:
# params.suppress_t_epochs = 90
# params.num_epochs = 180
elif args.fase == "xy":
# setting.outcome = ["x", "y"]
setting.covar_mode = True
# setting.metrics = setting.metrics + ["bottleneck_loss"]
setting.metrics = [x for x in setting.metrics if x!="total_loss"]
# params.num_epochs = 20
# params.bottleneck_loss = False
# params.suppress_t_epochs = 0
elif args.fase == "xybn":
# setting.outcome = ["x", "y"]
# setting.covar_mode = True
# setting.metrics = setting.metrics + ["bottleneck_loss", "ate"]
# setting.metrics = [x for x in setting.metrics if x!="total_loss"]
# params.suppress_t_epochs = 10
params.bottleneck_loss = True
elif args.fase == "bn":
setting.outcome = ["x", "y"]
setting.covar_mode = True
params.bottleneck_loss = True
# setting.metrics = setting.metrics + ["bottleneck_loss"]
elif args.fase == "z":
setting.outcome = ["z"]
setting.metrics = pd.Series(setting.metrics).drop_duplicates().tolist()
print("metrics {}:".format(setting.metrics))
# Set the random seed for reproducible experiments
torch.manual_seed(230)
if params.cuda: torch.cuda.manual_seed(230)
# Set the logger
logdir=os.path.join(setting_home, setting.fase+"-fase", "runs")
if not args.experiment == '':
logdir=os.path.join(logdir, args.experiment)
if not os.path.isdir(logdir):
os.makedirs(logdir)
# copy params as backupt to logdir
shutil.copy(json_path, os.path.join(logdir, "params.json"))
# utils.set_logger(os.path.join(args.model_dir, 'train.log'))
utils.set_logger(os.path.join(logdir, 'train.log'))
# Create the input data pipeline
logging.info("Loading the datasets...")
# fetch dataloaders
# dataloaders = data_loader.fetch_dataloader(['train', 'val'], args.data_dir, params)
# dataloaders = data_loader.fetch_dataloader(['train', 'val'], "data", params)
# dataloaders = data_loader.fetch_dataloader(types = ["train", "valid"])
dataloaders = data_loader.fetch_dataloader(args, params, setting, ["train", "valid"])
train_dl = dataloaders['train']
valid_dl = dataloaders['valid']
if setting.num_classes > 1 and params.balance_classes:
train_labels = train_dl.dataset.df[setting.outcome[0]].values
class_weights = compute_class_weight('balanced', np.unique(train_labels), train_labels)
# valid_dl = train_dl
logging.info("- done.")
# print("allocated mem after dataloaders %s" % (torch.cuda.max_memory_allocated(0)))
# Define the model and optimizer
# if covar_mode:
# model = net.TNet(params).cuda() if params.cuda else net.TNet(params)
# else:
# model = net.Net(params).to(params.device, non_blocking=True)
if args.intercept:
assert len(setting.outcome) == 1, "Multiple outcomes not implemented for intercept yet"
print("running intercept mode")
mu = valid_dl.dataset.df[setting.outcome].values.mean()
def new_forward(self, x, data, mu=mu):
intercept = torch.autograd.Variable(mu * torch.ones((x.shape[0],1)), requires_grad=False).to(params.device, non_blocking=True)
bn_activations = torch.autograd.Variable(torch.zeros((x.shape[0],)), requires_grad=False).to(params.device, non_blocking=True)
return {setting.outcome[0]: intercept, "bn": bn_activations}
net.Net3D.forward = new_forward
params.num_epochs = 1
setting.metrics = []
logdir = os.path.join(logdir, "intercept")
if setting.mode3d:
model = net.Net3D(params, setting).to(params.device)
else:
model = net.CausalNet(params, setting).to(params.device)
optimizers = {'sgd': optim.SGD, 'adam': optim.Adam}
if params.use_mi:
if params.mi_estimator == 'mine':
MI_ESTIMATOR = net.MINE
elif params.mi_estimator == 'jsd':
MI_ESTIMATOR = net.JSD
else:
raise NotImplementedError(f'MI estimator not implemented: {params.mi_estimator}, should be mine or jsd')
# prepare mutual information estimators
mines = dict(
# these are part of the loss function
bnx_x = MI_ESTIMATOR(params.regressor_x_dim+1).to(params.device), # MI from activation(s) that should represent x to real x
bnz_bnx = MI_ESTIMATOR(params.regressor_x_dim+params.regressor_z_dim).to(params.device),
monitor_bnx_x = MI_ESTIMATOR(params.regressor_x_dim+1).to(params.device), # MI from activation(s) that should represent x to real x
monitor_bnz_bnx = MI_ESTIMATOR(params.regressor_x_dim+params.regressor_z_dim).to(params.device),
# add more for monitoring purposes
bnz_x = MI_ESTIMATOR(params.regressor_z_dim+1).to(params.device), # MI from other activations to measured x
bnz_z = MI_ESTIMATOR(params.regressor_z_dim+1).to(params.device) # MI from other activations to true z
)
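        # Note: the input sizes above assume each estimator concatenates its two
        # arguments, so its input dimension is the sum of the two variables' dims.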
# each MI estimator gets their own optimizer
optims_mine = {}
for mi_name, mi_estimator in mines.items():
optims_mine[mi_name] = optim.Adam(mi_estimator.parameters(), lr=params.mi_lr)
else:
mines = optims_mine = None
if parallel:
print("parallel mode")
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
if params.momentum > 0:
optimizer = optimizers[params.optimizer](model.parameters(), lr=params.learning_rate, weight_decay=params.wd, momentum=params.momentum)
else:
optimizer = optimizers[params.optimizer](model.parameters(), lr=params.learning_rate, weight_decay=params.wd)
# if params.use_mi:
# optimizer.add_param_group({'params': mine.parameters()})
if setting.covar_mode and params.lr_t_factor != 1:
optimizer = net.speedup_t(model, params)
if args.restore_last and (not args.cold_start):
print("Loading state dict from last running setting")
utils.load_checkpoint(os.path.join(setting.home, args.fase + "-fase", "last.pth.tar"), model, strict=False)
elif args.restore_warm:
utils.load_checkpoint(os.path.join(setting.home, 'warm-start.pth.tar'), model, strict=False)
else:
if not setting.fase in ["x", "feature"]:
if args.cold_start:
print("warning, not loading weights from previous fase, since cold-start = True")
else:
if (args.restore_file is not None) and (args.fase == "xybn"):
restore_path = os.path.join(setting.home, "xy-fase", args.restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
if params.use_mi:
# utils.load_checkpoint(restore_path, model, mines=mines, optims_mine=optims_mine)
utils.load_checkpoint(restore_path, model, mines=mines)
else:
utils.load_checkpoint(restore_path, model)
else:
if args.use_last:
state_dict_type = "last"
else:
state_dict_type = "best"
if setting.fase == "y":
print("loading state-dict from x-fase")
utils.load_checkpoint(os.path.join(setting.home, "x-fase", state_dict_type+".pth.tar"), model, strict=False)
elif setting.fase == "yt":
# print("loading state-dict from y-fase")
# utils.load_checkpoint(os.path.join(setting.home, "y-fase", state_dict_type+".pth.tar"), model, strict=False)
print("loading state-dict from y-fase")
utils.load_checkpoint(os.path.join(setting.home, "y-fase", state_dict_type+".pth.tar"), model, strict=False)
elif setting.fase == "xy":
print("loading state-dict from yt-fase")
utils.load_checkpoint(os.path.join(setting.home, "yt-fase", state_dict_type+".pth.tar"), model, strict=False)
elif setting.fase == "xybn":
print("loading state-dict from xy-fase: last")
# utils.load_checkpoint(os.path.join(setting.home, "xy-fase", "last.pth.tar"), model, strict=False)
utils.load_checkpoint(os.path.join(setting.home, "xy-fase", state_dict_type+".pth.tar"), model, mines=mines, optims_mine = optims_mine, strict=False)
# utils.load_checkpoint(os.path.join(setting.home, "xybn-fase", "last.pth.tar"), model, strict=False)
# optimizer = net.softfreeze_conv_layers(model, params)
elif setting.fase == "bn":
print("loading state-dict from xy-fase")
utils.load_checkpoint(os.path.join(setting.home, "xy-fase", state_dict_type+".pth.tar"), model, strict=False)
# optimizer = net.softfreeze_conv_layers(model, params)
else:
assert False, "invalid fase: {}, should be in {x, y, xy, bn}".format(setting.fase)
# if setting.fase == "bn":
# net.freeze_conv_layers(model)
# fetch loss function and metrics
if setting.num_classes > 1 and params.balance_classes:
loss_fn = net.get_loss_fn(setting, weights=class_weights)
else:
loss_fn = net.get_loss_fn(setting)
# metrics = {metric:net.all_metrics[metric] for metric in setting.metrics}
metrics = None
if params.monitor_train_tensors:
print(f"Recording all train tensors")
import csv
train_tensor_keys = ['t','x', 'z', 'y', 'x_hat', 'z_hat', 'y_hat']
with open(os.path.join(logdir, 'train-tensors.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(['epoch']+train_tensor_keys)
# Train the model
# print(model)
# print(summary(model, (3, 224, 224), batch_size=1))
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
for split, dl in dataloaders.items():
logging.info("Number of %s samples: %s" % (split, str(len(dl.dataset))))
# logging.info("Number of valid examples: {}".format(len(valid.dataset)))
with SummaryWriter(logdir) as writer:
# train(model, optimizer, loss_fn, train_dl, metrics, params)
train_and_evaluate(model, train_dl, valid_dl, optimizer, loss_fn, metrics, params, setting, args,
writer, logdir, args.restore_file,
mines, optims_mine)
|
[
"evaluate.evaluate"
] |
[((533, 558), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (556, 558), False, 'import argparse\n'), ((3886, 3908), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (3906, 3908), False, 'import utils\n'), ((13597, 13616), 'torch.cat', 'torch.cat', (['Zhats', '(0)'], {}), '(Zhats, 0)\n', (13606, 13616), False, 'import torch\n'), ((13627, 13643), 'torch.cat', 'torch.cat', (['ts', '(0)'], {}), '(ts, 0)\n', (13636, 13643), False, 'import torch\n'), ((13654, 13670), 'torch.cat', 'torch.cat', (['Xs', '(0)'], {}), '(Xs, 0)\n', (13663, 13670), False, 'import torch\n'), ((13681, 13701), 'torch.cat', 'torch.cat', (['Xtrues', '(0)'], {}), '(Xtrues, 0)\n', (13690, 13701), False, 'import torch\n'), ((13712, 13728), 'torch.cat', 'torch.cat', (['Ys', '(0)'], {}), '(Ys, 0)\n', (13721, 13728), False, 'import torch\n'), ((15381, 15431), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (15393, 15431), False, 'import logging\n'), ((27299, 27333), 'utils.Params', 'utils.Params', (['"""last-defaults.json"""'], {}), "('last-defaults.json')\n", (27311, 27333), False, 'import utils\n'), ((28118, 28162), 'os.path.join', 'os.path.join', (['args.setting_dir', 'args.setting'], {}), '(args.setting_dir, args.setting)\n', (28130, 28162), False, 'import os, shutil\n'), ((28571, 28596), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (28585, 28596), False, 'import os, shutil\n'), ((28906, 28929), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (28918, 28929), False, 'import utils\n'), ((32155, 32177), 'torch.manual_seed', 'torch.manual_seed', (['(230)'], {}), '(230)\n', (32172, 32177), False, 'import torch\n'), ((32259, 32317), 'os.path.join', 'os.path.join', (['setting_home', "(setting.fase + '-fase')", '"""runs"""'], {}), "(setting_home, setting.fase + '-fase', 'runs')\n", (32271, 32317), False, 'import os, shutil\n'), ((32734, 32773), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (32746, 32773), False, 'import logging\n'), ((33067, 33138), 'model.data_loader.fetch_dataloader', 'data_loader.fetch_dataloader', (['args', 'params', 'setting', "['train', 'valid']"], {}), "(args, params, setting, ['train', 'valid'])\n", (33095, 33138), True, 'import model.data_loader as data_loader\n'), ((33468, 33491), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (33480, 33491), False, 'import logging\n'), ((13416, 13453), 'numpy.nanmean', 'np.nanmean', (['[x[metric] for x in summ]'], {}), '([x[metric] for x in summ])\n', (13426, 13453), True, 'import numpy as np\n'), ((13928, 13949), 'torch.cat', 'torch.cat', (['[Zi, t]', '(1)'], {}), '([Zi, t], 1)\n', (13937, 13949), False, 'import torch\n'), ((14079, 14130), 'model.net.cholesky_least_squares', 'net.cholesky_least_squares', (['XZt', 'Y'], {'intercept': '(False)'}), '(XZt, Y, intercept=False)\n', (14105, 14130), True, 'import model.net as net\n'), ((14160, 14210), 'model.net.cholesky_least_squares', 'net.cholesky_least_squares', (['Zt', 'Y'], {'intercept': '(False)'}), '(Zt, Y, intercept=False)\n', (14186, 14210), True, 'import model.net as net\n'), ((17720, 17754), 'os.path.join', 'os.path.join', (['setting_home', '"""data"""'], {}), "(setting_home, 'data')\n", (17732, 17754), False, 'import os, shutil\n'), ((18176, 18314), 'logging.info', 'logging.info', (['f"""Epoch {epoch + 1}/{params.num_epochs}; setting: {args.setting}, fase {args.fase}, experiment: {args.experiment}"""'], {}), "(\n f'Epoch {epoch + 1}/{params.num_epochs}; setting: {args.setting}, fase {args.fase}, experiment: {args.experiment}'\n )\n", (18188, 18314), False, 'import logging\n'), ((26745, 26785), 'os.path.join', 'os.path.join', (['logdir', '"""all_scalars.json"""'], {}), "(logdir, 'all_scalars.json')\n", (26757, 26785), False, 'import os, shutil\n'), ((27973, 28038), 'visdom.Visdom', 'Visdom', ([], {'env': 'f"""lidcr_{args.setting}_{args.fase}_{args.experiment}"""'}), "(env=f'lidcr_{args.setting}_{args.fase}_{args.experiment}')\n", (27979, 28038), False, 'from visdom import Visdom\n'), ((28190, 28232), 'os.path.join', 'os.path.join', (['setting_home', '"""setting.json"""'], {}), "(setting_home, 'setting.json')\n", (28202, 28232), False, 'import os, shutil\n'), ((28396, 28468), 'os.path.join', 'os.path.join', (['args.model_dir', '"""t-suppression"""', "(args.experiment + '.json')"], {}), "(args.model_dir, 't-suppression', args.experiment + '.json')\n", (28408, 28468), False, 'import os, shutil\n'), ((28497, 28559), 'os.path.join', 'os.path.join', (['args.model_dir', 'setting.cnn_model', '"""params.json"""'], {}), "(args.model_dir, setting.cnn_model, 'params.json')\n", (28509, 28559), False, 'import os, shutil\n'), ((28829, 28891), 'os.path.join', 'os.path.join', (['setting_home', "(args.fase + '-fase')", '"""params.json"""'], {}), "(setting_home, args.fase + '-fase', 'params.json')\n", (28841, 28891), False, 'import os, shutil\n'), ((29086, 29111), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (29109, 29111), False, 'import torch\n'), ((29137, 29157), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (29149, 29157), False, 'import torch\n'), ((30002, 30021), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (30014, 30021), False, 'import torch\n'), ((32198, 32225), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(230)'], {}), '(230)\n', (32220, 32225), False, 'import torch\n'), ((32365, 32402), 'os.path.join', 'os.path.join', (['logdir', 'args.experiment'], {}), '(logdir, args.experiment)\n', (32377, 32402), False, 'import os, shutil\n'), ((32414, 32435), 'os.path.isdir', 'os.path.isdir', (['logdir'], {}), '(logdir)\n', (32427, 32435), False, 'import os, shutil\n'), ((32445, 32464), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (32456, 32464), False, 'import os, shutil\n'), ((32532, 32567), 'os.path.join', 'os.path.join', (['logdir', '"""params.json"""'], {}), "(logdir, 'params.json')\n", (32544, 32567), False, 'import os, shutil\n'), ((32657, 32690), 'os.path.join', 'os.path.join', (['logdir', '"""train.log"""'], {}), "(logdir, 'train.log')\n", (32669, 32690), False, 'import os, shutil\n'), ((34538, 34571), 'os.path.join', 'os.path.join', (['logdir', '"""intercept"""'], {}), "(logdir, 'intercept')\n", (34550, 34571), False, 'import os, shutil\n'), ((36897, 36925), 'model.net.speedup_t', 'net.speedup_t', (['model', 'params'], {}), '(model, params)\n', (36910, 36925), True, 'import model.net as net\n'), ((40472, 40519), 'model.net.get_loss_fn', 'net.get_loss_fn', (['setting'], {'weights': 'class_weights'}), '(setting, weights=class_weights)\n', (40487, 40519), True, 'import model.net as net\n'), ((40548, 40572), 'model.net.get_loss_fn', 'net.get_loss_fn', (['setting'], {}), '(setting)\n', (40563, 40572), True, 'import model.net as net\n'), ((41415, 41436), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['logdir'], {}), '(logdir)\n', (41428, 41436), False, 'from tensorboardX import SummaryWriter\n'), ((13515, 13534), 'torch.cat', 'torch.cat', (['Xhats', '(0)'], {}), '(Xhats, 0)\n', (13524, 13534), False, 'import torch\n'), ((13556, 13575), 'torch.cat', 'torch.cat', (['Yhats', '(0)'], {}), '(Yhats, 0)\n', (13565, 13575), False, 'import torch\n'), ((20054, 20185), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'val_dataloader', 'metrics', 'params', 'setting', 'epoch', 'writer'], {'mines': 'val_mines', 'optims_mine': 'val_optims_mine'}), '(model, loss_fn, val_dataloader, metrics, params, setting, epoch,\n writer, mines=val_mines, optims_mine=val_optims_mine)\n', (20062, 20185), False, 'from evaluate import evaluate, evaluate_predictions\n'), ((21103, 21192), 'model.net.make_scatter_plot', 'net.make_scatter_plot', (['val_df.x.values', "outtensors['xhat']"], {'xlabel': '"""x"""', 'ylabel': '"""xhat"""'}), "(val_df.x.values, outtensors['xhat'], xlabel='x',\n ylabel='xhat')\n", (21124, 21192), True, 'import model.net as net\n'), ((21218, 21312), 'model.net.make_scatter_plot', 'net.make_scatter_plot', (['val_df.x_true.values', "outtensors['xhat']"], {'xlabel': '"""x"""', 'ylabel': '"""xhat"""'}), "(val_df.x_true.values, outtensors['xhat'], xlabel='x',\n ylabel='xhat')\n", (21239, 21312), True, 'import model.net as net\n'), ((21338, 21447), 'model.net.make_scatter_plot', 'net.make_scatter_plot', (['val_df.x.values', "outtensors['predictions']"], {'c': 'val_df.t', 'xlabel': '"""x"""', 'ylabel': '"""yhat"""'}), "(val_df.x.values, outtensors['predictions'], c=val_df.\n t, xlabel='x', ylabel='yhat')\n", (21359, 21447), True, 'import model.net as net\n'), ((21471, 21580), 'model.net.make_scatter_plot', 'net.make_scatter_plot', (['val_df.z.values', "outtensors['predictions']"], {'c': 'val_df.t', 'xlabel': '"""z"""', 'ylabel': '"""yhat"""'}), "(val_df.z.values, outtensors['predictions'], c=val_df.\n t, xlabel='z', ylabel='yhat')\n", (21492, 21580), True, 'import model.net as net\n'), ((21604, 21713), 'model.net.make_scatter_plot', 'net.make_scatter_plot', (['val_df.y.values', "outtensors['predictions']"], {'c': 'val_df.t', 'xlabel': '"""yhat"""', 'ylabel': '"""y"""'}), "(val_df.y.values, outtensors['predictions'], c=val_df.\n t, xlabel='yhat', ylabel='y')\n", (21625, 21713), True, 'import model.net as net\n'), ((25835, 25899), 'utils.save_checkpoint', 'utils.save_checkpoint', (['state'], {'is_best': 'is_best', 'checkpoint': 'logdir'}), '(state, is_best=is_best, checkpoint=logdir)\n', (25856, 25899), False, 'import utils\n'), ((26562, 26615), 'os.path.join', 'os.path.join', (['logdir', '"""metrics_val_last_weights.json"""'], {}), "(logdir, 'metrics_val_last_weights.json')\n", (26574, 26615), False, 'import os, shutil\n'), ((26628, 26682), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['valid_metrics', 'last_json_path'], {}), '(valid_metrics, last_json_path)\n', (26651, 26682), False, 'import utils\n'), ((26828, 26895), 'os.path.join', 'os.path.join', (['setting.home', "(setting.fase + '-fase')", '"""val_preds.npy"""'], {}), "(setting.home, setting.fase + '-fase', 'val_preds.npy')\n", (26840, 26895), False, 'import os, shutil\n'), ((27655, 27674), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (27664, 27674), False, 'import json\n'), ((27811, 27840), 'json.dump', 'json.dump', (['defaults', 'jsonFile'], {}), '(defaults, jsonFile)\n', (27820, 27840), False, 'import json\n'), ((28683, 28730), 'os.path.join', 'os.path.join', (['setting.home', "(args.fase + '-fase')"], {}), "(setting.home, args.fase + '-fase')\n", (28695, 28730), False, 'import os, shutil\n'), ((28753, 28800), 'os.path.join', 'os.path.join', (['setting.home', "(args.fase + '-fase')"], {}), "(setting.home, args.fase + '-fase')\n", (28765, 28800), False, 'import os, shutil\n'), ((29301, 29335), 'os.path.exists', 'os.path.exists', (['"""last-cuda-0.flag"""'], {}), "('last-cuda-0.flag')\n", (29315, 29335), False, 'import os, shutil\n'), ((33398, 33421), 'numpy.unique', 'np.unique', (['train_labels'], {}), '(train_labels)\n', (33407, 33421), True, 'import numpy as np\n'), ((37071, 37134), 'os.path.join', 'os.path.join', (['setting.home', "(args.fase + '-fase')", '"""last.pth.tar"""'], {}), "(setting.home, args.fase + '-fase', 'last.pth.tar')\n", (37083, 37134), False, 'import os, shutil\n'), ((40945, 40958), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (40955, 40958), False, 'import csv\n'), ((4441, 4469), 'torch.zeros_like', 'torch.zeros_like', (["batch['t']"], {}), "(batch['t'])\n", (4457, 4469), False, 'import torch\n'), ((4949, 4967), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4965, 4967), False, 'import torch\n'), ((5178, 5196), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (5194, 5196), False, 'import torch\n'), ((8587, 8608), 'torch.cat', 'torch.cat', (['[Zi, t]', '(1)'], {}), '([Zi, t], 1)\n', (8596, 8608), False, 'import torch\n'), ((8733, 8783), 'model.net.cholesky_least_squares', 'net.cholesky_least_squares', (['Zt', 'Y'], {'intercept': '(False)'}), '(Zt, Y, intercept=False)\n', (8759, 8783), True, 'import model.net as net\n'), ((9093, 9146), 'model.net.cholesky_least_squares', 'net.cholesky_least_squares', (['Zi', 'Xhat'], {'intercept': '(False)'}), '(Zi, Xhat, intercept=False)\n', (9119, 9146), True, 'import model.net as net\n'), ((12178, 12227), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['tensors'], {'orient': '"""columns"""'}), "(tensors, orient='columns')\n", (12200, 12227), True, 'import pandas as pd\n'), ((13856, 13874), 'torch.ones_like', 'torch.ones_like', (['t'], {}), '(t)\n', (13871, 13874), False, 'import torch\n'), ((14010, 14028), 'torch.ones_like', 'torch.ones_like', (['t'], {}), '(t)\n', (14025, 14028), False, 'import torch\n'), ((26331, 26384), 'os.path.join', 'os.path.join', (['logdir', '"""metrics_val_best_weights.json"""'], {}), "(logdir, 'metrics_val_best_weights.json')\n", (26343, 26384), False, 'import os, shutil\n'), ((26401, 26455), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['valid_metrics', 'best_json_path'], {}), '(valid_metrics, best_json_path)\n', (26424, 26455), False, 'import utils\n'), ((29353, 29377), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(1)'], {}), '(1)\n', (29374, 29377), False, 'import torch\n'), ((29394, 29445), 'shutil.move', 'shutil.move', (['"""last-cuda-0.flag"""', '"""last-cuda-1.flag"""'], {}), "('last-cuda-0.flag', 'last-cuda-1.flag')\n", (29405, 29445), False, 'import os, shutil\n'), ((29480, 29504), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (29501, 29504), False, 'import torch\n'), ((29521, 29572), 'shutil.move', 'shutil.move', (['"""last-cuda-1.flag"""', '"""last-cuda-0.flag"""'], {}), "('last-cuda-1.flag', 'last-cuda-0.flag')\n", (29532, 29572), False, 'import os, shutil\n'), ((34612, 34638), 'model.net.Net3D', 'net.Net3D', (['params', 'setting'], {}), '(params, setting)\n', (34621, 34638), True, 'import model.net as net\n'), ((34683, 34713), 'model.net.CausalNet', 'net.CausalNet', (['params', 'setting'], {}), '(params, setting)\n', (34696, 34713), True, 'import model.net as net\n'), ((37215, 37263), 'os.path.join', 'os.path.join', (['setting.home', '"""warm-start.pth.tar"""'], {}), "(setting.home, 'warm-start.pth.tar')\n", (37227, 37263), False, 'import os, shutil\n'), ((40870, 40911), 'os.path.join', 'os.path.join', (['logdir', '"""train-tensors.csv"""'], {}), "(logdir, 'train-tensors.csv')\n", (40882, 40911), False, 'import os, shutil\n'), ((7510, 7528), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7526, 7528), False, 'import torch\n'), ((24061, 24098), 'numpy.squeeze', 'np.squeeze', (["outtensors['predictions']"], {}), "(outtensors['predictions'])\n", (24071, 24098), True, 'import numpy as np\n'), ((24195, 24262), 'os.path.join', 'os.path.join', (['setting.home', "(setting.fase + '-fase')", '"""preds_val.csv"""'], {}), "(setting.home, setting.fase + '-fase', 'preds_val.csv')\n", (24207, 24262), False, 'import os, shutil\n'), ((24399, 24462), 'os.path.join', 'os.path.join', (['setting.home', "(setting.fase + '-fase')", '"""preds.npy"""'], {}), "(setting.home, setting.fase + '-fase', 'preds.npy')\n", (24411, 24462), False, 'import os, shutil\n'), ((31992, 32018), 'pandas.Series', 'pd.Series', (['setting.metrics'], {}), '(setting.metrics)\n', (32001, 32018), True, 'import pandas as pd\n'), ((36400, 36425), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (36423, 36425), False, 'import torch\n'), ((8502, 8520), 'torch.ones_like', 'torch.ones_like', (['t'], {}), '(t)\n', (8517, 8520), False, 'import torch\n'), ((12291, 12332), 'os.path.join', 'os.path.join', (['logdir', '"""train-tensors.csv"""'], {}), "(logdir, 'train-tensors.csv')\n", (12303, 12332), False, 'import os, shutil\n'), ((24339, 24373), 'numpy.savetxt', 'np.savetxt', (['f', 'preds.T'], {'newline': '""""""'}), "(f, preds.T, newline='')\n", (24349, 24373), True, 'import numpy as np\n'), ((34262, 34288), 'torch.zeros', 'torch.zeros', (['(x.shape[0],)'], {}), '((x.shape[0],))\n', (34273, 34288), False, 'import torch\n'), ((37606, 37675), 'os.path.join', 'os.path.join', (['setting.home', '"""xy-fase"""', "(args.restore_file + '.pth.tar')"], {}), "(setting.home, 'xy-fase', args.restore_file + '.pth.tar')\n", (37618, 37675), False, 'import os, shutil\n'), ((10088, 10111), 'torch.zeros_like', 'torch.zeros_like', (['mse_x'], {}), '(mse_x)\n', (10104, 10111), False, 'import torch\n'), ((34123, 34150), 'torch.ones', 'torch.ones', (['(x.shape[0], 1)'], {}), '((x.shape[0], 1))\n', (34133, 34150), False, 'import torch\n'), ((37931, 37986), 'utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model'], {'mines': 'mines'}), '(restore_path, model, mines=mines)\n', (37952, 37986), False, 'import utils\n'), ((38037, 38079), 'utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model'], {}), '(restore_path, model)\n', (38058, 38079), False, 'import utils\n'), ((10306, 10329), 'torch.zeros_like', 'torch.zeros_like', (['mse_x'], {}), '(mse_x)\n', (10322, 10329), False, 'import torch\n'), ((38419, 38485), 'os.path.join', 'os.path.join', (['setting.home', '"""x-fase"""', "(state_dict_type + '.pth.tar')"], {}), "(setting.home, 'x-fase', state_dict_type + '.pth.tar')\n", (38431, 38485), False, 'import os, shutil\n'), ((38864, 38930), 'os.path.join', 'os.path.join', (['setting.home', '"""y-fase"""', "(state_dict_type + '.pth.tar')"], {}), "(setting.home, 'y-fase', state_dict_type + '.pth.tar')\n", (38876, 38930), False, 'import os, shutil\n'), ((39109, 39176), 'os.path.join', 'os.path.join', (['setting.home', '"""yt-fase"""', "(state_dict_type + '.pth.tar')"], {}),
"(setting.home, 'yt-fase', state_dict_type + '.pth.tar')\n", (39121, 39176), False, 'import os, shutil\n'), ((39487, 39554), 'os.path.join', 'os.path.join', (['setting.home', '"""xy-fase"""', "(state_dict_type + '.pth.tar')"], {}), "(setting.home, 'xy-fase', state_dict_type + '.pth.tar')\n", (39499, 39554), False, 'import os, shutil\n'), ((39979, 40046), 'os.path.join', 'os.path.join', (['setting.home', '"""xy-fase"""', "(state_dict_type + '.pth.tar')"], {}), "(setting.home, 'xy-fase', state_dict_type + '.pth.tar')\n", (39991, 40046), False, 'import os, shutil\n')]
|
#!/usr/bin/env python
import sys
import os
from shutil import copyfile
import time
import itertools
import torch
from torch.autograd import Variable
import torchvision.utils as vutils
from options import TrainOptions, create_sub_dirs
from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator
from model import StochCycleGAN, AugmentedCycleGAN
import numpy as np
from evaluate import eval_mse_A, eval_ubo_B
import shutil
import random
import glob
import json
def save_results(expr_dir, results_dict):
# save to results.json (for cluster exp)
fname = os.path.join(expr_dir, 'results.json')
with open(fname, 'w') as f:
json.dump(results_dict, f, indent=4)
def copy_scripts_to_folder(expr_dir):
dir_path = os.path.dirname(os.path.realpath(__file__))
for f in glob.glob("%s/*.py" % dir_path):
shutil.copy(f, expr_dir)
def print_log(out_f, message):
out_f.write(message+"\n")
out_f.flush()
print(message)
def format_log(epoch, i, errors, t, prefix=True):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
if not prefix:
message = ' ' * len(message)
for k, v in errors.items():
message += '%s: %.3f ' % (k, v)
return message
def visualize_cycle(opt, real_A, visuals, eidx, uidx, train):
size = real_A.size()
images = [img.cpu().unsqueeze(1) for img in visuals.values()]
vis_image = torch.cat(images, dim=1).view(size[0]*len(images),size[1],size[2],size[3])
if train:
save_path = opt.train_vis_cycle
else:
save_path = opt.vis_cycle
save_path = os.path.join(save_path, 'cycle_%02d_%04d.png' % (eidx, uidx))
vutils.save_image(vis_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=len(images))
copyfile(save_path, os.path.join(opt.vis_latest, 'cycle.png'))
def visualize_multi(opt, real_A, model, eidx, uidx):
size = real_A.size()
# all samples in real_A share the same prior_z_B
multi_prior_z_B = Variable(real_A.data.new(opt.num_multi,
opt.nlatent, 1, 1).normal_(0, 1).repeat(size[0],1,1,1), volatile=True)
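    # (our reading of the line above, not stated in the source: .repeat tiles
    # the num_multi latent codes across the batch, which is how the sharing
    # mentioned in the comment is implemented)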
multi_fake_B = model.generate_multi(real_A.detach(), multi_prior_z_B)
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_A.data.cpu().unsqueeze(1), multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+1),size[1],size[2],size[3])
save_path = os.path.join(opt.vis_multi, 'multi_%02d_%04d.png' % (eidx, uidx))
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+1)
copyfile(save_path, os.path.join(opt.vis_latest, 'multi.png'))
def visualize_inference(opt, real_A, real_B, model, eidx, uidx):
size = real_A.size()
real_B = real_B[:opt.num_multi]
# all samples in real_A share the same post_z_B
multi_fake_B = model.inference_multi(real_A.detach(), real_B.detach())
multi_fake_B = multi_fake_B.data.cpu().view(
size[0], opt.num_multi, size[1], size[2], size[3])
vis_multi_image = torch.cat([real_A.data.cpu().unsqueeze(1), multi_fake_B], dim=1) \
.view(size[0]*(opt.num_multi+1),size[1],size[2],size[3])
vis_multi_image = torch.cat([torch.ones(1, size[1], size[2], size[3]).cpu(), real_B.data.cpu(),
vis_multi_image.cpu()], dim=0)
save_path = os.path.join(opt.vis_inf, 'inf_%02d_%04d.png' % (eidx, uidx))
vutils.save_image(vis_multi_image.cpu(), save_path,
normalize=True, range=(-1,1), nrow=opt.num_multi+1)
copyfile(save_path, os.path.join(opt.vis_latest, 'inf.png'))
def train_model():
opt = TrainOptions().parse(sub_dirs=['vis_multi','vis_cycle','vis_latest','train_vis_cycle'])
out_f = open("%s/results.txt" % opt.expr_dir, 'w')
copy_scripts_to_folder(opt.expr_dir)
use_gpu = len(opt.gpu_ids) > 0
if opt.seed is not None:
print ("using random seed:", opt.seed)
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
if use_gpu:
torch.cuda.manual_seed_all(opt.seed)
if opt.numpy_data:
trainA, trainB, devA, devB, testA, testB = load_edges2shoes(opt.dataroot)
train_dataset = UnalignedIterator(trainA, trainB, batch_size=opt.batchSize)
print_log(out_f, '#training images = %d' % len(train_dataset))
vis_inf = False
test_dataset = AlignedIterator(testA, testB, batch_size=100)
print_log(out_f, '#test images = %d' % len(test_dataset))
dev_dataset = AlignedIterator(devA, devB, batch_size=100)
print_log(out_f, '#dev images = %d' % len(dev_dataset))
dev_cycle = itertools.cycle(AlignedIterator(devA, devB, batch_size=25))
else:
train_data_loader = DataLoader(opt, subset='train', unaligned=True, batchSize=opt.batchSize)
test_data_loader = DataLoader(opt, subset='test', unaligned=False, batchSize=200)
dev_data_loader = DataLoader(opt, subset='dev', unaligned=False, batchSize=200)
dev_cycle_loader = DataLoader(opt, subset='dev', unaligned=False, batchSize=25)
train_dataset = train_data_loader.load_data()
dataset_size = len(train_data_loader)
print_log(out_f, '#training images = %d' % dataset_size)
vis_inf = False
test_dataset = test_data_loader.load_data()
print_log(out_f, '#test images = %d' % len(test_data_loader))
dev_dataset = dev_data_loader.load_data()
print_log(out_f, '#dev images = %d' % len(dev_data_loader))
dev_cycle = itertools.cycle(dev_cycle_loader.load_data())
if opt.supervised:
if opt.numpy_data:
sup_size = int(len(trainA) * opt.sup_frac)
sup_trainA = trainA[:sup_size]
sup_trainB = trainB[:sup_size]
sup_train_dataset = AlignedIterator(sup_trainA, sup_trainB, batch_size=opt.batchSize)
else:
sup_train_data_loader = DataLoader(opt, subset='train', unaligned=False,
batchSize=opt.batchSize, fraction=opt.sup_frac)
sup_train_dataset = sup_train_data_loader.load_data()
sup_size = len(sup_train_data_loader)
sup_train_dataset = itertools.cycle(sup_train_dataset)
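        # itertools.cycle lets the (typically much smaller) supervised subset be
        # drawn from indefinitely alongside the unaligned training iterator.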
print_log(out_f, '#supervised images = %d' % sup_size)
# create_model
if opt.model == 'stoch_cycle_gan':
model = StochCycleGAN(opt)
elif opt.model == 'cycle_gan':
model = StochCycleGAN(opt, ignore_noise=True)
elif opt.model == 'aug_cycle_gan':
model = AugmentedCycleGAN(opt)
create_sub_dirs(opt, ['vis_inf'])
vis_inf = True
else:
raise NotImplementedError('Specified model is not implemented.')
print_log(out_f, "model [%s] was created" % (model.__class__.__name__))
# visualizer = Visualizer(opt)
total_steps = 0
print_start_time = time.time()
results = {
'best_dev_mse_A' : sys.float_info.max,
'best_test_mse_A' : sys.float_info.max,
'best_dev_bpp_B' : sys.float_info.max,
'best_test_bpp_B' : sys.float_info.max,
}
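    # Best-so-far dev/test metrics are kept in `results` and re-written to
    # results.json whenever a new best is found.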
save_results(opt.expr_dir, results)
history_mse_A = []
history_ubo_B = []
create_sub_dirs(opt, ['vis_pred_B'])
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
epoch_iter = 0
for i, data in enumerate(train_dataset):
real_A, real_B = Variable(data['A']), Variable(data['B'])
if real_A.size(0) != real_B.size(0):
continue
prior_z_B = Variable(real_A.data.new(real_A.size(0), opt.nlatent, 1, 1).normal_(0, 1))
total_steps += opt.batchSize
epoch_iter += opt.batchSize
if use_gpu:
real_A = real_A.cuda()
real_B = real_B.cuda()
prior_z_B = prior_z_B.cuda()
if opt.monitor_gnorm:
losses, visuals, gnorms = model.train_instance(real_A, real_B, prior_z_B)
else:
losses, visuals = model.train_instance(real_A, real_B, prior_z_B)
# supervised training
if opt.supervised:
                sup_data = next(sup_train_dataset)
sup_real_A, sup_real_B = Variable(sup_data['A']), Variable(sup_data['B'])
if use_gpu:
sup_real_A, sup_real_B = sup_real_A.cuda(), sup_real_B.cuda()
sup_losses = model.supervised_train_instance(sup_real_A, sup_real_B, prior_z_B)
if total_steps % opt.display_freq == 0:
# visualize current training batch
visualize_cycle(opt, real_A, visuals, epoch, epoch_iter/opt.batchSize, train=True)
                dev_data = next(dev_cycle)
dev_real_A, dev_real_B = Variable(dev_data['A']), Variable(dev_data['B'])
dev_prior_z_B = Variable(dev_real_A.data.new(dev_real_A.size(0),
opt.nlatent, 1, 1).normal_(0, 1))
if use_gpu:
dev_real_A = dev_real_A.cuda()
dev_real_B = dev_real_B.cuda()
dev_prior_z_B = dev_prior_z_B.cuda()
dev_visuals = model.generate_cycle(dev_real_A, dev_real_B, dev_prior_z_B)
visualize_cycle(opt, dev_real_A, dev_visuals, epoch, epoch_iter/opt.batchSize, train=False)
# visualize generated B with different z_B
visualize_multi(opt, dev_real_A, model, epoch, epoch_iter/opt.batchSize)
if vis_inf:
# visualize generated B with different z_B infered from real_B
visualize_inference(opt, dev_real_A, dev_real_B, model, epoch, epoch_iter/opt.batchSize)
if total_steps % opt.print_freq == 0:
t = (time.time() - print_start_time) / opt.batchSize
print_log(out_f, format_log(epoch, epoch_iter, losses, t))
if opt.supervised:
print_log(out_f, format_log(epoch, epoch_iter, sup_losses, t, prefix=False))
if opt.monitor_gnorm:
print_log(out_f, format_log(epoch, epoch_iter, gnorms, t, prefix=False)+"\n")
print_start_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print_log(out_f, 'saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save('latest')
#####################
# evaluate mappings
#####################
if epoch % opt.eval_A_freq == 0:
t = time.time()
dev_mse_A = eval_mse_A(dev_dataset, model)
test_mse_A = eval_mse_A(test_dataset, model)
t = time.time() - t
history_mse_A.append((dev_mse_A, test_mse_A))
np.save("%s/history_mse_A" % opt.expr_dir, history_mse_A)
res_str_list = ["[%d] DEV_MSE_A: %.4f, TEST_MSE_A: %.4f, TIME: %.4f" % (epoch, dev_mse_A, test_mse_A, t)]
if dev_mse_A < results['best_dev_mse_A']:
with open("%s/best_mse_A.txt" % opt.expr_dir, 'w') as best_mse_A_f:
best_mse_A_f.write(res_str_list[0]+'\n')
best_mse_A_f.flush()
results['best_dev_mse_A'] = dev_mse_A
results['best_test_mse_A'] = test_mse_A
model.save('best_A')
save_results(opt.expr_dir, results)
res_str_list += ["*** BEST DEV A ***"]
res_str = "\n".join(["-"*60] + res_str_list + ["-"*60])
print_log(out_f, res_str)
if epoch % opt.eval_B_freq == 0:
t = time.time()
if opt.model == 'cycle_gan':
steps = 1
else:
steps = 50
dev_ubo_B, dev_bpp_B, dev_kld_B = eval_ubo_B(dev_dataset, model, steps, True, 'pred_B_%d' % epoch,
opt.vis_pred_B)
test_ubo_B, test_bpp_B, test_kld_B = eval_ubo_B(test_dataset, model, steps, False, 'pred_B',
opt.vis_pred_B)
t = time.time() - t
history_ubo_B.append((dev_ubo_B, dev_bpp_B, dev_kld_B, test_ubo_B, test_bpp_B, test_kld_B))
np.save("%s/history_ubo_B" % opt.expr_dir, history_ubo_B)
res_str_list = ["[%d] DEV_BPP_B: %.4f, TEST_BPP_B: %.4f, TIME: %.4f" % (epoch, dev_bpp_B, test_bpp_B, t)]
if dev_bpp_B < results['best_dev_bpp_B']:
with open("%s/best_bpp_B.txt" % opt.expr_dir, 'w') as best_bpp_B_f:
best_bpp_B_f.write(res_str_list[0]+'\n')
best_bpp_B_f.flush()
results['best_dev_bpp_B'] = dev_bpp_B
results['best_test_bpp_B'] = test_bpp_B
save_results(opt.expr_dir, results)
model.save('best_B')
res_str_list += ["*** BEST BPP B ***"]
res_str = "\n".join(["-"*60] + res_str_list + ["-"*60])
print_log(out_f, res_str)
print_log(out_f, 'End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
if epoch > opt.niter:
model.update_learning_rate()
out_f.close()
if __name__ == "__main__":
train_model()
|
[
"evaluate.eval_mse_A",
"evaluate.eval_ubo_B"
] |
[((596, 634), 'os.path.join', 'os.path.join', (['expr_dir', '"""results.json"""'], {}), "(expr_dir, 'results.json')\n", (608, 634), False, 'import os\n'), ((824, 855), 'glob.glob', 'glob.glob', (["('%s/*.py' % dir_path)"], {}), "('%s/*.py' % dir_path)\n", (833, 855), False, 'import glob\n'), ((1615, 1676), 'os.path.join', 'os.path.join', (['save_path', "('cycle_%02d_%04d.png' % (eidx, uidx))"], {}), "(save_path, 'cycle_%02d_%04d.png' % (eidx, uidx))\n", (1627, 1676), False, 'import os\n'), ((2475, 2540), 'os.path.join', 'os.path.join', (['opt.vis_multi', "('multi_%02d_%04d.png' % (eidx, uidx))"], {}), "(opt.vis_multi, 'multi_%02d_%04d.png' % (eidx, uidx))\n", (2487, 2540), False, 'import os\n'), ((3424, 3485), 'os.path.join', 'os.path.join', (['opt.vis_inf', "('inf_%02d_%04d.png' % (eidx, uidx))"], {}), "(opt.vis_inf, 'inf_%02d_%04d.png' % (eidx, uidx))\n", (3436, 3485), False, 'import os\n'), ((6961, 6972), 'time.time', 'time.time', ([], {}), '()\n', (6970, 6972), False, 'import time\n'), ((7278, 7314), 'options.create_sub_dirs', 'create_sub_dirs', (['opt', "['vis_pred_B']"], {}), "(opt, ['vis_pred_B'])\n", (7293, 7314), False, 'from options import TrainOptions, create_sub_dirs\n'), ((675, 711), 'json.dump', 'json.dump', (['results_dict', 'f'], {'indent': '(4)'}), '(results_dict, f, indent=4)\n', (684, 711), False, 'import json\n'), ((783, 809), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (799, 809), False, 'import os\n'), ((865, 889), 'shutil.copy', 'shutil.copy', (['f', 'expr_dir'], {}), '(f, expr_dir)\n', (876, 889), False, 'import shutil\n'), ((1807, 1848), 'os.path.join', 'os.path.join', (['opt.vis_latest', '"""cycle.png"""'], {}), "(opt.vis_latest, 'cycle.png')\n", (1819, 1848), False, 'import os\n'), ((2681, 2722), 'os.path.join', 'os.path.join', (['opt.vis_latest', '"""multi.png"""'], {}), "(opt.vis_latest, 'multi.png')\n", (2693, 2722), False, 'import os\n'), ((3626, 3665), 'os.path.join', 'os.path.join', (['opt.vis_latest', '"""inf.png"""'], {}), "(opt.vis_latest, 'inf.png')\n", (3638, 3665), False, 'import os\n'), ((4001, 4022), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (4012, 4022), False, 'import random\n'), ((4031, 4055), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (4045, 4055), True, 'import numpy as np\n'), ((4064, 4091), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (4081, 4091), False, 'import torch\n'), ((4236, 4266), 'edges2shoes_data.load_edges2shoes', 'load_edges2shoes', (['opt.dataroot'], {}), '(opt.dataroot)\n', (4252, 4266), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((4291, 4350), 'edges2shoes_data.UnalignedIterator', 'UnalignedIterator', (['trainA', 'trainB'], {'batch_size': 'opt.batchSize'}), '(trainA, trainB, batch_size=opt.batchSize)\n', (4308, 4350), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((4470, 4515), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['testA', 'testB'], {'batch_size': '(100)'}), '(testA, testB, batch_size=100)\n', (4485, 4515), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((4605, 4648), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['devA', 'devB'], {'batch_size': '(100)'}), '(devA, devB, batch_size=100)\n', (4620, 4648), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((4832, 4904), 'edges2shoes_data.DataLoader', 'DataLoader', (['opt'], {'subset': '"""train"""', 'unaligned': '(True)', 'batchSize': 'opt.batchSize'}), "(opt, subset='train', unaligned=True, batchSize=opt.batchSize)\n", (4842, 4904), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((4932, 4994), 'edges2shoes_data.DataLoader', 'DataLoader', (['opt'], {'subset': '"""test"""', 'unaligned': '(False)', 'batchSize': '(200)'}), "(opt, subset='test', unaligned=False, batchSize=200)\n", (4942, 4994), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((5021, 5082), 'edges2shoes_data.DataLoader', 'DataLoader', (['opt'], {'subset': '"""dev"""', 'unaligned': '(False)', 'batchSize': '(200)'}), "(opt, subset='dev', unaligned=False, batchSize=200)\n", (5031, 5082), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((5110, 5170), 'edges2shoes_data.DataLoader', 'DataLoader', (['opt'], {'subset': '"""dev"""', 'unaligned': '(False)', 'batchSize': '(25)'}), "(opt, subset='dev', unaligned=False, batchSize=25)\n", (5120, 5170), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((6298, 6332), 'itertools.cycle', 'itertools.cycle', (['sup_train_dataset'], {}), '(sup_train_dataset)\n', (6313, 6332), False, 'import itertools\n'), ((6471, 6489), 'model.StochCycleGAN', 'StochCycleGAN', (['opt'], {}), '(opt)\n', (6484, 6489), False, 'from model import StochCycleGAN, AugmentedCycleGAN\n'), ((7417, 7428), 'time.time', 'time.time', ([], {}), '()\n', (7426, 7428), False, 'import time\n'), ((1426, 1450), 'torch.cat', 'torch.cat', (['images'], {'dim': '(1)'}), '(images, dim=1)\n', (1435, 1450), False, 'import torch\n'), ((3697, 3711), 'options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (3709, 3711), False, 'from options import TrainOptions, create_sub_dirs\n'), ((4124, 4160), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.seed'], {}), '(opt.seed)\n', (4150, 4160), False, 'import torch\n'), ((4750, 4792), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['devA', 'devB'], {'batch_size': '(25)'}), '(devA, devB, batch_size=25)\n', (4765, 4792), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((5894, 5959), 'edges2shoes_data.AlignedIterator', 'AlignedIterator', (['sup_trainA', 'sup_trainB'], {'batch_size': 'opt.batchSize'}), '(sup_trainA, sup_trainB, batch_size=opt.batchSize)\n', (5909, 5959), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((6010, 6110), 'edges2shoes_data.DataLoader', 'DataLoader', (['opt'], {'subset': '"""train"""', 'unaligned': '(False)', 'batchSize': 'opt.batchSize', 'fraction': 'opt.sup_frac'}), "(opt, subset='train', unaligned=False, batchSize=opt.batchSize,\n    fraction=opt.sup_frac)\n", (6020, 6110), False, 'from edges2shoes_data import DataLoader, load_edges2shoes, AlignedIterator, UnalignedIterator\n'), ((6541, 6578), 'model.StochCycleGAN', 'StochCycleGAN', (['opt'], {'ignore_noise': '(True)'}), '(opt, ignore_noise=True)\n', (6554, 6578), False, 'from model import StochCycleGAN, AugmentedCycleGAN\n'), ((10747, 10758), 'time.time', 'time.time', ([], {}), '()\n', (10756, 10758), False, 'import time\n'), ((10783, 10813), 'evaluate.eval_mse_A', 'eval_mse_A', (['dev_dataset', 'model'], {}), '(dev_dataset, model)\n', (10793, 10813), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((10839, 10870), 'evaluate.eval_mse_A', 'eval_mse_A', (['test_dataset', 'model'], {}), '(test_dataset, model)\n', (10849, 10870), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((10973, 11030), 'numpy.save', 'np.save', (["('%s/history_mse_A' % opt.expr_dir)", 'history_mse_A'], {}), "('%s/history_mse_A' % opt.expr_dir, history_mse_A)\n", (10980, 11030), True, 'import numpy as np\n'), ((11808, 11819), 'time.time', 'time.time', ([], {}), '()\n', (11817, 11819), False, 'import time\n'), ((11978, 12063), 'evaluate.eval_ubo_B', 'eval_ubo_B', (['dev_dataset', 'model', 'steps', '(True)', "('pred_B_%d' % epoch)", 'opt.vis_pred_B'], {}), "(dev_dataset, model, steps, True, 'pred_B_%d' % epoch, opt.vis_pred_B\n    )\n", (11988, 12063), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((12165, 12236), 'evaluate.eval_ubo_B', 'eval_ubo_B', (['test_dataset', 'model', 'steps', '(False)', '"""pred_B"""', 'opt.vis_pred_B'], {}), "(test_dataset, model, steps, False, 'pred_B', opt.vis_pred_B)\n", (12175, 12236), False, 'from evaluate import eval_mse_A, eval_ubo_B\n'), ((12445, 12502), 'numpy.save', 'np.save', (["('%s/history_ubo_B' % opt.expr_dir)", 'history_ubo_B'], {}), "('%s/history_ubo_B' % opt.expr_dir, history_ubo_B)\n", (12452, 12502), True, 'import numpy as np\n'), ((6634, 6656), 'model.AugmentedCycleGAN', 'AugmentedCycleGAN', (['opt'], {}), '(opt)\n', (6651, 6656), False, 'from model import StochCycleGAN, AugmentedCycleGAN\n'), ((6665, 6698), 'options.create_sub_dirs', 'create_sub_dirs', (['opt', "['vis_inf']"], {}), "(opt, ['vis_inf'])\n", (6680, 6698), False, 'from options import TrainOptions, create_sub_dirs\n'), ((7531, 7550), 'torch.autograd.Variable', 'Variable', (["data['A']"], {}), "(data['A'])\n", (7539, 7550), False, 'from torch.autograd import Variable\n'), ((7552, 7571), 'torch.autograd.Variable', 'Variable', (["data['B']"], {}), "(data['B'])\n", (7560, 7571), False, 'from torch.autograd import Variable\n'), ((10383, 10394), 'time.time', 'time.time', ([], {}), '()\n', (10392, 10394), False, 'import time\n'), ((10887, 10898), 'time.time', 'time.time', ([], {}), '()\n', (10896, 10898), False, 'import time\n'), ((12313, 12324), 'time.time', 'time.time', ([], {}), '()\n', (12322, 12324), False, 'import time\n'), ((3276, 3316), 'torch.ones', 'torch.ones', (['(1)', 'size[1]', 'size[2]', 'size[3]'], {}), '(1, size[1], size[2], size[3])\n', (3286, 3316), False, 'import torch\n'), ((8359, 8382), 'torch.autograd.Variable', 'Variable', (["sup_data['A']"], {}), "(sup_data['A'])\n", (8367, 8382), False, 'from torch.autograd import Variable\n'), ((8384, 8407), 'torch.autograd.Variable', 'Variable', (["sup_data['B']"], {}), "(sup_data['B'])\n", (8392, 8407), False, 'from torch.autograd import Variable\n'), ((8904, 8927), 'torch.autograd.Variable', 'Variable', (["dev_data['A']"], {}), "(dev_data['A'])\n", (8912, 8927), False, 'from torch.autograd import Variable\n'), ((8929, 8952), 'torch.autograd.Variable', 'Variable', (["dev_data['B']"], {}), "(dev_data['B'])\n", (8937, 8952), False, 'from torch.autograd import Variable\n'), ((9957, 9968), 'time.time', 'time.time', ([], {}), '()\n', (9966, 9968), False, 'import time\n'), ((13350, 13361), 'time.time', 'time.time', ([], {}), '()\n', (13359, 13361), False, 'import time\n')]
|
### modified from https://github.com/qiuqiangkong/audioset_tagging_cnn
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
from sklearn import metrics
import _pickle as cPickle
import torch
torch.backends.cudnn.benchmark=True
torch.manual_seed(0)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import (create_folder, get_filename, create_logging,
StatisticsContainer)
from models import (Cnn14, Cnn14_no_specaug, Cnn14_no_dropout,
Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128,
Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19,
Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14,
Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt)
from pytorch_utils import (move_data_to_device, count_parameters, count_flops,
do_mixup)
from data_generator import (AudioSetDataset, BalancedSampler, BalancedSamplerMixup,
EvaluateSampler, Collator)
from evaluate import Evaluator
import config
from losses import get_loss_func
def train(args):
"""Train AudioSet tagging model.
Args:
dataset_dir: str
workspace: str
data_type: 'balanced_train' | 'unbalanced_train'
frames_per_second: int
mel_bins: int
model_type: str
loss_type: 'bce'
balanced: bool
augmentation: str
batch_size: int
learning_rate: float
resume_iteration: int
early_stop: int
accumulation_steps: int
cuda: bool
"""
    # Arguments & parameters
workspace = args.workspace
data_type = args.data_type
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
loss_type = args.loss_type
balanced = args.balanced
augmentation = args.augmentation
batch_size = args.batch_size
learning_rate = args.learning_rate
resume_iteration = args.resume_iteration
early_stop = args.early_stop
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
filename = args.filename
num_workers = 8
sample_rate = config.sample_rate
audio_length = config.audio_length
classes_num = config.classes_num
loss_func = get_loss_func(loss_type)
# Paths
black_list_csv = os.path.join(workspace, 'black_list', 'dcase2017task4.csv')
waveform_hdf5s_dir = os.path.join(workspace, 'hdf5s', 'waveforms')
# Target hdf5 path
eval_train_targets_hdf5_path = os.path.join(workspace,
'hdf5s', 'targets', 'balanced_train.h5')
eval_test_targets_hdf5_path = os.path.join(workspace, 'hdf5s', 'targets',
'eval.h5')
if data_type == 'balanced_train':
train_targets_hdf5_path = os.path.join(workspace, 'hdf5s', 'targets',
'balanced_train.h5')
elif data_type == 'full_train':
train_targets_hdf5_path = os.path.join(workspace, 'hdf5s', 'targets',
'full_train.h5')
checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
create_folder(checkpoints_dir)
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
create_folder(os.path.dirname(statistics_path))
logs_dir = os.path.join(workspace, 'logs', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
create_logging(logs_dir, filemode='w')
logging.info(args)
if 'cuda' in str(device):
logging.info('Using GPU.')
device = 'cuda'
else:
logging.info('Using CPU.')
device = 'cpu'
# Model
Model = eval(model_type)
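    # The class is resolved by name via eval() over the imported model classes;
    # a dict from name to class would be a safer alternative.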
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
params_num = count_parameters(model)
# flops_num = count_flops(model, audio_length)
logging.info('Parameters num: {}'.format(params_num))
# logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9))
    # Dataset is used by the DataLoader below: given an index, it returns the
    # waveform and target of one audio clip
train_dataset = AudioSetDataset(
target_hdf5_path=train_targets_hdf5_path,
waveform_hdf5s_dir=waveform_hdf5s_dir,
audio_length=audio_length,
classes_num=classes_num)
bal_dataset = AudioSetDataset(
target_hdf5_path=eval_train_targets_hdf5_path,
waveform_hdf5s_dir=waveform_hdf5s_dir,
audio_length=audio_length,
classes_num=classes_num)
test_dataset = AudioSetDataset(
target_hdf5_path=eval_test_targets_hdf5_path,
waveform_hdf5s_dir=waveform_hdf5s_dir,
audio_length=audio_length,
classes_num=classes_num)
# Sampler
if balanced == 'balanced':
if 'mixup' in augmentation:
train_sampler = BalancedSamplerMixup(
target_hdf5_path=train_targets_hdf5_path,
black_list_csv=black_list_csv, batch_size=batch_size,
start_mix_epoch=1)
train_collector = Collator(mixup_alpha=1.)
            assert batch_size % torch.cuda.device_count() == 0, 'For mixup to work properly, batch_size must be divisible by the number of GPUs.'
else:
train_sampler = BalancedSampler(
target_hdf5_path=train_targets_hdf5_path,
black_list_csv=black_list_csv, batch_size=batch_size)
train_collector = Collator(mixup_alpha=None)
bal_sampler = EvaluateSampler(dataset_size=len(bal_dataset),
batch_size=batch_size)
test_sampler = EvaluateSampler(dataset_size=len(test_dataset),
batch_size=batch_size)
eval_collector = Collator(mixup_alpha=None)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_sampler=train_sampler, collate_fn=train_collector,
num_workers=num_workers, pin_memory=True)
bal_loader = torch.utils.data.DataLoader(dataset=bal_dataset,
batch_sampler=bal_sampler, collate_fn=eval_collector,
num_workers=num_workers, pin_memory=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_sampler=test_sampler, collate_fn=eval_collector,
num_workers=num_workers, pin_memory=True)
# Evaluator
bal_evaluator = Evaluator(
model=model,
generator=bal_loader)
test_evaluator = Evaluator(
model=model,
generator=test_loader)
# Statistics
statistics_container = StatisticsContainer(statistics_path)
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0., amsgrad=True)
train_bgn_time = time.time()
# Resume training
if resume_iteration > 0:
resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.pth'.format(resume_iteration))
logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
checkpoint = torch.load(resume_checkpoint_path)
model.load_state_dict(checkpoint['model'])
train_sampler.load_state_dict(checkpoint['sampler'])
statistics_container.load_state_dict(resume_iteration)
iteration = checkpoint['iteration']
else:
iteration = 0
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
t_ = time.time()
for batch_data_dict in train_loader:
"""batch_list_data_dict:
[{'audio_name': 'YtwJdQzi7x7Q.wav', 'waveform': (audio_length,), 'target': (classes_num)},
...]"""
# Evaluate
if (iteration % 2000 == 0 and iteration > resume_iteration) or (iteration == 0):
train_fin_time = time.time()
bal_statistics = bal_evaluator.evaluate()
test_statistics = test_evaluator.evaluate()
logging.info('Validate bal mAP: {:.3f}'.format(
np.mean(bal_statistics['average_precision'])))
logging.info('Validate test mAP: {:.3f}'.format(
np.mean(test_statistics['average_precision'])))
statistics_container.append(iteration, bal_statistics, data_type='bal')
statistics_container.append(iteration, test_statistics, data_type='test')
statistics_container.dump()
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s'
''.format(iteration, train_time, validate_time))
logging.info('------------------------------------')
train_bgn_time = time.time()
# Save model
if iteration % 20000 == 0:
checkpoint = {
'iteration': iteration,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'sampler': train_sampler.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
# Move data to device
for key in batch_data_dict.keys():
batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)
# Forward
model.train()
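        # Mixup (our understanding of do_mixup; not spelled out here): examples in
        # the batch are mixed pairwise with weights 'mixup_lambda', so the targets
        # below are interpolated with the same weights to stay consistent.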
if 'mixup' in augmentation:
batch_output_dict = model(batch_data_dict['waveform'], batch_data_dict['mixup_lambda'])
batch_target_dict = {'target': do_mixup(batch_data_dict['target'], batch_data_dict['mixup_lambda'])}
else:
batch_output_dict = model(batch_data_dict['waveform'], None)
batch_target_dict = {'target': batch_data_dict['target']}
loss = loss_func(batch_output_dict, batch_target_dict)
# Backward
loss.backward()
print(loss)
optimizer.step()
optimizer.zero_grad()
if iteration % 10 == 0:
print(iteration, 'time: {:.3f}'.format(time.time() - t_))
t_ = time.time()
iteration += 1
# Stop learning
if iteration == early_stop:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--workspace', type=str, required=True)
parser_train.add_argument('--data_type', type=str, required=True)
parser_train.add_argument('--window_size', type=int, required=True)
parser_train.add_argument('--hop_size', type=int, required=True)
parser_train.add_argument('--mel_bins', type=int, required=True)
parser_train.add_argument('--fmin', type=int, required=True)
parser_train.add_argument('--fmax', type=int, required=True)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--loss_type', type=str, required=True)
parser_train.add_argument('--balanced', type=str, required=True)
parser_train.add_argument('--augmentation', type=str, required=True)
parser_train.add_argument('--batch_size', type=int, required=True)
parser_train.add_argument('--learning_rate', type=float, required=True)
parser_train.add_argument('--resume_iteration', type=int, required=True)
parser_train.add_argument('--early_stop', type=int, required=True)
parser_train.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.filename = get_filename(__file__)
    # calculate_scalar is not defined in this script, so only 'train' is handled
    if args.mode == 'train':
        train(args)
else:
raise Exception('Error argument!')
|
[
"evaluate.Evaluator"
] |
[((372, 392), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (389, 392), False, 'import torch\n'), ((111, 148), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../utils"""'], {}), "(sys.path[0], '../utils')\n", (123, 148), False, 'import os\n'), ((2465, 2489), 'losses.get_loss_func', 'get_loss_func', (['loss_type'], {}), '(loss_type)\n', (2478, 2489), False, 'from losses import get_loss_func\n'), ((2524, 2583), 'os.path.join', 'os.path.join', (['workspace', '"""black_list"""', '"""dcase2017task4.csv"""'], {}), "(workspace, 'black_list', 'dcase2017task4.csv')\n", (2536, 2583), False, 'import os\n'), ((2614, 2659), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""waveforms"""'], {}), "(workspace, 'hdf5s', 'waveforms')\n", (2626, 2659), False, 'import os\n'), ((2719, 2783), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""targets"""', '"""balanced_train.h5"""'], {}), "(workspace, 'hdf5s', 'targets', 'balanced_train.h5')\n", (2731, 2783), False, 'import os\n'), ((2828, 2882), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""targets"""', '"""eval.h5"""'], {}), "(workspace, 'hdf5s', 'targets', 'eval.h5')\n", (2840, 2882), False, 'import os\n'), ((3642, 3672), 'utilities.create_folder', 'create_folder', (['checkpoints_dir'], {}), '(checkpoints_dir)\n', (3655, 3672), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer\n'), ((4632, 4670), 'utilities.create_logging', 'create_logging', (['logs_dir'], {'filemode': '"""w"""'}), "(logs_dir, filemode='w')\n", (4646, 4670), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer\n'), ((4675, 4693), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (4687, 4693), False, 'import logging\n'), ((5096, 5119), 'pytorch_utils.count_parameters', 'count_parameters', (['model'], {}), '(model)\n', (5112, 5119), False, 'from pytorch_utils import move_data_to_device, count_parameters, count_flops, do_mixup\n'), ((5432, 5588), 'data_generator.AudioSetDataset', 'AudioSetDataset', ([], {'target_hdf5_path': 'train_targets_hdf5_path', 'waveform_hdf5s_dir': 'waveform_hdf5s_dir', 'audio_length': 'audio_length', 'classes_num': 'classes_num'}), '(target_hdf5_path=train_targets_hdf5_path,\n    waveform_hdf5s_dir=waveform_hdf5s_dir, audio_length=audio_length,\n    classes_num=classes_num)\n', (5447, 5588), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((5636, 5797), 'data_generator.AudioSetDataset', 'AudioSetDataset', ([], {'target_hdf5_path': 'eval_train_targets_hdf5_path', 'waveform_hdf5s_dir': 'waveform_hdf5s_dir', 'audio_length': 'audio_length', 'classes_num': 'classes_num'}), '(target_hdf5_path=eval_train_targets_hdf5_path,\n    waveform_hdf5s_dir=waveform_hdf5s_dir, audio_length=audio_length,\n    classes_num=classes_num)\n', (5651, 5797), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((5846, 6006), 'data_generator.AudioSetDataset', 'AudioSetDataset', ([], {'target_hdf5_path': 'eval_test_targets_hdf5_path', 'waveform_hdf5s_dir': 'waveform_hdf5s_dir', 'audio_length': 'audio_length', 'classes_num': 'classes_num'}), '(target_hdf5_path=eval_test_targets_hdf5_path,\n    waveform_hdf5s_dir=waveform_hdf5s_dir, audio_length=audio_length,\n    classes_num=classes_num)\n', (5861, 6006), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((6976, 7002), 'data_generator.Collator', 'Collator', ([], {'mixup_alpha': 'None'}), '(mixup_alpha=None)\n', (6984, 7002), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((7045, 7203), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_sampler': 'train_sampler', 'collate_fn': 'train_collector', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=train_dataset, batch_sampler=\n    train_sampler, collate_fn=train_collector, num_workers=num_workers,\n    pin_memory=True)\n', (7072, 7203), False, 'import torch\n'), ((7235, 7383), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'bal_dataset', 'batch_sampler': 'bal_sampler', 'collate_fn': 'eval_collector', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=bal_dataset, batch_sampler=bal_sampler,\n    collate_fn=eval_collector, num_workers=num_workers, pin_memory=True)\n', (7262, 7383), False, 'import torch\n'), ((7417, 7572), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_sampler': 'test_sampler', 'collate_fn': 'eval_collector', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=test_dataset, batch_sampler=\n    test_sampler, collate_fn=eval_collector, num_workers=num_workers,\n    pin_memory=True)\n', (7444, 7572), False, 'import torch\n'), ((7619, 7663), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model', 'generator': 'bal_loader'}), '(model=model, generator=bal_loader)\n', (7628, 7663), False, 'from evaluate import Evaluator\n'), ((7708, 7753), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model', 'generator': 'test_loader'}), '(model=model, generator=test_loader)\n', (7717, 7753), False, 'from evaluate import Evaluator\n'), ((7825, 7861), 'utilities.StatisticsContainer', 'StatisticsContainer', (['statistics_path'], {}), '(statistics_path)\n', (7844, 7861), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer\n'), ((8041, 8052), 'time.time', 'time.time', ([], {}), '()\n', (8050, 8052), False, 'import time\n'), ((9121, 9149), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (9142, 9149), False, 'import torch\n'), ((9220, 9231), 'time.time', 'time.time', ([], {}), '()\n', (9229, 9231), False, 'import time\n'), ((12253, 12311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Example of parser. """'}), "(description='Example of parser. ')\n", (12276, 12311), False, 'import argparse\n'), ((13607, 13629), 'utilities.get_filename', 'get_filename', (['__file__'], {}), '(__file__)\n', (13619, 13629), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer\n'), ((2197, 2217), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2209, 2217), False, 'import torch\n'), ((2266, 2285), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2278, 2285), False, 'import torch\n'), ((2965, 3029), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""targets"""', '"""balanced_train.h5"""'], {}), "(workspace, 'hdf5s', 'targets', 'balanced_train.h5')\n", (2977, 3029), False, 'import os\n'), ((4164, 4196), 'os.path.dirname', 'os.path.dirname', (['statistics_path'], {}), '(statistics_path)\n', (4179, 4196), False, 'import os\n'), ((4737, 4763), 'logging.info', 'logging.info', (['"""Using GPU."""'], {}), "('Using GPU.')\n", (4749, 4763), False, 'import logging\n'), ((4806, 4832), 'logging.info', 'logging.info', (['"""Using CPU."""'], {}), "('Using CPU.')\n", (4818, 4832), False, 'import logging\n'), ((8740, 8774), 'torch.load', 'torch.load', (['resume_checkpoint_path'], {}), '(resume_checkpoint_path)\n', (8750, 8774), False, 'import torch\n'), ((2235, 2260), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2258, 2260), False, 'import torch\n'), ((3113, 3173), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""targets"""', '"""full_train.h5"""'], {}), "(workspace, 'hdf5s', 'targets', 'full_train.h5')\n", (3125, 3173), False, 'import os\n'), ((6145, 6284), 'data_generator.BalancedSamplerMixup', 'BalancedSamplerMixup', ([], {'target_hdf5_path': 'train_targets_hdf5_path', 'black_list_csv': 'black_list_csv', 'batch_size': 'batch_size', 'start_mix_epoch': '(1)'}), '(target_hdf5_path=train_targets_hdf5_path,\n    black_list_csv=black_list_csv, batch_size=batch_size, start_mix_epoch=1)\n', (6165, 6284), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((6362, 6387), 'data_generator.Collator', 'Collator', ([], {'mixup_alpha': '(1.0)'}), '(mixup_alpha=1.0)\n', (6370, 6387), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((6549, 6665), 'data_generator.BalancedSampler', 'BalancedSampler', ([], {'target_hdf5_path': 'train_targets_hdf5_path', 'black_list_csv': 'black_list_csv', 'batch_size': 'batch_size'}), '(target_hdf5_path=train_targets_hdf5_path, black_list_csv=\n    black_list_csv, batch_size=batch_size)\n', (6564, 6665), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((6725, 6751), 'data_generator.Collator', 'Collator', ([], {'mixup_alpha': 'None'}), '(mixup_alpha=None)\n', (6733, 6751), False, 'from data_generator import AudioSetDataset, BalancedSampler, BalancedSamplerMixup, EvaluateSampler, Collator\n'), ((9081, 9106), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9104, 9106), False, 'import torch\n'), ((9591, 9602), 'time.time', 'time.time', ([], {}), '()\n', (9600, 9602), False, 'import time\n'), ((10506, 10558), 'logging.info', 'logging.info', (['"""------------------------------------"""'], {}), "('------------------------------------')\n", (10518, 10558), False, 'import logging\n'), ((10589, 10600), 'time.time', 'time.time', ([], {}), '()\n', (10598, 10600), False, 'import time\n'), ((11042, 11081), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (11052, 11081), False, 'import torch\n'), ((11261, 11310), 'pytorch_utils.move_data_to_device', 'move_data_to_device', (['batch_data_dict[key]', 'device'], {}), '(batch_data_dict[key], device)\n', (11280, 11310), False, 'from pytorch_utils import move_data_to_device, count_parameters, count_flops, do_mixup\n'), ((12087, 12098), 'time.time', 'time.time', ([], {}), '()\n', (12096, 12098), False, 'import time\n'), ((10289, 10300), 'time.time', 'time.time', ([], {}), '()\n', (10298, 10300), False, 'import time\n'), ((11540, 11608), 'pytorch_utils.do_mixup', 'do_mixup', (["batch_data_dict['target']", "batch_data_dict['mixup_lambda']"], {}), "(batch_data_dict['target'], batch_data_dict['mixup_lambda'])\n", (11548, 11608), False, 'from pytorch_utils import move_data_to_device, count_parameters, count_flops, do_mixup\n'), ((6419, 6444), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6442, 6444), False, 'import torch\n'), ((9819, 9863), 'numpy.mean', 'np.mean', (["bal_statistics['average_precision']"], {}), "(bal_statistics['average_precision'])\n", (9826, 9863), True, 'import numpy as np\n'), ((9944, 9989), 'numpy.mean', 'np.mean', (["test_statistics['average_precision']"], {}), "(test_statistics['average_precision'])\n", (9951, 9989), True, 'import numpy as np\n'), ((12051, 12062), 'time.time', 'time.time', ([], {}), '()\n', (12060, 12062), False, 'import time\n')]
|
import argparse
import logging
import os
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import json
import waitress
from data import Data
from torch.utils.data import DataLoader
from utils import load_torch_model
from model import BertForClassification, CharCNN
from evaluate import evaluate
import time
from classmerge import classy_dic
from dataclean import cleanall, shortenlines
logging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')
logger = logging.getLogger()
cors_allow_all = CORS(allow_all_origins=True,
allow_origins_list=['http://localhost:8081'],
allow_all_headers=True,
allow_all_methods=True,
allow_credentials_all_origins=True
)
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config_file', default='config/bert_config.json',
help='model config file')
args = parser.parse_args()
model_config = args.config_file
MODEL_MAP = {
'bert': BertForClassification,
'lbert': BertForClassification,
'cnn': CharCNN
}
class TorchResource:
def __init__(self):
logger.info("...")
# 0. Load config
with open(model_config) as fin:
self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# 1. Load data
self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),
max_seq_len=self.config.max_seq_len,
model_type=self.config.model_type, config=self.config)
# 2. Load model
self.model = MODEL_MAP[self.config.model_type](self.config)
self.model = load_torch_model(
self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))
self.model.to(self.device)
logger.info("###")
def bert_classification(self,title, content):
logger.info('1:{}, 2:{}'.format(title, content))
row = {'type1': '/', 'title': title, 'content': content}
        df = pandas.DataFrame([row])  # DataFrame.append was removed in pandas 2.x
filename = "data/{}.csv".format(time.time())
df.to_csv(filename, index=False, columns=['type1', 'title', 'content'])
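        # The request is round-tripped through a temporary CSV so the existing
        # Data.load_file pipeline can be reused; note the file is never deleted,
        # so data/ grows by one file per request.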
test_set = self.data.load_file(filename, train=False)
data_loader_test = DataLoader(
test_set, batch_size=self.config.batch_size, shuffle=False)
# Evaluate
answer_list = evaluate(self.model, data_loader_test, self.device)
answer_list = [classy_dic[i] for i in answer_list]
return {"answer": answer_list}
def on_get(self, req, resp):
logger.info("...")
resp.set_header('Access-Control-Allow-Origin', 'http://localhost:8081')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials','true')
title = req.get_param('1', True)
content = req.get_param('2', True)
clean_title = shortenlines(title)
clean_content = cleanall(content)
resp.media = self.bert_classification(clean_title, clean_content)
logger.info("###")
def on_post(self, req, resp):
"""Handles POST requests"""
resp.set_header('Access-Control-Allow-Origin', 'http://localhost:8081')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials', 'true')
resp.set_header("Cache-Control", "no-cache")
data = req.stream.read(req.content_length)
        jsondata = json.loads(data)  # returns a dict, so index it rather than using attribute access
        clean_title = shortenlines(jsondata['title'])
        clean_content = cleanall(jsondata['content'])
resp.media = self.bert_classification(clean_title, clean_content)
if __name__=="__main__":
api = falcon.API(middleware=[cors_allow_all.middleware])
api.req_options.auto_parse_form_urlencoded = True
api.add_route('/z', TorchResource())
waitress.serve(api, port=58080, threads=48, url_scheme='http')
|
[
"evaluate.evaluate"
] |
[((441, 517), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)-18s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)-18s %(message)s')\n", (460, 517), False, 'import logging\n'), ((527, 546), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (544, 546), False, 'import logging\n'), ((564, 730), 'falcon_cors.CORS', 'CORS', ([], {'allow_all_origins': '(True)', 'allow_origins_list': "['http://localhost:8081']", 'allow_all_headers': '(True)', 'allow_all_methods': '(True)', 'allow_credentials_all_origins': '(True)'}), "(allow_all_origins=True, allow_origins_list=['http://localhost:8081'],\n    allow_all_headers=True, allow_all_methods=True,\n    allow_credentials_all_origins=True)\n", (568, 730), False, 'from falcon_cors import CORS\n'), ((844, 869), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (867, 869), False, 'import argparse\n'), ((4067, 4117), 'falcon.API', 'falcon.API', ([], {'middleware': '[cors_allow_all.middleware]'}), '(middleware=[cors_allow_all.middleware])\n', (4077, 4117), False, 'import falcon\n'), ((4217, 4279), 'waitress.serve', 'waitress.serve', (['api'], {'port': '(58080)', 'threads': '(48)', 'url_scheme': '"""http"""'}), "(api, port=58080, threads=48, url_scheme='http')\n", (4231, 4279), False, 'import waitress\n'), ((1383, 1408), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1406, 1408), False, 'import torch\n'), ((2499, 2569), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'self.config.batch_size', 'shuffle': '(False)'}), '(test_set, batch_size=self.config.batch_size, shuffle=False)\n', (2509, 2569), False, 'from torch.utils.data import DataLoader\n'), ((2624, 2675), 'evaluate.evaluate', 'evaluate', (['self.model', 'data_loader_test', 'self.device'], {}), '(self.model, data_loader_test, self.device)\n', (2632, 2675), False, 'from evaluate import evaluate\n'), ((3210, 3229), 'dataclean.shortenlines', 'shortenlines', (['title'], {}), '(title)\n', (3222, 3229), False, 'from dataclean import cleanall, shortenlines\n'), ((3254, 3271), 'dataclean.cleanall', 'cleanall', (['content'], {}), '(content)\n', (3262, 3271), False, 'from dataclean import cleanall, shortenlines\n'), ((3838, 3854), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (3848, 3854), False, 'import json\n'), ((3877, 3905), 'dataclean.shortenlines', 'shortenlines', (['jsondata.title'], {}), '(jsondata.title)\n', (3889, 3905), False, 'from dataclean import cleanall, shortenlines\n'), ((3930, 3956), 'dataclean.cleanall', 'cleanall', (['jsondata.content'], {}), '(jsondata.content)\n', (3938, 3956), False, 'from dataclean import cleanall, shortenlines\n'), ((1436, 1456), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1448, 1456), False, 'import torch\n'), ((1497, 1516), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1509, 1516), False, 'import torch\n'), ((2317, 2328), 'time.time', 'time.time', ([], {}), '()\n', (2326, 2328), False, 'import time\n'), ((1577, 1626), 'os.path.join', 'os.path.join', (['self.config.model_path', '"""vocab.txt"""'], {}), "(self.config.model_path, 'vocab.txt')\n", (1589, 1626), False, 'import os\n'), ((1927, 1976), 'os.path.join', 'os.path.join', (['self.config.model_path', '"""model.bin"""'], {}), "(self.config.model_path, 'model.bin')\n", (1939, 1976), False, 'import os\n'), ((2227, 2245), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (2243, 2245), False, 'import pandas\n'), ((1350, 1370), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**d)\n', (1365, 1370), False, 'from types import SimpleNamespace\n')]
|
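The record that ends above pairs a Falcon web service with its extracted calls: falcon_cors.CORS builds the CORS middleware, falcon.API assembles the WSGI app, and waitress.serve runs it. As a hedged illustration of that serving pattern, a minimal sketch; the PingResource class and the /ping route are hypothetical placeholders, not taken from the record.

# Minimal sketch of the falcon + falcon_cors + waitress wiring recorded above.
# PingResource and the /ping route are hypothetical placeholders.
import falcon
import waitress
from falcon_cors import CORS

class PingResource(object):
    def on_get(self, req, resp):
        # Falcon routes GET requests here; resp.media is serialized to JSON.
        resp.media = {'status': 'ok'}

cors_allow_all = CORS(allow_all_origins=True, allow_all_headers=True, allow_all_methods=True)
api = falcon.API(middleware=[cors_allow_all.middleware])
api.add_route('/ping', PingResource())

if __name__ == '__main__':
    # waitress blocks and serves the WSGI app, matching the recorded waitress.serve call.
    waitress.serve(api, port=58080, url_scheme='http')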
'''
Created on Aug 9, 2016
Keras Implementation of Neural Matrix Factorization (NeuMF) recommender model in:
He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
@author: <NAME> (<EMAIL>)
'''
import argparse
from time import time
import numpy as np
from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate
from keras.models import Model
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras.regularizers import l2
import DEMF
import MLP
from Dataset import Dataset
from evaluate import evaluate_model
#################### Arguments ####################
def parse_args():
parser = argparse.ArgumentParser(description="Run NNCF.")
parser.add_argument('--path', nargs='?', default='Data/',
help='Input data path.')
parser.add_argument('--dataset', nargs='?', default='ml-1m',
help='Choose a dataset.')
parser.add_argument('--epochs', type=int, default=100,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=256,
help='Batch size.')
parser.add_argument('--num_factors', type=int, default=64,
help='Embedding size of DMF model.')
parser.add_argument('--layers', nargs='?', default='[1024,512,256,128,64]',
help="MLP layers. Note that the first layer is the concatenation of user and item embeddings. So layers[0]/2 is the embedding size.")
parser.add_argument('--reg_mf', type=float, default=0,
help='Regularization for DMF embeddings.')
parser.add_argument('--reg_layers', nargs='?', default='[0,0,0,0,0]',
help="Regularization for each MLP layer. reg_layers[0] is the regularization for embeddings.")
parser.add_argument('--num_neg', type=int, default=4,
help='Number of negative instances to pair with a positive instance.')
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--learner', nargs='?', default='adam',
help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
parser.add_argument('--verbose', type=int, default=1,
help='Show performance per X iterations')
parser.add_argument('--out', type=int, default=1,
help='Whether to save the trained model.')
parser.add_argument('--dmf_pretrain', nargs='?', default='Pretrain/ml-1m_DMF4_[64,32,16,8]_1606876933.h5',
help='Specify the pretrain model file for MF part. If empty, no pretrain will be used')
parser.add_argument('--mlp_pretrain', nargs='?', default='Pretrain/ml-1m_MLP_[1024,512,256,128,64]_1608293197.h5',
help='Specify the pretrain model file for MLP part. If empty, no pretrain will be used')
return parser.parse_args()
#
# def init_normal(shape, name=None):
# return initializations.normal(shape, scale=0.01, name=name)
def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0):
assert len(layers) == len(reg_layers)
num_layer = len(layers) # Number of layers in the MLP
# Input variables
user_input = Input(shape=(1,), dtype='int32', name='user_input')
item_input = Input(shape=(1,), dtype='int32', name='item_input')
# Embedding layer
# Deprecate: init and W_regularizer
'''
MF_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim, name='mf_embedding_user',
init=init_normal, W_regularizer=l2(reg_mf), input_length=1)
MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim, name='mf_embedding_item',
init=init_normal, W_regularizer=l2(reg_mf), input_length=1)
MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=layers[0] / 2, name="mlp_embedding_user",
init=init_normal, W_regularizer=l2(reg_layers[0]), input_length=1)
MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=layers[0] / 2, name='mlp_embedding_item',
init=init_normal, W_regularizer=l2(reg_layers[0]), input_length=1)
'''
DMF1_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim // 2, name='dmf1_embedding_user',
embeddings_initializer='random_normal', embeddings_regularizer=l2(reg_mf),
input_length=1)
DMF1_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim // 2, name='dmf1_embedding_item',
embeddings_initializer='random_normal', embeddings_regularizer=l2(reg_mf),
input_length=1)
DMF2_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim // 2, name='dmf2_embedding_user',
embeddings_initializer='random_normal', embeddings_regularizer=l2(reg_mf),
input_length=1)
DMF2_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim // 2, name='dmf2_embedding_item',
embeddings_initializer='random_normal', embeddings_regularizer=l2(reg_mf),
input_length=1)
MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=layers[0] // 2, name="mlp_embedding_user",
embeddings_initializer='random_normal', embeddings_regularizer=l2(reg_layers[0]),
input_length=1)
MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=layers[0] // 2, name='mlp_embedding_item',
embeddings_initializer='random_normal', embeddings_regularizer=l2(reg_layers[0]),
input_length=1)
# DMF part
dmf1_user_latent = Flatten()(DMF1_Embedding_User(user_input))
dmf1_item_latent = Flatten()(DMF1_Embedding_Item(item_input))
dmf2_user_latent = Flatten()(DMF2_Embedding_User(user_input))
dmf2_item_latent = Flatten()(DMF2_Embedding_Item(item_input))
# Deprecate: merge
'''mf_vector = merge([mf_user_latent, mf_item_latent], mode='mul') # element-wise multiply'''
vector_r = Multiply()([dmf1_user_latent, dmf1_item_latent])
vector_l = Concatenate()([dmf2_user_latent, dmf2_item_latent])
layer = Dense(mf_dim // 2, kernel_regularizer=l2(reg_layers[0]), activation='linear', name='pre-layer')
vector_l = layer(vector_l)
dmf_vector = Concatenate()([vector_r, vector_l])
for idx in range(1, num_layer-4):
        # Deprecate: W_regularizer
'''layer = Dense(layers[idx], W_regularizer=l2(reg_layers[idx]), activation='relu', name='layer%d' % idx)'''
layer = Dense(layers[idx] // 16, kernel_regularizer=l2(reg_layers[idx]), activation='relu', name='dmf_layer%d' % idx)
dmf_vector = layer(dmf_vector)
# MLP part
mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
# Deprecate: merge
'''mlp_vector = merge([mlp_user_latent, mlp_item_latent], mode='concat')'''
mlp_vector = Concatenate(axis=1)([mlp_user_latent, mlp_item_latent])
for idx in range(1, num_layer):
layer = Dense(layers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name="layer%d" % idx)
mlp_vector = layer(mlp_vector)
# Concatenate DMF and MLP parts
# mf_vector = Lambda(lambda x: x * alpha)(mf_vector)
# mlp_vector = Lambda(lambda x : x * (1-alpha))(mlp_vector)
# Deprecate: merge
'''predict_vector = merge([mf_vector, mlp_vector], mode='concat')'''
predict_vector = Concatenate()([dmf_vector, mlp_vector])
# Final prediction layer
prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)
model = Model(inputs=[user_input, item_input],
outputs=prediction)
return model
def load_pretrain_model(model, dmf_model, mlp_model, num_layers):
# DMF embeddings
dmf1_user_embeddings = dmf_model.get_layer('dmf1_user_embedding').get_weights()
dmf1_item_embeddings = dmf_model.get_layer('dmf1_item_embedding').get_weights()
dmf2_user_embeddings = dmf_model.get_layer('dmf2_user_embedding').get_weights()
dmf2_item_embeddings = dmf_model.get_layer('dmf2_item_embedding').get_weights()
model.get_layer('dmf1_embedding_user').set_weights(dmf1_user_embeddings)
model.get_layer('dmf1_embedding_item').set_weights(dmf1_item_embeddings)
model.get_layer('dmf2_embedding_user').set_weights(dmf2_user_embeddings)
model.get_layer('dmf2_embedding_item').set_weights(dmf2_item_embeddings)
# DMF layers
dmf_layer_weights = dmf_model.get_layer('pre-layer').get_weights()
model.get_layer('pre-layer').set_weights(dmf_layer_weights)
for i in range(1, num_layers-4):
dmf_layer_weights = dmf_model.get_layer('layer%d' % i).get_weights()
model.get_layer('dmf_layer%d' % i).set_weights(dmf_layer_weights)
# MLP embeddings
mlp_user_embeddings = mlp_model.get_layer('user_embedding').get_weights()
mlp_item_embeddings = mlp_model.get_layer('item_embedding').get_weights()
model.get_layer('mlp_embedding_user').set_weights(mlp_user_embeddings)
model.get_layer('mlp_embedding_item').set_weights(mlp_item_embeddings)
# MLP layers
for i in range(1, num_layers):
mlp_layer_weights = mlp_model.get_layer('layer%d' % i).get_weights()
model.get_layer('layer%d' % i).set_weights(mlp_layer_weights)
# Prediction weights
dmf_prediction = dmf_model.get_layer('prediction').get_weights()
mlp_prediction = mlp_model.get_layer('prediction').get_weights()
new_weights = np.concatenate((dmf_prediction[0], mlp_prediction[0]), axis=0)
new_b = dmf_prediction[1] + mlp_prediction[1]
model.get_layer('prediction').set_weights([0.5 * new_weights, 0.5 * new_b])
return model
def get_train_instances(train, num_negatives):
user_input, item_input, labels = [], [], []
num_users = train.shape[0]
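    # Note: num_items is read from module scope (set in __main__ before this is called);
    # the num_users computed above is not used inside this function.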
for (u, i) in train.keys():
# positive instance
user_input.append(u)
item_input.append(i)
labels.append(1)
# negative instances
for t in range(num_negatives):
j = np.random.randint(num_items)
while (u, j) in train.keys():
j = np.random.randint(num_items)
user_input.append(u)
item_input.append(j)
labels.append(0)
return user_input, item_input, labels
if __name__ == '__main__':
args = parse_args()
num_epochs = args.epochs
batch_size = args.batch_size
mf_dim = args.num_factors
layers = eval(args.layers)
reg_mf = args.reg_mf
reg_layers = eval(args.reg_layers)
num_negatives = args.num_neg
learning_rate = args.lr
learner = args.learner
verbose = args.verbose
dmf_pretrain = args.dmf_pretrain
mlp_pretrain = args.mlp_pretrain
topK = 10
evaluation_threads = 1 # mp.cpu_count()
print("NNCF arguments: %s " % (args))
model_out_file = 'Pretrain/%s_NNCF_%d_%s_%d.h5' % (args.dataset, mf_dim, args.layers, time())
# Loading data
t1 = time()
dataset = Dataset(args.path + args.dataset)
train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
num_users, num_items = train.shape
print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
% (time() - t1, num_users, num_items, train.nnz, len(testRatings)))
# Build model
model = get_model(num_users, num_items, mf_dim, layers, reg_layers, reg_mf)
if learner.lower() == "adagrad":
model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
elif learner.lower() == "rmsprop":
model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
elif learner.lower() == "adam":
model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
else:
model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
# Load pretrain model
if dmf_pretrain != '' and mlp_pretrain != '':
dmf_model = DEMF.get_model(num_users, num_items, layers=[64, 32, 16, 8], reg_layers=[0, 0, 0, 0])
dmf_model.load_weights(dmf_pretrain)
mlp_model = MLP.get_model(num_users, num_items, layers, reg_layers)
mlp_model.load_weights(mlp_pretrain)
model = load_pretrain_model(model, dmf_model, mlp_model, len(layers))
print("Load pretrained DMF (%s) and MLP (%s) models done. " % (dmf_pretrain, mlp_pretrain))
# Init performance
(hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
print('Init: HR = %.4f, NDCG = %.4f' % (hr, ndcg))
best_hr, best_ndcg, best_iter = hr, ndcg, -1
if args.out > 0:
model.save_weights(model_out_file, overwrite=True)
# Training model
for epoch in range(num_epochs):
t1 = time()
# Generate training instances
user_input, item_input, labels = get_train_instances(train, num_negatives)
# Training
hist = model.fit([np.array(user_input), np.array(item_input)], # input
np.array(labels), # labels
batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
t2 = time()
# Evaluation
if epoch % verbose == 0:
(hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
hr, ndcg, loss = np.array(hits).mean(), np.array(ndcgs).mean(), hist.history['loss'][0]
print('Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]'
% (epoch, t2 - t1, hr, ndcg, loss, time() - t2))
if hr > best_hr:
best_hr, best_ndcg, best_iter = hr, ndcg, epoch
if args.out > 0:
model.save_weights(model_out_file, overwrite=True)
print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " % (best_iter, best_hr, best_ndcg))
if args.out > 0:
print("The best NNCF model is saved to %s" % (model_out_file))
|
[
"evaluate.evaluate_model"
] |
[((662, 710), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run NNCF."""'}), "(description='Run NNCF.')\n", (685, 710), False, 'import argparse\n'), ((3347, 3398), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""', 'name': '"""user_input"""'}), "(shape=(1,), dtype='int32', name='user_input')\n", (3352, 3398), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((3417, 3468), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""', 'name': '"""item_input"""'}), "(shape=(1,), dtype='int32', name='item_input')\n", (3422, 3468), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((8159, 8217), 'keras.models.Model', 'Model', ([], {'inputs': '[user_input, item_input]', 'outputs': 'prediction'}), '(inputs=[user_input, item_input], outputs=prediction)\n', (8164, 8217), False, 'from keras.models import Model\n'), ((10076, 10138), 'numpy.concatenate', 'np.concatenate', (['(dmf_prediction[0], mlp_prediction[0])'], {'axis': '(0)'}), '((dmf_prediction[0], mlp_prediction[0]), axis=0)\n', (10090, 10138), True, 'import numpy as np\n'), ((11602, 11608), 'time.time', 'time', ([], {}), '()\n', (11606, 11608), False, 'from time import time\n'), ((11624, 11657), 'Dataset.Dataset', 'Dataset', (['(args.path + args.dataset)'], {}), '(args.path + args.dataset)\n', (11631, 11657), False, 'from Dataset import Dataset\n'), ((13116, 13191), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'testRatings', 'testNegatives', 'topK', 'evaluation_threads'], {}), '(model, testRatings, testNegatives, topK, evaluation_threads)\n', (13130, 13191), False, 'from evaluate import evaluate_model\n'), ((6081, 6090), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6088, 6090), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((6148, 6157), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6155, 6157), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((6215, 6224), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6222, 6224), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((6282, 6291), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6289, 6291), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((6467, 6477), 'keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (6475, 6477), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((6532, 6545), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (6543, 6545), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((6743, 6756), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (6754, 6756), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((7186, 7195), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7193, 7195), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((7251, 7260), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7258, 7260), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((7418, 7437), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (7429, 7437), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((7952, 7965), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (7963, 7965), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((8042, 8132), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""lecun_uniform"""', 'name': '"""prediction"""'}), "(1, activation='sigmoid', kernel_initializer='lecun_uniform', name=\n 'prediction')\n", (8047, 8132), False, 'from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate\n'), ((12634, 12723), 'DEMF.get_model', 'DEMF.get_model', (['num_users', 'num_items'], {'layers': '[64, 32, 16, 8]', 'reg_layers': '[0, 0, 0, 0]'}), '(num_users, num_items, layers=[64, 32, 16, 8], reg_layers=[0,\n 0, 0, 0])\n', (12648, 12723), False, 'import DEMF\n'), ((12787, 12842), 'MLP.get_model', 'MLP.get_model', (['num_users', 'num_items', 'layers', 'reg_layers'], {}), '(num_users, num_items, layers, reg_layers)\n', (12800, 12842), False, 'import MLP\n'), ((13517, 13523), 'time.time', 'time', ([], {}), '()\n', (13521, 13523), False, 'from time import time\n'), ((13902, 13908), 'time.time', 'time', ([], {}), '()\n', (13906, 13908), False, 'from time import time\n'), ((4585, 4595), 'keras.regularizers.l2', 'l2', (['reg_mf'], {}), '(reg_mf)\n', (4587, 4595), False, 'from keras.regularizers import l2\n'), ((4860, 4870), 'keras.regularizers.l2', 'l2', (['reg_mf'], {}), '(reg_mf)\n', (4862, 4870), False, 'from keras.regularizers import l2\n'), ((5135, 5145), 'keras.regularizers.l2', 'l2', (['reg_mf'], {}), '(reg_mf)\n', (5137, 5145), False, 'from keras.regularizers import l2\n'), ((5410, 5420), 'keras.regularizers.l2', 'l2', (['reg_mf'], {}), '(reg_mf)\n', (5412, 5420), False, 'from keras.regularizers import l2\n'), ((5687, 5704), 'keras.regularizers.l2', 'l2', (['reg_layers[0]'], {}), '(reg_layers[0])\n', (5689, 5704), False, 'from keras.regularizers import l2\n'), ((5968, 5985), 'keras.regularizers.l2', 'l2', (['reg_layers[0]'], {}), '(reg_layers[0])\n', (5970, 5985), False, 'from keras.regularizers import l2\n'), ((6635, 6652), 'keras.regularizers.l2', 'l2', (['reg_layers[0]'], {}), '(reg_layers[0])\n', (6637, 6652), False, 'from keras.regularizers import l2\n'), ((10657, 10685), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (10674, 10685), True, 'import numpy as np\n'), ((11562, 11568), 'time.time', 'time', ([], {}), '()\n', (11566, 11568), False, 'from time import time\n'), ((13776, 13792), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (13784, 13792), True, 'import numpy as np\n'), ((13996, 14071), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'testRatings', 'testNegatives', 'topK', 'evaluation_threads'], {}), '(model, testRatings, testNegatives, topK, evaluation_threads)\n', (14010, 14071), False, 'from evaluate import evaluate_model\n'), ((7035, 7054), 'keras.regularizers.l2', 'l2', (['reg_layers[idx]'], {}), '(reg_layers[idx])\n', (7037, 7054), False, 'from keras.regularizers import l2\n'), ((7566, 7585), 'keras.regularizers.l2', 'l2', (['reg_layers[idx]'], {}), '(reg_layers[idx])\n', (7568, 7585), False, 'from keras.regularizers import l2\n'), ((10750, 10778), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (10767, 10778), True, 'import numpy as np\n'), ((12133, 12158), 'keras.optimizers.Adagrad', 'Adagrad', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (12140, 12158), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((13208, 13222), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (13216, 13222), True, 'import numpy as np\n'), ((13231, 13246), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (13239, 13246), True, 'import numpy as np\n'), ((13696, 13716), 'numpy.array', 'np.array', (['user_input'], {}), '(user_input)\n', (13704, 13716), True, 'import numpy as np\n'), ((13718, 13738), 'numpy.array', 'np.array', (['item_input'], {}), '(item_input)\n', (13726, 13738), True, 'import numpy as np\n'), ((11895, 11901), 'time.time', 'time', ([], {}), '()\n', (11899, 11901), False, 'from time import time\n'), ((12261, 12286), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (12268, 12286), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((12386, 12408), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (12390, 12408), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((12482, 12503), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (12485, 12503), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((14102, 14116), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (14110, 14116), True, 'import numpy as np\n'), ((14125, 14140), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (14133, 14140), True, 'import numpy as np\n'), ((14316, 14322), 'time.time', 'time', ([], {}), '()\n', (14320, 14322), False, 'from time import time\n')]
|
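get_model in the NNCF record above fuses a DMF tower (element-wise Multiply of user and item embeddings) with an MLP tower (Concatenate followed by Dense layers) before a single sigmoid prediction unit. A stripped-down sketch of that two-tower fusion; the sizes below are illustrative, and only the wiring mirrors the record.

# Hedged sketch of the Multiply/Concatenate fusion used by get_model above.
# num_users, num_items and dim are illustrative, not the script's defaults.
from keras.layers import Embedding, Input, Dense, Flatten, Multiply, Concatenate
from keras.models import Model

num_users, num_items, dim = 1000, 2000, 8
user_input = Input(shape=(1,), dtype='int32')
item_input = Input(shape=(1,), dtype='int32')
user_latent = Flatten()(Embedding(num_users, dim)(user_input))
item_latent = Flatten()(Embedding(num_items, dim)(item_input))
mf_vector = Multiply()([user_latent, item_latent])  # element-wise tower
mlp_vector = Dense(dim, activation='relu')(Concatenate()([user_latent, item_latent]))  # learned tower
prediction = Dense(1, activation='sigmoid')(Concatenate()([mf_vector, mlp_vector]))
model = Model(inputs=[user_input, item_input], outputs=prediction)
model.compile(optimizer='adam', loss='binary_crossentropy')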
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import os
import json
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from config import Config
from utils import EmbeddingLoader, RelationLoader, NYTDataLoader
from model import DS_Model
from evaluate import Eval
from plot import Canvas
class Runner(object):
def __init__(self, emb, class_num, loader, config):
self.class_num = class_num
self.loader = loader
self.config = config
self.model = DS_Model(emb, class_num, config)
self.model = self.model.to(config.device)
self.eval_tool = Eval(class_num, config)
self.plot_tool = Canvas(config)
def train(self):
train_loader, test_loader = self.loader
optimizer = optim.Adam(self.model.parameters(), lr=config.lr)
print(self.model)
        print('training model parameters:')
for name, param in self.model.named_parameters():
if param.requires_grad:
print('%s : %s' % (name, str(param.data.shape)))
print('--------------------------------------')
print('start to train the model ...')
max_auc = -float('inf')
for epoch in range(1, 1+self.config.epoch):
train_loss = 0.0
data_iterator = tqdm(train_loader, desc='Train')
for step, (data, label, scope) in enumerate(data_iterator):
self.model.train()
data = data.to(self.config.device)
label = label.to(self.config.device)
optimizer.zero_grad()
loss, _ = self.model(data, scope, label)
train_loss += loss.item()
loss.backward()
optimizer.step()
train_loss = train_loss / len(train_loader)
auc, test_loss, precision, recall = self.eval_tool.evaluate(
self.model, test_loader
)
print('[%03d] train_loss: %.3f | test_loss: %.3f | auc on test: %.3f'
% (epoch, train_loss, test_loss, auc), end=' ')
if auc > max_auc:
max_auc = auc
torch.save(self.model.state_dict(), os.path.join(
self.config.model_dir, 'model.pkl'))
print('>>> save models!')
else:
print()
def test(self):
print('-------------------------------------')
print('start test ...')
_, test_loader = self.loader
self.model.load_state_dict(torch.load(
os.path.join(config.model_dir, 'model.pkl')))
auc, test_loss, precision, recall = self.eval_tool.evaluate(
self.model, test_loader
)
print('test_loss: %.3f | auc on test: %.3f' % (test_loss, auc))
target_file = os.path.join(self.config.model_dir, 'pr.txt')
with open(target_file, 'w', encoding='utf-8') as fw:
for i in range(len(precision)):
fw.write('%.6f \t %.6f \n' % (precision[i], recall[i]))
self.plot_tool.plot(precision, recall, auc)
if __name__ == '__main__':
config = Config()
print('--------------------------------------')
print('some config:')
config.print_config()
print('--------------------------------------')
print('start to load data ...')
token2id, emb = EmbeddingLoader(config).load_embedding()
rel2id, id2rel, class_num = RelationLoader(config).get_relation()
loader = NYTDataLoader(rel2id, token2id, config)
train_loader, test_loader = None, None
if config.mode == 0: # train mode
train_loader = loader.get_train()
test_loader = loader.get_test()
elif config.mode == 1:
test_loader = loader.get_test()
loader = [train_loader, test_loader]
print('finish!')
runner = Runner(emb, class_num, loader, config)
if config.mode == 0: # train mode
runner.train()
runner.test()
elif config.mode == 1:
runner.test()
else:
raise ValueError('invalid train mode!')
|
[
"evaluate.Eval"
] |
[((3172, 3180), 'config.Config', 'Config', ([], {}), '()\n', (3178, 3180), False, 'from config import Config\n'), ((3518, 3557), 'utils.NYTDataLoader', 'NYTDataLoader', (['rel2id', 'token2id', 'config'], {}), '(rel2id, token2id, config)\n', (3531, 3557), False, 'from utils import EmbeddingLoader, RelationLoader, NYTDataLoader\n'), ((564, 596), 'model.DS_Model', 'DS_Model', (['emb', 'class_num', 'config'], {}), '(emb, class_num, config)\n', (572, 596), False, 'from model import DS_Model\n'), ((672, 695), 'evaluate.Eval', 'Eval', (['class_num', 'config'], {}), '(class_num, config)\n', (676, 695), False, 'from evaluate import Eval\n'), ((721, 735), 'plot.Canvas', 'Canvas', (['config'], {}), '(config)\n', (727, 735), False, 'from plot import Canvas\n'), ((2855, 2900), 'os.path.join', 'os.path.join', (['self.config.model_dir', '"""pr.txt"""'], {}), "(self.config.model_dir, 'pr.txt')\n", (2867, 2900), False, 'import os\n'), ((1348, 1380), 'tqdm.tqdm', 'tqdm', (['train_loader'], {'desc': '"""Train"""'}), "(train_loader, desc='Train')\n", (1352, 1380), False, 'from tqdm import tqdm\n'), ((3394, 3417), 'utils.EmbeddingLoader', 'EmbeddingLoader', (['config'], {}), '(config)\n', (3409, 3417), False, 'from utils import EmbeddingLoader, RelationLoader, NYTDataLoader\n'), ((3467, 3489), 'utils.RelationLoader', 'RelationLoader', (['config'], {}), '(config)\n', (3481, 3489), False, 'from utils import EmbeddingLoader, RelationLoader, NYTDataLoader\n'), ((2599, 2642), 'os.path.join', 'os.path.join', (['config.model_dir', '"""model.pkl"""'], {}), "(config.model_dir, 'model.pkl')\n", (2611, 2642), False, 'import os\n'), ((2239, 2287), 'os.path.join', 'os.path.join', (['self.config.model_dir', '"""model.pkl"""'], {}), "(self.config.model_dir, 'model.pkl')\n", (2251, 2287), False, 'import os\n')]
|
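Runner.train above overwrites its checkpoint only when the test AUC improves, so model_dir always holds the best weights seen so far. The same save-on-improvement idiom in isolation; run_epoch, evaluate_fn and model_dir are placeholders standing in for the record's training loop, eval_tool and config.

# Hedged sketch of the save-on-best-metric idiom from Runner.train above.
# run_epoch, evaluate_fn and model_dir are placeholder names.
import os
import torch

def train_keeping_best(model, run_epoch, evaluate_fn, epochs, model_dir):
    best_auc = -float('inf')
    for epoch in range(1, epochs + 1):
        run_epoch(model)            # one pass over the training data
        auc = evaluate_fn(model)
        if auc > best_auc:
            best_auc = auc
            # Persist weights only when the validation metric improves.
            torch.save(model.state_dict(), os.path.join(model_dir, 'model.pkl'))
    return best_auc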
import warnings
warnings.filterwarnings("ignore")
import os, numpy as np, argparse, random, matplotlib, datetime
os.chdir(os.path.dirname(os.path.realpath(__file__)))
from pathlib import Path
matplotlib.use('agg')
from tqdm import tqdm
import auxiliaries as aux
import datasets as data
import netlib as netlib
import losses as losses
import evaluate as eval
import time
import copy
from tensorboardX import SummaryWriter
import torch.multiprocessing
from mixup import *
torch.multiprocessing.set_sharing_strategy('file_system')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='Inaturalist', type=str, help='Dataset to use.', choices=['Inaturalist','vehicle_id', 'sop', 'cars196', 'cub'])
parser.add_argument('--lr', default=0.00001, type=float, help='Learning Rate for network parameters.')
parser.add_argument('--fc_lr_mul', default=5, type=float, help='OPTIONAL: Multiply the embedding layer learning rate by this value. If set to 0, the embedding layer shares the same learning rate.')
parser.add_argument('--n_epochs', default=400, type=int, help='Number of training epochs.')
parser.add_argument('--kernels', default=16, type=int, help='Number of workers for pytorch dataloader.')
parser.add_argument('--bs', default=112 , type=int, help='Mini-Batchsize to use.')
parser.add_argument('--bs_base', default=200 , type=int, help='Mini-Batchsize to use for evaluation and for chunks in two feed-forward setup.')
parser.add_argument('--samples_per_class', default=4, type=int, help='Number of samples in one class drawn before choosing the next class')
parser.add_argument('--seed', default=1, type=int, help='Random seed for reproducibility.')
parser.add_argument('--scheduler', default='step', type=str, help='Type of learning rate scheduling. Currently: step & exp.')
parser.add_argument('--gamma', default=0.3, type=float, help='Learning rate reduction after tau epochs.')
parser.add_argument('--decay', default=0.0004, type=float, help='Weight decay for optimizer.')
parser.add_argument('--tau', default= [10,20,30],nargs='+',type=int,help='Stepsize(s) before reducing learning rate.')
parser.add_argument('--infrequent_eval', default=1,type=int, help='only compute evaluation metrics every 10 epochs')
parser.add_argument('--opt', default = 'adam',help='adam or sgd')
parser.add_argument('--loss', default='recallatk', type=str)
parser.add_argument('--mixup', default=0, type=int, help='Gompertzap: use mixup')
parser.add_argument('--sigmoid_temperature', default=0.01, type=float, help='RS@k: the temperature of the sigmoid used to estimate ranks')
parser.add_argument('--k_vals', nargs='+', default=[1,2,4,8], type=int, help='Recall @ Values.')
parser.add_argument('--k_vals_train', nargs='+', default=[1,2,4,8,16], type=int, help='Training recall@k vals.')
parser.add_argument('--k_temperatures', nargs='+', default=[1,2,4,8,16], type=int, help='Temperature for training recall@k vals.')
parser.add_argument('--resume', default='', type=str, help='path to checkpoint to load weights from (if empty then ImageNet pre-trained weights are loaded')
parser.add_argument('--embed_dim', default=512, type=int, help='Embedding dimensionality of the network')
parser.add_argument('--arch', default='resnet50', type=str, help='Network backend choice: resnet50, googlenet, BNinception')
parser.add_argument('--grad_measure', action='store_true', help='If added, gradients passed from embedding layer to the last conv-layer are stored in each iteration.')
parser.add_argument('--dist_measure', action='store_true', help='If added, the ratio between intra- and interclass distances is stored after each epoch.')
parser.add_argument('--not_pretrained', action='store_true', help='If added, the network will be trained WITHOUT ImageNet-pretrained weights.')
parser.add_argument('--gpu', default=0, type=int, help='GPU-id for GPU to use.')
parser.add_argument('--savename', default='', type=str, help='Save folder name if any special information is to be included.')
parser.add_argument('--source_path', default='/home/patelyas/RecallatK_surrogate', type=str, help='Path to data')
parser.add_argument('--save_path', default=os.getcwd()+'/Training_Results', type=str, help='Where to save the checkpoints')
opt = parser.parse_args()
opt.source_path += '/'+opt.dataset
opt.save_path += '/'+opt.dataset
if opt.dataset== 'Inaturalist':
opt.k_vals = [1,4,16,32]
opt.bs = 4000
opt.n_epochs = 90
if opt.arch == 'resnet50':
opt.tau = [40,70]
opt.bs_base = 200
opt.lr = 0.0001
opt.opt = 'adam'
if opt.arch == 'ViTB32':
opt.tau = [10,40,70]
opt.bs_base = 200
opt.lr = 0.00005
opt.opt = 'adamW'
if opt.arch == 'ViTB16':
opt.tau = [10,40,70]
opt.bs_base = 100
opt.lr = 0.00005
opt.opt = 'adamW'
if opt.dataset=='sop':
opt.tau = [25,50]
opt.k_vals = [1,10,100,1000]
opt.bs = 4000
opt.n_epochs = 55
if opt.arch == 'resnet50':
opt.bs_base = 200
opt.lr = 0.0002
opt.opt = 'adam'
if opt.arch == 'ViTB32':
opt.bs_base = 200
opt.lr = 0.00005
opt.opt = 'adamW'
if opt.arch == 'ViTB16':
opt.bs_base = 100
opt.lr = 0.00005
opt.opt = 'adamW'
if opt.dataset=='vehicle_id':
opt.tau = [40,70]
opt.k_vals = [1,5]
opt.bs = 4000
opt.n_epochs = 90
if opt.arch == 'resnet50':
opt.bs_base = 200
opt.lr = 0.0001
opt.opt = 'adam'
if opt.arch == 'ViTB32':
opt.bs_base = 200
opt.lr = 0.0001
opt.opt = 'adamW'
if opt.arch == 'ViTB16':
opt.bs_base = 100
opt.lr = 0.00005
opt.opt = 'adamW'
if opt.dataset == 'cars196':
opt.k_vals = [1,2,4,8,16]
opt.bs = 392
opt.bs_base = 98
if opt.arch == 'resnet50':
opt.n_epochs = 170
opt.tau = [80, 140]
opt.lr = 0.0001
opt.opt = 'adam'
if opt.arch == 'ViTB32':
opt.n_epochs = 50
opt.tau = [20,30,40]
opt.lr = 0.00003
opt.opt = 'adamW'
if opt.arch == 'ViTB16':
opt.n_epochs = 50
opt.tau = [20,30,40]
opt.lr = 0.00001
opt.opt = 'adamW'
if opt.dataset == 'cub':
opt.k_vals = [1,2,4,8,16]
opt.bs = 400
opt.bs_base = 100
if opt.arch == 'resnet50':
opt.n_epochs = 40
opt.tau = [10,20,30]
opt.lr = 0.0001
opt.opt = 'adam'
if opt.arch == 'ViTB32':
opt.n_epochs = 40
opt.tau = [10,20,30]
opt.lr = 0.00003
opt.opt = 'adamW'
if opt.arch == 'ViTB16':
opt.n_epochs = 40
opt.tau = [10,20,30]
opt.lr = 0.00001
opt.opt = 'adamW'
timestamp = datetime.datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S")
exp_name = aux.args2exp_name(opt)
opt.save_name = f"weights_{exp_name}" +'/'+ timestamp
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed); torch.cuda.manual_seed_all(opt.seed)
tensorboard_path = Path(f"logs/logs_{exp_name}") / timestamp
tensorboard_path.parent.mkdir(exist_ok=True, parents=True)
global writer;
writer = SummaryWriter(tensorboard_path)
os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= str(opt.gpu)
opt.device = torch.device('cuda')
model = netlib.networkselect(opt)
_ = model.to(opt.device)
if 'fc_lr_mul' in vars(opt).keys() and opt.fc_lr_mul!=0:
all_but_fc_params = list(filter(lambda x: 'last_linear' not in x[0],model.named_parameters()))
for ind, param in enumerate(all_but_fc_params):
all_but_fc_params[ind] = param[1]
fc_params = model.model.last_linear.parameters()
to_optim = [{'params':all_but_fc_params,'lr':opt.lr,'weight_decay':opt.decay},
{'params':fc_params,'lr':opt.lr*opt.fc_lr_mul,'weight_decay':opt.decay}]
else:
to_optim = [{'params':model.parameters(),'lr':opt.lr,'weight_decay':opt.decay}]
dataloaders = data.give_dataloaders(opt.dataset, opt)
opt.num_classes = len(dataloaders['training'].dataset.avail_classes)
metrics_to_log = aux.metrics_to_examine(opt.dataset, opt.k_vals)
LOG = aux.LOGGER(opt, metrics_to_log, name='Base', start_new=True)
criterion, to_optim = losses.loss_select(opt.loss, opt, to_optim)
_ = criterion.to(opt.device)
if opt.grad_measure:
grad_measure = eval.GradientMeasure(opt, name='baseline')
if opt.dist_measure:
distance_measure = eval.DistanceMeasure(dataloaders['evaluation'], opt, name='Train', update_epochs=1)
if opt.opt == 'adam':
optimizer = torch.optim.Adam(to_optim)
elif opt.opt == 'sgd':
optimizer = torch.optim.SGD(to_optim)
elif opt.opt == 'rmsprop':
optimizer = torch.optim.RMSprop(to_optim)
else:
raise Exception('unknown optimiser')
if opt.scheduler=='exp':
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=opt.gamma)
elif opt.scheduler=='step':
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.tau, gamma=opt.gamma)
elif opt.scheduler=='none':
print('No scheduling used!')
else:
raise Exception('No scheduling option for input: {}'.format(opt.scheduler))
def same_model(model1,model2):
for p1, p2 in zip(model1.parameters(), model2.parameters()):
if p1.data.ne(p2.data).sum() > 0:
return False
return True
def train_one_epoch(train_dataloader, model, optimizer, criterion, opt, epoch):
loss_collect = []
start = time.time()
data_iterator = tqdm(train_dataloader, desc='Epoch {} Training...'.format(epoch))
optimizer.zero_grad()
for i,(class_labels, input) in enumerate(data_iterator):
output = torch.zeros((len(input), opt.embed_dim)).to(opt.device)
for j in range(0, len(input), opt.bs_base):
input_x = input[j:j+opt.bs_base,:].to(opt.device)
x = model(input_x)
output[j:j+opt.bs_base,:] = copy.copy(x)
del x
torch.cuda.empty_cache()
if criterion.mixup:
output_mixup = pos_mixup(output, criterion.num_id)
num_samples = output_mixup.shape[0]
else:
num_samples = output.shape[0]
output.retain_grad()
loss = 0.
for q in range(0, num_samples):
if criterion.mixup: loss += criterion(output_mixup, q)
else: loss += criterion(output, q)
loss_collect.append(loss.item())
loss.backward()
output_grad = copy.copy(output.grad)
del loss
del output
if criterion.mixup: del output_mixup
torch.cuda.empty_cache()
for j in range(0, len(input), opt.bs_base):
input_x = input[j:j+opt.bs_base,:].to(opt.device)
x = model(input_x)
x.backward(output_grad[j:j+opt.bs_base,:])
optimizer.step()
optimizer.zero_grad()
if opt.grad_measure:
grad_measure.include(model.model.last_linear)
if i==len(train_dataloader)-1:
data_iterator.set_description('Epoch (Train) {0}: Mean Loss [{1:.4f}]'.format(epoch, np.mean(loss_collect)))
LOG.log('train', LOG.metrics_to_log['train'], [epoch, np.round(time.time()-start,4), np.mean(loss_collect)])
writer.add_scalar('global/training_loss',np.mean(loss_collect),epoch)
if opt.grad_measure:
grad_measure.dump(epoch)
print('\n-----\n')
if opt.dataset in ['Inaturalist', 'sop', 'cars196', 'cub']:
eval_params = {'dataloader': dataloaders['testing'], 'model': model, 'opt': opt, 'epoch': 0}
elif opt.dataset == 'vehicle_id':
eval_params = {
'dataloaders': [dataloaders['testing_set1'], dataloaders['testing_set2'], dataloaders['testing_set3']],
'model': model, 'opt': opt, 'epoch': 0}
print('epochs -> '+str(opt.n_epochs))
for epoch in range(opt.n_epochs):
if opt.scheduler!='none': print('Running with learning rates {}...'.format(' | '.join('{}'.format(x) for x in scheduler.get_lr())))
_ = model.train()
train_one_epoch(dataloaders['training'], model, optimizer, criterion, opt, epoch)
dataloaders['training'].dataset.reshuffle()
_ = model.eval()
if opt.dataset in ['Inaturalist', 'sop', 'cars196', 'cub']:
eval_params = {'dataloader':dataloaders['testing'], 'model':model, 'opt':opt, 'epoch':epoch}
elif opt.dataset=='vehicle_id':
eval_params = {'dataloaders':[dataloaders['testing_set1'], dataloaders['testing_set2'], dataloaders['testing_set3']], 'model':model, 'opt':opt, 'epoch':epoch}
if opt.infrequent_eval == 1:
epoch_freq = 5
else:
epoch_freq = 1
if not opt.dataset == 'vehicle_id':
if epoch%epoch_freq == 0 or epoch == opt.n_epochs - 1:
results = eval.evaluate(opt.dataset, LOG, save=True, **eval_params)
writer.add_scalar('global/recall1',results[0][0],epoch+1)
writer.add_scalar('global/recall2',results[0][1],epoch+1)
writer.add_scalar('global/recall3',results[0][2],epoch+1)
writer.add_scalar('global/recall4',results[0][3],epoch+1)
writer.add_scalar('global/NMI',results[1],epoch+1)
writer.add_scalar('global/F1',results[2],epoch+1)
else:
if epoch%epoch_freq == 0 or epoch == opt.n_epochs - 1:
results = eval.evaluate(opt.dataset, LOG, save=True, **eval_params)
writer.add_scalar('global/recall1',results[2],epoch+1)
            writer.add_scalar('global/recall2',results[3],epoch+1)  # writer.add_scalar('global/recall3',results[0][2],0)
writer.add_scalar('global/recall3',results[6],epoch+1)
writer.add_scalar('global/recall4',results[7],epoch+1)
writer.add_scalar('global/recall5',results[10],epoch+1)
writer.add_scalar('global/recall6',results[11],epoch+1)
if opt.dist_measure:
distance_measure.measure(model, epoch)
if opt.scheduler != 'none':
scheduler.step()
print('\n-----\n')
|
[
"evaluate.GradientMeasure",
"evaluate.DistanceMeasure",
"evaluate.evaluate"
] |
[((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((192, 213), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (206, 213), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((538, 563), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (561, 563), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((7145, 7167), 'auxiliaries.args2exp_name', 'aux.args2exp_name', (['opt'], {}), '(opt)\n', (7162, 7167), True, 'import auxiliaries as aux\n'), ((7222, 7243), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (7233, 7243), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((7244, 7268), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (7258, 7268), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((7513, 7544), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['tensorboard_path'], {}), '(tensorboard_path)\n', (7526, 7544), False, 'from tensorboardX import SummaryWriter\n'), ((7690, 7715), 'netlib.networkselect', 'netlib.networkselect', (['opt'], {}), '(opt)\n', (7710, 7715), True, 'import netlib as netlib\n'), ((8364, 8403), 'datasets.give_dataloaders', 'data.give_dataloaders', (['opt.dataset', 'opt'], {}), '(opt.dataset, opt)\n', (8385, 8403), True, 'import datasets as data\n'), ((8491, 8538), 'auxiliaries.metrics_to_examine', 'aux.metrics_to_examine', (['opt.dataset', 'opt.k_vals'], {}), '(opt.dataset, opt.k_vals)\n', (8513, 8538), True, 'import auxiliaries as aux\n'), ((8545, 8605), 'auxiliaries.LOGGER', 'aux.LOGGER', (['opt', 'metrics_to_log'], {'name': '"""Base"""', 'start_new': '(True)'}), "(opt, metrics_to_log, name='Base', start_new=True)\n", (8555, 8605), True, 'import auxiliaries as aux\n'), ((8629, 8672), 'losses.loss_select', 'losses.loss_select', (['opt.loss', 'opt', 'to_optim'], {}), '(opt.loss, opt, to_optim)\n', (8647, 8672), True, 'import losses as losses\n'), ((7387, 7416), 'pathlib.Path', 'Path', (['f"""logs/logs_{exp_name}"""'], {}), "(f'logs/logs_{exp_name}')\n", (7391, 7416), False, 'from pathlib import Path\n'), ((8743, 8785), 'evaluate.GradientMeasure', 'eval.GradientMeasure', (['opt'], {'name': '"""baseline"""'}), "(opt, name='baseline')\n", (8763, 8785), True, 'import evaluate as eval\n'), ((8830, 8917), 'evaluate.DistanceMeasure', 'eval.DistanceMeasure', (["dataloaders['evaluation']", 'opt'], {'name': '"""Train"""', 'update_epochs': '(1)'}), "(dataloaders['evaluation'], opt, name='Train',\n update_epochs=1)\n", (8850, 8917), True, 'import evaluate as eval\n'), ((9856, 9867), 'time.time', 'time.time', ([], {}), '()\n', (9865, 9867), False, 'import time\n'), ((138, 164), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((7079, 7102), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7100, 7102), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((10852, 10874), 'copy.copy', 'copy.copy', (['output.grad'], {}), '(output.grad)\n', (10861, 10874), False, 'import copy\n'), ((11650, 11671), 'numpy.mean', 'np.mean', (['loss_collect'], {}), '(loss_collect)\n', (11657, 11671), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((4509, 4520), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4518, 4520), False, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((10299, 10311), 'copy.copy', 'copy.copy', (['x'], {}), '(x)\n', (10308, 10311), False, 'import copy\n'), ((11581, 11602), 'numpy.mean', 'np.mean', (['loss_collect'], {}), '(loss_collect)\n', (11588, 11602), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((13096, 13153), 'evaluate.evaluate', 'eval.evaluate', (['opt.dataset', 'LOG'], {'save': '(True)'}), '(opt.dataset, LOG, save=True, **eval_params)\n', (13109, 13153), True, 'import evaluate as eval\n'), ((13655, 13712), 'evaluate.evaluate', 'eval.evaluate', (['opt.dataset', 'LOG'], {'save': '(True)'}), '(opt.dataset, LOG, save=True, **eval_params)\n', (13668, 13712), True, 'import evaluate as eval\n'), ((11468, 11489), 'numpy.mean', 'np.mean', (['loss_collect'], {}), '(loss_collect)\n', (11475, 11489), True, 'import os, numpy as np, argparse, random, matplotlib, datetime\n'), ((11559, 11570), 'time.time', 'time.time', ([], {}), '()\n', (11568, 11570), False, 'import time\n')]
|
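train_one_epoch above never holds the autograd graph for a full 4000-sample batch: it embeds the batch in bs_base-sized chunks, computes the loss gradient with respect to the assembled embedding matrix, then re-forwards each chunk and injects the stored gradient via x.backward(output_grad[...]). A hedged sketch of that two-pass pattern; loss_fn, chunk and embed_dim are placeholder names, and pass 1 here runs under no_grad, a cleaner equivalent of the script's copy-and-free bookkeeping.

# Hedged sketch of the chunked two-pass backward used by train_one_epoch above.
# loss_fn, chunk and embed_dim are placeholders, not the script's names.
import torch

def chunked_step(model, optimizer, loss_fn, inputs, chunk, embed_dim, device):
    with torch.no_grad():                             # pass 1: embeddings, no graph
        output = torch.zeros(len(inputs), embed_dim, device=device)
        for j in range(0, len(inputs), chunk):
            output[j:j + chunk] = model(inputs[j:j + chunk].to(device))
    output.requires_grad_(True)
    loss = loss_fn(output)
    loss.backward()                                     # fills only output.grad
    grad = output.grad
    optimizer.zero_grad()
    for j in range(0, len(inputs), chunk):              # pass 2: re-forward per chunk
        x = model(inputs[j:j + chunk].to(device))
        x.backward(grad[j:j + chunk])                   # accumulates weight gradients
    optimizer.step()
    return loss.item()

Only one chunk's activations are alive at a time in either pass, at the cost of running the forward computation twice.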
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Git-based CTF
###############################################################################
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (c) 2018 SoftSec Lab. KAIST
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
from ctf_utils import prompt_checkout_warning, load_config
from execute import exec_service, exec_exploit
from submit import submit
from fetch import fetch
from verify_service import verify_service
from verify_exploit import verify_exploit
from verify_injection import verify_injection
from show_score import show_score
from evaluate import evaluate
from get_hash import get_hash
from setup_env import setup_env
def add_exploit(parser):
parser.add_argument("--exploit", metavar="DIR", required=True,
help="specify the exploit directory")
def add_service_dir(parser):
parser.add_argument("--service-dir", metavar="DIR", required=True,
help="specify the service directory")
def add_service_name(parser):
parser.add_argument("--service-name", metavar="SRVNAME", required=True,
help="specify the name of the service")
def add_team(parser):
parser.add_argument("--team", metavar="TEAM", required=True,
help="specify the team to verify")
def add_target(parser):
parser.add_argument("--target", metavar="TEAM", required=True,
help="specify the target team")
def add_branch(parser):
parser.add_argument("--branch", metavar="BRANCH", required=True,
help="specify the target branch")
def add_host_port(parser):
parser.add_argument("--host-port", metavar="NUM", default="4000",
help="specify the host port number (default: 4000)")
def add_service_port(parser):
parser.add_argument("--service-port", metavar="NUM", default="4000",
help="specify the service port number (default: 4000)")
def add_conf(parser):
parser.add_argument("--conf", metavar="FILE", default="../configuration/config.json",
help="specify the config file (default: ../configuration/config.json)")
def add_admin_conf(parser):
parser.add_argument("--admin-conf", metavar="FILE", default="../configuration/.config.json",
help="specify the administrative config file (default: ../configuration/.config.json)")
def add_repo_location(parser):
parser.add_argument("--repo_location", metavar="DIR", default="./",
help="specify the location for repos to be cloned to (default: ./)")
def verify_service_main(prog, options):
desc = 'verify service docker'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_team(parser)
add_branch(parser)
add_host_port(parser)
add_service_port(parser)
add_conf(parser)
args = parser.parse_args(options)
verify_service(args.team, args.branch, args.service_port, args.host_port,
args.conf)
def verify_exploit_main(prog, options):
desc = 'verify written exploit'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_exploit(parser)
add_service_dir(parser)
add_branch(parser)
add_conf(parser)
parser.add_argument("--encrypt", dest="encrypt", action="store_true",
default=False,
help="specify whether to encrypt the verified exploit")
parser.add_argument("--timeout", metavar="SEC", required=True,
help="specify timeout for exploit")
args = parser.parse_args(options)
prompt_checkout_warning(args.service_dir)
config = load_config(args.conf)
verify_exploit(args.exploit, args.service_dir, args.branch,
int(args.timeout), config, args.encrypt)
def verify_injection_main(prog, options):
desc = 'verify injected vulnerabilities'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_team(parser)
add_conf(parser)
args = parser.parse_args(options)
verify_injection(args.team, args.conf)
def submit_main(prog, options):
desc = 'submit an exploit'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_exploit(parser)
add_service_dir(parser)
add_target(parser)
add_conf(parser)
add_branch(parser)
args = parser.parse_args(options)
submit(args.exploit, args.service_dir, args.branch, args.target, args.conf)
def fetch_main(prog, options):
desc = 'fetch an exploit'
parser = argparse.ArgumentParser(description=desc, prog=prog)
parser.add_argument("--issue", metavar="NO", required=True,
help="specify the issue number")
add_team(parser)
add_conf(parser)
args = parser.parse_args(options)
config = load_config(args.conf)
fetch(args.team, args.issue, config)
def verify_main(prog, options):
if len(options) == 0:
print('Usage:', prog, '<action> [options ...]\n')
print('Possible actions:')
print(' service : validate a service')
print(' exploit : validate an exploit')
print(' injection : validate injected vulnerabilities')
sys.exit()
action = options[0]
if action == 'service':
verify_service_main(prog + ' service', options[1:])
elif action == 'exploit':
verify_exploit_main(prog + ' exploit', options[1:])
elif action == 'injection':
verify_injection_main(prog + ' injection', options[1:])
else:
print('Unknown action.')
def score_main(prog, options):
desc = 'show the current score'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_conf(parser)
args = parser.parse_args(options)
show_score(args.conf)
def hash_main(prog, options):
desc = 'get latest hash of commit for each branch'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_conf(parser)
args = parser.parse_args(options)
get_hash(args.conf)
def setup_main(prog, options):
desc = 'setup CTF environment'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_admin_conf(parser)
add_repo_location(parser)
args = parser.parse_args(options)
setup_env(args.admin_conf, args.repo_location)
def eval_main(prog, options):
desc = 'evaluate participants'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_conf(parser)
args = parser.parse_args(options)
evaluate(args.conf)
def exec_service_main(prog, options):
desc = 'execute a service'
parser = argparse.ArgumentParser(description=desc, prog=prog)
add_service_dir(parser)
add_service_name(parser)
add_host_port(parser)
add_service_port(parser)
args = parser.parse_args(options)
exec_service(args.service_name,
args.service_dir,
args.host_port,
args.service_port)
def exec_exploit_main(prog, options):
desc = 'execute an exploit'
parser = argparse.ArgumentParser(description=desc, prog=prog)
parser.add_argument("--exploit-dir", metavar="DIR", required=True,
help="specify the exploit directory")
add_service_name(parser)
parser.add_argument("--ip", metavar="ADDR", default="127.0.0.1",
help="specify the IP address (default: 127.0.0.1)")
parser.add_argument("--port", metavar="NUM", default="4000",
help="specify the IP address (default: 4000)")
parser.add_argument("--timeout", metavar="SEC", required=True,
help="specify timeout for exploit")
args = parser.parse_args(options)
exec_exploit(args.service_name, args.exploit_dir, args.ip, int(args.port), \
int(args.timeout))
def exec_main(prog, options):
if len(options) == 0:
print('Usage:', prog, '<target> [options ...]\n')
print('Possible targets:')
print(' service : execute a service')
print(' exploit : execute an exploit')
sys.exit()
target = options[0]
if target == 'service':
exec_service_main(prog + ' service', options[1:])
elif target == 'exploit':
exec_exploit_main(prog + ' exploit', options[1:])
else:
print('Unknown action.')
def print_usage():
print('Usage:', sys.argv[0], '<action> [options ...]\n')
print('Possible actions:')
print(' help : show this help')
print(' exec : execute service/exploit')
print(' verify : verify service/injection/exploit')
print(' submit : submit an exploit')
print(' fetch : fetch an exploit')
print(' score : show the score')
print(' hash : get hash of each branch (for administrative purpose)')
print(' eval : manage the game score (for administrative purpose)')
print(' setup : setup the CTF env. (for administrative purpose)')
sys.exit()
def print_logo():
print (r"""
___ _ _ _ _ ___ _____ ___
/ _ (_) |_ | |__ __ _ ___ ___ __| | / __\/__ \/ __\
/ /_\/ | __|____| '_ \ / _` / __|/ _ \/ _` | / / / /\/ _\
/ /_\\| | ||_____| |_) | (_| \__ \ __/ (_| | / /___ / / / /
\____/|_|\__| |_.__/ \__,_|___/\___|\__,_| \____/ \/ \/
""")
def main(action, options):
if action == 'help':
print_usage()
elif action == 'exec':
exec_main(sys.argv[0] + ' exec', options)
elif action == 'verify':
verify_main(sys.argv[0] + ' verify', options)
elif action == 'submit':
submit_main(sys.argv[0] + ' submit', options)
elif action == 'fetch':
fetch_main(sys.argv[0] + ' fetch', options)
elif action == 'score':
score_main(sys.argv[0] + ' score', options)
elif action == 'hash':
hash_main(sys.argv[0] + ' hash', options)
elif action == 'eval':
eval_main(sys.argv[0] + ' eval', options)
elif action == 'setup':
setup_main(sys.argv[0] + ' setup', options)
else:
print('Unknown action.')
if __name__ == "__main__":
if len(sys.argv) < 2:
print_logo()
print_usage()
main(sys.argv[1], sys.argv[2:])
|
[
"evaluate.evaluate"
] |
[((3346, 3398), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (3369, 3398), False, 'import argparse\n'), ((3561, 3649), 'verify_service.verify_service', 'verify_service', (['args.team', 'args.branch', 'args.service_port', 'args.host_port', 'args.conf'], {}), '(args.team, args.branch, args.service_port, args.host_port,\n args.conf)\n', (3575, 3649), False, 'from verify_service import verify_service\n'), ((3755, 3807), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (3778, 3807), False, 'import argparse\n'), ((4266, 4307), 'ctf_utils.prompt_checkout_warning', 'prompt_checkout_warning', (['args.service_dir'], {}), '(args.service_dir)\n', (4289, 4307), False, 'from ctf_utils import prompt_checkout_warning, load_config\n'), ((4321, 4343), 'ctf_utils.load_config', 'load_config', (['args.conf'], {}), '(args.conf)\n', (4332, 4343), False, 'from ctf_utils import prompt_checkout_warning, load_config\n'), ((4569, 4621), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (4592, 4621), False, 'import argparse\n'), ((4706, 4744), 'verify_injection.verify_injection', 'verify_injection', (['args.team', 'args.conf'], {}), '(args.team, args.conf)\n', (4722, 4744), False, 'from verify_injection import verify_injection\n'), ((4822, 4874), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (4845, 4874), False, 'import argparse\n'), ((5036, 5111), 'submit.submit', 'submit', (['args.exploit', 'args.service_dir', 'args.branch', 'args.target', 'args.conf'], {}), '(args.exploit, args.service_dir, args.branch, args.target, args.conf)\n', (5042, 5111), False, 'from submit import submit\n'), ((5187, 5239), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (5210, 5239), False, 'import argparse\n'), ((5454, 5476), 'ctf_utils.load_config', 'load_config', (['args.conf'], {}), '(args.conf)\n', (5465, 5476), False, 'from ctf_utils import prompt_checkout_warning, load_config\n'), ((5481, 5517), 'fetch.fetch', 'fetch', (['args.team', 'args.issue', 'config'], {}), '(args.team, args.issue, config)\n', (5486, 5517), False, 'from fetch import fetch\n'), ((6283, 6335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (6306, 6335), False, 'import argparse\n'), ((6399, 6420), 'show_score.show_score', 'show_score', (['args.conf'], {}), '(args.conf)\n', (6409, 6420), False, 'from show_score import show_score\n'), ((6520, 6572), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (6543, 6572), False, 'import argparse\n'), ((6636, 6655), 'get_hash.get_hash', 'get_hash', (['args.conf'], {}), '(args.conf)\n', (6644, 6655), False, 'from get_hash import get_hash\n'), ((6736, 6788), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (6759, 6788), False, 'import argparse\n'), ((6888, 6934), 'setup_env.setup_env', 'setup_env', (['args.admin_conf', 'args.repo_location'], {}), '(args.admin_conf, args.repo_location)\n', (6897, 6934), False, 'from setup_env import setup_env\n'), ((7014, 7066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (7037, 7066), False, 'import argparse\n'), ((7130, 7149), 'evaluate.evaluate', 'evaluate', (['args.conf'], {}), '(args.conf)\n', (7138, 7149), False, 'from evaluate import evaluate\n'), ((7233, 7285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (7256, 7285), False, 'import argparse\n'), ((7440, 7529), 'execute.exec_service', 'exec_service', (['args.service_name', 'args.service_dir', 'args.host_port', 'args.service_port'], {}), '(args.service_name, args.service_dir, args.host_port, args.\n service_port)\n', (7452, 7529), False, 'from execute import exec_service, exec_exploit\n'), ((7660, 7712), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog'}), '(description=desc, prog=prog)\n', (7683, 7712), False, 'import argparse\n'), ((9597, 9607), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9605, 9607), False, 'import sys\n'), ((5850, 5860), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5858, 5860), False, 'import sys\n'), ((8694, 8704), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8702, 8704), False, 'import sys\n')]
|
from ignite.engine.engine import Engine, State, Events
from dataset import get_iterator
from model import get_model
from loss import get_loss
from optimizer import get_optimizer
from logger import get_logger, log_results
from utils import prepare_batch
from metric import get_metrics
from evaluate import get_evaluator, evaluate_once
from metric.stat_metric import StatMetric
def get_trainer(args, model, loss_fn, optimizer):
def update_model(trainer, batch):
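        # Single optimization step: prepare the batch, run the forward pass,
        # compute the loss, backpropagate, update the weights, and return
        # everything the attached metrics below need.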
model.train()
optimizer.zero_grad()
net_inputs, target = prepare_batch(args, batch, model.vocab)
y_pred = model(**net_inputs)
batch_size = y_pred.shape[0]
loss, stats = loss_fn(y_pred, target)
loss.backward()
optimizer.step()
return loss.item(), stats, batch_size, y_pred.detach(), target.detach()
trainer = Engine(update_model)
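    # attach running statistics (average loss and top-1 accuracy) computed
    # from update_model's return values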
metrics = {
'loss': StatMetric(output_transform=lambda x: (x[0], x[2])),
'top1_acc': StatMetric(output_transform=lambda x: ((x[3].argmax(dim=-1) == x[4]).float().mean().item(), x[2]))
}
if hasattr(loss_fn, 'get_metric'):
metrics = {**metrics, **loss_fn.get_metric()}
for name, metric in metrics.items():
metric.attach(trainer, name)
return trainer
def train(args):
iters, vocab = get_iterator(args)
model = get_model(args, vocab)
loss_fn = get_loss(args, vocab)
optimizer = get_optimizer(args, model)
trainer = get_trainer(args, model, loss_fn, optimizer)
metrics = get_metrics(args, vocab)
evaluator = get_evaluator(args, model, loss_fn, metrics)
logger = get_logger(args)
@trainer.on(Events.STARTED)
def on_training_started(engine):
print("Begin Training")
@trainer.on(Events.ITERATION_COMPLETED)
def log_iter_results(engine):
log_results(logger, 'train/iter', engine.state, engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluate_epoch(engine):
log_results(logger, 'train/epoch', engine.state, engine.state.epoch)
state = evaluate_once(evaluator, iterator=iters['val'])
log_results(logger, 'valid/epoch', state, engine.state.epoch)
trainer.run(iters['train'], max_epochs=args.max_epochs)
|
[
"evaluate.evaluate_once",
"evaluate.get_evaluator"
] |
[((853, 873), 'ignite.engine.engine.Engine', 'Engine', (['update_model'], {}), '(update_model)\n', (859, 873), False, 'from ignite.engine.engine import Engine, State, Events\n'), ((1315, 1333), 'dataset.get_iterator', 'get_iterator', (['args'], {}), '(args)\n', (1327, 1333), False, 'from dataset import get_iterator\n'), ((1347, 1369), 'model.get_model', 'get_model', (['args', 'vocab'], {}), '(args, vocab)\n', (1356, 1369), False, 'from model import get_model\n'), ((1384, 1405), 'loss.get_loss', 'get_loss', (['args', 'vocab'], {}), '(args, vocab)\n', (1392, 1405), False, 'from loss import get_loss\n'), ((1422, 1448), 'optimizer.get_optimizer', 'get_optimizer', (['args', 'model'], {}), '(args, model)\n', (1435, 1448), False, 'from optimizer import get_optimizer\n'), ((1523, 1547), 'metric.get_metrics', 'get_metrics', (['args', 'vocab'], {}), '(args, vocab)\n', (1534, 1547), False, 'from metric import get_metrics\n'), ((1564, 1608), 'evaluate.get_evaluator', 'get_evaluator', (['args', 'model', 'loss_fn', 'metrics'], {}), '(args, model, loss_fn, metrics)\n', (1577, 1608), False, 'from evaluate import get_evaluator, evaluate_once\n'), ((1623, 1639), 'logger.get_logger', 'get_logger', (['args'], {}), '(args)\n', (1633, 1639), False, 'from logger import get_logger, log_results\n'), ((549, 588), 'utils.prepare_batch', 'prepare_batch', (['args', 'batch', 'model.vocab'], {}), '(args, batch, model.vocab)\n', (562, 588), False, 'from utils import prepare_batch\n'), ((907, 958), 'metric.stat_metric.StatMetric', 'StatMetric', ([], {'output_transform': '(lambda x: (x[0], x[2]))'}), '(output_transform=lambda x: (x[0], x[2]))\n', (917, 958), False, 'from metric.stat_metric import StatMetric\n'), ((1828, 1899), 'logger.log_results', 'log_results', (['logger', '"""train/iter"""', 'engine.state', 'engine.state.iteration'], {}), "(logger, 'train/iter', engine.state, engine.state.iteration)\n", (1839, 1899), False, 'from logger import get_logger, log_results\n'), ((1981, 2049), 'logger.log_results', 'log_results', (['logger', '"""train/epoch"""', 'engine.state', 'engine.state.epoch'], {}), "(logger, 'train/epoch', engine.state, engine.state.epoch)\n", (1992, 2049), False, 'from logger import get_logger, log_results\n'), ((2066, 2113), 'evaluate.evaluate_once', 'evaluate_once', (['evaluator'], {'iterator': "iters['val']"}), "(evaluator, iterator=iters['val'])\n", (2079, 2113), False, 'from evaluate import get_evaluator, evaluate_once\n'), ((2122, 2183), 'logger.log_results', 'log_results', (['logger', '"""valid/epoch"""', 'state', 'engine.state.epoch'], {}), "(logger, 'valid/epoch', state, engine.state.epoch)\n", (2133, 2183), False, 'from logger import get_logger, log_results\n')]
|
# coding:utf-8
"""
Filename: train.py
Author: @DvdNss
Created on 12/17/2021
"""
import argparse
import os
import torch
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import PerceiverForSequenceClassification, AdamW
from evaluate import evaluate
def main():
"""
Train language-perceiver given files and arguments.
:return:
"""
# Create parser and its args
parser = argparse.ArgumentParser()
parser.add_argument('--train_data', help='Path to train torch file. ', default='data/train.pt')
parser.add_argument('--validation_data',
help='Path to validaton torch file. If not provided, no validation will occur. ', default=None)
parser.add_argument('--batch_size', help='Batch size. ', default=1, type=int)
parser.add_argument('--lr', help='Learning rate. ', default=5e-5, type=float)
parser.add_argument('--epochs', help='Number of epochs. ', default=1, type=int)
parser.add_argument('--output_dir', help='Output directory. ', default='model')
parser = parser.parse_args()
# Call dataloaders
train_dataloader = DataLoader(torch.load(parser.train_data), batch_size=parser.batch_size, shuffle=True)
validation_dataloader = DataLoader(torch.load(parser.validation_data),
batch_size=parser.batch_size) if parser.validation_data is not None else None
# Load model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = PerceiverForSequenceClassification.from_pretrained('deepmind/language-perceiver',
problem_type='multi_label_classification',
num_labels=28)
# Send model to device
model.to(device)
# Define optimizer and metric
optimizer = AdamW(model.parameters(), lr=parser.lr)
# Train the model
for epoch in range(parser.epochs):
# Put model in training mode
model.train()
# Init logs
accu_logs, loss_logs, mem_logs = [], [], []
# Init pbar
with tqdm(train_dataloader, unit='batches') as progression:
# Set pbar description
progression.set_description(f"Epoch {epoch}")
# Iterate over batches
for batch in progression:
# Get inputs
inputs = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
targets = batch['targets'].type(torch.FloatTensor).to(device)
# Zero gradients
optimizer.zero_grad()
# Forward, backward & optimizer
outputs = model(inputs=inputs, attention_mask=attention_mask, labels=targets)
loss = outputs.loss
loss.backward()
optimizer.step()
                # Track loss and GPU memory for every batch (previously these were
                # only logged when validation data was given, which made the
                # progress-bar averages below divide by zero otherwise)
                loss_logs.append(loss.item())
                memory = round(torch.cuda.memory_reserved(device) / 1e9, 2)
                mem_logs.append(memory)
                # Evaluate over batch
                if parser.validation_data is not None:
                    # Get predictions and targets
                    predictions = outputs.logits.cpu().detach().numpy()
                    references = batch["targets"].numpy()
                    # Binarize predictions
                    predictions = torch.as_tensor(predictions > 0.5, dtype=torch.int32)
                    # Retrieve accuracy
                    accuracy = accuracy_score(y_true=references, y_pred=predictions)
                    # Append data to logs
                    accu_logs.append(accuracy)
# Set logs on pbar
progression.set_postfix(loss=round(sum(loss_logs) / len(loss_logs), 3),
accuracy=round(sum(accu_logs) / len(accu_logs) * 100, 1),
memory=f"{round(sum(mem_logs) / len(mem_logs), 2)}Go")
else:
progression.set_postfix(loss=round(sum(loss_logs) / len(loss_logs), 3),
accuracy='disabled',
memory=f"{round(sum(mem_logs) / len(mem_logs), 2)}Go")
# Create output directory if needed
if not os.path.exists(parser.output_dir):
os.mkdir(parser.output_dir)
# Evaluate and save the model
if validation_dataloader is not None:
epoch_acc = evaluate(model=model, validation_dataloader=validation_dataloader)
torch.save(model, f"{parser.output_dir}/perceiver-e{epoch}-acc{int(epoch_acc)}.pt".replace('//', '/'))
else:
torch.save(model, f"{parser.output_dir}/perceiver-e{epoch}.pt".replace('//', '/'))
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate"
] |
[((471, 496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (494, 496), False, 'import argparse\n'), ((1556, 1705), 'transformers.PerceiverForSequenceClassification.from_pretrained', 'PerceiverForSequenceClassification.from_pretrained', (['"""deepmind/language-perceiver"""'], {'problem_type': '"""multi_label_classification"""', 'num_labels': '(28)'}), "(\n 'deepmind/language-perceiver', problem_type=\n 'multi_label_classification', num_labels=28)\n", (1606, 1705), False, 'from transformers import PerceiverForSequenceClassification, AdamW\n'), ((1185, 1214), 'torch.load', 'torch.load', (['parser.train_data'], {}), '(parser.train_data)\n', (1195, 1214), False, 'import torch\n'), ((1299, 1333), 'torch.load', 'torch.load', (['parser.validation_data'], {}), '(parser.validation_data)\n', (1309, 1333), False, 'import torch\n'), ((1506, 1531), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1529, 1531), False, 'import torch\n'), ((2191, 2229), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'unit': '"""batches"""'}), "(train_dataloader, unit='batches')\n", (2195, 2229), False, 'from tqdm import tqdm\n'), ((4432, 4465), 'os.path.exists', 'os.path.exists', (['parser.output_dir'], {}), '(parser.output_dir)\n', (4446, 4465), False, 'import os\n'), ((4479, 4506), 'os.mkdir', 'os.mkdir', (['parser.output_dir'], {}), '(parser.output_dir)\n', (4487, 4506), False, 'import os\n'), ((4616, 4682), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'validation_dataloader': 'validation_dataloader'}), '(model=model, validation_dataloader=validation_dataloader)\n', (4624, 4682), False, 'from evaluate import evaluate\n'), ((3314, 3367), 'torch.as_tensor', 'torch.as_tensor', (['(predictions > 0.5)'], {'dtype': 'torch.int32'}), '(predictions > 0.5, dtype=torch.int32)\n', (3329, 3367), False, 'import torch\n'), ((3443, 3496), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'references', 'y_pred': 'predictions'}), '(y_true=references, y_pred=predictions)\n', (3457, 3496), False, 'from sklearn.metrics import accuracy_score\n'), ((3532, 3566), 'torch.cuda.memory_reserved', 'torch.cuda.memory_reserved', (['device'], {}), '(device)\n', (3558, 3566), False, 'import torch\n')]
|
import torch
import gym
from config import CartPole, Pong
from train import train
from evaluate import evaluate_policy
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # needed locally
ENV_CONFIGS = {
'CartPole-v0': CartPole,
'Pong-v0': Pong
}
def training(env):
"""
    Wrapper that calls the train() function inside train.py.
    The parameter 'env' is the environment name.
"""
train(env)
def evaluating(env):
"""
    Replacement for the main() function inside evaluate.py.
    The path variable below must point to a trained model file.
"""
path = f'/Users/haradys/Documents/Data_Science_Master/RL/Project/models/{env}_best.pt'
n_eval_episodes = 10
render = True
save_video = False
env_name = env
# Initialize environment and config
env_config = ENV_CONFIGS[env]
env = gym.make(env)
if save_video:
env = gym.wrappers.Monitor(env, './video/', video_callable=lambda episode_id: True, force=True)
# Load model from provided path.
dqn = torch.load(path, map_location=torch.device('cpu'))
dqn.eval()
mean_return = evaluate_policy(dqn, env, env_config, env_name, n_eval_episodes, render=render and not save_video, verbose=True)
print(f'The policy got a mean return of {mean_return} over {n_eval_episodes} episodes.')
env.close()
if __name__ == "__main__":
"""
    To run this main, uncomment either 'Pong-v0' or 'CartPole-v0',
    and uncomment either training or evaluating (with rendering).
"""
#env = 'CartPole-v0'
env = 'Pong-v0'
print('Start')
training(env)
print('End')
#evaluating(env)
|
[
"evaluate.evaluate_policy"
] |
[((412, 422), 'train.train', 'train', (['env'], {}), '(env)\n', (417, 422), False, 'from train import train\n'), ((841, 854), 'gym.make', 'gym.make', (['env'], {}), '(env)\n', (849, 854), False, 'import gym\n'), ((1112, 1229), 'evaluate.evaluate_policy', 'evaluate_policy', (['dqn', 'env', 'env_config', 'env_name', 'n_eval_episodes'], {'render': '(render and not save_video)', 'verbose': '(True)'}), '(dqn, env, env_config, env_name, n_eval_episodes, render=\n render and not save_video, verbose=True)\n', (1127, 1229), False, 'from evaluate import evaluate_policy\n'), ((889, 983), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (['env', '"""./video/"""'], {'video_callable': '(lambda episode_id: True)', 'force': '(True)'}), "(env, './video/', video_callable=lambda episode_id: \n True, force=True)\n", (909, 983), False, 'import gym\n'), ((1057, 1076), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1069, 1076), False, 'import torch\n')]
|
""" Evaluate the baselines ont ROUGE/METEOR"""
import argparse
import json
import os
from os.path import join, exists
from evaluate import eval_meteor, eval_rouge
try:
_DATA_DIR = os.environ['DATA']
except KeyError:
print('please use environment variable to specify data directories')
def main(args):
dec_dir = join(args.decode_dir, 'output')
with open(join(args.decode_dir, 'log.json')) as f:
split = json.loads(f.read())['split']
ref_dir = join(_DATA_DIR, 'refs', split)
assert exists(ref_dir)
if args.rouge:
dec_pattern = r'(\d+).dec'
ref_pattern = '#ID#.ref'
output = eval_rouge(dec_pattern, dec_dir, ref_pattern, ref_dir,
cmd='-a -c 95 -r 1000 -f A -n 2')
        # -c 95 -r 1000 -n 2 -m
# ROUGE-1.5.5 -e data -a -n 2 -r 1000 -f A -z SPL config_file
metric = 'rouge'
else:
dec_pattern = '[0-9]+.dec'
ref_pattern = '[0-9]+.ref'
output = eval_meteor(dec_pattern, dec_dir, ref_pattern, ref_dir)
metric = 'meteor'
print(output)
with open(join(args.decode_dir, '{}.txt'.format(metric)), 'w') as f:
f.write(output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluate the output files for the RL full models')
# choose metric to evaluate
metric_opt = parser.add_mutually_exclusive_group(required=True)
metric_opt.add_argument('--rouge', action='store_true',
help='ROUGE evaluation')
metric_opt.add_argument('--meteor', action='store_true',
help='METEOR evaluation')
parser.add_argument('--decode_dir', action='store', required=True,
help='directory of decoded summaries')
args = parser.parse_args()
main(args)
|
[
"evaluate.eval_meteor",
"evaluate.eval_rouge"
] |
[((328, 359), 'os.path.join', 'join', (['args.decode_dir', '"""output"""'], {}), "(args.decode_dir, 'output')\n", (332, 359), False, 'from os.path import join, exists\n'), ((475, 505), 'os.path.join', 'join', (['_DATA_DIR', '"""refs"""', 'split'], {}), "(_DATA_DIR, 'refs', split)\n", (479, 505), False, 'from os.path import join, exists\n'), ((517, 532), 'os.path.exists', 'exists', (['ref_dir'], {}), '(ref_dir)\n', (523, 532), False, 'from os.path import join, exists\n'), ((1218, 1310), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate the output files for the RL full models"""'}), "(description=\n 'Evaluate the output files for the RL full models')\n", (1241, 1310), False, 'import argparse\n'), ((638, 731), 'evaluate.eval_rouge', 'eval_rouge', (['dec_pattern', 'dec_dir', 'ref_pattern', 'ref_dir'], {'cmd': '"""-a -c 95 -r 1000 -f A -n 2"""'}), "(dec_pattern, dec_dir, ref_pattern, ref_dir, cmd=\n '-a -c 95 -r 1000 -f A -n 2')\n", (648, 731), False, 'from evaluate import eval_meteor, eval_rouge\n'), ((979, 1034), 'evaluate.eval_meteor', 'eval_meteor', (['dec_pattern', 'dec_dir', 'ref_pattern', 'ref_dir'], {}), '(dec_pattern, dec_dir, ref_pattern, ref_dir)\n', (990, 1034), False, 'from evaluate import eval_meteor, eval_rouge\n'), ((374, 407), 'os.path.join', 'join', (['args.decode_dir', '"""log.json"""'], {}), "(args.decode_dir, 'log.json')\n", (378, 407), False, 'from os.path import join, exists\n')]
|
import os
import time
import json
import logging
import argparse
import sys
sys.path.append("libs")
from utils import Init_logging
from utils import PiecewiseSchedule
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.summary import summary as tf_summary
from data import ContentVaeDataGenerator
from data import CollaborativeVAEDataGenerator
from model import SymetricUserOrientedCollarboativeVAE
from pretrain_vae import get_content_vae
from evaluate import EvaluateModel, EvaluateCold, mse
from evaluate import Recall_at_k, NDCG_at_k
from evaluate import binary_crossentropy
from evaluate import multinomial_crossentropy
import warnings
warnings.filterwarnings('ignore')
### Fix the random seeds.
np.random.seed(98765)
tf.set_random_seed(98765)
class Params():
def __init__(self, W, V):
self.lambda_W = W
self.lambda_V = V
citeulike_a_args = {
"hidden_sizes":[],
"latent_size":150,
"encoder_activs" : ["tanh"],
"decoder_activs" : ["softmax"],
}
movielen_10_args = {
"hidden_sizes":[100],
"latent_size":50,
"encoder_activs" : ["tanh"],
"decoder_activs" : ["tanh", "softmax"],
}
name_args_dict = {
"citeulike-a" : citeulike_a_args,
"movielen-10" : movielen_10_args,
}
name_loss_dict = {
"citeulike-a" : binary_crossentropy,
"movielen-10" : binary_crossentropy
}
def get_collabo_vae(dataset, input_dim):
collabo_vae = SymetricUserOrientedCollarboativeVAE(
input_dim = input_dim,
**name_args_dict[dataset],
)
return collabo_vae
def infer(infer_model, inputs, batch_size=2000):
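    # Batched inference: run infer_model over `inputs` in chunks of batch_size
    # and collect the latent codes into a single (num_samples, z_size) array.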
num_samples = len(inputs)
z_size = infer_model.output.shape.as_list()[-1]
z_infer = np.zeros((num_samples, z_size), dtype=np.float32)
for i in range(num_samples//batch_size+1):
z_infer[i*batch_size:(i+1)*batch_size] \
= infer_model.predict_on_batch(inputs[i*batch_size:(i+1)*batch_size])
return z_infer
def summary(save_root, logs, epoch):
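    # Mirror each logged metric into two TensorBoard writers: metrics whose
    # name contains "val" go to the validation writer (with the "val_" prefix
    # stripped), everything else goes to the train writer.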
save_train = os.path.join(save_root, "train")
save_val = os.path.join(save_root, "val")
if not os.path.exists(save_train):
os.makedirs(save_train)
if not os.path.exists(save_val):
os.makedirs(save_val)
writer_train = tf.summary.FileWriter(save_train)
writer_val = tf.summary.FileWriter(save_val)
for metric, value in logs.items():
if isinstance(value, list):
value = value[0]
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
if "val" in metric:
summary_value.tag = metric[4:]
writer_val.add_summary(summary, epoch)
else:
summary_value.tag = metric
writer_train.add_summary(summary, epoch)
    writer_train.flush(); writer_val.flush()
def train_vae_model():
### Parse the console arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="citeulike-a",\
help="specify the dataset for experiment")
parser.add_argument("--split", type=int, default=0,
help="specify the split of dataset for experiment")
parser.add_argument("--batch_size", type=int, default=256,
help="specify the batch size for updating vae")
parser.add_argument("--device" , type=str, default="0",
help="specify the visible GPU device")
parser.add_argument("--summary", default=False, action="store_true",
help="whether or not write summaries to the results")
parser.add_argument("--lambda_V", default=None, type=int,
help="specify the value of lambda_V for regularization")
parser.add_argument("--num_cold", default=None, type=int,
help="specify the number of cold start items")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
### Set up the tensorflow session.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
### Get the train, val data generator for content vae
if args.num_cold:
data_root = os.path.join("data", args.dataset, str(args.split), str(args.num_cold))
else:
data_root = os.path.join("data", args.dataset, str(args.split))
dataset = "movielen-10" if "movielen-10" in args.dataset else args.dataset
tstep_train_gen = ContentVaeDataGenerator(
data_root = data_root, joint=True,
batch_size = args.batch_size,
)
tstep_cold_gen = ContentVaeDataGenerator(
data_root = data_root, joint=True,
batch_size = args.batch_size, use_cold=True,
)
### Get the train, val data generator for vae
bstep_train_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase="train",
batch_size = args.batch_size,
)
bstep_valid_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase="val",
batch_size = args.batch_size*8,
)
bstep_cold_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase="val",
batch_size = args.batch_size*8, use_cold=True,
)
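    # Piecewise learning-rate schedules for both steps: 1e-3 for the first 50
    # epochs, then 1e-4 afterwards.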
blr_schedule = PiecewiseSchedule([[0, 1e-3], [50, 1e-3], [51, 1e-4]], outside_value=1e-4)
tlr_schedule = PiecewiseSchedule([[0, 1e-3], [50, 1e-3], [51, 1e-4]], outside_value=1e-4)
### Build the t and b step vae model
if args.num_cold:
weight_path = os.path.join("models", args.dataset, str(args.split), "num_cold", \
str(args.num_cold), "pretrained", "weights.model")
else:
weight_path = os.path.join("models", args.dataset, str(args.split), "pretrained", "weights.model")
print("pretrained model load from: {}".format(weight_path))
content_vae = get_content_vae(dataset, tstep_train_gen.feature_dim)
collabo_vae = get_collabo_vae(dataset, input_dim=bstep_train_gen.num_items)
content_vae.load_weights(weight_path)
if args.lambda_V is not None:
print("Use user-specified lambda {}".format(args.lambda_V))
lambda_V = args.lambda_V
use_default_lambda = False
else:
if args.dataset == "citeulike-a":
lambda_V = 50
elif "movielen-10" in args.dataset:
lambda_V = 75
print("Use default lambda {}".format(lambda_V))
use_default_lambda = True
if args.num_cold is None:
use_default_cold = True
else:
use_default_cold = False
params = Params(W=2e-4, V=lambda_V)
vae_bstep = collabo_vae.build_vae_bstep(lambda_W=params.lambda_W, lambda_V=params.lambda_V)
vae_tstep = content_vae.build_vae_tstep(lambda_W=params.lambda_W, lambda_V=params.lambda_V)
sess.run(tf.global_variables_initializer())
vae_infer_tstep = content_vae.build_vae_infer_tstep()
vae_infer_bstep = collabo_vae.build_vae_infer_bstep()
vae_eval = collabo_vae.build_vae_eval()
vae_eval_cold = collabo_vae.update_vae_coldstart(infer(vae_infer_tstep, tstep_cold_gen.features.A))
### Some configurations for training
best_Recall_20, best_Recall_40, best_NDCG_100, best_sum = -np.inf, -np.inf, -np.inf, -np.inf
best_Recall_20_cold, best_Recall_40_cold, best_NDCG_100_cold = -np.inf, -np.inf, -np.inf
if use_default_lambda:
save_root = os.path.join("models", args.dataset, str(args.split))
else:
save_root = os.path.join("models", args.dataset, str(args.split), str(lambda_V))
if use_default_cold:
save_root = os.path.join("models", args.dataset, str(args.split))
else:
save_root = os.path.join("models", args.dataset, str(args.split), "num_cold", str(args.num_cold))
if not os.path.exists(save_root):
os.makedirs(save_root)
with open(os.path.join(save_root, "hyper.txt"), "w") as f:
json.dump(name_args_dict[dataset], f)
training_dynamics = os.path.join(save_root, "training_dynamics.csv")
with open(training_dynamics, "w") as f:
f.write("Recall@20,Recall@40,NDCG@100\n")
best_bstep_path = os.path.join(save_root, "best_bstep.model")
best_tstep_path = os.path.join(save_root, "best_tstep.model")
lamb_schedule_gauss = PiecewiseSchedule([[0, 0.0], [80, 0.2]], outside_value=0.2)
vae_bstep.compile(loss=multinomial_crossentropy, optimizer=optimizers.Adam(),
metrics=[multinomial_crossentropy])
vae_tstep.compile(optimizer=optimizers.Adam(), loss=name_loss_dict[dataset])
### Train the content and collaborative part of vae in an EM-like style
epochs = 200
mix_in_epochs = 30
bstep_tsboard = callbacks.TensorBoard(log_dir=save_root)
for epoch in range(epochs):
print("-"*10 + "Epoch:{}".format(epoch), "-"*10)
print("Begin bstep:")
K.set_value(vae_bstep.optimizer.lr, blr_schedule.value(epoch))
K.set_value(collabo_vae.gaussian_kl_loss.lamb_kl, lamb_schedule_gauss.value(epoch))
K.set_value(collabo_vae.mse_loss.targets, infer(vae_infer_tstep, tstep_train_gen.features.A))
his_bstep = vae_bstep.fit_generator(bstep_train_gen, epochs=1, workers=4,
validation_data=bstep_valid_gen)
Recall_20 = EvaluateModel(vae_eval, bstep_valid_gen, Recall_at_k, k=20)
Recall_40 = EvaluateModel(vae_eval, bstep_valid_gen, Recall_at_k, k=40)
NDCG_100 = EvaluateModel(vae_eval, bstep_valid_gen, NDCG_at_k, k=100)
if epoch > mix_in_epochs:
Recall_20_cold = EvaluateCold(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=20)
Recall_40_cold = EvaluateCold(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=40)
NDCG_100_cold = EvaluateCold(vae_eval_cold, bstep_cold_gen, NDCG_at_k, k=100)
if Recall_20_cold > best_Recall_20_cold:
best_Recall_20_cold = Recall_20_cold
if Recall_40_cold > best_Recall_40_cold:
best_Recall_40_cold = Recall_40_cold
if NDCG_100_cold > best_NDCG_100_cold:
best_NDCG_100_cold = NDCG_100_cold
if args.summary:
logs = his_bstep.history
logs.update({"val_recall_20":Recall_20,
"val_recall_40":Recall_40,
"val_ndcg_100":NDCG_100})
if epoch > mix_in_epochs:
logs.update({"val_cold_recall_20": Recall_20_cold,
"val_cold_recall_40": Recall_40_cold,
"val_cold_ndcg_100" : NDCG_100_cold})
summary(save_root, logs, epoch)
if Recall_20 > best_Recall_20:
best_Recall_20 = Recall_20
if Recall_40 > best_Recall_40:
best_Recall_40 = Recall_40
if NDCG_100 > best_NDCG_100:
best_NDCG_100 = NDCG_100
cur_sum = Recall_20 + Recall_40 + NDCG_100
if cur_sum > best_sum:
best_sum = cur_sum
vae_bstep.save_weights(best_bstep_path, save_format="tf")
vae_tstep.save_weights(best_tstep_path, save_format="tf")
with open(training_dynamics, "a") as f:
f.write("{:.4f},{:.4f},{:.4f}\n".\
format(Recall_20, Recall_40, NDCG_100))
print("-"*5+"Epoch: {}".format(epoch)+"-"*5)
print("cur recall@20: {:5f}, best recall@20: {:5f}".format(Recall_20, best_Recall_20))
print("cur recall@40: {:5f}, best recall@40: {:5f}".format(Recall_40, best_Recall_40))
print("cur NDCG@100: {:5f}, best NDCG@100: {:5f}".format(NDCG_100, best_NDCG_100))
if epoch > mix_in_epochs:
print("-"*5 + "Cold start items" + "-"*5)
print("cold recall@20: {:5f}, best cold recall@20: {:5f}".format(Recall_20_cold, best_Recall_20_cold))
print("cold recall@40: {:5f}, best cold recall@40: {:5f}".format(Recall_40_cold, best_Recall_40_cold))
print("cold NDCG@100: {:5f}, best cold NDCG@100: {:5f}".format(NDCG_100_cold, best_NDCG_100_cold))
print("Begin tstep:")
K.set_value(vae_tstep.optimizer.lr, tlr_schedule.value(epoch))
tstep_train_gen.update_previous_bstep(K.get_value(collabo_vae.embedding_weights))
vae_tstep.fit_generator(tstep_train_gen, workers=4, epochs=1)
vae_eval_cold = collabo_vae.update_vae_coldstart(infer(vae_infer_tstep, tstep_cold_gen.features.A))
print("Done training!")
if __name__ == '__main__':
train_vae_model()
|
[
"evaluate.EvaluateCold",
"evaluate.EvaluateModel"
] |
[((84, 107), 'sys.path.append', 'sys.path.append', (['"""libs"""'], {}), "('libs')\n", (99, 107), False, 'import sys\n'), ((888, 921), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (911, 921), False, 'import warnings\n'), ((952, 973), 'numpy.random.seed', 'np.random.seed', (['(98765)'], {}), '(98765)\n', (966, 973), True, 'import numpy as np\n'), ((975, 1000), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(98765)'], {}), '(98765)\n', (993, 1000), True, 'import tensorflow as tf\n'), ((1687, 1776), 'model.SymetricUserOrientedCollarboativeVAE', 'SymetricUserOrientedCollarboativeVAE', ([], {'input_dim': 'input_dim'}), '(input_dim=input_dim, **name_args_dict[\n dataset])\n', (1723, 1776), False, 'from model import SymetricUserOrientedCollarboativeVAE\n'), ((1977, 2026), 'numpy.zeros', 'np.zeros', (['(num_samples, z_size)'], {'dtype': 'np.float32'}), '((num_samples, z_size), dtype=np.float32)\n', (1985, 2026), True, 'import numpy as np\n'), ((2289, 2321), 'os.path.join', 'os.path.join', (['save_root', '"""train"""'], {}), "(save_root, 'train')\n", (2301, 2321), False, 'import os\n'), ((2338, 2368), 'os.path.join', 'os.path.join', (['save_root', '"""val"""'], {}), "(save_root, 'val')\n", (2350, 2368), False, 'import os\n'), ((2531, 2564), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['save_train'], {}), '(save_train)\n', (2552, 2564), True, 'import tensorflow as tf\n'), ((2583, 2614), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['save_val'], {}), '(save_val)\n', (2604, 2614), True, 'import tensorflow as tf\n'), ((3209, 3234), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3232, 3234), False, 'import argparse\n'), ((4234, 4250), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4248, 4250), True, 'import tensorflow as tf\n'), ((4305, 4330), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4315, 4330), True, 'import tensorflow as tf\n'), ((4336, 4355), 'tensorflow.keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (4349, 4355), True, 'from tensorflow.keras import backend as K\n'), ((4722, 4811), 'data.ContentVaeDataGenerator', 'ContentVaeDataGenerator', ([], {'data_root': 'data_root', 'joint': '(True)', 'batch_size': 'args.batch_size'}), '(data_root=data_root, joint=True, batch_size=args.\n batch_size)\n', (4745, 4811), False, 'from data import ContentVaeDataGenerator\n'), ((4860, 4964), 'data.ContentVaeDataGenerator', 'ContentVaeDataGenerator', ([], {'data_root': 'data_root', 'joint': '(True)', 'batch_size': 'args.batch_size', 'use_cold': '(True)'}), '(data_root=data_root, joint=True, batch_size=args.\n batch_size, use_cold=True)\n', (4883, 4964), False, 'from data import ContentVaeDataGenerator\n'), ((5066, 5163), 'data.CollaborativeVAEDataGenerator', 'CollaborativeVAEDataGenerator', ([], {'data_root': 'data_root', 'phase': '"""train"""', 'batch_size': 'args.batch_size'}), "(data_root=data_root, phase='train',\n batch_size=args.batch_size)\n", (5095, 5163), False, 'from data import CollaborativeVAEDataGenerator\n'), ((5213, 5313), 'data.CollaborativeVAEDataGenerator', 'CollaborativeVAEDataGenerator', ([], {'data_root': 'data_root', 'phase': '"""val"""', 'batch_size': '(args.batch_size * 8)'}), "(data_root=data_root, phase='val', batch_size=\n args.batch_size * 8)\n", (5242, 5313), False, 'from data import CollaborativeVAEDataGenerator\n'), ((5359, 5474), 'data.CollaborativeVAEDataGenerator', 'CollaborativeVAEDataGenerator', ([], {'data_root': 'data_root', 'phase': '"""val"""', 'batch_size': '(args.batch_size * 8)', 'use_cold': '(True)'}), "(data_root=data_root, phase='val', batch_size=\n args.batch_size * 8, use_cold=True)\n", (5388, 5474), False, 'from data import CollaborativeVAEDataGenerator\n'), ((5520, 5605), 'utils.PiecewiseSchedule', 'PiecewiseSchedule', (['[[0, 0.001], [50, 0.001], [51, 0.0001]]'], {'outside_value': '(0.0001)'}), '([[0, 0.001], [50, 0.001], [51, 0.0001]], outside_value=0.0001\n )\n', (5537, 5605), False, 'from utils import PiecewiseSchedule\n'), ((5615, 5700), 'utils.PiecewiseSchedule', 'PiecewiseSchedule', (['[[0, 0.001], [50, 0.001], [51, 0.0001]]'], {'outside_value': '(0.0001)'}), '([[0, 0.001], [50, 0.001], [51, 0.0001]], outside_value=0.0001\n )\n', (5632, 5700), False, 'from utils import PiecewiseSchedule\n'), ((6117, 6170), 'pretrain_vae.get_content_vae', 'get_content_vae', (['dataset', 'tstep_train_gen.feature_dim'], {}), '(dataset, tstep_train_gen.feature_dim)\n', (6132, 6170), False, 'from pretrain_vae import get_content_vae\n'), ((8332, 8380), 'os.path.join', 'os.path.join', (['save_root', '"""training_dynamics.csv"""'], {}), "(save_root, 'training_dynamics.csv')\n", (8344, 8380), False, 'import os\n'), ((8502, 8545), 'os.path.join', 'os.path.join', (['save_root', '"""best_bstep.model"""'], {}), "(save_root, 'best_bstep.model')\n", (8514, 8545), False, 'import os\n'), ((8569, 8612), 'os.path.join', 'os.path.join', (['save_root', '"""best_tstep.model"""'], {}), "(save_root, 'best_tstep.model')\n", (8581, 8612), False, 'import os\n'), ((8642, 8701), 'utils.PiecewiseSchedule', 'PiecewiseSchedule', (['[[0, 0.0], [80, 0.2]]'], {'outside_value': '(0.2)'}), '([[0, 0.0], [80, 0.2]], outside_value=0.2)\n', (8659, 8701), False, 'from utils import PiecewiseSchedule\n'), ((9069, 9109), 'tensorflow.keras.callbacks.TensorBoard', 'callbacks.TensorBoard', ([], {'log_dir': 'save_root'}), '(log_dir=save_root)\n', (9090, 9109), False, 'from tensorflow.keras import callbacks\n'), ((2381, 2407), 'os.path.exists', 'os.path.exists', (['save_train'], {}), '(save_train)\n', (2395, 2407), False, 'import os\n'), ((2418, 2441), 'os.makedirs', 'os.makedirs', (['save_train'], {}), '(save_train)\n', (2429, 2441), False, 'import os\n'), ((2454, 2478), 'os.path.exists', 'os.path.exists', (['save_val'], {}), '(save_val)\n', (2468, 2478), False, 'import os\n'), ((2489, 2510), 'os.makedirs', 'os.makedirs', (['save_val'], {}), '(save_val)\n', (2500, 2510), False, 'import os\n'), ((2741, 2761), 'tensorflow.python.summary.summary.Summary', 'tf_summary.Summary', ([], {}), '()\n', (2759, 2761), True, 'from tensorflow.python.summary import summary as tf_summary\n'), ((7084, 7117), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7115, 7117), True, 'import tensorflow as tf\n'), ((8066, 8091), 'os.path.exists', 'os.path.exists', (['save_root'], {}), '(save_root)\n', (8080, 8091), False, 'import os\n'), ((8102, 8124), 'os.makedirs', 'os.makedirs', (['save_root'], {}), '(save_root)\n', (8113, 8124), False, 'import os\n'), ((8198, 8235), 'json.dump', 'json.dump', (['name_args_dict[dataset]', 'f'], {}), '(name_args_dict[dataset], f)\n', (8207, 8235), False, 'import json\n'), ((8248, 8273), 'os.path.exists', 'os.path.exists', (['save_root'], {}), '(save_root)\n', (8262, 8273), False, 'import os\n'), ((8284, 8306), 'os.makedirs', 'os.makedirs', (['save_root'], {}), '(save_root)\n', (8295, 8306), False, 'import os\n'), ((9690, 9749), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_valid_gen', 'Recall_at_k'], {'k': '(20)'}), '(vae_eval, bstep_valid_gen, Recall_at_k, k=20)\n', (9703, 9749), False, 'from evaluate import EvaluateModel, EvaluateCold, mse\n'), ((9771, 9830), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_valid_gen', 'Recall_at_k'], {'k': '(40)'}), '(vae_eval, bstep_valid_gen, Recall_at_k, k=40)\n', (9784, 9830), False, 'from evaluate import EvaluateModel, EvaluateCold, mse\n'), ((9851, 9909), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_valid_gen', 'NDCG_at_k'], {'k': '(100)'}), '(vae_eval, bstep_valid_gen, NDCG_at_k, k=100)\n', (9864, 9909), False, 'from evaluate import EvaluateModel, EvaluateCold, mse\n'), ((9977, 10039), 'evaluate.EvaluateCold', 'EvaluateCold', (['vae_eval_cold', 'bstep_cold_gen', 'Recall_at_k'], {'k': '(20)'}), '(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=20)\n', (9989, 10039), False, 'from evaluate import EvaluateModel, EvaluateCold, mse\n'), ((10070, 10132), 'evaluate.EvaluateCold', 'EvaluateCold', (['vae_eval_cold', 'bstep_cold_gen', 'Recall_at_k'], {'k': '(40)'}), '(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=40)\n', (10082, 10132), False, 'from evaluate import EvaluateModel, EvaluateCold, mse\n'), ((10162, 10223), 'evaluate.EvaluateCold', 'EvaluateCold', (['vae_eval_cold', 'bstep_cold_gen', 'NDCG_at_k'], {'k': '(100)'}), '(vae_eval_cold, bstep_cold_gen, NDCG_at_k, k=100)\n', (10174, 10223), False, 'from evaluate import EvaluateModel, EvaluateCold, mse\n'), ((12658, 12700), 'tensorflow.keras.backend.get_value', 'K.get_value', (['collabo_vae.embedding_weights'], {}), '(collabo_vae.embedding_weights)\n', (12669, 12700), True, 'from tensorflow.keras import backend as K\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras.utils import plot_model
from keras.constraints import non_neg
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import time
from time import localtime, strftime
import send_email
import pickle
import load_ratings
import evaluate as evl
# capture the current time, only for information and analysis of the results
date_now = strftime("%d/%m/%Y %H:%M:%S", localtime())
# load the ratings dataset
dataset = load_ratings.load('../')
# split the dataset into 70% for training and 30% for testing
train, test = train_test_split(dataset, test_size=0.3, random_state=0)
n_users, n_movies = len(dataset.userId.unique()), len(dataset.movieId.unique())
embedding_size = 10
# create the layers, using non-negative matrix factorization
movie_input = layers.Input(shape=[1], name='Movie')
user_input = layers.Input(shape=[1], name='User')
movie_embedding = layers.Embedding(input_dim=n_movies,
input_length=1,
output_dim=embedding_size,
name='Movie-Embedding',
embeddings_constraint=non_neg())(movie_input)
user_embedding = layers.Embedding(input_dim=n_users,
input_length=1,
output_dim=embedding_size,
name='User-Embedding',
embeddings_constraint=non_neg())(user_input)
movie_vec = layers.Flatten(name='FlattenMovies')(movie_embedding)
user_vec = layers.Flatten(name='FlattenUsers')(user_embedding)
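# the predicted rating is the dot product of the user and movie embedding vectors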
prod = layers.dot([movie_vec, user_vec], axes=1, name='DotProduct')
model = keras.Model([user_input, movie_input], prod)
model.compile(optimizer='adam', loss='mean_squared_error')
# save an image of the network model
plot_model(model, to_file='model_matrix.png', show_shapes=True)
# write the model summary to a file
with open('model_summary.txt', 'w') as f:
model.summary(print_fn=lambda x: f.write(x + '\n'))
f.close()
# variable holding the number of epochs
epochs = 16
test_map = evl.mean_average_precision(model, train, test)
test_ndcg = evl.normalized_dcg(model, train, test)
test_auc = evl.roc_auc(model, train, test)
print("Check MAP: %0.4f" % test_map)
print("Check NDCG: %0.4f" % test_ndcg)
print("Check ROC_AUC: %0.4f" % test_auc)
# save the models according to the Keras callback
save_path = '../models'
my_time = time.strftime("%Y_%m_%d_%H_%M")
model_name = 'matrix_factorization_' + my_time
full_name = save_path + '/' + model_name + '.h5'
m_check = ModelCheckpoint(full_name, monitor='val_loss', save_best_only=True)
# start timing the training
start_time = time.time()
# train the model
history = model.fit([train.userId, train.movieId],
train.rating,
epochs=epochs,
verbose=2,
shuffle=True,
validation_split=0.1,
callbacks=[m_check])
# print the training time in hh:mm:ss format
seconds = (time.time() - start_time)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
print('%02d:%02d:%02d' % (h, m, s))
# save the training history
history_name = 'dense_' + my_time
with open('../histories/' + history_name + '.pkl', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
test_map = evl.mean_average_precision(model, train, test)
test_ndcg = evl.normalized_dcg(model, train, test)
test_auc = evl.roc_auc(model, train, test)
print("MAP: %0.4f" % test_map)
print("NDCG: %0.4f" % test_ndcg)
print("ROC_AUC: %0.4f" % test_auc)
# plot the loss against the epochs, then save it as an image
loss = history.history['loss']
val_loss = history.history['val_loss']
pd.Series(loss).plot(label='Training loss')
pd.Series(val_loss).plot(label='Training val_loss')
plt.title('Training loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('training_loss_matrix.png', dpi=200)
test_preds = model.predict([test.userId, test.movieId])
final_test_mse = "Final test MSE: %0.3f" % mean_squared_error(test_preds, test.rating)
final_test_mae = "Final test MAE: %0.3f" % mean_absolute_error(test_preds, test.rating)
print(final_test_mse)
print(final_test_mae)
train_preds = model.predict([train.userId, train.movieId])
final_train_mse = "Final train MSE: %0.3f" % mean_squared_error(train_preds, train.rating)
final_train_mae = "Final train MAE: %0.3f" % mean_absolute_error(train_preds, train.rating)
print(final_train_mse)
print(final_train_mae)
# write the results to a file
with open('results.txt', 'w') as fr:
    fr.write('Network training date: ' + date_now + '\n')
    fr.write('\n' + 'Execution time: ' + str('%02d:%02d:%02d' % (h, m, s)) + '\n')
fr.write('\n' + str(final_test_mse) + '\n')
fr.write('\n' + str(final_test_mae) + '\n')
fr.write('\n' + str(final_train_mse) + '\n')
fr.write('\n' + str(final_train_mae) + '\n')
    fr.write('\n' + 'Number of training epochs: ' + str(epochs) + '\n')
fr.close()
# send an email with the run results, passing files to send as attachments
send_email.send(['training_loss_matrix.png', 'model_matrix.png', 'model_summary.txt'])
|
[
"evaluate.mean_average_precision",
"evaluate.roc_auc",
"evaluate.normalized_dcg"
] |
[((651, 675), 'load_ratings.load', 'load_ratings.load', (['"""../"""'], {}), "('../')\n", (668, 675), False, 'import load_ratings\n'), ((751, 807), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(dataset, test_size=0.3, random_state=0)\n', (767, 807), False, 'from sklearn.model_selection import train_test_split\n'), ((991, 1028), 'keras.layers.Input', 'layers.Input', ([], {'shape': '[1]', 'name': '"""Movie"""'}), "(shape=[1], name='Movie')\n", (1003, 1028), False, 'from keras import layers\n'), ((1042, 1078), 'keras.layers.Input', 'layers.Input', ([], {'shape': '[1]', 'name': '"""User"""'}), "(shape=[1], name='User')\n", (1054, 1078), False, 'from keras import layers\n'), ((1826, 1886), 'keras.layers.dot', 'layers.dot', (['[movie_vec, user_vec]'], {'axes': '(1)', 'name': '"""DotProduct"""'}), "([movie_vec, user_vec], axes=1, name='DotProduct')\n", (1836, 1886), False, 'from keras import layers\n'), ((1895, 1939), 'keras.Model', 'keras.Model', (['[user_input, movie_input]', 'prod'], {}), '([user_input, movie_input], prod)\n', (1906, 1939), False, 'import keras\n'), ((2036, 2099), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""model_matrix.png"""', 'show_shapes': '(True)'}), "(model, to_file='model_matrix.png', show_shapes=True)\n", (2046, 2099), False, 'from keras.utils import plot_model\n'), ((2320, 2366), 'evaluate.mean_average_precision', 'evl.mean_average_precision', (['model', 'train', 'test'], {}), '(model, train, test)\n', (2346, 2366), True, 'import evaluate as evl\n'), ((2379, 2417), 'evaluate.normalized_dcg', 'evl.normalized_dcg', (['model', 'train', 'test'], {}), '(model, train, test)\n', (2397, 2417), True, 'import evaluate as evl\n'), ((2429, 2460), 'evaluate.roc_auc', 'evl.roc_auc', (['model', 'train', 'test'], {}), '(model, train, test)\n', (2440, 2460), True, 'import evaluate as evl\n'), ((2666, 2697), 'time.strftime', 'time.strftime', (['"""%Y_%m_%d_%H_%M"""'], {}), "('%Y_%m_%d_%H_%M')\n", (2679, 2697), False, 'import time\n'), ((2804, 2871), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['full_name'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(full_name, monitor='val_loss', save_best_only=True)\n", (2819, 2871), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2927, 2938), 'time.time', 'time.time', ([], {}), '()\n', (2936, 2938), False, 'import time\n'), ((3592, 3638), 'evaluate.mean_average_precision', 'evl.mean_average_precision', (['model', 'train', 'test'], {}), '(model, train, test)\n', (3618, 3638), True, 'import evaluate as evl\n'), ((3651, 3689), 'evaluate.normalized_dcg', 'evl.normalized_dcg', (['model', 'train', 'test'], {}), '(model, train, test)\n', (3669, 3689), True, 'import evaluate as evl\n'), ((3701, 3732), 'evaluate.roc_auc', 'evl.roc_auc', (['model', 'train', 'test'], {}), '(model, train, test)\n', (3712, 3732), True, 'import evaluate as evl\n'), ((4077, 4110), 'matplotlib.pyplot.title', 'plt.title', (['"""Training loss"""'], {}), "('Training loss')\n", (4086, 4110), True, 'import matplotlib.pyplot as plt\n'), ((4111, 4131), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (4121, 4131), True, 'import matplotlib.pyplot as plt\n'), ((4132, 4150), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (4142, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4151, 4163), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4161, 4163), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4180), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4178, 4180), True, 'import matplotlib.pyplot as plt\n'), ((4181, 4191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4189, 4191), True, 'import matplotlib.pyplot as plt\n'), ((4192, 4202), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4200, 4202), True, 'import matplotlib.pyplot as plt\n'), ((5424, 5514), 'send_email.send', 'send_email.send', (["['training_loss_matrix.png', 'model_matrix.png', 'model_summary.txt']"], {}), "(['training_loss_matrix.png', 'model_matrix.png',\n 'model_summary.txt'])\n", (5439, 5514), False, 'import send_email\n'), ((596, 607), 'time.localtime', 'localtime', ([], {}), '()\n', (605, 607), False, 'from time import localtime, strftime\n'), ((1701, 1737), 'keras.layers.Flatten', 'layers.Flatten', ([], {'name': '"""FlattenMovies"""'}), "(name='FlattenMovies')\n", (1715, 1737), False, 'from keras import layers\n'), ((1766, 1801), 'keras.layers.Flatten', 'layers.Flatten', ([], {'name': '"""FlattenUsers"""'}), "(name='FlattenUsers')\n", (1780, 1801), False, 'from keras import layers\n'), ((3302, 3313), 'time.time', 'time.time', ([], {}), '()\n', (3311, 3313), False, 'import time\n'), ((3542, 3579), 'pickle.dump', 'pickle.dump', (['history.history', 'file_pi'], {}), '(history.history, file_pi)\n', (3553, 3579), False, 'import pickle\n'), ((4353, 4396), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_preds', 'test.rating'], {}), '(test_preds, test.rating)\n', (4371, 4396), False, 'from sklearn.metrics import mean_squared_error\n'), ((4440, 4484), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['test_preds', 'test.rating'], {}), '(test_preds, test.rating)\n', (4459, 4484), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4633, 4678), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['train_preds', 'train.rating'], {}), '(train_preds, train.rating)\n', (4651, 4678), False, 'from sklearn.metrics import mean_squared_error\n'), ((4724, 4770), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['train_preds', 'train.rating'], {}), '(train_preds, train.rating)\n', (4743, 4770), False, 'from sklearn.metrics import mean_absolute_error\n'), ((3981, 3996), 'pandas.Series', 'pd.Series', (['loss'], {}), '(loss)\n', (3990, 3996), True, 'import pandas as pd\n'), ((4025, 4044), 'pandas.Series', 'pd.Series', (['val_loss'], {}), '(val_loss)\n', (4034, 4044), True, 'import pandas as pd\n'), ((1364, 1373), 'keras.constraints.non_neg', 'non_neg', ([], {}), '()\n', (1371, 1373), False, 'from keras.constraints import non_neg\n'), ((1665, 1674), 'keras.constraints.non_neg', 'non_neg', ([], {}), '()\n', (1672, 1674), False, 'from keras.constraints import non_neg\n')]
|
# save the results and advice to a JSON file
import time
import json
from evaluate import evaluate,getTimes
from getdata import get_alldata
def get_advice(proposal,times):
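    # average each accumulated deviation score over the number of repetitions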
standard_proposal=[x/times for x in proposal]
    if max(standard_proposal)<1.5:
        #print("Good form, keep it up")
        return "Good form, keep it up"
    elif max(standard_proposal[0:4])>2.5:
        return "Arms are bending a lot, you did not pull fully above the bar"
    elif max(standard_proposal[0:4])>1.5:
        return "Mostly standard, try not to bend your arms"
    elif max(standard_proposal[4:8])>1.5:
        return "Mostly standard, keeping your legs straighter would be better"
def save_result(filename,times,proposal,W,new_filename=""):
    with open(filename,'r',encoding='utf8') as fp:
result_json_data = json.load(fp)
#print(result_json_data)
new_result={}
new_result.update(id=str(123),
time=str(time.strftime("%Y--%m--%d %H:%M:%S", time.localtime(int(time.time())))),
num=str(times),
advice=get_advice(proposal,times),
energy=str(W),
img=str(new_filename))
result_json_data["resultlist"].append(new_result)
with open(filename,"w",encoding='utf-8') as f:
json.dump(result_json_data,f,ensure_ascii=False)
#print("加载入文件完成...")
if __name__ == '__main__':
    standard_anglist=get_alldata("./jsonfile/standard/")#load the reference (standard) movement
    now_anglist,now_xy_list,W=get_alldata("./jsonfile/now/",isRunning=True)#load the current movement data
    times = getTimes(now_xy_list,0,"test")#count repetitions; counting uses the head, so pass 0 (the head's X is the first value of each list)
    proposal=evaluate(standard_anglist,now_anglist,int(times))
savereuslt("./result/result.json",times,proposal,W)
|
[
"evaluate.getTimes"
] |
[((1263, 1298), 'getdata.get_alldata', 'get_alldata', (['"""./jsonfile/standard/"""'], {}), "('./jsonfile/standard/')\n", (1274, 1298), False, 'from getdata import get_alldata\n'), ((1336, 1382), 'getdata.get_alldata', 'get_alldata', (['"""./jsonfile/now/"""'], {'isRunning': '(True)'}), "('./jsonfile/now/', isRunning=True)\n", (1347, 1382), False, 'from getdata import get_alldata\n'), ((1403, 1435), 'evaluate.getTimes', 'getTimes', (['now_xy_list', '(0)', '"""test"""'], {}), "(now_xy_list, 0, 'test')\n", (1411, 1435), False, 'from evaluate import evaluate, getTimes\n'), ((641, 654), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (650, 654), False, 'import json\n'), ((1137, 1187), 'json.dump', 'json.dump', (['result_json_data', 'f'], {'ensure_ascii': '(False)'}), '(result_json_data, f, ensure_ascii=False)\n', (1146, 1187), False, 'import json\n'), ((828, 839), 'time.time', 'time.time', ([], {}), '()\n', (837, 839), False, 'import time\n')]
|
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
sys.path.append("..")
mpl.use('tkagg') # issues with Big Sur
import matplotlib.pyplot as plt
from strategy.bollinger_bands import bollinger_bands
from backtest import Backtest
from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR
# load data
df = pd.read_csv('../../database/hkex_ticks_day/hkex_0005.csv', header=0, index_col='Date', parse_dates=True)
# select time range
df = df.loc[pd.Timestamp('2018-01-01'):pd.Timestamp('2020-01-01')]
ticker = "0005.HK"
# Bollinger Bands
bb = bollinger_bands(df)
bb_fig = bb.plot_BB()
bb_fig.suptitle('HK.0005 - Bollinger Bands', fontsize=14)
bb_fig.savefig('./figures/volatility/01-bollinger-bands-plot')
plt.show()
signals = bb.gen_signals()
signal_fig = bb.plot_signals(signals)
signal_fig.suptitle('Bollinger Bands - Signals', fontsize=14)
signal_fig.savefig('./figures/volatility/01-bollinger-bands_signals')
plt.show()
# Backtesting
portfolio, backtest_fig = Backtest(ticker, signals, df)
print("Final portfolio value (including cash): {value:.4f} ".format(value = portfolio['total'][-1]))
print("Total return: {value:.4f}".format(value = portfolio['total'][-1] - portfolio['total'][0]))
print("Average daily return: {value:.4f}%".format(value = portfolio['returns'].mean()*100))
backtest_fig.suptitle('Bollinger Bands - Portfolio value', fontsize=14)
backtest_fig.savefig('./figures/volatility/01-bollinger-bands_portfolio-value')
plt.show()
# Evaluate strategy
# 1. Sharpe ratio
sharpe_ratio = SharpeRatio(portfolio)
print("Sharpe ratio: {ratio:.4f} ".format(ratio = sharpe_ratio))
# 3. Maximum drawdown
maxDrawdown_fig, max_daily_drawdown, daily_drawdown = MaxDrawdown(df)
maxDrawdown_fig.suptitle('Bollinger Bands - Maximum drawdown', fontsize=14)
maxDrawdown_fig.savefig('./figures/volatility/01-bollinger-bands_maximum-drawdown')
plt.show()
# 4. Compound Annual Growth Rate
cagr = CAGR(portfolio)
print("CAGR: {cagr:.4f} ".format(cagr = cagr))
|
[
"evaluate.CAGR",
"evaluate.SharpeRatio",
"evaluate.MaxDrawdown"
] |
[((107, 128), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (122, 128), False, 'import sys\n'), ((129, 145), 'matplotlib.use', 'mpl.use', (['"""tkagg"""'], {}), "('tkagg')\n", (136, 145), True, 'import matplotlib as mpl\n'), ((373, 481), 'pandas.read_csv', 'pd.read_csv', (['"""../../database/hkex_ticks_day/hkex_0005.csv"""'], {'header': '(0)', 'index_col': '"""Date"""', 'parse_dates': '(True)'}), "('../../database/hkex_ticks_day/hkex_0005.csv', header=0,\n index_col='Date', parse_dates=True)\n", (384, 481), True, 'import pandas as pd\n'), ((611, 630), 'strategy.bollinger_bands.bollinger_bands', 'bollinger_bands', (['df'], {}), '(df)\n', (626, 630), False, 'from strategy.bollinger_bands import bollinger_bands\n'), ((774, 784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (782, 784), True, 'import matplotlib.pyplot as plt\n'), ((984, 994), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (992, 994), True, 'import matplotlib.pyplot as plt\n'), ((1037, 1066), 'backtest.Backtest', 'Backtest', (['ticker', 'signals', 'df'], {}), '(ticker, signals, df)\n', (1045, 1066), False, 'from backtest import Backtest\n'), ((1511, 1521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1519, 1521), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1599), 'evaluate.SharpeRatio', 'SharpeRatio', (['portfolio'], {}), '(portfolio)\n', (1588, 1599), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((1742, 1757), 'evaluate.MaxDrawdown', 'MaxDrawdown', (['df'], {}), '(df)\n', (1753, 1757), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((1918, 1928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1926, 1928), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1985), 'evaluate.CAGR', 'CAGR', (['portfolio'], {}), '(portfolio)\n', (1974, 1985), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((511, 537), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-01"""'], {}), "('2018-01-01')\n", (523, 537), True, 'import pandas as pd\n'), ((538, 564), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (550, 564), True, 'import pandas as pd\n')]
|
import os
import re
import sys
sys.path.append('.')
import cv2
import csv
import math
import time
import string
import random
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import PIL.Image  # needed by preprocess() below
import torchvision.transforms as transforms  # needed by preprocess() below
from pprint import pprint
from collections import deque
from operator import itemgetter
from collections import OrderedDict
from scipy.optimize import linear_sum_assignment
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from lib.network.rtpose_vgg import get_model
from lib.network import im_transform
from lib.config import update_config, cfg
from evaluate.coco_eval import get_outputs, handle_paf_and_heat
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans
from lib.utils.paf_to_pose import paf_to_pose_cpp
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', help='experiment configure file name',
default='./experiments/vgg19_368x368_sgd.yaml', type=str)
parser.add_argument('--weight', type=str,
default='pose_model.pth')
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--video', help = 'video path',
default=int(0), type=str)
args = parser.parse_args()
body_labels = {0:'Nose', 1: 'Neck', 2: 'RShoulder', 3:'RElbow', 4:'RWrist', 5:'LShoulder', 6:'LElbow',
7:'LWrist', 8:'RHip', 9:'RKnee', 10:'RAnkle', 11:'LHip', 12:'LKnee', 13:'LAnkle', 14:'REye',
15:'LEye', 16:'REar', 17:'LEar'}
body_idx = dict([[v,k] for k,v in body_labels.items()])
def id_gen(size=6, chars=string.ascii_uppercase + string.digits):
'''
https://pythontips.com/2013/07/28/generating-a-random-string/
input: id_gen(3, "6793YUIO")
output: 'Y3U'
'''
return ''.join(random.choice(chars) for x in range(size))
def preprocess(image):
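    # NOTE: model_h, model_w, mean and std are assumed to be defined as
    # globals elsewhere in this script before this helper is called.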
global device
device = torch.device('cuda')
image = cv2.resize(image, (model_h, model_w))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(image)
image = transforms.functional.to_tensor(image).to(device)
image.sub_(mean[:, None, None]).div_(std[:, None, None])
return image[None, ...]
def inference(image):
data = preprocess(image)
cmap, paf = model_trt(data)
cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
counts, objects, peaks = parse_objects(cmap, paf) #, cmap_threshold=0.15, link_threshold=0.15)
body_dict = draw_objects(image, counts, objects, peaks)
return image, body_dict
def IOU(boxA, boxB):
# pyimagesearch: determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
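# Worked example (illustrative, not part of the original script): with
# boxA = (0, 0, 10, 10) and boxB = (5, 5, 15, 15), the intersection is 6 * 6 = 36
# (the +1 terms treat coordinates as inclusive pixel indices), each box area is
# 11 * 11 = 121, so IOU = 36 / (121 + 121 - 36) ~= 0.175.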
def get_bbox(kp_list):
bbox = []
for aggs in [min, max]:
for idx in range(2):
bound = aggs(kp_list, key=itemgetter(idx))[idx]
bbox.append(bound)
return bbox
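# Illustrative example (assuming kp_list holds (x, y) tuples):
#   get_bbox([(2, 5), (7, 1), (4, 9)]) -> [2, 1, 7, 9], i.e. (min_x, min_y, max_x, max_y).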
def tracker_match(trackers, detections, iou_thrd = 0.3):
'''
From current list of trackers and new detections, output matched detections,
unmatched trackers, unmatched detections.
https://towardsdatascience.com/computer-vision-for-tracking-8220759eee85
'''
IOU_mat= np.zeros((len(trackers),len(detections)),dtype=np.float32)
for t,trk in enumerate(trackers):
for d,det in enumerate(detections):
IOU_mat[t,d] = IOU(trk,det)
# Produces matches
# Solve the maximizing the sum of IOU assignment problem using the
# Hungarian algorithm (also known as Munkres algorithm)
matched_idx = linear_sum_assignment(-IOU_mat)
matched_idx = np.asarray(matched_idx)
matched_idx = np.transpose(matched_idx)
unmatched_trackers, unmatched_detections = [], []
for t,trk in enumerate(trackers):
if(t not in matched_idx[:,0]):
unmatched_trackers.append(t)
for d, det in enumerate(detections):
if(d not in matched_idx[:,1]):
unmatched_detections.append(d)
matches = []
# For creating trackers we consider any detection with an
    # overlap less than iou_thrd to signify the existence of
# an untracked object
for m in matched_idx:
if(IOU_mat[m[0],m[1]] < iou_thrd):
unmatched_trackers.append(m[0])
unmatched_detections.append(m[1])
else:
matches.append(m.reshape(1,2))
if(len(matches)==0):
matches = np.empty((0,2),dtype=int)
else:
matches = np.concatenate(matches,axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
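# Illustrative call (hypothetical boxes, not from the original script):
#   trackers   = [[0, 0, 10, 10], [50, 50, 60, 60]]
#   detections = [[1, 1, 11, 11]]
#   matches, unmatched_det, unmatched_trk = tracker_match(trackers, detections)
# IOU(trackers[0], detections[0]) ~= 0.70 > 0.3, so matches == [[0, 0]];
# tracker 1 overlaps nothing, so unmatched_trk == [1] and unmatched_det == [].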
class PersonTracker(object):
def __init__(self):
self.id = id_gen() #int(time.time() * 1000)
self.q = deque(maxlen=10)
return
def set_bbox(self, bbox):
self.bbox = bbox
x1, y1, x2, y2 = bbox
        # NB: despite the names, self.h is the x-extent and self.w the y-extent;
        # the centroid computed below uses them consistently, so the maths is correct.
        self.h = 1e-6 + x2 - x1
        self.w = 1e-6 + y2 - y1
self.centroid = tuple(map(int, ( x1 + self.h / 2, y1 + self.w / 2)))
return
def update_pose(self, pose_dict, frame_shape):
'''
ft_vec = np.zeros(2 * len(body_labels))
for ky in pose_dict:
idx = body_idx[ky]
ft_vec[2 * idx: 2 * (idx + 1)] = 2 * (np.array(pose_dict[ky]) - np.array(self.centroid)) / np.array((self.h, self.w))
self.q.append(ft_vec)
'''
        # The original ActionAI code used a 36-dim feature vector; extended here to 72 dims
self.dict = pose_dict
ft_vec = np.zeros(2 * len(body_labels))
full_vec = np.zeros(2 * len(body_labels))
angle_vec = np.zeros(5)
for ky in pose_dict:
idx = body_idx[ky]
ft_vec[2 * idx: 2 * (idx + 1)] = 2 * (np.array(pose_dict[ky]) - np.array(self.centroid)) / np.array((self.h, self.w))
full_vec[2 * idx: 2 * (idx + 1)] = np.array(pose_dict[ky]) / np.array((frame_shape[1],frame_shape[0]))
vec = np.hstack([ft_vec, full_vec])
self.q.append(vec)
return
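    # Feature layout produced by update_pose (assuming all 18 keypoints are present):
    #   ft_vec   (36 values): per-keypoint (x, y) offset from the tracker centroid,
    #            scaled by the bbox extent -> translation/scale normalised.
    #   full_vec (36 values): per-keypoint (x, y) over the frame (width, height)
    #            -> absolute position in [0, 1].
    # np.hstack of the two gives the 72-dim vector pushed onto self.q (deque of 10).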
def annotate(self, image):
x1, y1, x2, y2 = self.bbox
image = cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 3)
image = cv2.putText(image, self.activity, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
image = cv2.drawMarker(image, self.centroid, (255, 0, 0), 0, 30, 4)
return image
# update config file
update_config(cfg, args)
model = get_model('vgg19')
model.load_state_dict(torch.load(args.weight))
model.cuda()
model.float()
model.eval()
video_capture = cv2.VideoCapture(args.video)
w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc_cap = cv2.VideoWriter_fourcc(*'MJPG')
video_capture.set(cv2.CAP_PROP_FOURCC, fourcc_cap)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, w)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
DEBUG = False
WRITE2CSV = True
WRITE2VIDEO = True
RUNSECONDARY = False
if WRITE2CSV:
activity = os.path.basename(args.video)
dataFile = open('data/{}.csv'.format(activity),'w')
newFileWriter = csv.writer(dataFile)
if WRITE2VIDEO:
# Define the codec and create VideoWriter object
name = 'out.mp4'
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
write_video = cv2.VideoWriter(name, fourcc, 30.0, (w, h))
if RUNSECONDARY:
import tensorflow as tf
secondary_model = tf.keras.models.load_model('models/lstm_spin_squat.h5')
window = 3
pose_vec_dim = 36
motion_dict = {0: 'spin', 1: 'squat'}
trackers = []
if __name__ == "__main__":
video_capture = cv2.VideoCapture(args.video)
while True:
bboxes = []
# Capture frame-by-frame
ret, oriImg = video_capture.read()
shape_dst = np.min(oriImg.shape[0:2])
with torch.no_grad():
paf, heatmap, imscale = get_outputs(
oriImg, model, 'rtpose')
humans = paf_to_pose_cpp(heatmap, paf, cfg)
out = draw_humans(oriImg, humans)
for idx, body in enumerate(humans):
bbox = get_bbox(list(body.tuple_list(oriImg)))
bboxes.append((bbox, body.get_dictionary(oriImg)))
track_boxes = [tracker.bbox for tracker in trackers]
        # NB: tracker_match returns (matches, unmatched_detections, unmatched_trackers),
        # so these two names arrive swapped; the loops below still index the right lists
        # (stale trackers are popped, new trackers are built from unmatched detections).
        matched, unmatched_trackers, unmatched_detections = tracker_match(track_boxes, [b[0] for b in bboxes])
for idx, jdx in matched:
trackers[idx].set_bbox(bboxes[jdx][0])
trackers[idx].update_pose(bboxes[jdx][1], out.shape)
for idx in unmatched_detections:
try:
trackers.pop(idx)
except:
pass
for idx in unmatched_trackers:
person = PersonTracker()
person.set_bbox(bboxes[idx][0])
person.update_pose(bboxes[idx][1], out.shape)
trackers.append(person)
if RUNSECONDARY:
for tracker in trackers:
print(len(tracker.q))
if len(tracker.q) >= 3:
sample = np.array(list(tracker.q)[:3])
sample = sample.reshape(1, pose_vec_dim, window)
pred_activity = motion_dict[np.argmax(secondary_model.predict(sample)[0])]
tracker.activity = pred_activity
                    out = tracker.annotate(out)
print(pred_activity)
if DEBUG:
pprint([(tracker.id, np.vstack(tracker.q)) for tracker in trackers])
if WRITE2CSV:
for tracker in trackers:
print(len(tracker.q))
if len(tracker.q) >= 3:
newFileWriter.writerow([activity] + list(np.hstack(list(tracker.q)[:3])))
if WRITE2VIDEO:
write_video.write(out)
# Display the resulting frame
cv2.imshow('Video', out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
try:
dataFile.close()
except:
pass
try:
write_video.release()
except:
pass
|
[
"evaluate.coco_eval.get_outputs"
] |
[((31, 51), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (46, 51), False, 'import sys\n'), ((964, 989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (987, 989), False, 'import argparse\n'), ((7240, 7264), 'lib.config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (7253, 7264), False, 'from lib.config import update_config, cfg\n'), ((7277, 7295), 'lib.network.rtpose_vgg.get_model', 'get_model', (['"""vgg19"""'], {}), "('vgg19')\n", (7286, 7295), False, 'from lib.network.rtpose_vgg import get_model\n'), ((7405, 7433), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (7421, 7433), False, 'import cv2\n'), ((7509, 7540), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (7531, 7540), False, 'import cv2\n'), ((2163, 2183), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2175, 2183), False, 'import torch\n'), ((2196, 2233), 'cv2.resize', 'cv2.resize', (['image', '(model_h, model_w)'], {}), '(image, (model_h, model_w))\n', (2206, 2233), False, 'import cv2\n'), ((2246, 2284), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2258, 2284), False, 'import cv2\n'), ((4499, 4530), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['(-IOU_mat)'], {}), '(-IOU_mat)\n', (4520, 4530), False, 'from scipy.optimize import linear_sum_assignment\n'), ((4549, 4572), 'numpy.asarray', 'np.asarray', (['matched_idx'], {}), '(matched_idx)\n', (4559, 4572), True, 'import numpy as np\n'), ((4591, 4616), 'numpy.transpose', 'np.transpose', (['matched_idx'], {}), '(matched_idx)\n', (4603, 4616), True, 'import numpy as np\n'), ((7323, 7346), 'torch.load', 'torch.load', (['args.weight'], {}), '(args.weight)\n', (7333, 7346), False, 'import torch\n'), ((7789, 7817), 'os.path.basename', 'os.path.basename', (['args.video'], {}), '(args.video)\n', (7805, 7817), False, 'import os\n'), ((7894, 7914), 'csv.writer', 'csv.writer', (['dataFile'], {}), '(dataFile)\n', (7904, 7914), False, 'import csv\n'), ((8019, 8050), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (8041, 8050), False, 'import cv2\n'), ((8069, 8112), 'cv2.VideoWriter', 'cv2.VideoWriter', (['name', 'fourcc', '(30.0)', '(w, h)'], {}), '(name, fourcc, 30.0, (w, h))\n', (8084, 8112), False, 'import cv2\n'), ((8181, 8236), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""models/lstm_spin_squat.h5"""'], {}), "('models/lstm_spin_squat.h5')\n", (8207, 8236), True, 'import tensorflow as tf\n'), ((8382, 8410), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (8398, 8410), False, 'import cv2\n'), ((10820, 10843), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10841, 10843), False, 'import cv2\n'), ((5344, 5371), 'numpy.empty', 'np.empty', (['(0, 2)'], {'dtype': 'int'}), '((0, 2), dtype=int)\n', (5352, 5371), True, 'import numpy as np\n'), ((5398, 5429), 'numpy.concatenate', 'np.concatenate', (['matches'], {'axis': '(0)'}), '(matches, axis=0)\n', (5412, 5429), True, 'import numpy as np\n'), ((5450, 5480), 'numpy.array', 'np.array', (['unmatched_detections'], {}), '(unmatched_detections)\n', (5458, 5480), True, 'import numpy as np\n'), ((5482, 5510), 'numpy.array', 'np.array', (['unmatched_trackers'], {}), '(unmatched_trackers)\n', (5490, 5510), True, 'import numpy as np\n'), ((5634, 5650), 'collections.deque', 'deque', ([], {'maxlen': 
'(10)'}), '(maxlen=10)\n', (5639, 5650), False, 'from collections import deque\n'), ((6457, 6468), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (6465, 6468), True, 'import numpy as np\n'), ((6798, 6827), 'numpy.hstack', 'np.hstack', (['[ft_vec, full_vec]'], {}), '([ft_vec, full_vec])\n', (6807, 6827), True, 'import numpy as np\n'), ((6952, 7008), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(3)'], {}), '(image, (x1, y1), (x2, y2), (0, 0, 255), 3)\n', (6965, 7008), False, 'import cv2\n'), ((7025, 7125), 'cv2.putText', 'cv2.putText', (['image', 'self.activity', '(x1, y1 - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', '(0, 0, 255)', '(2)'], {}), '(image, self.activity, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, \n 0.9, (0, 0, 255), 2)\n', (7036, 7125), False, 'import cv2\n'), ((7137, 7196), 'cv2.drawMarker', 'cv2.drawMarker', (['image', 'self.centroid', '(255, 0, 0)', '(0)', '(30)', '(4)'], {}), '(image, self.centroid, (255, 0, 0), 0, 30, 4)\n', (7151, 7196), False, 'import cv2\n'), ((8553, 8578), 'numpy.min', 'np.min', (['oriImg.shape[0:2]'], {}), '(oriImg.shape[0:2])\n', (8559, 8578), True, 'import numpy as np\n'), ((8736, 8770), 'lib.utils.paf_to_pose.paf_to_pose_cpp', 'paf_to_pose_cpp', (['heatmap', 'paf', 'cfg'], {}), '(heatmap, paf, cfg)\n', (8751, 8770), False, 'from lib.utils.paf_to_pose import paf_to_pose_cpp\n'), ((8799, 8826), 'lib.utils.common.draw_humans', 'draw_humans', (['oriImg', 'humans'], {}), '(oriImg, humans)\n', (8810, 8826), False, 'from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans\n'), ((10646, 10670), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'out'], {}), "('Video', out)\n", (10656, 10670), False, 'import cv2\n'), ((2065, 2085), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (2078, 2085), False, 'import random\n'), ((8593, 8608), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8606, 8608), False, 'import torch\n'), ((8646, 8682), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['oriImg', 'model', '"""rtpose"""'], {}), "(oriImg, model, 'rtpose')\n", (8657, 8682), False, 'from evaluate.coco_eval import get_outputs, handle_paf_and_heat\n'), ((6632, 6658), 'numpy.array', 'np.array', (['(self.h, self.w)'], {}), '((self.h, self.w))\n', (6640, 6658), True, 'import numpy as np\n'), ((6706, 6729), 'numpy.array', 'np.array', (['pose_dict[ky]'], {}), '(pose_dict[ky])\n', (6714, 6729), True, 'import numpy as np\n'), ((6732, 6774), 'numpy.array', 'np.array', (['(frame_shape[1], frame_shape[0])'], {}), '((frame_shape[1], frame_shape[0]))\n', (6740, 6774), True, 'import numpy as np\n'), ((10683, 10697), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10694, 10697), False, 'import cv2\n'), ((3783, 3798), 'operator.itemgetter', 'itemgetter', (['idx'], {}), '(idx)\n', (3793, 3798), False, 'from operator import itemgetter\n'), ((6579, 6602), 'numpy.array', 'np.array', (['pose_dict[ky]'], {}), '(pose_dict[ky])\n', (6587, 6602), True, 'import numpy as np\n'), ((6605, 6628), 'numpy.array', 'np.array', (['self.centroid'], {}), '(self.centroid)\n', (6613, 6628), True, 'import numpy as np\n'), ((10260, 10280), 'numpy.vstack', 'np.vstack', (['tracker.q'], {}), '(tracker.q)\n', (10269, 10280), True, 'import numpy as np\n')]
|
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import logging
from evaluate import test
class Solver(object):
default_adam_args = {"lr": 0.001,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 0.0}
def __init__(self, optim=torch.optim.Adam, optim_args={}):
optim_args_merged = self.default_adam_args.copy()
optim_args_merged.update(optim_args)
self.optim_args = optim_args_merged
self.optim = optim
self.logger = logging.getLogger()
self._reset_histories()
def _reset_histories(self):
"""
Resets train and val histories for the accuracy and the loss.
"""
self.train_loss_history = []
self.train_acc_history = []
self.val_acc_history = []
#@profile
def train(self, cnn, train_iter, val_iter, text_field, label_field,
num_epochs=10, clip=0.5, reg_lambda=0.0, cuda=False, best=True,
model_dir='../model/', log_dir='./logs', verbose=False):
# Zero gradients of both optimizers
optim = self.optim(cnn.parameters(), **self.optim_args)
#self.tf_logger = Logger(log_dir)
self._reset_histories()
if cuda:
cnn.cuda()
self.logger.info('START TRAIN')
self.logger.info('CUDA = ' + str(cuda))
torch.save(cnn.state_dict(), os.path.join(model_dir, 'cnn.pkl'))
best_val_acc = 0.0
best_epoch = 0
criterion = nn.CrossEntropyLoss()
if cuda:
criterion.cuda()
ss = 0
for epoch in range(num_epochs):
self.logger.info('Epoch: %d start ...' % (epoch + 1))
cnn.train()
for batch in train_iter:
ss += 1
input, target = batch.text, batch.label # input: len x N; target: N
# Reset
optim.zero_grad()
loss = 0
# Setup data
input.data.t_() # N x len
if cuda:
input = input.cuda()
target = target.cuda()
# Run words through cnn
scores = cnn(input)
                # accumulate the L2 norm (not the squared norm) of every parameter
                l2_reg = None
for W in cnn.parameters():
if l2_reg is None:
l2_reg = W.norm(2)
else:
l2_reg = l2_reg + W.norm(2)
loss = criterion(scores, target) + l2_reg * reg_lambda
# Backpropagation
loss.backward()
torch.nn.utils.clip_grad_norm_(cnn.parameters(), clip)
optim.step()
'''
info = {
'Loss': loss.data[0],
}
for tag, value in info.items():
self.tf_logger.scalar_summary(tag, value, ss)
'''
if verbose:
self.logger.info('Epoch: %d, Iteration: %d, loss: %f' % (epoch + 1, ss, loss.item()))
val_acc = test(cnn, val_iter, text_field, label_field, cuda=cuda, verbose=verbose)
'''
info = {
'val_acc': val_acc
}
for tag, value in info.items():
self.tf_logger.scalar_summary(tag, value, epoch)
'''
if best:
if val_acc > best_val_acc:
torch.save(cnn.state_dict(), os.path.join(model_dir, 'cnn.pkl'))
best_val_acc = val_acc
else:
torch.save(cnn.state_dict(), os.path.join(model_dir, 'cnn.pkl'))
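# Minimal usage sketch (hypothetical; the iterators and fields come from
# torchtext-style loaders defined elsewhere in this project):
#   solver = Solver(optim_args={"lr": 3e-4})
#   solver.train(cnn, train_iter, val_iter, text_field, label_field,
#                num_epochs=5, cuda=torch.cuda.is_available())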
|
[
"evaluate.test"
] |
[((700, 719), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (717, 719), False, 'import logging\n'), ((1684, 1705), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1703, 1705), True, 'import torch.nn as nn\n'), ((1576, 1610), 'os.path.join', 'os.path.join', (['model_dir', '"""cnn.pkl"""'], {}), "(model_dir, 'cnn.pkl')\n", (1588, 1610), False, 'import os\n'), ((3255, 3327), 'evaluate.test', 'test', (['cnn', 'val_iter', 'text_field', 'label_field'], {'cuda': 'cuda', 'verbose': 'verbose'}), '(cnn, val_iter, text_field, label_field, cuda=cuda, verbose=verbose)\n', (3259, 3327), False, 'from evaluate import test\n'), ((2084, 2101), 'torch.optim.zero_grad', 'optim.zero_grad', ([], {}), '()\n', (2099, 2101), False, 'from torch import optim\n'), ((2846, 2858), 'torch.optim.step', 'optim.step', ([], {}), '()\n', (2856, 2858), False, 'from torch import optim\n'), ((3794, 3828), 'os.path.join', 'os.path.join', (['model_dir', '"""cnn.pkl"""'], {}), "(model_dir, 'cnn.pkl')\n", (3806, 3828), False, 'import os\n'), ((3652, 3686), 'os.path.join', 'os.path.join', (['model_dir', '"""cnn.pkl"""'], {}), "(model_dir, 'cnn.pkl')\n", (3664, 3686), False, 'import os\n')]
|
import pathlib
import itertools
import logging
import logging.config
from dataclasses import dataclass
import yaap
import torch
import torch.utils.tensorboard
import torchmodels
import utils
import train
import evaluate
import models
import models.dialog
import datasets
from datasets import Dialog
from loopers import LossInferencer
from loopers import EvaluatingInferencer
from loopers import LogInferencer
from loopers import Generator
from loopers import LogGenerator
from loopers import BeamSearchGenerator
from loopers import EvaluatingGenerator
@dataclass
class FinegrainedValidator(LogInferencer, EvaluatingInferencer, LossInferencer):
pass
@dataclass
class GenerativeValidator(
LogGenerator,
EvaluatingGenerator,
BeamSearchGenerator,
Generator
):
pass
def create_parser():
parser = yaap.Yaap()
# data options
parser.add_pth("data-dir", is_dir=True, must_exist=True,
default=(pathlib.Path(__file__).absolute().parent
.joinpath("tests/data/json")),
help="Path to the data dir. Must contain 'train.json' and "
"'dev.json'.")
parser.add_str("eval-splits", default=("train", "dev", "test"),
choices=("train", "dev", "test"),
help="List of splits to evaluate on.")
# model options
parser.add_pth("model-path", must_exist=True,
default=(pathlib.Path(__file__).absolute().parent
.joinpath("configs/vhda-mini.yml")),
help="Path to the model configuration file.")
parser.add_int("gpu", min_bound=0,
help="GPU device to use. (e.g. 0, 1, etc.)")
# display options
parser.add_pth("logging-config", must_exist=True,
default=(pathlib.Path(__file__).absolute().parent
.joinpath("configs/logging.yml")),
help="Path to a logging config file (yaml/json).")
parser.add_pth("save-dir", default="out", is_dir=True,
help="Directory to save output files.")
parser.add_bol("overwrite", help="Whether to overwrite save dir.")
parser.add_int("report-every",
help="Report training statistics every N steps.")
# training options
parser.add_int("batch-size", default=32,
help="Mini-batch size.")
parser.add_str("optimizer", default="adam", choices=("adam",),
help="Optimizer to use.")
parser.add_flt("gradient-clip",
help="Clip gradients by norm size.")
parser.add_flt("l2norm-weight",
help="Weight of l2-norm regularization.")
parser.add_flt("learning-rate", default=0.001, min_bound=0,
help="Optimizer learning rate.")
parser.add_int("epochs", default=10, min_bound=1,
help="Number of epochs to train for.")
parser.add_str("kld-schedule",
help="KLD w schedule given as a list of data points. Each "
"data point is a pair of training step and target "
"dropout scale. Steps in-between data points will be "
"interpolated. e.g. '[(0, 1.0), (10000, 0.1)]'")
parser.add_str("dropout-schedule",
help="Dropout schedule given as a list of data points. Each "
"data point is a pair of training step and target "
"dropout scale. Steps in-between data points will be "
"interpolated. e.g. '[(0, 1.0), (10000, 0.1)]'")
parser.add_bol("disable-kl",
help="Whether to disable kl-divergence term.")
parser.add_str("kl-mode", default="kl-mi",
help="KL mode: one of kl, kl-mi, kl-mi+.")
# validation options
parser.add_int("valid-batch-size", default=32,
help="Mini-batch sizes for validation inference.")
parser.add_flt("validate-every", default=1,
help="Number of epochs in-between validations.")
parser.add_bol("early-stop",
help="Whether to enable early-stopping.")
parser.add_str("early-stop-criterion", default="~loss",
help="The training statistics key to use as criterion "
"for early-stopping. Prefix with '~' to denote "
"negation during comparison.")
parser.add_int("early-stop-patience",
help="Number of epochs to wait without breaking "
"records until executing early-stopping.")
parser.add_int("beam-size", default=4)
parser.add_int("max-conv-len", default=20)
parser.add_int("max-sent-len", default=30)
# testing options
parser.add_str("embed-type", default="glove",
choices=("glove", "bin", "hdf5"),
help="Type of embedding to load for emb. evaluation.")
parser.add_pth("embed-path", must_exist=True,
default=(pathlib.Path(__file__).absolute().parent
.joinpath("tests/data/glove/"
"glove.840B.300d.woz.txt")),
help="Path to embedding file for emb. evaluation.")
# misc
parser.add_int("seed", help="Random seed.")
return parser
def main():
args = utils.parse_args(create_parser())
if args.logging_config is not None:
logging.config.dictConfig(utils.load_yaml(args.logging_config))
save_dir = pathlib.Path(args.save_dir)
if (not args.overwrite and
save_dir.exists() and utils.has_element(save_dir.glob("*.json"))):
raise FileExistsError(f"save directory ({save_dir}) is not empty")
shell = utils.ShellUtils()
shell.mkdir(save_dir, silent=True)
logger = logging.getLogger("train")
utils.seed(args.seed)
logger.info("loading data...")
load_fn = utils.chain_func(lambda x: list(map(Dialog.from_json, x)),
utils.load_json)
data_dir = pathlib.Path(args.data_dir)
train_data = load_fn(str(data_dir.joinpath("train.json")))
valid_data = load_fn(str(data_dir.joinpath("dev.json")))
test_data = load_fn(str(data_dir.joinpath("test.json")))
processor = datasets.DialogProcessor(
sent_processor=datasets.SentProcessor(
bos=True,
eos=True,
lowercase=True,
tokenizer="space",
max_len=30
),
boc=True,
eoc=True,
state_order="randomized",
max_len=30
)
processor.prepare_vocabs(
list(itertools.chain(train_data, valid_data, test_data)))
utils.save_pickle(processor, save_dir.joinpath("processor.pkl"))
logger.info("preparing model...")
utils.save_json(utils.load_yaml(args.model_path),
save_dir.joinpath("model.json"))
torchmodels.register_packages(models)
model_cls = torchmodels.create_model_cls(models, args.model_path)
model: models.AbstractTDA = model_cls(processor.vocabs)
model.reset_parameters()
utils.report_model(logger, model)
device = torch.device("cpu")
if args.gpu is not None:
device = torch.device(f"cuda:{args.gpu}")
model = model.to(device)
def create_scheduler(s):
return utils.PiecewiseScheduler([utils.Coordinate(*t) for t in eval(s)])
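    # e.g. --kld-schedule "[(0, 1.0), (10000, 0.1)]" (the format documented in the CLI
    # help above) is eval'd into (step, value) pairs, each wrapped in utils.Coordinate,
    # and utils.PiecewiseScheduler interpolates between the data points.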
save_dir = pathlib.Path(args.save_dir)
train_args = train.TrainArguments(
model=model,
train_data=tuple(train_data),
valid_data=tuple(valid_data),
processor=processor,
device=device,
save_dir=save_dir,
report_every=args.report_every,
batch_size=args.batch_size,
valid_batch_size=args.valid_batch_size,
optimizer=args.optimizer,
gradient_clip=args.gradient_clip,
l2norm_weight=args.l2norm_weight,
learning_rate=args.learning_rate,
num_epochs=args.epochs,
kld_schedule=(utils.ConstantScheduler(1.0)
if args.kld_schedule is None else
create_scheduler(args.kld_schedule)),
dropout_schedule=(utils.ConstantScheduler(1.0)
if args.dropout_schedule is None else
create_scheduler(args.dropout_schedule)),
validate_every=args.validate_every,
early_stop=args.early_stop,
early_stop_criterion=args.early_stop_criterion,
early_stop_patience=args.early_stop_patience,
disable_kl=args.disable_kl,
kl_mode=args.kl_mode
)
utils.save_json(train_args.to_json(), save_dir.joinpath("train-args.json"))
record = train.train(train_args)
utils.save_json(record.to_json(), save_dir.joinpath("final-summary.json"))
eval_dir = save_dir.joinpath("eval")
shell.mkdir(eval_dir, silent=True)
eval_data = dict(list(filter(None, [
("train", train_data) if "train" in args.eval_splits else None,
("dev", valid_data) if "dev" in args.eval_splits else None,
("test", test_data) if "test" in args.eval_splits else None
])))
for split, data in eval_data.items():
eval_args = evaluate.EvaluateArugments(
model=model,
train_data=tuple(train_data),
test_data=tuple(data),
processor=processor,
embed_type=args.embed_type,
embed_path=args.embed_path,
device=device,
batch_size=args.valid_batch_size,
beam_size=args.beam_size,
max_conv_len=args.max_conv_len,
max_sent_len=args.max_sent_len
)
utils.save_json(eval_args.to_json(),
eval_dir.joinpath(f"eval-{split}-args.json"))
with torch.no_grad():
eval_results = evaluate.evaluate(eval_args)
save_path = eval_dir.joinpath(f"eval-{split}.json")
utils.save_json(eval_results, save_path)
logger.info(f"'{split}' results saved to {save_path}")
logger.info("done!")
if __name__ == "__main__":
main()
|
[
"evaluate.evaluate"
] |
[((827, 838), 'yaap.Yaap', 'yaap.Yaap', ([], {}), '()\n', (836, 838), False, 'import yaap\n'), ((5537, 5564), 'pathlib.Path', 'pathlib.Path', (['args.save_dir'], {}), '(args.save_dir)\n', (5549, 5564), False, 'import pathlib\n'), ((5762, 5780), 'utils.ShellUtils', 'utils.ShellUtils', ([], {}), '()\n', (5778, 5780), False, 'import utils\n'), ((5833, 5859), 'logging.getLogger', 'logging.getLogger', (['"""train"""'], {}), "('train')\n", (5850, 5859), False, 'import logging\n'), ((5864, 5885), 'utils.seed', 'utils.seed', (['args.seed'], {}), '(args.seed)\n', (5874, 5885), False, 'import utils\n'), ((6057, 6084), 'pathlib.Path', 'pathlib.Path', (['args.data_dir'], {}), '(args.data_dir)\n', (6069, 6084), False, 'import pathlib\n'), ((6905, 6942), 'torchmodels.register_packages', 'torchmodels.register_packages', (['models'], {}), '(models)\n', (6934, 6942), False, 'import torchmodels\n'), ((6959, 7012), 'torchmodels.create_model_cls', 'torchmodels.create_model_cls', (['models', 'args.model_path'], {}), '(models, args.model_path)\n', (6987, 7012), False, 'import torchmodels\n'), ((7106, 7139), 'utils.report_model', 'utils.report_model', (['logger', 'model'], {}), '(logger, model)\n', (7124, 7139), False, 'import utils\n'), ((7153, 7172), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7165, 7172), False, 'import torch\n'), ((7408, 7435), 'pathlib.Path', 'pathlib.Path', (['args.save_dir'], {}), '(args.save_dir)\n', (7420, 7435), False, 'import pathlib\n'), ((8675, 8698), 'train.train', 'train.train', (['train_args'], {}), '(train_args)\n', (8686, 8698), False, 'import train\n'), ((6814, 6846), 'utils.load_yaml', 'utils.load_yaml', (['args.model_path'], {}), '(args.model_path)\n', (6829, 6846), False, 'import utils\n'), ((7219, 7251), 'torch.device', 'torch.device', (['f"""cuda:{args.gpu}"""'], {}), "(f'cuda:{args.gpu}')\n", (7231, 7251), False, 'import torch\n'), ((9898, 9938), 'utils.save_json', 'utils.save_json', (['eval_results', 'save_path'], {}), '(eval_results, save_path)\n', (9913, 9938), False, 'import utils\n'), ((5484, 5520), 'utils.load_yaml', 'utils.load_yaml', (['args.logging_config'], {}), '(args.logging_config)\n', (5499, 5520), False, 'import utils\n'), ((6335, 6429), 'datasets.SentProcessor', 'datasets.SentProcessor', ([], {'bos': '(True)', 'eos': '(True)', 'lowercase': '(True)', 'tokenizer': '"""space"""', 'max_len': '(30)'}), "(bos=True, eos=True, lowercase=True, tokenizer=\n 'space', max_len=30)\n", (6357, 6429), False, 'import datasets\n'), ((6634, 6684), 'itertools.chain', 'itertools.chain', (['train_data', 'valid_data', 'test_data'], {}), '(train_data, valid_data, test_data)\n', (6649, 6684), False, 'import itertools\n'), ((9757, 9772), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9770, 9772), False, 'import torch\n'), ((9801, 9829), 'evaluate.evaluate', 'evaluate.evaluate', (['eval_args'], {}), '(eval_args)\n', (9818, 9829), False, 'import evaluate\n'), ((7352, 7372), 'utils.Coordinate', 'utils.Coordinate', (['*t'], {}), '(*t)\n', (7368, 7372), False, 'import utils\n'), ((7989, 8017), 'utils.ConstantScheduler', 'utils.ConstantScheduler', (['(1.0)'], {}), '(1.0)\n', (8012, 8017), False, 'import utils\n'), ((8160, 8188), 'utils.ConstantScheduler', 'utils.ConstantScheduler', (['(1.0)'], {}), '(1.0)\n', (8183, 8188), False, 'import utils\n'), ((947, 969), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (959, 969), False, 'import pathlib\n'), ((1442, 1464), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1454, 
1464), False, 'import pathlib\n'), ((1820, 1842), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1832, 1842), False, 'import pathlib\n'), ((5037, 5059), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (5049, 5059), False, 'import pathlib\n')]
|
"""Train the model"""
import argparse
import logging
import os
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
import models.nets as nets
import models.vgg as vgg
import models.data_loaders as data_loaders
from evaluate import evaluate
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mnist',
help="Choose dataset (cifar10 or mnist)")
parser.add_argument('--model', default='softmax-reg',
help="Choose model (lenet5, vgg[11, 13, 16, 19], mlp, or softmax-reg)")
parser.add_argument('--model_dir', default='experiments/mnist_softmax_reg',
help="Directory containing params.json")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training") # 'best' or 'train'
def train(model, optimizer, loss_fn, dataloader, metrics, params):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
"""
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
# with tqdm(total=len(dataloader), ncols=80, disable=True) as t:
with tqdm(disable=False) as t:
for i, (train_batch, labels_batch) in enumerate(dataloader):
# move to GPU if available
if params.cuda:
train_batch, labels_batch = train_batch.cuda(
non_blocking=True), labels_batch.cuda(non_blocking=True)
# convert to torch Variables
train_batch, labels_batch = Variable(
train_batch), Variable(labels_batch)
# compute model output and loss
output_batch = model(train_batch)
loss = loss_fn(output_batch, labels_batch)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
# compute all metrics on this batch
summary_batch = {metric: metrics[metric](output_batch, labels_batch)
for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
t.update()
# compute mean of all metrics in summary
metrics_mean = {metric: np.mean([x[metric]
for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v)
for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer, scheduler, loss_fn, metrics, params,
model_dir, restore_file=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
val_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches validation data
        optimizer: (torch.optim) optimizer for parameters of model
        scheduler: (torch.optim.lr_scheduler) optional learning-rate scheduler, stepped once per epoch
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
"""
# reload weights from restore_file if specified
if restore_file is not None:
restore_path = os.path.join(
model_dir, restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
best_val_acc = 0.0
for epoch in range(params.num_epochs):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
train(model, optimizer, loss_fn, train_dataloader, metrics, params)
# Evaluate for one epoch on validation set
val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)
# update learning rate scheduler
if scheduler is not None:
scheduler.step()
val_acc = val_metrics['accuracy']
is_best = val_acc >= best_val_acc
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best accuracy")
best_val_acc = val_acc
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(
model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(
model_dir, "metrics_val_last_weights.json")
utils.save_dict_to_json(val_metrics, last_json_path)
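# Checkpoint behaviour above: utils.save_checkpoint always writes the latest weights
# (is_best additionally flags a best-so-far copy), metrics_val_best_weights.json is
# refreshed only on a new best accuracy, and metrics_val_last_weights.json every epoch.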
def main():
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(
json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
# Set the random seed for reproducible experiments
torch.manual_seed(138)
if params.cuda:
torch.cuda.manual_seed(138)
# Set the logger
utils.set_logger(os.path.join(args.model_dir, 'train.log'))
# Create the input data pipeline
logging.info("Loading the datasets...")
# fetch dataloaders
if args.dataset == 'cifar10':
dataloaders = data_loaders.cifar10_dataloader(params.batch_size)
else:
dataloaders = data_loaders.mnist_dataloader(params.batch_size)
train_dl = dataloaders['train']
    val_dl = dataloaders['test']  # NB: the test split doubles as the validation set here
logging.info("- done.")
# Define model
if args.model == 'lenet5':
model = nets.LeNet5(params).cuda() if params.cuda else nets.LeNet5(params)
    elif args.model[:3] == 'vgg':
        model = vgg.VGG(args.model, params).cuda() if params.cuda else vgg.VGG(args.model, params)
elif args.model == 'mlp':
model = nets.MLP(params).cuda() if params.cuda else nets.MLP(params)
else:
model = nets.SoftmaxReg(params).cuda() if params.cuda else nets.SoftmaxReg(params)
# Define optimizer
if params.optim == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=params.lr, momentum=params.momentum,
weight_decay=(params.wd if params.dict.get('wd') is not None else 0.0))
else:
optimizer = optim.Adam(model.parameters(), lr=params.lr,
weight_decay=(params.wd if params.dict.get('wd') is not None else 0.0))
if params.dict.get('lr_adjust') is not None:
scheduler = lr_scheduler.StepLR(optimizer, step_size=params.lr_adjust, gamma=0.1)
else:
scheduler = None
# fetch loss function and metrics
loss_fn = nn.NLLLoss()
metrics = nets.metrics
# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, train_dl, val_dl, optimizer, scheduler, loss_fn, metrics, params,
args.model_dir, args.restore_file)
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate"
] |
[((402, 427), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (425, 427), False, 'import argparse\n'), ((1889, 1911), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1909, 1911), False, 'import utils\n'), ((3892, 3942), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (3904, 3942), False, 'import logging\n'), ((6933, 6976), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (6945, 6976), False, 'import os\n'), ((6988, 7013), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (7002, 7013), False, 'import os\n'), ((7096, 7119), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (7108, 7119), False, 'import utils\n'), ((7166, 7191), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7189, 7191), False, 'import torch\n'), ((7252, 7274), 'torch.manual_seed', 'torch.manual_seed', (['(138)'], {}), '(138)\n', (7269, 7274), False, 'import torch\n'), ((7459, 7498), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (7471, 7498), False, 'import logging\n'), ((7802, 7825), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (7814, 7825), False, 'import logging\n'), ((8970, 8982), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (8980, 8982), True, 'import torch.nn as nn\n'), ((1991, 2010), 'tqdm.tqdm', 'tqdm', ([], {'disable': '(False)'}), '(disable=False)\n', (1995, 2010), False, 'from tqdm import tqdm\n'), ((3667, 3701), 'numpy.mean', 'np.mean', (['[x[metric] for x in summ]'], {}), '([x[metric] for x in summ])\n', (3674, 3701), True, 'import numpy as np\n'), ((5050, 5100), 'os.path.join', 'os.path.join', (['model_dir', "(restore_file + '.pth.tar')"], {}), "(model_dir, restore_file + '.pth.tar')\n", (5062, 5100), False, 'import os\n'), ((5196, 5249), 'utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (5217, 5249), False, 'import utils\n'), ((5653, 5710), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'val_dataloader', 'metrics', 'params'], {}), '(model, loss_fn, val_dataloader, metrics, params)\n', (5661, 5710), False, 'from evaluate import evaluate\n'), ((6700, 6756), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_last_weights.json"""'], {}), "(model_dir, 'metrics_val_last_weights.json')\n", (6712, 6756), False, 'import os\n'), ((6778, 6830), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'last_json_path'], {}), '(val_metrics, last_json_path)\n', (6801, 6830), False, 'import utils\n'), ((7303, 7330), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(138)'], {}), '(138)\n', (7325, 7330), False, 'import torch\n'), ((7374, 7415), 'os.path.join', 'os.path.join', (['args.model_dir', '"""train.log"""'], {}), "(args.model_dir, 'train.log')\n", (7386, 7415), False, 'import os\n'), ((7580, 7630), 'models.data_loaders.cifar10_dataloader', 'data_loaders.cifar10_dataloader', (['params.batch_size'], {}), '(params.batch_size)\n', (7611, 7630), True, 'import models.data_loaders as data_loaders\n'), ((7663, 7711), 'models.data_loaders.mnist_dataloader', 'data_loaders.mnist_dataloader', (['params.batch_size'], {}), '(params.batch_size)\n', (7692, 7711), True, 'import models.data_loaders as data_loaders\n'), ((8809, 8878), 'torch.optim.lr_scheduler.StepLR', 
'lr_scheduler.StepLR', (['optimizer'], {'step_size': 'params.lr_adjust', 'gamma': '(0.1)'}), '(optimizer, step_size=params.lr_adjust, gamma=0.1)\n', (8828, 8878), True, 'import torch.optim.lr_scheduler as lr_scheduler\n'), ((6282, 6323), 'logging.info', 'logging.info', (['"""- Found new best accuracy"""'], {}), "('- Found new best accuracy')\n", (6294, 6323), False, 'import logging\n'), ((6463, 6519), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_best_weights.json"""'], {}), "(model_dir, 'metrics_val_best_weights.json')\n", (6475, 6519), False, 'import os\n'), ((6549, 6601), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'best_json_path'], {}), '(val_metrics, best_json_path)\n', (6572, 6601), False, 'import utils\n'), ((7940, 7959), 'models.nets.LeNet5', 'nets.LeNet5', (['params'], {}), '(params)\n', (7951, 7959), True, 'import models.nets as nets\n'), ((2373, 2394), 'torch.autograd.Variable', 'Variable', (['train_batch'], {}), '(train_batch)\n', (2381, 2394), False, 'from torch.autograd import Variable\n'), ((2413, 2435), 'torch.autograd.Variable', 'Variable', (['labels_batch'], {}), '(labels_batch)\n', (2421, 2435), False, 'from torch.autograd import Variable\n'), ((7893, 7912), 'models.nets.LeNet5', 'nets.LeNet5', (['params'], {}), '(params)\n', (7904, 7912), True, 'import models.nets as nets\n'), ((8184, 8200), 'models.nets.MLP', 'nets.MLP', (['params'], {}), '(params)\n', (8192, 8200), True, 'import models.nets as nets\n'), ((8278, 8301), 'models.nets.SoftmaxReg', 'nets.SoftmaxReg', (['params'], {}), '(params)\n', (8293, 8301), True, 'import models.nets as nets\n'), ((8010, 8037), 'models.vgg.VGG', 'vgg.VGG', (['args.model', 'params'], {}), '(args.model, params)\n', (8017, 8037), True, 'import models.vgg as vgg\n'), ((8140, 8156), 'models.nets.MLP', 'nets.MLP', (['params'], {}), '(params)\n', (8148, 8156), True, 'import models.nets as nets\n'), ((8227, 8250), 'models.nets.SoftmaxReg', 'nets.SoftmaxReg', (['params'], {}), '(params)\n', (8242, 8250), True, 'import models.nets as nets\n')]
|
import train
import evaluate
if __name__ == "__main__":
#trainer = train.TumorDetectionNet()
#trainer.train(path_to_dataset="archive/", model_filename="Tumor_classifier_model.h5")
evaluator = evaluate.Evaluator()
evaluator.evaluate(model_path="Tumor_classifier_model_v2.h5", image_path="archive/validation_data/323.jpg")
|
[
"evaluate.Evaluator"
] |
[((206, 226), 'evaluate.Evaluator', 'evaluate.Evaluator', ([], {}), '()\n', (224, 226), False, 'import evaluate\n')]
|
from evaluate import evaluate
__author__ = "<NAME>"
__email__ = "<EMAIL>"
data = '[{"state": {"cities": ["Mumbai", "Pune", "Nagpur", "Bhusaval", "Jalgaon"], "name": "Maharashtra"}}, {"state": {"cities": ["Bangalore", "Hubli"], "name": "Karnataka"}}, {"state": {"states": ["Raipur", "Durg"], "name": "Chhattisgarh"}}]'
a = evaluate(data)
|
[
"evaluate.evaluate"
] |
[((324, 338), 'evaluate.evaluate', 'evaluate', (['data'], {}), '(data)\n', (332, 338), False, 'from evaluate import evaluate\n')]
|
from django.contrib.auth import login
from evaluate.models import Result
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from evaluate.forms import TestCreateForm
def findEXT(a):
    # Return the 3- or 4-character file extension, or -1 if none is found.
    if len(a) >= 4 and a[-4] == '.':
        return a[-3:]
    if len(a) >= 5 and a[-5] == '.':
        return a[-4:]
    return -1
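# e.g. findEXT('scores.csv') -> 'csv', findEXT('scores.xlsx') -> 'xlsx',
# findEXT('README') -> -1 (no recognisable extension).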
@login_required
def createTest(request):
if request.method == 'POST':
form = TestCreateForm(request.POST, request.FILES)
if form.is_valid():
doc = form.save(commit=False)
doc.user = request.user
makEXT = findEXT(doc.model_answer_key.name)
rsEXT = findEXT(doc.response_sheet.name)
if makEXT not in ['xls', 'xlsx', 'csv'] or rsEXT not in ['xls', 'xlsx', 'csv']:
print(makEXT, rsEXT, "hihi")
return render(request, 'evaluate/createTest.html', {'form' : form, 'title' : 'Evaluate'})
doc.save()
res = Result.objects.get(test = doc)
return redirect('testSummary', res.pk)
else:
form = TestCreateForm()
context = {
'form' : form,
'title' : 'Evaluate'
}
return render(request, 'evaluate/createTest.html', context)
@login_required
def testSummary(request, pk):
res = Result.objects.get(pk = pk)
if res.test.user != request.user:
return render(request, 'layouts/accessDenied.html')
data = []
for i in range(len(res.names)):
data.append([res.names[i], res.scores[i], res.emails[i]])
context = {
'result' : data,
'title' : 'Test Summary',
'name' : res.test.test_name,
'maxScore' : res.high_score,
'noStudents' : res.total_students,
'classAvg' : res.mean_percentage
}
return render(request, 'evaluate/testSummary.html', context)
|
[
"evaluate.models.Result.objects.get",
"evaluate.forms.TestCreateForm"
] |
[((1178, 1230), 'django.shortcuts.render', 'render', (['request', '"""evaluate/createTest.html"""', 'context'], {}), "(request, 'evaluate/createTest.html', context)\n", (1184, 1230), False, 'from django.shortcuts import render, redirect\n'), ((1289, 1314), 'evaluate.models.Result.objects.get', 'Result.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (1307, 1314), False, 'from evaluate.models import Result\n'), ((1781, 1834), 'django.shortcuts.render', 'render', (['request', '"""evaluate/testSummary.html"""', 'context'], {}), "(request, 'evaluate/testSummary.html', context)\n", (1787, 1834), False, 'from django.shortcuts import render, redirect\n'), ((426, 469), 'evaluate.forms.TestCreateForm', 'TestCreateForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (440, 469), False, 'from evaluate.forms import TestCreateForm\n'), ((1076, 1092), 'evaluate.forms.TestCreateForm', 'TestCreateForm', ([], {}), '()\n', (1090, 1092), False, 'from evaluate.forms import TestCreateForm\n'), ((1370, 1414), 'django.shortcuts.render', 'render', (['request', '"""layouts/accessDenied.html"""'], {}), "(request, 'layouts/accessDenied.html')\n", (1376, 1414), False, 'from django.shortcuts import render, redirect\n'), ((969, 997), 'evaluate.models.Result.objects.get', 'Result.objects.get', ([], {'test': 'doc'}), '(test=doc)\n', (987, 997), False, 'from evaluate.models import Result\n'), ((1019, 1050), 'django.shortcuts.redirect', 'redirect', (['"""testSummary"""', 'res.pk'], {}), "('testSummary', res.pk)\n", (1027, 1050), False, 'from django.shortcuts import render, redirect\n'), ((845, 930), 'django.shortcuts.render', 'render', (['request', '"""evaluate/createTest.html"""', "{'form': form, 'title': 'Evaluate'}"], {}), "(request, 'evaluate/createTest.html', {'form': form, 'title': 'Evaluate'}\n )\n", (851, 930), False, 'from django.shortcuts import render, redirect\n')]
|
import os
import cv2
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.nn import CTCLoss
from dataset import Synth90kDataset, synth90k_collate_fn
from model import CRNN
from evaluate import evaluate
from config import train_config as config
def train_batch(crnn, data, optimizer, criterion, device):
crnn.train()
images, targets, target_lengths = [d.to(device) for d in data]
logits = crnn(images)
log_probs = torch.nn.functional.log_softmax(logits, dim=2)
batch_size = images.size(0)
input_lengths = torch.LongTensor([logits.size(0)] * batch_size)
target_lengths = torch.flatten(target_lengths)
loss = criterion(log_probs, targets, input_lengths, target_lengths)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
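# Shape sketch for the CTC pipeline above (standard CRNN layout assumed):
#   logits:         (seq_len, batch, num_class) -> log_softmax over dim=2
#   input_lengths:  each sample contributes the full seq_len = logits.size(0)
#   target_lengths: flattened per-sample label lengths
# CTCLoss(reduction='sum') then returns the summed negative log-likelihood.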
def main():
epochs = config['epochs']
train_batch_size = config['train_batch_size']
eval_batch_size = config['eval_batch_size']
lr = config['lr']
show_interval = config['show_interval']
valid_interval = config['valid_interval']
save_interval = config['save_interval']
cpu_workers = config['cpu_workers']
reload_checkpoint = config['reload_checkpoint']
valid_max_iter = config['valid_max_iter']
img_width = config['img_width']
img_height = config['img_height']
data_dir = config['data_dir']
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'device: {device}')
train_dataset = Synth90kDataset(root_dir=data_dir, mode='train',
img_height=img_height, img_width=img_width)
valid_dataset = Synth90kDataset(root_dir=data_dir, mode='dev',
img_height=img_height, img_width=img_width)
train_loader = DataLoader(
dataset=train_dataset,
batch_size=train_batch_size,
shuffle=True,
num_workers=cpu_workers,
collate_fn=synth90k_collate_fn)
valid_loader = DataLoader(
dataset=valid_dataset,
batch_size=eval_batch_size,
shuffle=True,
num_workers=cpu_workers,
collate_fn=synth90k_collate_fn)
num_class = len(Synth90kDataset.LABEL2CHAR) + 1
crnn = CRNN(1, img_height, img_width, num_class,
map_to_seq_hidden=config['map_to_seq_hidden'],
rnn_hidden=config['rnn_hidden'],
leaky_relu=config['leaky_relu'])
if reload_checkpoint:
crnn.load_state_dict(torch.load(reload_checkpoint, map_location=device))
crnn.to(device)
optimizer = optim.RMSprop(crnn.parameters(), lr=lr)
criterion = CTCLoss(reduction='sum')
criterion.to(device)
    # save only on validation steps so evaluation['loss'] below is always fresh
    assert save_interval % valid_interval == 0
i = 1
for epoch in range(1, epochs + 1):
print(f'epoch: {epoch}')
tot_train_loss = 0.
tot_train_count = 0
for train_data in train_loader:
loss = train_batch(crnn, train_data, optimizer, criterion, device)
train_size = train_data[0].size(0)
tot_train_loss += loss
tot_train_count += train_size
if i % show_interval == 0:
print('train_batch_loss[', i, ']: ', loss / train_size)
if i % valid_interval == 0:
evaluation = evaluate(crnn, valid_loader, criterion,
decode_method=config['decode_method'],
beam_size=config['beam_size'])
print('valid_evaluation: loss={loss}, acc={acc}'.format(**evaluation))
if i % save_interval == 0:
prefix = 'crnn'
loss = evaluation['loss']
save_model_path = os.path.join(config['checkpoints_dir'],
f'{prefix}_{i:06}_loss{loss}.pt')
torch.save(crnn.state_dict(), save_model_path)
print('save model at ', save_model_path)
i += 1
print('train_loss: ', tot_train_loss / tot_train_count)
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate"
] |
[((473, 519), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['logits'], {'dim': '(2)'}), '(logits, dim=2)\n', (504, 519), False, 'import torch\n'), ((642, 671), 'torch.flatten', 'torch.flatten', (['target_lengths'], {}), '(target_lengths)\n', (655, 671), False, 'import torch\n'), ((1508, 1604), 'dataset.Synth90kDataset', 'Synth90kDataset', ([], {'root_dir': 'data_dir', 'mode': '"""train"""', 'img_height': 'img_height', 'img_width': 'img_width'}), "(root_dir=data_dir, mode='train', img_height=img_height,\n img_width=img_width)\n", (1523, 1604), False, 'from dataset import Synth90kDataset, synth90k_collate_fn\n'), ((1657, 1751), 'dataset.Synth90kDataset', 'Synth90kDataset', ([], {'root_dir': 'data_dir', 'mode': '"""dev"""', 'img_height': 'img_height', 'img_width': 'img_width'}), "(root_dir=data_dir, mode='dev', img_height=img_height,\n img_width=img_width)\n", (1672, 1751), False, 'from dataset import Synth90kDataset, synth90k_collate_fn\n'), ((1804, 1941), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'train_batch_size', 'shuffle': '(True)', 'num_workers': 'cpu_workers', 'collate_fn': 'synth90k_collate_fn'}), '(dataset=train_dataset, batch_size=train_batch_size, shuffle=True,\n num_workers=cpu_workers, collate_fn=synth90k_collate_fn)\n', (1814, 1941), False, 'from torch.utils.data import DataLoader\n'), ((1998, 2134), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'valid_dataset', 'batch_size': 'eval_batch_size', 'shuffle': '(True)', 'num_workers': 'cpu_workers', 'collate_fn': 'synth90k_collate_fn'}), '(dataset=valid_dataset, batch_size=eval_batch_size, shuffle=True,\n num_workers=cpu_workers, collate_fn=synth90k_collate_fn)\n', (2008, 2134), False, 'from torch.utils.data import DataLoader\n'), ((2236, 2400), 'model.CRNN', 'CRNN', (['(1)', 'img_height', 'img_width', 'num_class'], {'map_to_seq_hidden': "config['map_to_seq_hidden']", 'rnn_hidden': "config['rnn_hidden']", 'leaky_relu': "config['leaky_relu']"}), "(1, img_height, img_width, num_class, map_to_seq_hidden=config[\n 'map_to_seq_hidden'], rnn_hidden=config['rnn_hidden'], leaky_relu=\n config['leaky_relu'])\n", (2240, 2400), False, 'from model import CRNN\n'), ((2639, 2663), 'torch.nn.CTCLoss', 'CTCLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (2646, 2663), False, 'from torch.nn import CTCLoss\n'), ((1418, 1443), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1441, 1443), False, 'import torch\n'), ((2494, 2544), 'torch.load', 'torch.load', (['reload_checkpoint'], {'map_location': 'device'}), '(reload_checkpoint, map_location=device)\n', (2504, 2544), False, 'import torch\n'), ((3300, 3414), 'evaluate.evaluate', 'evaluate', (['crnn', 'valid_loader', 'criterion'], {'decode_method': "config['decode_method']", 'beam_size': "config['beam_size']"}), "(crnn, valid_loader, criterion, decode_method=config[\n 'decode_method'], beam_size=config['beam_size'])\n", (3308, 3414), False, 'from evaluate import evaluate\n'), ((3737, 3810), 'os.path.join', 'os.path.join', (["config['checkpoints_dir']", 'f"""{prefix}_{i:06}_loss{loss}.pt"""'], {}), "(config['checkpoints_dir'], f'{prefix}_{i:06}_loss{loss}.pt')\n", (3749, 3810), False, 'import os\n')]
|
from tqdm.auto import tqdm
from crf import CRF
import sys
sys.path.append("../../../")
from utils import *
from evaluate import evaluation
import os
import pickle
import pandas as pd
import collections
import csv
import glob
from enum import Enum
from dataclasses import dataclass
from functools import lru_cache
import xml.etree.ElementTree as ET
import typing as t
from sklearn.utils.class_weight import compute_class_weight
PAD = "__PAD__"
UNK = "__UNK__"
DIM_EMBEDDING = 200
LSTM_LAYER = 3
LSTM_HIDDEN = 200
CHAR_DIM_EMBEDDING = 25
CHAR_LSTM_HIDDEN = 25
BATCH_SIZE = 16
EPOCHS = 50
KEEP_PROB = 0.5
import torch
torch.manual_seed(0)
#device = torch.device("cpu")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TaggerModel(torch.nn.Module):
def __init__(
self,
nwords,
nchars,
ntags,
pretrained_list,
run_name,
exp_name,
list_of_possible_tags,
use_char=True,
use_crf=False,
class_weights=[],
learning_rate=0.015,
learning_decay_rate=0.05,
weight_decay=1e-8,
):
super().__init__()
self.run_name = run_name
self.exp_name = exp_name
self.class_weights = torch.Tensor(class_weights)
# Create word embeddings
pretrained_tensor = torch.FloatTensor(pretrained_list)
self.word_embedding = torch.nn.Embedding.from_pretrained(
pretrained_tensor, freeze=False
)
self.list_of_possible_tags = list_of_possible_tags
# Create input dropout parameter
# self.word_dropout = torch.nn.Dropout(1 - KEEP_PROB)
char_lstm_hidden = 0
self.use_char = use_char
if self.use_char:
# Character-level LSTMs
self.char_embedding = torch.nn.Embedding(nchars, CHAR_DIM_EMBEDDING)
self.char_lstm = torch.nn.LSTM(
CHAR_DIM_EMBEDDING,
CHAR_LSTM_HIDDEN,
num_layers=1,
batch_first=True,
bidirectional=True,
)
char_lstm_hidden = CHAR_LSTM_HIDDEN
# Create LSTM parameters
self.lstm = torch.nn.LSTM(
DIM_EMBEDDING + char_lstm_hidden,
LSTM_HIDDEN,
num_layers=LSTM_LAYER,
batch_first=True,
bidirectional=True,
)
# Create output dropout parameter
self.lstm_output_dropout = torch.nn.Dropout(1 - KEEP_PROB)
# Create final matrix multiply parameters
self.hidden_to_tag = torch.nn.Linear(LSTM_HIDDEN * 2, ntags)
self.ntags = ntags
self.use_crf = use_crf
if self.use_crf:
self.crf = CRF(target_size=ntags)
def forward(self, sentences, mask, sent_tokens, labels, lengths, cur_batch_size):
"""
sent_Tokens is a list of list of lists, where the essential unit is a
token, and it indices each character in the token. The max token length is
the extra dimension in sent_Tokens.
sentences is the sentence embedding.
"""
max_length = sentences.size(1)
# Look up word vectors
word_vectors = self.word_embedding(sentences)
# Apply dropout
# dropped_word_vectors = self.word_dropout(word_vectors)
if self.use_char:
sent_tokens = sent_tokens.view(cur_batch_size * max_length, -1)
token_vectors = self.char_embedding(sent_tokens)
char_lstm_out, (hn, cn) = self.char_lstm(token_vectors, None)
char_lstm_out = hn[-1].view(cur_batch_size, max_length, CHAR_LSTM_HIDDEN)
concat_vectors = torch.cat((word_vectors, char_lstm_out), dim=2)
else:
concat_vectors = word_vectors
# Run the LSTM over the input, reshaping data for efficiency
packed_words = torch.nn.utils.rnn.pack_padded_sequence(
concat_vectors, lengths, True
)
lstm_out, _ = self.lstm(packed_words, None)
lstm_out, _ = torch.nn.utils.rnn.pad_packed_sequence(
lstm_out, batch_first=True, total_length=max_length
)
# Apply dropout
lstm_out_dropped = self.lstm_output_dropout(lstm_out)
# Matrix multiply to get scores for each tag
output_scores = self.hidden_to_tag(lstm_out_dropped)
if self.use_crf:
loss = self.crf.neg_log_likelihood_loss(output_scores, mask.bool(), labels)
predicted_tags = self.crf(output_scores, mask.bool())
else:
output_scores = output_scores.view(cur_batch_size * max_length, -1)
flat_labels = labels.view(cur_batch_size * max_length)
loss_function = torch.nn.CrossEntropyLoss(ignore_index=0, reduction="sum")
loss = loss_function(output_scores, flat_labels)
predicted_tags = torch.argmax(output_scores, 1)
predicted_tags = predicted_tags.view(cur_batch_size, max_length)
return loss, predicted_tags
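# --- Editor's hedged sketch (not part of the original model code) ---
# A minimal round-trip through pack_padded_sequence / pad_packed_sequence as
# used in TaggerModel.forward above; assumes batch-first inputs sorted by
# decreasing length, which the training loop guarantees via batch.sort().
def _packed_lstm_demo():
    lstm = torch.nn.LSTM(input_size=4, hidden_size=3, batch_first=True)
    inputs = torch.randn(2, 5, 4)  # (batch, max_len, features)
    lengths = [5, 3]  # true sequence lengths, longest first
    packed = torch.nn.utils.rnn.pack_padded_sequence(inputs, lengths, True)
    lstm_out, _ = lstm(packed)
    lstm_out, _ = torch.nn.utils.rnn.pad_packed_sequence(
        lstm_out, batch_first=True, total_length=5
    )
    return lstm_out.shape  # torch.Size([2, 5, 3]); rows past each length are zero-padded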
def write_preds_fn(
pred_list, gold_list, document_ids, name, exp_name, run_name, epoch, doc_tokens
):
# put predictions in a folder
pred_list_with_id = list(zip(pred_list, document_ids, gold_list, doc_tokens))
predictions_dir = os.path.join(exp_name, run_name, "preds", name)
print("Writing predictions into pkl files to %s" % predictions_dir)
os.makedirs(f"{predictions_dir}/{str(epoch)}/", exist_ok=True)
for pred, doc_id, gold, doc_ts in pred_list_with_id:
if "/" in str(doc_id):
doc_id = doc_id.split("/")[-1]
pickle.dump(
pred,
open(
os.path.join(predictions_dir, str(epoch), "%s_pred.pkl" % str(doc_id)),
"wb",
),
)
pickle.dump(
doc_ts,
open(
os.path.join(
predictions_dir, str(epoch), "%s_tokens.pkl" % str(doc_id)
),
"wb",
),
)
pickle.dump(
gold,
open(
os.path.join(predictions_dir, str(epoch), "%s_gold.pkl" % str(doc_id)),
"wb",
),
)
def flatten_into_document(doc_list):
    # Flatten the per-sentence lists into one document-level list, dropping the
    # intermediate SOS/EOS markers so only the outermost ones survive.
res_list = []
for i in range(len(doc_list)):
if i == 0:
sent = doc_list[i][:-1] # no EOS
elif i == len(doc_list) - 1:
sent = doc_list[i][1:] # no SOS
else:
sent = doc_list[i][1:-1] # both no SOS and EOS
res_list.extend(sent)
return res_list
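# Worked example (editor note): with markers "<s>"/"</s>",
# flatten_into_document([["<s>", "a", "</s>"], ["<s>", "b", "</s>"]])
# returns ["<s>", "a", "b", "</s>"] -- only the outermost markers are kept.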
def do_pass_chunk_into_sentences(
data,
token_to_id,
char_to_id,
tag_to_id,
id_to_tag,
expressions,
train,
write_preds,
epoch,
val_or_test="val",
):
"""
Approximating document-level prediction by doing sentence-level prediction
indepndently for each sentence in each document, then combining the sentence
level predictions for each document.
Data is a list of list of lists, where x[0] is document, x[1] is label
documetn in turn is a list of lists, where each list is a sentence.
Labels are similarly split into, for each document, a lsi tof lists.
"""
model, optimizer = expressions
# Loop over batches
loss = 0
gold_lists, pred_lists = [], []
for index in tqdm(range(0, len(data)), desc="batch"):
document = data[index]
sentences = document[0]
sentence_tags = document[1]
doc_ids = document[2]
doc_pred_list = []
doc_gold_list = []
sentence_ids = list(range(len(sentences)))
sentence_level_exs = list(zip(sentences, sentence_tags, sentence_ids))
sentence_lengths = [len(x) for x in sentences]
sentence_tags_lengths = [len(x) for x in sentence_tags]
assert len(sentence_lengths) == len(sentence_tags_lengths)
for i in range(len(sentence_lengths)):
assert sentence_lengths[i] == sentence_tags_lengths[i]
for start in range(0, len(sentence_level_exs), BATCH_SIZE):
batch = sentence_level_exs[start : start + BATCH_SIZE]
batch.sort(key=lambda x: -len(x[0]))
# Prepare inputs
cur_batch_size = len(batch)
max_length = len(batch[0][0])
lengths = [len(v[0]) for v in batch]
max_token_length = 0
for tokens, _, _ in batch:
for token in tokens:
                    max_token_length = max(max_token_length, len(token))
input_array = torch.zeros((cur_batch_size, max_length)).long()
mask_array = torch.zeros((cur_batch_size, max_length)).byte()
input_token_array = torch.zeros(
(cur_batch_size, max_length, max_token_length)
).long()
output_array = torch.zeros((cur_batch_size, max_length)).long()
for n, (tokens, tags, _) in enumerate(batch):
token_ids = [token_to_id.get(t.lower(), 1) for t in tokens]
input_array[n, : len(tokens)] = torch.LongTensor(token_ids)
for m, token in enumerate(tokens):
char_ids = [char_to_id.get(c, 1) for c in token]
input_token_array[n, m, : len(token)] = torch.LongTensor(char_ids)
tag_ids = [tag_to_id[t] for t in tags]
mask_ids = [1 for t in tokens]
try:
mask_array[n, : len(tokens)] = torch.LongTensor(mask_ids)
output_array[n, : len(tags)] = torch.LongTensor(tag_ids)
                except Exception:  # debug aid: inspect token/tag length mismatches
import pdb
pdb.set_trace()
model.to(device)
# Construct computation
batch_loss, output = model(
input_array.to(device),
mask_array.to(device),
input_token_array.to(device),
output_array.to(device),
lengths,
cur_batch_size,
)
# Run computations
if train:
batch_loss.backward()
optimizer.step()
model.zero_grad()
loss += batch_loss.item()
predicted = output.cpu().data.numpy()
out_dict = list(zip(batch, predicted))
# reorder sentences back to original order.
out_dict.sort(key=lambda x: x[0][2])
# Update the number of correct tags and total tags
for (_, g, _), a in out_dict:
gold_list, pred_list = [], []
for gt, at in zip(g, a):
at = id_to_tag[at]
gold_list.append(gt)
pred_list.append(at)
doc_gold_list.append(gold_list)
doc_pred_list.append(pred_list)
# flatten so each document is a list of document-level predictions
doc_gold_list = flatten_into_document(doc_gold_list)
doc_pred_list = flatten_into_document(doc_pred_list)
gold_lists.append(doc_gold_list)
pred_lists.append(doc_pred_list)
if write_preds:
document_tokens = [flatten_into_document(doc[0]) for doc in data]
write_preds_fn(
pred_lists,
gold_lists,
[example[2] for example in data],
val_or_test,
model.exp_name,
model.run_name,
epoch,
document_tokens,
)
if train or val_or_test == "val":
gold_lists = [(x, "mimic") for x in gold_lists]
pred_lists = [(x, "mimic") for x in pred_lists]
else:
gold_lists = [(gold_lists[i], data[i][3]) for i in range(len(gold_lists))]
pred_lists = [(pred_lists[i], data[i][3]) for i in range(len(gold_lists))]
return (
loss,
evaluation.get_evaluation(
pred_lists, gold_lists, model.list_of_possible_tags,# source="mimic"
),
)
|
[
"evaluate.evaluation.get_evaluation"
] |
[((60, 88), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (75, 88), False, 'import sys\n'), ((645, 665), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (662, 665), False, 'import torch\n'), ((5274, 5321), 'os.path.join', 'os.path.join', (['exp_name', 'run_name', '"""preds"""', 'name'], {}), "(exp_name, run_name, 'preds', name)\n", (5286, 5321), False, 'import os\n'), ((729, 754), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (752, 754), False, 'import torch\n'), ((1268, 1295), 'torch.Tensor', 'torch.Tensor', (['class_weights'], {}), '(class_weights)\n', (1280, 1295), False, 'import torch\n'), ((1357, 1391), 'torch.FloatTensor', 'torch.FloatTensor', (['pretrained_list'], {}), '(pretrained_list)\n', (1374, 1391), False, 'import torch\n'), ((1422, 1489), 'torch.nn.Embedding.from_pretrained', 'torch.nn.Embedding.from_pretrained', (['pretrained_tensor'], {'freeze': '(False)'}), '(pretrained_tensor, freeze=False)\n', (1456, 1489), False, 'import torch\n'), ((2209, 2335), 'torch.nn.LSTM', 'torch.nn.LSTM', (['(DIM_EMBEDDING + char_lstm_hidden)', 'LSTM_HIDDEN'], {'num_layers': 'LSTM_LAYER', 'batch_first': '(True)', 'bidirectional': '(True)'}), '(DIM_EMBEDDING + char_lstm_hidden, LSTM_HIDDEN, num_layers=\n LSTM_LAYER, batch_first=True, bidirectional=True)\n', (2222, 2335), False, 'import torch\n'), ((2479, 2510), 'torch.nn.Dropout', 'torch.nn.Dropout', (['(1 - KEEP_PROB)'], {}), '(1 - KEEP_PROB)\n', (2495, 2510), False, 'import torch\n'), ((2591, 2630), 'torch.nn.Linear', 'torch.nn.Linear', (['(LSTM_HIDDEN * 2)', 'ntags'], {}), '(LSTM_HIDDEN * 2, ntags)\n', (2606, 2630), False, 'import torch\n'), ((3884, 3954), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['concat_vectors', 'lengths', '(True)'], {}), '(concat_vectors, lengths, True)\n', (3923, 3954), False, 'import torch\n'), ((4051, 4146), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['lstm_out'], {'batch_first': '(True)', 'total_length': 'max_length'}), '(lstm_out, batch_first=True,\n total_length=max_length)\n', (4089, 4146), False, 'import torch\n'), ((11906, 11984), 'evaluate.evaluation.get_evaluation', 'evaluation.get_evaluation', (['pred_lists', 'gold_lists', 'model.list_of_possible_tags'], {}), '(pred_lists, gold_lists, model.list_of_possible_tags)\n', (11931, 11984), False, 'from evaluate import evaluation\n'), ((1832, 1878), 'torch.nn.Embedding', 'torch.nn.Embedding', (['nchars', 'CHAR_DIM_EMBEDDING'], {}), '(nchars, CHAR_DIM_EMBEDDING)\n', (1850, 1878), False, 'import torch\n'), ((1908, 2015), 'torch.nn.LSTM', 'torch.nn.LSTM', (['CHAR_DIM_EMBEDDING', 'CHAR_LSTM_HIDDEN'], {'num_layers': '(1)', 'batch_first': '(True)', 'bidirectional': '(True)'}), '(CHAR_DIM_EMBEDDING, CHAR_LSTM_HIDDEN, num_layers=1,\n batch_first=True, bidirectional=True)\n', (1921, 2015), False, 'import torch\n'), ((2737, 2759), 'crf.CRF', 'CRF', ([], {'target_size': 'ntags'}), '(target_size=ntags)\n', (2740, 2759), False, 'from crf import CRF\n'), ((3687, 3734), 'torch.cat', 'torch.cat', (['(word_vectors, char_lstm_out)'], {'dim': '(2)'}), '((word_vectors, char_lstm_out), dim=2)\n', (3696, 3734), False, 'import torch\n'), ((4733, 4791), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'ignore_index': '(0)', 'reduction': '"""sum"""'}), "(ignore_index=0, reduction='sum')\n", (4758, 4791), False, 'import torch\n'), ((4882, 4912), 'torch.argmax', 'torch.argmax', (['output_scores', '(1)'], {}), '(output_scores, 1)\n', (4894, 4912), False, 'import torch\n'), ((9151, 9178), 'torch.LongTensor', 'torch.LongTensor', (['token_ids'], {}), '(token_ids)\n', (9167, 9178), False, 'import torch\n'), ((8641, 8682), 'torch.zeros', 'torch.zeros', (['(cur_batch_size, max_length)'], {}), '((cur_batch_size, max_length))\n', (8652, 8682), False, 'import torch\n'), ((8715, 8756), 'torch.zeros', 'torch.zeros', (['(cur_batch_size, max_length)'], {}), '((cur_batch_size, max_length))\n', (8726, 8756), False, 'import torch\n'), ((8796, 8855), 'torch.zeros', 'torch.zeros', (['(cur_batch_size, max_length, max_token_length)'], {}), '((cur_batch_size, max_length, max_token_length))\n', (8807, 8855), False, 'import torch\n'), ((8920, 8961), 'torch.zeros', 'torch.zeros', (['(cur_batch_size, max_length)'], {}), '((cur_batch_size, max_length))\n', (8931, 8961), False, 'import torch\n'), ((9359, 9385), 'torch.LongTensor', 'torch.LongTensor', (['char_ids'], {}), '(char_ids)\n', (9375, 9385), False, 'import torch\n'), ((9560, 9586), 'torch.LongTensor', 'torch.LongTensor', (['mask_ids'], {}), '(mask_ids)\n', (9576, 9586), False, 'import torch\n'), ((9638, 9663), 'torch.LongTensor', 'torch.LongTensor', (['tag_ids'], {}), '(tag_ids)\n', (9654, 9663), False, 'import torch\n'), ((9072, 9081), 'typing.lower', 't.lower', ([], {}), '()\n', (9079, 9081), True, 'import typing as t\n'), ((9740, 9755), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (9753, 9755), False, 'import pdb\n')]
|
#!/usr/bin/env python
import sys
import os
import config
import evaluate
import msd_parse
from pyspark.mllib.recommendation import MatrixFactorizationModel, ALS
def main():
import configspark
sc = configspark.SPARK_CONTEXT
# user/song string ID to int ID mappings
full_text = sc.textFile(config.MSD_DATA)
full_raw = full_text.map(msd_parse.parse_line)
users, songs, _ = msd_parse.get_user_song_maps(full_raw)
print("\nLoading MovieLens test dataset\n")
test_parsed = (
sc.textFile(config.MSD_TEST)
.map(msd_parse.parse_line))
test_prepped = msd_parse.replace_raw_ids(test_parsed, users, songs)
test = test_prepped.map(msd_parse.rating_convert)
if os.path.exists(config.MSD_MODEL):
print("\n\nLoading existing recommendation model from %s\n\n"
% config.MSD_MODEL)
model = MatrixFactorizationModel.load(sc, config.MSD_MODEL)
else:
raise RuntimeError("Failed to load ALS model from %s"
% config.MSD_MODEL)
mse, rmse = evaluate.evaluate_model(model, test)
print("\nMSD ALS model performance: MSE=%0.3f RMSE=%0.3f\n" % (mse, rmse))
if __name__ == "__main__":
main()
|
[
"evaluate.evaluate_model"
] |
[((399, 437), 'msd_parse.get_user_song_maps', 'msd_parse.get_user_song_maps', (['full_raw'], {}), '(full_raw)\n', (427, 437), False, 'import msd_parse\n'), ((599, 651), 'msd_parse.replace_raw_ids', 'msd_parse.replace_raw_ids', (['test_parsed', 'users', 'songs'], {}), '(test_parsed, users, songs)\n', (624, 651), False, 'import msd_parse\n'), ((714, 746), 'os.path.exists', 'os.path.exists', (['config.MSD_MODEL'], {}), '(config.MSD_MODEL)\n', (728, 746), False, 'import os\n'), ((1056, 1092), 'evaluate.evaluate_model', 'evaluate.evaluate_model', (['model', 'test'], {}), '(model, test)\n', (1079, 1092), False, 'import evaluate\n'), ((868, 919), 'pyspark.mllib.recommendation.MatrixFactorizationModel.load', 'MatrixFactorizationModel.load', (['sc', 'config.MSD_MODEL'], {}), '(sc, config.MSD_MODEL)\n', (897, 919), False, 'from pyspark.mllib.recommendation import MatrixFactorizationModel, ALS\n')]
|
import argparse
import itertools
import os.path
import os
import time
import logging
import datetime
import torch
import torch.optim.lr_scheduler
import numpy as np
import evaluate
import trees
import vocabulary
import nkutil
from tqdm import tqdm
import SAPar_model
import random
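# SAPar_model doubles as the token-constants module (START, STOP, UNK, ...).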
tokens = SAPar_model
from attutil import FindNgrams
def torch_load(load_path):
if SAPar_model.use_cuda:
return torch.load(load_path)
else:
return torch.load(load_path, map_location=lambda storage, location: storage)
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = "{}h{:02}m{:02}s".format(hours, minutes, seconds)
if days > 0:
elapsed_string = "{}d{}".format(days, elapsed_string)
return elapsed_string
def make_hparams():
return nkutil.HParams(
max_len_train=0, # no length limit
max_len_dev=0, # no length limit
sentence_max_len=300,
learning_rate=0.0008,
learning_rate_warmup_steps=160,
clip_grad_norm=0., #no clipping
step_decay=True, # note that disabling step decay is not implemented
step_decay_factor=0.5,
step_decay_patience=5,
max_consecutive_decays=3, # establishes a termination criterion
partitioned=True,
num_layers_position_only=0,
num_layers=8,
d_model=1024,
num_heads=8,
d_kv=64,
d_ff=2048,
d_label_hidden=250,
d_tag_hidden=250,
tag_loss_scale=5.0,
attention_dropout=0.2,
embedding_dropout=0.0,
relu_dropout=0.1,
residual_dropout=0.2,
use_tags=False,
use_words=False,
use_chars_lstm=False,
use_elmo=False,
use_bert=False,
use_zen=False,
use_bert_only=False,
use_xlnet=False,
use_xlnet_only=False,
predict_tags=False,
d_char_emb=32, # A larger value may be better for use_chars_lstm
tag_emb_dropout=0.2,
word_emb_dropout=0.4,
morpho_emb_dropout=0.2,
timing_dropout=0.0,
char_lstm_input_dropout=0.2,
elmo_dropout=0.5, # Note that this semi-stacks with morpho_emb_dropout!
bert_model="bert-base-uncased",
bert_do_lower_case=True,
bert_transliterate="",
xlnet_model="xlnet-large-cased",
xlnet_do_lower_case=False,
zen_model='',
ngram=5,
ngram_threshold=0,
ngram_freq_threshold=1,
ngram_type='pmi',
)
def run_train(args, hparams):
# if args.numpy_seed is not None:
# print("Setting numpy random seed to {}...".format(args.numpy_seed))
# np.random.seed(args.numpy_seed)
#
# # Make sure that pytorch is actually being initialized randomly.
# # On my cluster I was getting highly correlated results from multiple
# # runs, but calling reset_parameters() changed that. A brief look at the
# # pytorch source code revealed that pytorch initializes its RNG by
# # calling std::random_device, which according to the C++ spec is allowed
# # to be deterministic.
# seed_from_numpy = np.random.randint(2147483648)
# print("Manual seed for pytorch:", seed_from_numpy)
# torch.manual_seed(seed_from_numpy)
now_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
log_file_name = os.path.join(args.log_dir, 'log-' + now_time)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
filename=log_file_name,
filemode='w',
level=logging.INFO)
logger = logging.getLogger(__name__)
console_handler = logging.StreamHandler()
logger.addHandler(console_handler)
logger = logging.getLogger(__name__)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
hparams.set_from_args(args)
logger.info("Hyperparameters:")
logger.info(hparams.print())
logger.info("Loading training trees from {}...".format(args.train_path))
if hparams.predict_tags and args.train_path.endswith('10way.clean'):
logger.info("WARNING: The data distributed with this repository contains "
"predicted part-of-speech tags only (not gold tags!) We do not "
"recommend enabling predict_tags in this configuration.")
train_treebank = trees.load_trees(args.train_path)
if hparams.max_len_train > 0:
train_treebank = [tree for tree in train_treebank if len(list(tree.leaves())) <= hparams.max_len_train]
logger.info("Loaded {:,} training examples.".format(len(train_treebank)))
logger.info("Loading development trees from {}...".format(args.dev_path))
dev_treebank = trees.load_trees(args.dev_path)
if hparams.max_len_dev > 0:
dev_treebank = [tree for tree in dev_treebank if len(list(tree.leaves())) <= hparams.max_len_dev]
logger.info("Loaded {:,} development examples.".format(len(dev_treebank)))
logger.info("Loading test trees from {}...".format(args.test_path))
test_treebank = trees.load_trees(args.test_path)
if hparams.max_len_dev > 0:
test_treebank = [tree for tree in test_treebank if len(list(tree.leaves())) <= hparams.max_len_dev]
logger.info("Loaded {:,} test examples.".format(len(test_treebank)))
logger.info("Processing trees for training...")
train_parse = [tree.convert() for tree in train_treebank]
dev_parse = [tree.convert() for tree in dev_treebank]
test_parse = [tree.convert() for tree in test_treebank]
logger.info("Constructing vocabularies...")
tag_vocab = vocabulary.Vocabulary()
tag_vocab.index(tokens.START)
tag_vocab.index(tokens.STOP)
tag_vocab.index(tokens.TAG_UNK)
word_vocab = vocabulary.Vocabulary()
word_vocab.index(tokens.START)
word_vocab.index(tokens.STOP)
word_vocab.index(tokens.UNK)
label_vocab = vocabulary.Vocabulary()
label_vocab.index(())
char_set = set()
for tree in train_parse:
nodes = [tree]
while nodes:
node = nodes.pop()
if isinstance(node, trees.InternalParseNode):
label_vocab.index(node.label)
nodes.extend(reversed(node.children))
else:
tag_vocab.index(node.tag)
word_vocab.index(node.word)
char_set |= set(node.word)
char_vocab = vocabulary.Vocabulary()
# If codepoints are small (e.g. Latin alphabet), index by codepoint directly
highest_codepoint = max(ord(char) for char in char_set)
if highest_codepoint < 512:
if highest_codepoint < 256:
highest_codepoint = 256
else:
highest_codepoint = 512
# This also takes care of constants like tokens.CHAR_PAD
for codepoint in range(highest_codepoint):
char_index = char_vocab.index(chr(codepoint))
assert char_index == codepoint
else:
char_vocab.index(tokens.CHAR_UNK)
char_vocab.index(tokens.CHAR_START_SENTENCE)
char_vocab.index(tokens.CHAR_START_WORD)
char_vocab.index(tokens.CHAR_STOP_WORD)
char_vocab.index(tokens.CHAR_STOP_SENTENCE)
for char in sorted(char_set):
char_vocab.index(char)
tag_vocab.freeze()
word_vocab.freeze()
label_vocab.freeze()
char_vocab.freeze()
# -------- ngram vocab ------------
ngram_vocab = vocabulary.Vocabulary()
ngram_vocab.index(())
ngram_finder = FindNgrams(min_count=hparams.ngram_threshold)
def get_sentence(parse):
sentences = []
for tree in parse:
sentence = []
for leaf in tree.leaves():
sentence.append(leaf.word)
sentences.append(sentence)
return sentences
sentence_list = get_sentence(train_parse)
if not args.cross_domain:
sentence_list.extend(get_sentence(dev_parse))
# sentence_list.extend(get_sentence(test_parse))
if hparams.ngram_type == 'freq':
logger.info('ngram type: freq')
ngram_finder.count_ngram(sentence_list, hparams.ngram)
elif hparams.ngram_type == 'pmi':
logger.info('ngram type: pmi')
ngram_finder.find_ngrams_pmi(sentence_list, hparams.ngram, hparams.ngram_freq_threshold)
else:
raise ValueError()
ngram_type_count = [0 for _ in range(hparams.ngram)]
for w, c in ngram_finder.ngrams.items():
ngram_type_count[len(list(w))-1] += 1
for _ in range(c):
ngram_vocab.index(w)
logger.info(str(ngram_type_count))
ngram_vocab.freeze()
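    # Coverage statistics (editor note): for each n, count how many n-gram
    # token occurrences in the corpus are covered by the mined n-gram lexicon.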
ngram_count = [0 for _ in range(hparams.ngram)]
for sentence in sentence_list:
for n in range(len(ngram_count)):
length = n + 1
for i in range(len(sentence)):
gram = tuple(sentence[i: i + length])
if gram in ngram_finder.ngrams:
ngram_count[n] += 1
logger.info(str(ngram_count))
# -------- ngram vocab ------------
def print_vocabulary(name, vocab):
special = {tokens.START, tokens.STOP, tokens.UNK}
logger.info("{} ({:,}): {}".format(
name, vocab.size,
sorted(value for value in vocab.values if value in special) +
sorted(value for value in vocab.values if value not in special)))
if args.print_vocabs:
print_vocabulary("Tag", tag_vocab)
print_vocabulary("Word", word_vocab)
print_vocabulary("Label", label_vocab)
print_vocabulary("Ngram", ngram_vocab)
logger.info("Initializing model...")
load_path = None
if load_path is not None:
logger.info(f"Loading parameters from {load_path}")
info = torch_load(load_path)
parser = SAPar_model.SAChartParser.from_spec(info['spec'], info['state_dict'])
else:
parser = SAPar_model.SAChartParser(
tag_vocab,
word_vocab,
label_vocab,
char_vocab,
ngram_vocab,
hparams,
)
print("Initializing optimizer...")
trainable_parameters = [param for param in parser.parameters() if param.requires_grad]
trainer = torch.optim.Adam(trainable_parameters, lr=1., betas=(0.9, 0.98), eps=1e-9)
if load_path is not None:
trainer.load_state_dict(info['trainer'])
pytorch_total_params = sum(p.numel() for p in parser.parameters() if p.requires_grad)
logger.info('# of trainable parameters: %d' % pytorch_total_params)
def set_lr(new_lr):
for param_group in trainer.param_groups:
param_group['lr'] = new_lr
assert hparams.step_decay, "Only step_decay schedule is supported"
warmup_coeff = hparams.learning_rate / hparams.learning_rate_warmup_steps
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
trainer, 'max',
factor=hparams.step_decay_factor,
patience=hparams.step_decay_patience,
verbose=True,
)
def schedule_lr(iteration):
iteration = iteration + 1
if iteration <= hparams.learning_rate_warmup_steps:
set_lr(iteration * warmup_coeff)
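    # Worked example (editor note): with the default learning_rate=0.0008 and
    # learning_rate_warmup_steps=160, warmup_coeff = 0.0008 / 160 = 5e-06, so
    # the LR ramps linearly from 5e-06 at step 1 to 0.0008 at step 160; after
    # warmup, ReduceLROnPlateau halves it whenever the dev fscore plateaus.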
clippable_parameters = trainable_parameters
grad_clip_threshold = np.inf if hparams.clip_grad_norm == 0 else hparams.clip_grad_norm
logger.info("Training...")
total_processed = 0
current_processed = 0
check_every = len(train_parse) / args.checks_per_epoch
best_eval_fscore = -np.inf
test_fscore_on_dev = -np.inf
best_eval_scores = None
best_eval_model_path = None
best_eval_processed = 0
start_time = time.time()
def check_eval(eval_treebank, ep, flag='dev'):
# nonlocal best_eval_fscore
# nonlocal best_eval_model_path
# nonlocal best_eval_processed
dev_start_time = time.time()
eval_predicted = []
for dev_start_index in range(0, len(eval_treebank), args.eval_batch_size):
subbatch_trees = eval_treebank[dev_start_index:dev_start_index + args.eval_batch_size]
subbatch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in subbatch_trees]
predicted, _ = parser.parse_batch(subbatch_sentences)
del _
eval_predicted.extend([p.convert() for p in predicted])
eval_fscore = evaluate.evalb(args.evalb_dir, eval_treebank, eval_predicted)
logger.info(
flag + ' eval '
'epoch {} '
"fscore {} "
"elapsed {} "
"total-elapsed {}".format(
ep,
eval_fscore,
format_elapsed(dev_start_time),
format_elapsed(start_time),
)
)
return eval_fscore
def save_model(eval_fscore, remove_model):
nonlocal best_eval_fscore
nonlocal best_eval_model_path
nonlocal best_eval_processed
nonlocal best_eval_scores
if best_eval_model_path is not None:
extensions = [".pt"]
for ext in extensions:
path = best_eval_model_path + ext
if os.path.exists(path) and remove_model:
logger.info("Removing previous model file {}...".format(path))
os.remove(path)
best_eval_fscore = eval_fscore.fscore
best_eval_scores = eval_fscore
best_eval_model_path = "{}_eval={:.2f}_{}".format(
args.model_path_base, eval_fscore.fscore, now_time)
best_eval_processed = total_processed
logger.info("Saving new best model to {}...".format(best_eval_model_path))
torch.save({
'spec': parser.spec,
'state_dict': parser.state_dict(),
# 'trainer' : trainer.state_dict(),
}, best_eval_model_path + ".pt")
for epoch in itertools.count(start=1):
if args.epochs is not None and epoch > args.epochs:
break
np.random.shuffle(train_parse)
epoch_start_time = time.time()
for start_index in range(0, len(train_parse), args.batch_size):
trainer.zero_grad()
schedule_lr(total_processed // args.batch_size)
batch_loss_value = 0.0
batch_trees = train_parse[start_index:start_index + args.batch_size]
batch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in batch_trees]
batch_num_tokens = sum(len(sentence) for sentence in batch_sentences)
for subbatch_sentences, subbatch_trees in parser.split_batch(batch_sentences, batch_trees,
args.subbatch_max_tokens):
_, loss = parser.parse_batch(subbatch_sentences, subbatch_trees)
if hparams.predict_tags:
loss = loss[0] / len(batch_trees) + loss[1] / batch_num_tokens
else:
loss = loss / len(batch_trees)
loss_value = float(loss.data.cpu().numpy())
batch_loss_value += loss_value
if loss_value > 0:
loss.backward()
del loss
total_processed += len(subbatch_trees)
current_processed += len(subbatch_trees)
grad_norm = torch.nn.utils.clip_grad_norm_(clippable_parameters, grad_clip_threshold)
trainer.step()
print(
"epoch {:,} "
"batch {:,}/{:,} "
"processed {:,} "
"batch-loss {:.4f} "
"grad-norm {:.4f} "
"epoch-elapsed {} "
"total-elapsed {}".format(
epoch,
start_index // args.batch_size + 1,
int(np.ceil(len(train_parse) / args.batch_size)),
total_processed,
batch_loss_value,
grad_norm,
format_elapsed(epoch_start_time),
format_elapsed(start_time),
)
)
if current_processed >= check_every:
current_processed -= check_every
dev_fscore = check_eval(dev_treebank, epoch, flag='dev')
test_fscore = check_eval(test_treebank, epoch, flag='test')
if dev_fscore.fscore > best_eval_fscore:
save_model(dev_fscore, remove_model=True)
test_fscore_on_dev = test_fscore
# adjust learning rate at the end of an epoch
if (total_processed // args.batch_size + 1) > hparams.learning_rate_warmup_steps:
scheduler.step(best_eval_fscore)
if (total_processed - best_eval_processed) > args.patients \
+ ((hparams.step_decay_patience + 1) * hparams.max_consecutive_decays * len(train_parse)):
logger.info("Terminating due to lack of improvement in eval fscore.")
logger.info(
"best dev {} test {}".format(
best_eval_scores,
test_fscore_on_dev,
)
)
break
def run_test(args):
print("Loading test trees from {}...".format(args.test_path))
test_treebank = trees.load_trees(args.test_path)
print("Loaded {:,} test examples.".format(len(test_treebank)))
print("Loading model from {}...".format(args.model_path_base))
assert args.model_path_base.endswith(".pt"), "Only pytorch savefiles supported"
info = torch_load(args.model_path_base)
assert 'hparams' in info['spec'], "Older savefiles not supported"
parser = SAPar_model.SAChartParser.from_spec(info['spec'], info['state_dict'])
print("Parsing test sentences...")
start_time = time.time()
test_predicted = []
for start_index in tqdm(range(0, len(test_treebank), args.eval_batch_size)):
subbatch_trees = test_treebank[start_index:start_index+args.eval_batch_size]
subbatch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in subbatch_trees]
predicted, _ = parser.parse_batch(subbatch_sentences)
del _
test_predicted.extend([p.convert() for p in predicted])
# The tree loader does some preprocessing to the trees (e.g. stripping TOP
# symbols or SPMRL morphological features). We compare with the input file
# directly to be extra careful about not corrupting the evaluation. We also
# allow specifying a separate "raw" file for the gold trees: the inputs to
# our parser have traces removed and may have predicted tags substituted,
# and we may wish to compare against the raw gold trees to make sure we
# haven't made a mistake. As far as we can tell all of these variations give
# equivalent results.
ref_gold_path = args.test_path
if args.test_path_raw is not None:
print("Comparing with raw trees from", args.test_path_raw)
ref_gold_path = args.test_path_raw
test_fscore = evaluate.evalb(args.evalb_dir, test_treebank, test_predicted, ref_gold_path=ref_gold_path)
model_name = args.model_path_base[args.model_path_base.rfind('/')+1: args.model_path_base.rfind('.')]
output_file = './results/' + model_name + '.txt'
with open(output_file, "w") as outfile:
for tree in test_predicted:
outfile.write("{}\n".format(tree.linearize()))
print(
"test-fscore {} "
"test-elapsed {}".format(
test_fscore,
format_elapsed(start_time),
)
)
def run_parse(args):
if args.output_path != '-' and os.path.exists(args.output_path):
print("Error: output file already exists:", args.output_path)
return
print("Loading model from {}...".format(args.model_path_base))
assert args.model_path_base.endswith(".pt"), "Only pytorch savefiles supported"
info = torch_load(args.model_path_base)
assert 'hparams' in info['spec'], "Older savefiles not supported"
parser = SAPar_model.SAChartParser.from_spec(info['spec'], info['state_dict'])
print("Parsing sentences...")
with open(args.input_path) as input_file:
sentences = input_file.readlines()
sentences = [sentence.split() for sentence in sentences]
# Tags are not available when parsing from raw text, so use a dummy tag
if 'UNK' in parser.tag_vocab.indices:
dummy_tag = 'UNK'
else:
dummy_tag = parser.tag_vocab.value(0)
start_time = time.time()
all_predicted = []
for start_index in range(0, len(sentences), args.eval_batch_size):
subbatch_sentences = sentences[start_index:start_index+args.eval_batch_size]
subbatch_sentences = [[(dummy_tag, word) for word in sentence] for sentence in subbatch_sentences]
predicted, _ = parser.parse_batch(subbatch_sentences)
del _
if args.output_path == '-':
for p in predicted:
print(p.convert().linearize())
else:
all_predicted.extend([p.convert() for p in predicted])
if args.output_path != '-':
with open(args.output_path, 'w') as output_file:
for tree in all_predicted:
output_file.write("{}\n".format(tree.linearize()))
print("Output written to:", args.output_path)
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
hparams = make_hparams()
subparser = subparsers.add_parser("train")
subparser.set_defaults(callback=lambda args: run_train(args, hparams))
hparams.populate_arguments(subparser)
subparser.add_argument("--seed", default=2020, type=int)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="./EVALB/")
subparser.add_argument("--train-path", default="./data/PTB/train.mrg")
subparser.add_argument("--dev-path", default="./data/PTB/dev.mrg")
subparser.add_argument("--test-path", default="./data/PTB/test.mrg")
subparser.add_argument("--log_dir", default="./logs/")
subparser.add_argument("--batch-size", type=int, default=250)
subparser.add_argument("--subbatch-max-tokens", type=int, default=2000)
subparser.add_argument("--eval-batch-size", type=int, default=100)
subparser.add_argument("--epochs", type=int)
subparser.add_argument("--checks-per-epoch", type=int, default=4)
subparser.add_argument("--patients", type=int, default=0)
subparser.add_argument("--print-vocabs", action="store_true")
subparser.add_argument("--stop-f", type=float, default=None)
subparser.add_argument("--track-f", type=float, default=None)
subparser.add_argument("--cross-domain", action='store_true')
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="./EVALB/")
subparser.add_argument("--test-path", default="./data/PTB/test.mrg")
subparser.add_argument("--test-path-raw", type=str)
subparser.add_argument("--eval-batch-size", type=int, default=100)
subparser = subparsers.add_parser("parse")
subparser.set_defaults(callback=run_parse)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--input-path", type=str, required=True)
subparser.add_argument("--output-path", type=str, default="-")
subparser.add_argument("--eval-batch-size", type=int, default=100)
args = parser.parse_args()
args.callback(args)
# %%
if __name__ == "__main__":
main()
|
[
"evaluate.evalb"
] |
[((950, 2051), 'nkutil.HParams', 'nkutil.HParams', ([], {'max_len_train': '(0)', 'max_len_dev': '(0)', 'sentence_max_len': '(300)', 'learning_rate': '(0.0008)', 'learning_rate_warmup_steps': '(160)', 'clip_grad_norm': '(0.0)', 'step_decay': '(True)', 'step_decay_factor': '(0.5)', 'step_decay_patience': '(5)', 'max_consecutive_decays': '(3)', 'partitioned': '(True)', 'num_layers_position_only': '(0)', 'num_layers': '(8)', 'd_model': '(1024)', 'num_heads': '(8)', 'd_kv': '(64)', 'd_ff': '(2048)', 'd_label_hidden': '(250)', 'd_tag_hidden': '(250)', 'tag_loss_scale': '(5.0)', 'attention_dropout': '(0.2)', 'embedding_dropout': '(0.0)', 'relu_dropout': '(0.1)', 'residual_dropout': '(0.2)', 'use_tags': '(False)', 'use_words': '(False)', 'use_chars_lstm': '(False)', 'use_elmo': '(False)', 'use_bert': '(False)', 'use_zen': '(False)', 'use_bert_only': '(False)', 'use_xlnet': '(False)', 'use_xlnet_only': '(False)', 'predict_tags': '(False)', 'd_char_emb': '(32)', 'tag_emb_dropout': '(0.2)', 'word_emb_dropout': '(0.4)', 'morpho_emb_dropout': '(0.2)', 'timing_dropout': '(0.0)', 'char_lstm_input_dropout': '(0.2)', 'elmo_dropout': '(0.5)', 'bert_model': '"""bert-base-uncased"""', 'bert_do_lower_case': '(True)', 'bert_transliterate': '""""""', 'xlnet_model': '"""xlnet-large-cased"""', 'xlnet_do_lower_case': '(False)', 'zen_model': '""""""', 'ngram': '(5)', 'ngram_threshold': '(0)', 'ngram_freq_threshold': '(1)', 'ngram_type': '"""pmi"""'}), "(max_len_train=0, max_len_dev=0, sentence_max_len=300,\n learning_rate=0.0008, learning_rate_warmup_steps=160, clip_grad_norm=\n 0.0, step_decay=True, step_decay_factor=0.5, step_decay_patience=5,\n max_consecutive_decays=3, partitioned=True, num_layers_position_only=0,\n num_layers=8, d_model=1024, num_heads=8, d_kv=64, d_ff=2048,\n d_label_hidden=250, d_tag_hidden=250, tag_loss_scale=5.0,\n attention_dropout=0.2, embedding_dropout=0.0, relu_dropout=0.1,\n residual_dropout=0.2, use_tags=False, use_words=False, use_chars_lstm=\n False, use_elmo=False, use_bert=False, use_zen=False, use_bert_only=\n False, use_xlnet=False, use_xlnet_only=False, predict_tags=False,\n d_char_emb=32, tag_emb_dropout=0.2, word_emb_dropout=0.4,\n morpho_emb_dropout=0.2, timing_dropout=0.0, char_lstm_input_dropout=0.2,\n elmo_dropout=0.5, bert_model='bert-base-uncased', bert_do_lower_case=\n True, bert_transliterate='', xlnet_model='xlnet-large-cased',\n xlnet_do_lower_case=False, zen_model='', ngram=5, ngram_threshold=0,\n ngram_freq_threshold=1, ngram_type='pmi')\n", (964, 2051), False, 'import nkutil\n'), ((3505, 3550), 'os.path.join', 'os.path.join', (['args.log_dir', "('log-' + now_time)"], {}), "(args.log_dir, 'log-' + now_time)\n", (3517, 3550), False, 'import os\n'), ((3555, 3741), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'filename': 'log_file_name', 'filemode': '"""w"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', filename=log_file_name, filemode='w', level=\n logging.INFO)\n", (3574, 3741), False, 'import logging\n'), ((3836, 3863), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3853, 3863), False, 'import logging\n'), ((3886, 3909), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3907, 3909), False, 'import logging\n'), ((3962, 3989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3979, 3989), False, 'import logging\n'), ((3995, 4017), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (4006, 4017), False, 'import random\n'), ((4022, 4047), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4036, 4047), True, 'import numpy as np\n'), ((4052, 4080), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4069, 4080), False, 'import torch\n'), ((4589, 4622), 'trees.load_trees', 'trees.load_trees', (['args.train_path'], {}), '(args.train_path)\n', (4605, 4622), False, 'import trees\n'), ((4945, 4976), 'trees.load_trees', 'trees.load_trees', (['args.dev_path'], {}), '(args.dev_path)\n', (4961, 4976), False, 'import trees\n'), ((5287, 5319), 'trees.load_trees', 'trees.load_trees', (['args.test_path'], {}), '(args.test_path)\n', (5303, 5319), False, 'import trees\n'), ((5832, 5855), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (5853, 5855), False, 'import vocabulary\n'), ((5977, 6000), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (5998, 6000), False, 'import vocabulary\n'), ((6122, 6145), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (6143, 6145), False, 'import vocabulary\n'), ((6622, 6645), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (6643, 6645), False, 'import vocabulary\n'), ((7643, 7666), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (7664, 7666), False, 'import vocabulary\n'), ((7712, 7757), 'attutil.FindNgrams', 'FindNgrams', ([], {'min_count': 'hparams.ngram_threshold'}), '(min_count=hparams.ngram_threshold)\n', (7722, 7757), False, 'from attutil import FindNgrams\n'), ((10396, 10472), 'torch.optim.Adam', 'torch.optim.Adam', (['trainable_parameters'], {'lr': '(1.0)', 'betas': '(0.9, 0.98)', 'eps': '(1e-09)'}), '(trainable_parameters, lr=1.0, betas=(0.9, 0.98), eps=1e-09)\n', (10412, 10472), False, 'import torch\n'), ((10992, 11141), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['trainer', '"""max"""'], {'factor': 'hparams.step_decay_factor', 'patience': 'hparams.step_decay_patience', 'verbose': '(True)'}), "(trainer, 'max', factor=hparams.\n step_decay_factor, patience=hparams.step_decay_patience, verbose=True)\n", (11034, 11141), False, 'import torch\n'), ((11799, 11810), 'time.time', 'time.time', ([], {}), '()\n', (11808, 11810), False, 'import time\n'), ((14013, 14037), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (14028, 14037), False, 'import itertools\n'), ((17470, 17502), 'trees.load_trees', 'trees.load_trees', (['args.test_path'], {}), '(args.test_path)\n', (17486, 17502), False, 'import trees\n'), ((17850, 17919), 'SAPar_model.SAChartParser.from_spec', 'SAPar_model.SAChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (17885, 17919), False, 'import SAPar_model\n'), ((17977, 17988), 'time.time', 'time.time', ([], {}), '()\n', (17986, 17988), False, 'import time\n'), ((19210, 19305), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank', 'test_predicted'], {'ref_gold_path': 'ref_gold_path'}), '(args.evalb_dir, test_treebank, test_predicted, ref_gold_path\n =ref_gold_path)\n', (19224, 19305), False, 'import evaluate\n'), ((20210, 20279), 'SAPar_model.SAChartParser.from_spec', 'SAPar_model.SAChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (20245, 20279), False, 'import SAPar_model\n'), ((20684, 20695), 'time.time', 'time.time', ([], {}), '()\n', (20693, 20695), False, 'import time\n'), ((21533, 21558), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21556, 21558), False, 'import argparse\n'), ((418, 439), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (428, 439), False, 'import torch\n'), ((465, 534), 'torch.load', 'torch.load', (['load_path'], {'map_location': '(lambda storage, location: storage)'}), '(load_path, map_location=lambda storage, location: storage)\n', (475, 534), False, 'import torch\n'), ((9975, 10044), 'SAPar_model.SAChartParser.from_spec', 'SAPar_model.SAChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (10010, 10044), False, 'import SAPar_model\n'), ((10072, 10171), 'SAPar_model.SAChartParser', 'SAPar_model.SAChartParser', (['tag_vocab', 'word_vocab', 'label_vocab', 'char_vocab', 'ngram_vocab', 'hparams'], {}), '(tag_vocab, word_vocab, label_vocab, char_vocab,\n ngram_vocab, hparams)\n', (10097, 10171), False, 'import SAPar_model\n'), ((12004, 12015), 'time.time', 'time.time', ([], {}), '()\n', (12013, 12015), False, 'import time\n'), ((12514, 12575), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'eval_treebank', 'eval_predicted'], {}), '(args.evalb_dir, eval_treebank, eval_predicted)\n', (12528, 12575), False, 'import evaluate\n'), ((14126, 14156), 'numpy.random.shuffle', 'np.random.shuffle', (['train_parse'], {}), '(train_parse)\n', (14143, 14156), True, 'import numpy as np\n'), ((14184, 14195), 'time.time', 'time.time', ([], {}), '()\n', (14193, 14195), False, 'import time\n'), ((19811, 19843), 'os.path.exists', 'os.path.exists', (['args.output_path'], {}), '(args.output_path)\n', (19825, 19843), False, 'import os\n'), ((591, 602), 'time.time', 'time.time', ([], {}), '()\n', (600, 602), False, 'import time\n'), ((3431, 3454), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3452, 3454), False, 'import datetime\n'), ((15489, 15562), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['clippable_parameters', 'grad_clip_threshold'], {}), '(clippable_parameters, grad_clip_threshold)\n', (15519, 15562), False, 'import torch\n'), ((13306, 13326), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (13320, 13326), False, 'import os\n'), ((13448, 13463), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (13457, 13463), False, 'import os\n')]
|
import numpy as np
import sys
import evaluate as EV
matrix_list = ["all", "1_syllable", "2_syllable", "3_syllable", "4_syllable", "2-_syllable"]
def average_cost(costlist, querylist):
querylist_uniq = []
cost_dict = {}
for i in range(len(querylist)):
query_id = querylist[i].strip()
keyword = query_id.split("_")[0]
        if keyword not in cost_dict:  # dict.has_key() was removed in Python 3
querylist_uniq.append(keyword)
cost_dict[keyword] = []
cost_dict[keyword].append(costlist[i])
cost_uniq = []
for keyword in querylist_uniq:
cost_uniq.append(np.array(cost_dict[keyword]).sum(axis=0).tolist())
return (cost_uniq, querylist_uniq)
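# Note (editor): despite its name, average_cost sums the per-query cost rows
# for each keyword (np.array(...).sum(axis=0)) rather than dividing by the
# number of queries; the evaluation below consumes the summed rows directly.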
if __name__=="__main__":
if len(sys.argv) < 6:
print("USAGE: python %s result_dir keywordlist testlist textfile syllable_num_file"%sys.argv[0])
exit(1)
result_dir = sys.argv[1]
keyword_list = open(sys.argv[2]).readlines()
test_list = open(sys.argv[3]).readlines()
occurance_dict = EV.build_occurance_dict(sys.argv[4])
syllable_num_dict = EV.build_syllable_num_dict(sys.argv[5])
cost_list = []
for keyword in keyword_list:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
result_list = result_fid.readlines()
result_fid.close()
score_list = []
for res in result_list:
score = float(res.strip().split()[0])
score_list.append(score)
cost_list.append(score_list)
(cost_uniq, keyword_list_uniq) = average_cost(cost_list, keyword_list)
evaluate_matrix = EV.evaluate(cost_uniq, keyword_list_uniq, test_list, occurance_dict, syllable_num_dict)
for x in matrix_list:
output = np.array(evaluate_matrix[x]).mean(axis=0)
MAP = output[0]
PatN = output[1]
Pat10 = output[2]
print('%s: MAP=%.3f PatN=%.3f Pat10=%.3f'%(x, MAP, PatN, Pat10))
|
[
"evaluate.evaluate",
"evaluate.build_occurance_dict",
"evaluate.build_syllable_num_dict"
] |
[((1009, 1045), 'evaluate.build_occurance_dict', 'EV.build_occurance_dict', (['sys.argv[4]'], {}), '(sys.argv[4])\n', (1032, 1045), True, 'import evaluate as EV\n'), ((1070, 1109), 'evaluate.build_syllable_num_dict', 'EV.build_syllable_num_dict', (['sys.argv[5]'], {}), '(sys.argv[5])\n', (1096, 1109), True, 'import evaluate as EV\n'), ((1590, 1681), 'evaluate.evaluate', 'EV.evaluate', (['cost_uniq', 'keyword_list_uniq', 'test_list', 'occurance_dict', 'syllable_num_dict'], {}), '(cost_uniq, keyword_list_uniq, test_list, occurance_dict,\n syllable_num_dict)\n', (1601, 1681), True, 'import evaluate as EV\n'), ((1721, 1749), 'numpy.array', 'np.array', (['evaluate_matrix[x]'], {}), '(evaluate_matrix[x])\n', (1729, 1749), True, 'import numpy as np\n'), ((594, 622), 'numpy.array', 'np.array', (['cost_dict[keyword]'], {}), '(cost_dict[keyword])\n', (602, 622), True, 'import numpy as np\n')]
|
import os
import json
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from evaluate import evaluate
if __name__ == '__main__':
training_commands, predict_commands = [], []
choices = {"dan": range(1, 4+1), "gru": range(1, 4+1)}
probing_accuracies = {"dan": [], "gru": []}
for seq2vec_name, layers in choices.items():
# Check if Base Models have been trained first.
serialization_dir = os.path.join("serialization_dirs", f"main_{seq2vec_name}_5k_with_emb")
model_files_present = all([os.path.exists(os.path.join(serialization_dir, file_name))
for file_name in ["model.ckpt.index", "config.json", "vocab.txt"]])
epochs = 8 if seq2vec_name == "dan" else 4 # gru is slow, use only 4 epochs
if not model_files_present:
print("\nYour base model hasn't been trained yet.")
print("Please train it first with the following command:")
training_command = (f"python train.py main "
f"data/imdb_sentiment_train_5k.jsonl "
f"data/imdb_sentiment_dev.jsonl "
f"--seq2vec-choice {seq2vec_name} "
f"--embedding-dim 50 "
f"--num-layers 4 "
f"--num-epochs {epochs} "
f"--suffix-name _{seq2vec_name}_5k_with_emb "
f"--pretrained-embedding-file data/glove.6B.50d.txt ")
print(training_command)
exit()
for layer in layers:
serialization_dir = os.path.join("serialization_dirs", f"probing_sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}")
model_files_present = all([os.path.exists(os.path.join(serialization_dir, file_name))
for file_name in ["model.ckpt.index", "config.json", "vocab.txt"]])
predictions_file = (f"serialization_dirs/probing_sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}/"
f"predictions_imdb_sentiment_5k_test.txt")
predictions_present = os.path.exists(predictions_file)
if not model_files_present:
training_command = (f"python train.py probing "
f"data/imdb_sentiment_train_5k.jsonl "
f"data/imdb_sentiment_dev.jsonl "
f"--base-model-dir serialization_dirs/main_{seq2vec_name}_5k_with_emb "
f"--layer-num {layer} "
f"--num-epochs {epochs} "
f"--suffix-name _sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}")
training_commands.append(training_command)
continue
if not predictions_present:
predict_command = (f"python predict.py "
f"serialization_dirs/probing_sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer} "
f"data/imdb_sentiment_test.jsonl "
f"--predictions-file serialization_dirs/probing_sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}/"
f"predictions_imdb_sentiment_5k_test.txt")
predict_commands.append(predict_command)
continue
accuracy = evaluate("data/imdb_sentiment_test.jsonl", predictions_file)
probing_accuracies[seq2vec_name].append(accuracy)
if training_commands:
print("\nPlease finish the missing model training using the following commands:")
print("\n".join(training_commands))
if predict_commands:
print("\nPlease finish the model predictions using the following commands:")
print("\n".join(predict_commands))
if training_commands or predict_commands:
print("\nCannot plot the results until all the files are present.")
exit()
# Make the plots
plt.style.use('seaborn-whitegrid')
for seq2vec_name, layer_range in choices.items():
plt.plot(layer_range, probing_accuracies[seq2vec_name])
plt.xlabel("Probing Layer")
plt.ylabel("Accuracy")
title = "SentimentTask: Probing Performance vs Probing Layer"
plt.title(title)
plt.savefig(os.path.join("plots", f"probing_performance_on_sentiment_task_{seq2vec_name}.png"))
plt.clf()
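# --- Editor's hedged sketch (not part of the original script) ---
# A plausible shape for evaluate(gold_jsonl, predictions_file): one predicted
# label per line, compared against an assumed "label" field of each gold JSON
# record. The project's actual evaluate() may differ.
def _accuracy_sketch(gold_jsonl_path, predictions_path):
    with open(gold_jsonl_path) as gold_file:
        gold = [json.loads(line)["label"] for line in gold_file if line.strip()]
    with open(predictions_path) as pred_file:
        preds = [line.strip() for line in pred_file if line.strip()]
    # Fraction of instances whose predicted label matches the gold label.
    return sum(str(g) == p for g, p in zip(gold, preds)) / len(gold)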
|
[
"evaluate.evaluate"
] |
[((40, 63), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (54, 63), False, 'import matplotlib\n'), ((4146, 4180), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (4159, 4180), True, 'import matplotlib.pyplot as plt\n'), ((447, 517), 'os.path.join', 'os.path.join', (['"""serialization_dirs"""', 'f"""main_{seq2vec_name}_5k_with_emb"""'], {}), "('serialization_dirs', f'main_{seq2vec_name}_5k_with_emb')\n", (459, 517), False, 'import os\n'), ((4243, 4298), 'matplotlib.pyplot.plot', 'plt.plot', (['layer_range', 'probing_accuracies[seq2vec_name]'], {}), '(layer_range, probing_accuracies[seq2vec_name])\n', (4251, 4298), True, 'import matplotlib.pyplot as plt\n'), ((4307, 4334), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probing Layer"""'], {}), "('Probing Layer')\n", (4317, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4353, 4365), True, 'import matplotlib.pyplot as plt\n'), ((4444, 4460), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4453, 4460), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4582), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4580, 4582), True, 'import matplotlib.pyplot as plt\n'), ((1679, 1786), 'os.path.join', 'os.path.join', (['"""serialization_dirs"""', 'f"""probing_sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}"""'], {}), "('serialization_dirs',\n f'probing_sentiment_{seq2vec_name}_with_emb_on_5k_at_layer_{layer}')\n", (1691, 1786), False, 'import os\n'), ((2217, 2249), 'os.path.exists', 'os.path.exists', (['predictions_file'], {}), '(predictions_file)\n', (2231, 2249), False, 'import os\n'), ((3543, 3603), 'evaluate.evaluate', 'evaluate', (['"""data/imdb_sentiment_test.jsonl"""', 'predictions_file'], {}), "('data/imdb_sentiment_test.jsonl', predictions_file)\n", (3551, 3603), False, 'from evaluate import evaluate\n'), ((4481, 4567), 'os.path.join', 'os.path.join', (['"""plots"""', 'f"""probing_performance_on_sentiment_task_{seq2vec_name}.png"""'], {}), "('plots',\n f'probing_performance_on_sentiment_task_{seq2vec_name}.png')\n", (4493, 4567), False, 'import os\n'), ((568, 610), 'os.path.join', 'os.path.join', (['serialization_dir', 'file_name'], {}), '(serialization_dir, file_name)\n', (580, 610), False, 'import os\n'), ((1837, 1879), 'os.path.join', 'os.path.join', (['serialization_dir', 'file_name'], {}), '(serialization_dir, file_name)\n', (1849, 1879), False, 'import os\n')]
|
import os
import pickle
import argparse
import torch
from torch.utils.data import Dataset, DataLoader
from data_processing import prep_dataset, build_second_order_wikidata_graphs
from stockdataset import StockDataset
from model import MANSF
from train import train_model
from evaluate import evaluate_model
STOCKNET_REPO_NAME = 'stocknet-dataset-master'
PROCESSED_STOCKNET_DATA_FOLDER = 'processed_stocknet_data'
# Hyperparameters
T = 6
GRU_HIDDEN_SIZE = 64
ATTN_INTER_SIZE = 32
USE_EMBED_SIZE = 512
BLEND_SIZE = 32
GAT_1_INTER_SIZE = 32
GAT_2_INTER_SIZE = 32
LEAKYRELU_SLOPE = 0.01
ELU_ALPHA = 1.0
U = 8
LEARNING_RATE = 5e-4
NUM_EPOCHS = 1
"""
Valid executions:
main.py --preprocess_in <filepath> --preprocess_out <filepath>
main.py --train <filepath> --model <filepath>
main.py --evaluate <filepath> --model <filepath>
--preprocess_in should contain a filepath to a directory with the following structure:
root/
- links.csv
- wikidata_entries/
-- individual .txt files of wikidata entries for each stock
"""
def main():
parser = argparse.ArgumentParser(description='Preprocess data, train or evaluate the MAN-SF model. If executed \
with --train or --evaluate, --model is required. If executed with --preprocess_in, \
--preprocess_out is required.')
# Preprocessing input / Training / Evaluation arguments
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument('--preprocess_in', '-pi', metavar='FILEPATH', type=str,
help='Preprocesses the data. FILEPATH is filepath to load raw training data')
group1.add_argument('--train', '-t', metavar='FILEPATH', type=str,
help='Trains a MAN-SF model. FILEPATH is filepath for training data')
group1.add_argument('--evaluate', '-e', metavar='FILEPATH', type=str,
help='Evaluates a trained MAN-SF model. FILEPATH is filepath for evaluation data')
# Preprocessing output / Model
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--preprocess_out', '-po',metavar='FILEPATH', type=str,
help='FILEPATH is filepath for exporting processed data as well as downloading the StockNet data')
group2.add_argument('--model', '-m', metavar='FILEPATH', type=str,
help='FILEPATH is filepath to export model (for training) or filepath to load model (for evaluation)')
args = parser.parse_args()
# Validate arguments
if args.preprocess_in:
if not args.preprocess_out:
parser.error('--preprocess_in requires an additional argument --preprocess_out')
if args.train or args.evaluate:
if not args.model:
            parser.error('--train or --evaluate requires an additional argument --model')
if args.preprocess_in:
preprocess(args.preprocess_in, args.preprocess_out)
elif args.train:
train(args.model, args.train)
else:
evaluate(args.model, args.evaluate)
def preprocess(in_filepath, out_filepath):
# StockNet data setup
# Download data (if not already available)
master_zip_filepath = os.path.join(out_filepath, "master.zip")
stocknet_dataset_filepath = os.path.join(out_filepath, STOCKNET_REPO_NAME)
data_output_filepath = os.path.join(out_filepath, PROCESSED_STOCKNET_DATA_FOLDER)
if not os.path.exists(master_zip_filepath):
os.system(f'wget https://github.com/yumoxu/stocknet-dataset/archive/master.zip\
-P {out_filepath}')
if not os.path.isdir(stocknet_dataset_filepath):
os.system(f'unzip {master_zip_filepath} -d {out_filepath}')
if not os.path.isdir(data_output_filepath):
os.mkdir(data_output_filepath)
# Train / Val / Test split
train_start_date = '2014-01-01'
train_end_date = '2015-07-31'
val_start_date = '2015-08-01'
val_end_date = '2015-09-30'
test_start_date = '2015-10-01'
test_end_date = '2016-01-01'
train = prep_dataset(stocknet_dataset_filepath, train_start_date, train_end_date)
val = prep_dataset(stocknet_dataset_filepath, val_start_date, val_end_date)
test = prep_dataset(stocknet_dataset_filepath, test_start_date, test_end_date)
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
# Output StockNet to file
save_object(train, os.path.join(data_output_filepath, 'train.pkl'))
save_object(val, os.path.join(data_output_filepath, 'val.pkl'))
save_object(test, os.path.join(data_output_filepath, 'test.pkl'))
# Wikidata graph setup
wikidata_graph = build_second_order_wikidata_graphs(in_filepath)
# Output graph to file
with open(os.path.join(out_filepath, 'wikidata_adjacency_list_first_and_second_order.txt'), 'w') as file:
for key in wikidata_graph.keys():
file.write(key + ':' + str(wikidata_graph[key]) + '\n')
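# --- Editor's hedged sketch (not part of the original pipeline) ---
# Reading back the adjacency list written by preprocess() above, assuming each
# line has the form "KEY:<str() of a Python literal>" as produced by the
# file.write call at the end of preprocess.
def _load_wikidata_adjacency(filepath):
    import ast
    graph = {}
    with open(filepath) as graph_file:
        for line in graph_file:
            key, _, neighbors = line.rstrip('\n').partition(':')
            graph[key] = ast.literal_eval(neighbors)
    return graph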
def train(model_filepath, data_filepath):
print(f'Running training on {data_filepath}, saving model to {model_filepath}')
data_output_filepath = os.path.join(data_filepath, PROCESSED_STOCKNET_DATA_FOLDER)
# Import data from .pkl files
with open(os.path.join(data_output_filepath, 'train.pkl'), 'rb') as obj:
train = pickle.load(obj)
train_company_to_price_df, train_company_to_tweets, train_date_universe, train_n_days, train_n_stocks, train_max_tweets = train
with open(os.path.join(data_output_filepath, 'val.pkl'), 'rb') as obj:
val = pickle.load(obj)
val_company_to_price_df, val_company_to_tweets, val_date_universe, val_n_days, val_n_stocks, val_max_tweets = val
# Create StockDataset instances
train_dataset = StockDataset(train_company_to_price_df, train_company_to_tweets, train_date_universe, train_n_days, train_n_stocks, train_max_tweets)
val_dataset = StockDataset(val_company_to_price_df, val_company_to_tweets, val_date_universe, val_n_days, val_n_stocks, val_max_tweets)
# create dataloaders
train_dataloader = DataLoader(train_dataset, batch_size=1,
shuffle=True, num_workers=0)
val_dataloader = DataLoader(val_dataset, batch_size=1,
shuffle=False, num_workers=0)
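    # The hyperparameters below (T, GRU_HIDDEN_SIZE, ..., ELU_ALPHA, U) are assumed to be
    # module-level constants defined elsewhere in this script.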
man_sf_model = MANSF(T=T,
gru_hidden_size=GRU_HIDDEN_SIZE,
attn_inter_size=ATTN_INTER_SIZE,
use_embed_size=USE_EMBED_SIZE,
blend_size=BLEND_SIZE,
gat_1_inter_size=GAT_1_INTER_SIZE,
gat_2_inter_size=GAT_2_INTER_SIZE,
leakyrelu_slope=LEAKYRELU_SLOPE,
elu_alpha=ELU_ALPHA,
U=U)
train_acc_list, val_acc_list = train_model(man_sf_model, train_dataloader, val_dataloader, NUM_EPOCHS, LEARNING_RATE, T)
    # Save the trained model to file
torch.save(man_sf_model, model_filepath)
def evaluate(model_filepath, data_filepath):
print(f'Running evaluation on {data_filepath} with model {model_filepath}')
data_output_filepath = os.path.join(data_filepath, PROCESSED_STOCKNET_DATA_FOLDER)
with open(os.path.join(data_output_filepath, 'test.pkl'), 'rb') as obj:
test = pickle.load(obj)
test_company_to_price_df, test_company_to_tweets, test_date_universe, test_n_days, test_n_stocks, test_max_tweets = test
test_dataset = StockDataset(test_company_to_price_df, test_company_to_tweets, test_date_universe, test_n_days, test_n_stocks, test_max_tweets)
test_dataloader = DataLoader(test_dataset, batch_size=1,
shuffle=False, num_workers=0)
man_sf_model = torch.load(model_filepath)
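    # Switch the model to evaluation mode (disables dropout and other train-only behaviour)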
man_sf_model.eval()
test_acc, sharpe_ratio, f1 = evaluate_model(man_sf_model, test_dataloader, T, test_company_to_tweets, test_date_universe, data_filepath)
print('test accuracy:', test_acc)
print('sharpe ratio:', sharpe_ratio[0])
print('f1:', f1)
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate_model"
] |
[((1095, 1337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess data, train or evaluate the MAN-SF model. If executed with --train or --evaluate, --model is required. If executed with --preprocess_in, --preprocess_out is required."""'}), "(description=\n 'Preprocess data, train or evaluate the MAN-SF model. If executed with --train or --evaluate, --model is required. If executed with --preprocess_in, --preprocess_out is required.'\n )\n", (1118, 1337), False, 'import argparse\n'), ((3181, 3221), 'os.path.join', 'os.path.join', (['out_filepath', '"""master.zip"""'], {}), "(out_filepath, 'master.zip')\n", (3193, 3221), False, 'import os\n'), ((3254, 3300), 'os.path.join', 'os.path.join', (['out_filepath', 'STOCKNET_REPO_NAME'], {}), '(out_filepath, STOCKNET_REPO_NAME)\n', (3266, 3300), False, 'import os\n'), ((3328, 3386), 'os.path.join', 'os.path.join', (['out_filepath', 'PROCESSED_STOCKNET_DATA_FOLDER'], {}), '(out_filepath, PROCESSED_STOCKNET_DATA_FOLDER)\n', (3340, 3386), False, 'import os\n'), ((4020, 4093), 'data_processing.prep_dataset', 'prep_dataset', (['stocknet_dataset_filepath', 'train_start_date', 'train_end_date'], {}), '(stocknet_dataset_filepath, train_start_date, train_end_date)\n', (4032, 4093), False, 'from data_processing import prep_dataset, build_second_order_wikidata_graphs\n'), ((4104, 4173), 'data_processing.prep_dataset', 'prep_dataset', (['stocknet_dataset_filepath', 'val_start_date', 'val_end_date'], {}), '(stocknet_dataset_filepath, val_start_date, val_end_date)\n', (4116, 4173), False, 'from data_processing import prep_dataset, build_second_order_wikidata_graphs\n'), ((4185, 4256), 'data_processing.prep_dataset', 'prep_dataset', (['stocknet_dataset_filepath', 'test_start_date', 'test_end_date'], {}), '(stocknet_dataset_filepath, test_start_date, test_end_date)\n', (4197, 4256), False, 'from data_processing import prep_dataset, build_second_order_wikidata_graphs\n'), ((4691, 4738), 'data_processing.build_second_order_wikidata_graphs', 'build_second_order_wikidata_graphs', (['in_filepath'], {}), '(in_filepath)\n', (4725, 4738), False, 'from data_processing import prep_dataset, build_second_order_wikidata_graphs\n'), ((5141, 5200), 'os.path.join', 'os.path.join', (['data_filepath', 'PROCESSED_STOCKNET_DATA_FOLDER'], {}), '(data_filepath, PROCESSED_STOCKNET_DATA_FOLDER)\n', (5153, 5200), False, 'import os\n'), ((5768, 5905), 'stockdataset.StockDataset', 'StockDataset', (['train_company_to_price_df', 'train_company_to_tweets', 'train_date_universe', 'train_n_days', 'train_n_stocks', 'train_max_tweets'], {}), '(train_company_to_price_df, train_company_to_tweets,\n train_date_universe, train_n_days, train_n_stocks, train_max_tweets)\n', (5780, 5905), False, 'from stockdataset import StockDataset\n'), ((5920, 6045), 'stockdataset.StockDataset', 'StockDataset', (['val_company_to_price_df', 'val_company_to_tweets', 'val_date_universe', 'val_n_days', 'val_n_stocks', 'val_max_tweets'], {}), '(val_company_to_price_df, val_company_to_tweets,\n val_date_universe, val_n_days, val_n_stocks, val_max_tweets)\n', (5932, 6045), False, 'from stockdataset import StockDataset\n'), ((6091, 6159), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(train_dataset, batch_size=1, shuffle=True, num_workers=0)\n', (6101, 6159), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6210, 6277), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], 
{'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(val_dataset, batch_size=1, shuffle=False, num_workers=0)\n', (6220, 6277), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6326, 6599), 'model.MANSF', 'MANSF', ([], {'T': 'T', 'gru_hidden_size': 'GRU_HIDDEN_SIZE', 'attn_inter_size': 'ATTN_INTER_SIZE', 'use_embed_size': 'USE_EMBED_SIZE', 'blend_size': 'BLEND_SIZE', 'gat_1_inter_size': 'GAT_1_INTER_SIZE', 'gat_2_inter_size': 'GAT_2_INTER_SIZE', 'leakyrelu_slope': 'LEAKYRELU_SLOPE', 'elu_alpha': 'ELU_ALPHA', 'U': 'U'}), '(T=T, gru_hidden_size=GRU_HIDDEN_SIZE, attn_inter_size=ATTN_INTER_SIZE,\n use_embed_size=USE_EMBED_SIZE, blend_size=BLEND_SIZE, gat_1_inter_size=\n GAT_1_INTER_SIZE, gat_2_inter_size=GAT_2_INTER_SIZE, leakyrelu_slope=\n LEAKYRELU_SLOPE, elu_alpha=ELU_ALPHA, U=U)\n', (6331, 6599), False, 'from model import MANSF\n'), ((6847, 6940), 'train.train_model', 'train_model', (['man_sf_model', 'train_dataloader', 'val_dataloader', 'NUM_EPOCHS', 'LEARNING_RATE', 'T'], {}), '(man_sf_model, train_dataloader, val_dataloader, NUM_EPOCHS,\n LEARNING_RATE, T)\n', (6858, 6940), False, 'from train import train_model\n'), ((6972, 7012), 'torch.save', 'torch.save', (['man_sf_model', 'model_filepath'], {}), '(man_sf_model, model_filepath)\n', (6982, 7012), False, 'import torch\n'), ((7166, 7225), 'os.path.join', 'os.path.join', (['data_filepath', 'PROCESSED_STOCKNET_DATA_FOLDER'], {}), '(data_filepath, PROCESSED_STOCKNET_DATA_FOLDER)\n', (7178, 7225), False, 'import os\n'), ((7483, 7614), 'stockdataset.StockDataset', 'StockDataset', (['test_company_to_price_df', 'test_company_to_tweets', 'test_date_universe', 'test_n_days', 'test_n_stocks', 'test_max_tweets'], {}), '(test_company_to_price_df, test_company_to_tweets,\n test_date_universe, test_n_days, test_n_stocks, test_max_tweets)\n', (7495, 7614), False, 'from stockdataset import StockDataset\n'), ((7634, 7702), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=1, shuffle=False, num_workers=0)\n', (7644, 7702), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((7751, 7777), 'torch.load', 'torch.load', (['model_filepath'], {}), '(model_filepath)\n', (7761, 7777), False, 'import torch\n'), ((7837, 7948), 'evaluate.evaluate_model', 'evaluate_model', (['man_sf_model', 'test_dataloader', 'T', 'test_company_to_tweets', 'test_date_universe', 'data_filepath'], {}), '(man_sf_model, test_dataloader, T, test_company_to_tweets,\n test_date_universe, data_filepath)\n', (7851, 7948), False, 'from evaluate import evaluate_model\n'), ((3398, 3433), 'os.path.exists', 'os.path.exists', (['master_zip_filepath'], {}), '(master_zip_filepath)\n', (3412, 3433), False, 'import os\n'), ((3443, 3570), 'os.system', 'os.system', (['f"""wget https://github.com/yumoxu/stocknet-dataset/archive/master.zip -P {out_filepath}"""'], {}), "(\n f'wget https://github.com/yumoxu/stocknet-dataset/archive/master.zip -P {out_filepath}'\n )\n", (3452, 3570), False, 'import os\n'), ((3574, 3614), 'os.path.isdir', 'os.path.isdir', (['stocknet_dataset_filepath'], {}), '(stocknet_dataset_filepath)\n', (3587, 3614), False, 'import os\n'), ((3624, 3683), 'os.system', 'os.system', (['f"""unzip {master_zip_filepath} -d {out_filepath}"""'], {}), "(f'unzip {master_zip_filepath} -d {out_filepath}')\n", (3633, 3683), False, 'import os\n'), ((3695, 3730), 'os.path.isdir', 'os.path.isdir', (['data_output_filepath'], {}), '(data_output_filepath)\n', 
(3708, 3730), False, 'import os\n'), ((3740, 3770), 'os.mkdir', 'os.mkdir', (['data_output_filepath'], {}), '(data_output_filepath)\n', (3748, 3770), False, 'import os\n'), ((4455, 4502), 'os.path.join', 'os.path.join', (['data_output_filepath', '"""train.pkl"""'], {}), "(data_output_filepath, 'train.pkl')\n", (4467, 4502), False, 'import os\n'), ((4525, 4570), 'os.path.join', 'os.path.join', (['data_output_filepath', '"""val.pkl"""'], {}), "(data_output_filepath, 'val.pkl')\n", (4537, 4570), False, 'import os\n'), ((4594, 4640), 'os.path.join', 'os.path.join', (['data_output_filepath', '"""test.pkl"""'], {}), "(data_output_filepath, 'test.pkl')\n", (4606, 4640), False, 'import os\n'), ((5329, 5345), 'pickle.load', 'pickle.load', (['obj'], {}), '(obj)\n', (5340, 5345), False, 'import pickle\n'), ((5572, 5588), 'pickle.load', 'pickle.load', (['obj'], {}), '(obj)\n', (5583, 5588), False, 'import pickle\n'), ((7318, 7334), 'pickle.load', 'pickle.load', (['obj'], {}), '(obj)\n', (7329, 7334), False, 'import pickle\n'), ((4351, 4400), 'pickle.dump', 'pickle.dump', (['obj', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, output, pickle.HIGHEST_PROTOCOL)\n', (4362, 4400), False, 'import pickle\n'), ((4781, 4866), 'os.path.join', 'os.path.join', (['out_filepath', '"""wikidata_adjacency_list_first_and_second_order.txt"""'], {}), "(out_filepath, 'wikidata_adjacency_list_first_and_second_order.txt'\n )\n", (4793, 4866), False, 'import os\n'), ((5250, 5297), 'os.path.join', 'os.path.join', (['data_output_filepath', '"""train.pkl"""'], {}), "(data_output_filepath, 'train.pkl')\n", (5262, 5297), False, 'import os\n'), ((5497, 5542), 'os.path.join', 'os.path.join', (['data_output_filepath', '"""val.pkl"""'], {}), "(data_output_filepath, 'val.pkl')\n", (5509, 5542), False, 'import os\n'), ((7241, 7287), 'os.path.join', 'os.path.join', (['data_output_filepath', '"""test.pkl"""'], {}), "(data_output_filepath, 'test.pkl')\n", (7253, 7287), False, 'import os\n')]
|
import pandas as pd
from evaluate.calculator import (
RecallCalculator,
PrecisionCalculator,
EmptyReportError,
)
import pytest
from unittest.mock import patch, Mock
from evaluate.report import (
Report,
PrecisionReport,
RecallReport
)
from tests.common import create_precision_report_row
from io import StringIO
class TestPrecisionCalculator:
def test_calculatePrecision_NoReportsRaisesEmptyReportError(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(columns=columns)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
with pytest.raises(EmptyReportError):
calculator._calculate_precision_for_a_given_confidence()
def test_calculatePrecision_OneReportWithOneRowCompletelyCorrectReturnsOne(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(1.0, gt_conf=100)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
actual = calculator._calculate_precision_for_a_given_confidence()
assert actual.precision == 1.0
assert actual.true_positives == 1.0
assert actual.total == 1.0
def test_calculatePrecision_OneReportWithOneRowCompletelyIncorrectReturnsZero(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(0.0, gt_conf=100)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
actual = calculator._calculate_precision_for_a_given_confidence()
assert actual.precision == 0.0
assert actual.true_positives == 0.0
assert actual.total == 1.0
    def test_calculatePrecision_OneReportWithOneRowCompletelyCorrectBelowConfThresholdRaisesEmptyReportError(
self
):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(1.0, gt_conf=10)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
confidence_threshold = 60
with pytest.raises(EmptyReportError):
calculator._calculate_precision_for_a_given_confidence(confidence_threshold)
    def test_calculatePrecision_OneReportWithOneRowCompletelyCorrectEqualConfThresholdReturnsOne(
self
):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[create_precision_report_row(1.0, gt_conf=60)], columns=columns
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
confidence_threshold = 60
actual = calculator._calculate_precision_for_a_given_confidence(confidence_threshold)
assert actual.precision == 1.0
assert actual.true_positives == 1.0
assert actual.total == 1.0
def test_calculatePrecision_OneReportWithTwoRowsPartiallyCorrect(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[
create_precision_report_row(0.5, gt_conf=100),
create_precision_report_row(0.7, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
actual = calculator._calculate_precision_for_a_given_confidence()
assert actual.precision == 1.2/2
assert actual.true_positives == 1.2
assert actual.total == 2.0
def test_calculatePrecision_OneReportWithThreeRowsTwoPartiallyCorrectOneBelowThreshold(
self
):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
df = pd.DataFrame(
data=[
create_precision_report_row(0.4, gt_conf=100),
create_precision_report_row(0.8, gt_conf=20),
create_precision_report_row(0.3, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([df])
calculator = PrecisionCalculator(report)
confidence_threshold = 80
actual = calculator._calculate_precision_for_a_given_confidence(confidence_threshold)
assert actual.precision == 0.7/2.0
assert actual.true_positives == 0.7
assert actual.total == 2.0
class TestRecallCalculator:
@patch.object(Report, Report.get_classifications_as_list.__name__, return_value=[
"unmapped", "partially_mapped", "primary_correct", "primary_incorrect",
"secondary_correct", "secondary_incorrect", "supplementary_correct",
"supplementary_incorrect"
])
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
@patch.object(RecallReport, RecallReport.get_number_of_truth_probes.__name__, return_value=8)
def test____calculate_info_wrt_truth_probes___one_classification_of_each(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
true_positives, number_of_truth_probes = RecallCalculator._calculate_info_wrt_truth_probes(report)
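        # Exactly 3 of the 8 mocked classifications end in "_correct", hence 3 true positives.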
assert true_positives==3 and number_of_truth_probes==8
@patch.object(Report, Report.get_classifications_as_list.__name__, return_value=[
"unmapped", "partially_mapped", "primary_correct", "primary_incorrect",
"secondary_correct", "secondary_incorrect", "supplementary_correct",
"supplementary_incorrect", "partially_mapped", "partially_mapped",
"primary_correct", "primary_correct", "primary_correct",
"supplementary_incorrect", "supplementary_incorrect", "supplementary_incorrect",
"unmapped", "unmapped", "unmapped",
])
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
@patch.object(RecallReport, RecallReport.get_number_of_truth_probes.__name__, return_value=19)
def test____calculate_info_wrt_truth_probes___some_duplicated_classifications(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
true_positives, number_of_truth_probes = RecallCalculator._calculate_info_wrt_truth_probes(report)
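        # 6 of the 19 mocked classifications end in "_correct" (3 distinct plus 3 duplicated "primary_correct").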
assert true_positives == 6 and number_of_truth_probes == 19
@patch.object(RecallReport, RecallReport.get_proportion_of_allele_seqs_found_for_each_variant.__name__,
return_value=[1.0, 0.5, 0.8, 1.0, 0.9, 1.0, 0.0, 0.1, 1.0])
@patch.object(RecallReport, RecallReport.get_proportion_of_alleles_found_for_each_variant.__name__,
return_value=[0.0, 0.1, 0.2, 0.3, 1.0, 0.9, 0.8, 0.7, 0.6])
@patch.object(RecallReport, RecallReport.get_number_of_variants.__name__, return_value=20)
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
def test____calculate_info_wrt_variants(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
nb_variants_where_all_allele_seqs_were_found, nb_variants_found_wrt_alleles, variants_total = \
RecallCalculator._calculate_info_wrt_variants(report)
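        # Expected values are the sums of the mocked per-variant proportions: 6.3 (allele seqs) and 4.6 (alleles), over 20 variants.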
assert nb_variants_where_all_allele_seqs_were_found == 6.3 and \
nb_variants_found_wrt_alleles == 4.6 and \
variants_total == 20
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
@patch.object(Report, Report.get_report_satisfying_confidence_threshold.__name__)
@patch.object(RecallCalculator, RecallCalculator._calculate_info_wrt_truth_probes.__name__, return_value=(5, 10))
@patch.object(RecallCalculator, RecallCalculator._calculate_info_wrt_variants.__name__, return_value=(4, 8, 10))
def test____calculate_recall_for_a_given_confidence(self, calculate_info_wrt_variants_mock,
calculate_info_wrt_truth_probes_mock,
get_report_satisfying_confidence_threshold_mock,
*other_mocks):
# setup
report_satisfying_confidence_threshold_mock = Mock()
get_report_satisfying_confidence_threshold_mock.return_value = report_satisfying_confidence_threshold_mock
report = RecallReport([pd.DataFrame()], False)
calculator = RecallCalculator(report)
recall_info_actual = calculator._calculate_recall_for_a_given_confidence(100)
get_report_satisfying_confidence_threshold_mock.assert_called_once_with(100)
calculate_info_wrt_truth_probes_mock.assert_called_once_with(report_satisfying_confidence_threshold_mock)
calculate_info_wrt_variants_mock.assert_called_once_with(report_satisfying_confidence_threshold_mock)
assert recall_info_actual.truth_probes_true_positives == 5
assert recall_info_actual.truth_probes_total == 10
assert recall_info_actual.nb_variants_where_all_allele_seqs_were_found == 4
assert recall_info_actual.nb_variants_found_wrt_alleles == 8
assert recall_info_actual.variants_total == 10
assert recall_info_actual.recall_wrt_truth_probes == 0.5
assert recall_info_actual.recall_wrt_variants_where_all_allele_seqs_were_found == 0.4
assert recall_info_actual.recall_wrt_variants_found_wrt_alleles == 0.8
@patch.object(RecallReport, RecallReport.get_proportion_of_allele_seqs_found_for_each_variant_with_nb_of_samples.__name__,
return_value=
pd.read_csv(StringIO(
"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES
0,1,3
1,0,5
2,0,7
3,1,5
4,0,5
5,1,3
6,0,5
"""
), index_col="PVID"))
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
def test___get_recall_allele_seqs_vs_nb_of_samples_report(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
calculator = RecallCalculator(report)
actual = calculator.get_recall_allele_seqs_vs_nb_of_samples_report(list(range(2, 8)))
expected = pd.read_csv(StringIO(
"""NB_OF_SAMPLES,recall_PVR
2,0.0
3,1.0
4,0.0
5,0.25
6,0.0
7,0.0
"""
))
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport.get_proportion_of_alleles_found_for_each_variant_with_nb_of_samples.__name__,
return_value=
pd.read_csv(StringIO(
"""PVID,proportion_of_alleles_found,NB_OF_SAMPLES
0,1.0,3
1,0.8,5
2,0.6,7
3,1.0,5
4,0.4,5
5,0.9,3
6,0.0,5
"""
), index_col="PVID"))
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
def test___get_recall_alleles_vs_nb_of_samples_report(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
calculator = RecallCalculator(report)
actual = calculator.get_recall_alleles_vs_nb_of_samples_report(list(range(2, 8)))
expected = pd.read_csv(StringIO(
"""NB_OF_SAMPLES,recall_AvgAR
2,0.0
3,0.95
4,0.0
5,0.55
6,0.0
7,0.6
"""
))
assert actual.equals(expected)
@patch.object(RecallReport, RecallReport.get_proportion_of_allele_seqs_found_for_each_variant_with_nb_of_samples.__name__,
return_value=
pd.read_csv(StringIO(
"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES
0,1,3
1,0,5
2,0,7
3,1,5
4,0,5
5,1,3
6,0,5
"""
), index_col="PVID"))
@patch.object(RecallReport, RecallReport._create_helper_columns.__name__)
@patch.object(RecallReport, RecallReport.assure_there_are_no_duplicated_evaluation.__name__)
def test___get_recall_allele_seqs_vs_nb_of_samples_report___return_only_the_samples_given_in_parameter(self, *mocks):
report = RecallReport([pd.DataFrame()], False)
calculator = RecallCalculator(report)
actual = calculator.get_recall_allele_seqs_vs_nb_of_samples_report([2, 5])
expected = pd.read_csv(StringIO(
"""NB_OF_SAMPLES,recall_PVR
2,0.0
5,0.25
"""
))
assert actual.equals(expected)
|
[
"evaluate.calculator.RecallCalculator",
"evaluate.calculator.RecallCalculator._calculate_info_wrt_truth_probes",
"evaluate.calculator.RecallCalculator._calculate_info_wrt_variants",
"evaluate.report.PrecisionReport",
"evaluate.calculator.PrecisionCalculator"
] |
[((4740, 5000), 'unittest.mock.patch.object', 'patch.object', (['Report', 'Report.get_classifications_as_list.__name__'], {'return_value': "['unmapped', 'partially_mapped', 'primary_correct', 'primary_incorrect',\n 'secondary_correct', 'secondary_incorrect', 'supplementary_correct',\n 'supplementary_incorrect']"}), "(Report, Report.get_classifications_as_list.__name__,\n return_value=['unmapped', 'partially_mapped', 'primary_correct',\n 'primary_incorrect', 'secondary_correct', 'secondary_incorrect',\n 'supplementary_correct', 'supplementary_incorrect'])\n", (4752, 5000), False, 'from unittest.mock import patch, Mock\n'), ((5024, 5096), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (5036, 5096), False, 'from unittest.mock import patch, Mock\n'), ((5102, 5198), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (5114, 5198), False, 'from unittest.mock import patch, Mock\n'), ((5199, 5295), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.get_number_of_truth_probes.__name__'], {'return_value': '(8)'}), '(RecallReport, RecallReport.get_number_of_truth_probes.__name__,\n return_value=8)\n', (5211, 5295), False, 'from unittest.mock import patch, Mock\n'), ((5615, 6101), 'unittest.mock.patch.object', 'patch.object', (['Report', 'Report.get_classifications_as_list.__name__'], {'return_value': "['unmapped', 'partially_mapped', 'primary_correct', 'primary_incorrect',\n 'secondary_correct', 'secondary_incorrect', 'supplementary_correct',\n 'supplementary_incorrect', 'partially_mapped', 'partially_mapped',\n 'primary_correct', 'primary_correct', 'primary_correct',\n 'supplementary_incorrect', 'supplementary_incorrect',\n 'supplementary_incorrect', 'unmapped', 'unmapped', 'unmapped']"}), "(Report, Report.get_classifications_as_list.__name__,\n return_value=['unmapped', 'partially_mapped', 'primary_correct',\n 'primary_incorrect', 'secondary_correct', 'secondary_incorrect',\n 'supplementary_correct', 'supplementary_incorrect', 'partially_mapped',\n 'partially_mapped', 'primary_correct', 'primary_correct',\n 'primary_correct', 'supplementary_incorrect', 'supplementary_incorrect',\n 'supplementary_incorrect', 'unmapped', 'unmapped', 'unmapped'])\n", (5627, 6101), False, 'from unittest.mock import patch, Mock\n'), ((6138, 6210), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (6150, 6210), False, 'from unittest.mock import patch, Mock\n'), ((6216, 6312), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (6228, 6312), False, 'from unittest.mock import patch, Mock\n'), ((6313, 6410), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.get_number_of_truth_probes.__name__'], {'return_value': '(19)'}), '(RecallReport, RecallReport.get_number_of_truth_probes.__name__,\n return_value=19)\n', (6325, 6410), False, 'from unittest.mock import patch, Mock\n'), ((6741, 6912), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 
'RecallReport.get_proportion_of_allele_seqs_found_for_each_variant.__name__'], {'return_value': '[1.0, 0.5, 0.8, 1.0, 0.9, 1.0, 0.0, 0.1, 1.0]'}), '(RecallReport, RecallReport.\n get_proportion_of_allele_seqs_found_for_each_variant.__name__,\n return_value=[1.0, 0.5, 0.8, 1.0, 0.9, 1.0, 0.0, 0.1, 1.0])\n', (6753, 6912), False, 'from unittest.mock import patch, Mock\n'), ((6927, 7095), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.get_proportion_of_alleles_found_for_each_variant.__name__'], {'return_value': '[0.0, 0.1, 0.2, 0.3, 1.0, 0.9, 0.8, 0.7, 0.6]'}), '(RecallReport, RecallReport.\n get_proportion_of_alleles_found_for_each_variant.__name__, return_value\n =[0.0, 0.1, 0.2, 0.3, 1.0, 0.9, 0.8, 0.7, 0.6])\n', (6939, 7095), False, 'from unittest.mock import patch, Mock\n'), ((7109, 7202), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.get_number_of_variants.__name__'], {'return_value': '(20)'}), '(RecallReport, RecallReport.get_number_of_variants.__name__,\n return_value=20)\n', (7121, 7202), False, 'from unittest.mock import patch, Mock\n'), ((7204, 7276), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (7216, 7276), False, 'from unittest.mock import patch, Mock\n'), ((7282, 7378), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (7294, 7378), False, 'from unittest.mock import patch, Mock\n'), ((7831, 7903), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (7843, 7903), False, 'from unittest.mock import patch, Mock\n'), ((7909, 8005), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (7921, 8005), False, 'from unittest.mock import patch, Mock\n'), ((8006, 8091), 'unittest.mock.patch.object', 'patch.object', (['Report', 'Report.get_report_satisfying_confidence_threshold.__name__'], {}), '(Report, Report.get_report_satisfying_confidence_threshold.__name__\n )\n', (8018, 8091), False, 'from unittest.mock import patch, Mock\n'), ((8092, 8209), 'unittest.mock.patch.object', 'patch.object', (['RecallCalculator', 'RecallCalculator._calculate_info_wrt_truth_probes.__name__'], {'return_value': '(5, 10)'}), '(RecallCalculator, RecallCalculator.\n _calculate_info_wrt_truth_probes.__name__, return_value=(5, 10))\n', (8104, 8209), False, 'from unittest.mock import patch, Mock\n'), ((8210, 8326), 'unittest.mock.patch.object', 'patch.object', (['RecallCalculator', 'RecallCalculator._calculate_info_wrt_variants.__name__'], {'return_value': '(4, 8, 10)'}), '(RecallCalculator, RecallCalculator.\n _calculate_info_wrt_variants.__name__, return_value=(4, 8, 10))\n', (8222, 8326), False, 'from unittest.mock import patch, Mock\n'), ((10348, 10420), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (10360, 10420), False, 'from unittest.mock import patch, Mock\n'), ((10426, 10522), 
'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (10438, 10522), False, 'from unittest.mock import patch, Mock\n'), ((11443, 11515), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (11455, 11515), False, 'from unittest.mock import patch, Mock\n'), ((11521, 11617), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (11533, 11617), False, 'from unittest.mock import patch, Mock\n'), ((12534, 12606), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport._create_helper_columns.__name__'], {}), '(RecallReport, RecallReport._create_helper_columns.__name__)\n', (12546, 12606), False, 'from unittest.mock import patch, Mock\n'), ((12612, 12708), 'unittest.mock.patch.object', 'patch.object', (['RecallReport', 'RecallReport.assure_there_are_no_duplicated_evaluation.__name__'], {}), '(RecallReport, RecallReport.\n assure_there_are_no_duplicated_evaluation.__name__)\n', (12624, 12708), False, 'from unittest.mock import patch, Mock\n'), ((541, 570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (553, 570), True, 'import pandas as pd\n'), ((588, 609), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (603, 609), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((631, 658), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), '(report)\n', (650, 658), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((1087, 1108), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (1102, 1108), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((1130, 1157), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), '(report)\n', (1149, 1157), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((1667, 1688), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (1682, 1688), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((1710, 1737), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), '(report)\n', (1729, 1737), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((2288, 2309), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (2303, 2309), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((2331, 2358), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), '(report)\n', (2350, 2358), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((2874, 2895), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (2889, 2895), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((2917, 2944), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), 
'(report)\n', (2936, 2944), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((3603, 3624), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (3618, 3624), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((3646, 3673), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), '(report)\n', (3665, 3673), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((4378, 4399), 'evaluate.report.PrecisionReport', 'PrecisionReport', (['[df]'], {}), '([df])\n', (4393, 4399), False, 'from evaluate.report import Report, PrecisionReport, RecallReport\n'), ((4421, 4448), 'evaluate.calculator.PrecisionCalculator', 'PrecisionCalculator', (['report'], {}), '(report)\n', (4440, 4448), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((5488, 5545), 'evaluate.calculator.RecallCalculator._calculate_info_wrt_truth_probes', 'RecallCalculator._calculate_info_wrt_truth_probes', (['report'], {}), '(report)\n', (5537, 5545), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((6608, 6665), 'evaluate.calculator.RecallCalculator._calculate_info_wrt_truth_probes', 'RecallCalculator._calculate_info_wrt_truth_probes', (['report'], {}), '(report)\n', (6657, 6665), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((7604, 7657), 'evaluate.calculator.RecallCalculator._calculate_info_wrt_variants', 'RecallCalculator._calculate_info_wrt_variants', (['report'], {}), '(report)\n', (7649, 7657), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((8758, 8764), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (8762, 8764), False, 'from unittest.mock import patch, Mock\n'), ((8956, 8980), 'evaluate.calculator.RecallCalculator', 'RecallCalculator', (['report'], {}), '(report)\n', (8972, 8980), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((10671, 10695), 'evaluate.calculator.RecallCalculator', 'RecallCalculator', (['report'], {}), '(report)\n', (10687, 10695), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((11762, 11786), 'evaluate.calculator.RecallCalculator', 'RecallCalculator', (['report'], {}), '(report)\n', (11778, 11786), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((12902, 12926), 'evaluate.calculator.RecallCalculator', 'RecallCalculator', (['report'], {}), '(report)\n', (12918, 12926), False, 'from evaluate.calculator import RecallCalculator, PrecisionCalculator, EmptyReportError\n'), ((673, 704), 'pytest.raises', 'pytest.raises', (['EmptyReportError'], {}), '(EmptyReportError)\n', (686, 704), False, 'import pytest\n'), ((2408, 2439), 'pytest.raises', 'pytest.raises', (['EmptyReportError'], {}), '(EmptyReportError)\n', (2421, 2439), False, 'import pytest\n'), ((10821, 10993), 'io.StringIO', 'StringIO', (['"""NB_OF_SAMPLES,recall_PVR\n 2,0.0\n 3,1.0\n 4,0.0\n 5,0.25\n 6,0.0\n 7,0.0\n """'], {}), '(\n """NB_OF_SAMPLES,recall_PVR\n 2,0.0\n 3,1.0\n 4,0.0\n 5,0.25\n 6,0.0\n 7,0.0\n """\n )\n', (10829, 10993), False, 'from io import StringIO\n'), ((11908, 12083), 'io.StringIO', 'StringIO', (['"""NB_OF_SAMPLES,recall_AvgAR\n 2,0.0\n 3,0.95\n 4,0.0\n 5,0.55\n 6,0.0\n 7,0.6\n """'], 
{}), '(\n """NB_OF_SAMPLES,recall_AvgAR\n 2,0.0\n 3,0.95\n 4,0.0\n 5,0.55\n 6,0.0\n 7,0.6\n """\n )\n', (11916, 12083), False, 'from io import StringIO\n'), ((13041, 13141), 'io.StringIO', 'StringIO', (['"""NB_OF_SAMPLES,recall_PVR\n 2,0.0\n 5,0.25\n """'], {}), '(\n """NB_OF_SAMPLES,recall_PVR\n 2,0.0\n 5,0.25\n """\n )\n', (13049, 13141), False, 'from io import StringIO\n'), ((5415, 5429), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5427, 5429), True, 'import pandas as pd\n'), ((6535, 6549), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6547, 6549), True, 'import pandas as pd\n'), ((7464, 7478), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7476, 7478), True, 'import pandas as pd\n'), ((8911, 8925), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8923, 8925), True, 'import pandas as pd\n'), ((10626, 10640), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10638, 10640), True, 'import pandas as pd\n'), ((10128, 10318), 'io.StringIO', 'StringIO', (['"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES\n 0,1,3\n 1,0,5\n 2,0,7\n 3,1,5\n 4,0,5\n 5,1,3\n 6,0,5\n """'], {}), '(\n """PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES\n 0,1,3\n 1,0,5\n 2,0,7\n 3,1,5\n 4,0,5\n 5,1,3\n 6,0,5\n """\n )\n', (10136, 10318), False, 'from io import StringIO\n'), ((11717, 11731), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11729, 11731), True, 'import pandas as pd\n'), ((11220, 11413), 'io.StringIO', 'StringIO', (['"""PVID,proportion_of_alleles_found,NB_OF_SAMPLES\n 0,1.0,3\n 1,0.8,5\n 2,0.6,7\n 3,1.0,5\n 4,0.4,5\n 5,0.9,3\n 6,0.0,5\n """'], {}), '(\n """PVID,proportion_of_alleles_found,NB_OF_SAMPLES\n 0,1.0,3\n 1,0.8,5\n 2,0.6,7\n 3,1.0,5\n 4,0.4,5\n 5,0.9,3\n 6,0.0,5\n """\n )\n', (11228, 11413), False, 'from io import StringIO\n'), ((12857, 12871), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12869, 12871), True, 'import pandas as pd\n'), ((12314, 12504), 'io.StringIO', 'StringIO', (['"""PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES\n 0,1,3\n 1,0,5\n 2,0,7\n 3,1,5\n 4,0,5\n 5,1,3\n 6,0,5\n """'], {}), '(\n """PVID,proportion_of_allele_seqs_found_binary,NB_OF_SAMPLES\n 0,1,3\n 1,0,5\n 2,0,7\n 3,1,5\n 4,0,5\n 5,1,3\n 6,0,5\n """\n )\n', (12322, 12504), False, 'from io import StringIO\n'), ((996, 1041), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(1.0)'], {'gt_conf': '(100)'}), '(1.0, gt_conf=100)\n', (1023, 1041), False, 'from tests.common import create_precision_report_row\n'), ((1576, 1621), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.0)'], {'gt_conf': '(100)'}), '(0.0, gt_conf=100)\n', (1603, 1621), False, 'from tests.common import create_precision_report_row\n'), ((2198, 2242), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(1.0)'], {'gt_conf': '(10)'}), '(1.0, gt_conf=10)\n', (2225, 2242), False, 'from tests.common import create_precision_report_row\n'), ((2784, 2828), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(1.0)'], {'gt_conf': '(60)'}), '(1.0, gt_conf=60)\n', (2811, 2828), False, 'from tests.common import create_precision_report_row\n'), ((3422, 3467), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.5)'], {'gt_conf': '(100)'}), '(0.5, gt_conf=100)\n', (3449, 3467), False, 'from tests.common import create_precision_report_row\n'), ((3485, 3530), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.7)'], 
{'gt_conf': '(100)'}), '(0.7, gt_conf=100)\n', (3512, 3530), False, 'from tests.common import create_precision_report_row\n'), ((4135, 4180), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.4)'], {'gt_conf': '(100)'}), '(0.4, gt_conf=100)\n', (4162, 4180), False, 'from tests.common import create_precision_report_row\n'), ((4198, 4242), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.8)'], {'gt_conf': '(20)'}), '(0.8, gt_conf=20)\n', (4225, 4242), False, 'from tests.common import create_precision_report_row\n'), ((4260, 4305), 'tests.common.create_precision_report_row', 'create_precision_report_row', (['(0.3)'], {'gt_conf': '(100)'}), '(0.3, gt_conf=100)\n', (4287, 4305), False, 'from tests.common import create_precision_report_row\n')]
|
from data import build_corpus
from evaluate import Metrics
from models.hmm import HMMModel
from models.crf import CRFModel
from models.lstm import BiLSTM
from models.BiLSTM_CRF import BiLSTM_CRF
from utils import *
def main():
    print('Reading data...')
train_word_lists, train_tag_lists, word2id, tag2id = build_corpus('train')
    dev_word_lists, dev_tag_lists = build_corpus('dev', maek_vocab=False)
    test_word_lists, test_tag_lists = build_corpus('test', maek_vocab=False)
    print('Training HMM model...')
hmm_model = HMMModel(len(tag2id), len(word2id))
hmm_model.train(train_word_lists, train_tag_lists, word2id, tag2id)
pred_tag_lists = hmm_model.test(test_word_lists, word2id, tag2id)
metrics = Metrics(test_tag_lists, pred_tag_lists)
metrics.report_scores()
    print('Training CRF model...')
    crf_model = CRFModel(max_iterations=90)
crf_model.train(train_word_lists, train_tag_lists)
pred_tag_lists = crf_model.test(test_word_lists)
metrics = Metrics(test_tag_lists, pred_tag_lists)
metrics.report_scores()
    print('Training BiLSTM model...')
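    # extend_maps is assumed to add the special tokens (e.g. <PAD>/<UNK>) that the neural models require.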
word2id, tag2id = extend_maps(word2id, tag2id)
bilstm = BiLSTM(len(word2id), len(tag2id))
bilstm.train(train_word_lists, train_tag_lists, dev_word_lists, dev_tag_lists, word2id, tag2id, 0.8)
bilstm.dev_test(test_word_lists, test_tag_lists, word2id, tag2id)
bilstm.close_sess()
    print('Training BiLSTM-CRF model...')
bilstm_crf = BiLSTM_CRF(len(word2id), len(tag2id))
bilstm_crf.train(train_word_lists, train_tag_lists, dev_word_lists, dev_tag_lists, word2id, tag2id, 0.8)
bilstm_crf.dev_test(test_word_lists, test_tag_lists, word2id, tag2id)
bilstm_crf.close_sess()
if __name__ == "__main__":
main()
|
[
"evaluate.Metrics"
] |
[((320, 341), 'data.build_corpus', 'build_corpus', (['"""train"""'], {}), "('train')\n", (332, 341), False, 'from data import build_corpus\n'), ((379, 416), 'data.build_corpus', 'build_corpus', (['"""dev"""'], {'maek_vocab': '(False)'}), "('dev', maek_vocab=False)\n", (391, 416), False, 'from data import build_corpus\n'), ((458, 496), 'data.build_corpus', 'build_corpus', (['"""test"""'], {'maek_vocab': '(False)'}), "('test', maek_vocab=False)\n", (470, 496), False, 'from data import build_corpus\n'), ((740, 779), 'evaluate.Metrics', 'Metrics', (['test_tag_lists', 'pred_tag_lists'], {}), '(test_tag_lists, pred_tag_lists)\n', (747, 779), False, 'from evaluate import Metrics\n'), ((853, 880), 'models.crf.CRFModel', 'CRFModel', ([], {'max_iterations': '(90)'}), '(max_iterations=90)\n', (861, 880), False, 'from models.crf import CRFModel\n'), ((1010, 1049), 'evaluate.Metrics', 'Metrics', (['test_tag_lists', 'pred_tag_lists'], {}), '(test_tag_lists, pred_tag_lists)\n', (1017, 1049), False, 'from evaluate import Metrics\n')]
|