code | apis | extract_api
---|---|---|
from absl import app, flags, logging
from absl.flags import FLAGS
import os
import tensorflow as tf
from modules.models import RRDB_Model, RRDB_Model_16x, RFB_Model_16x
from modules.lr_scheduler import MultiStepLR
from modules.losses import PixelLoss, PixelLossDown
from modules.utils import (load_yaml, load_dataset, load_val_dataset, ProgressBar,
set_memory_growth)
from evaluate import evaluate_dataset
flags.DEFINE_string('cfg_path', './configs/psnr.yaml', 'config file path')
flags.DEFINE_string('gpu', '0', 'which gpu to use')
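# Editor's note: `MultiStepLR` imported above comes from this repo's
# modules.lr_scheduler. As a sketch of the behaviour such a schedule typically
# implements (an assumption, not this project's code), tf.keras offers an
# equivalent piecewise-constant decay:
#   schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
#       boundaries=[50000, 100000],    # hypothetical lr_steps
#       values=[2e-4, 2e-5, 2e-6])     # base lr scaled by lr_rate at each step
#   schedule(60000)  # -> 2e-5, i.e. the base rate decayed once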
def main(_):
# init
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
logger = tf.get_logger()
logger.disabled = True
logger.setLevel(logging.FATAL)
set_memory_growth()
cfg = load_yaml(FLAGS.cfg_path)
# define network
    if cfg['network_G']['name'] == 'RRDB':  # ESRGAN 4x
        model = RRDB_Model(None, cfg['ch_size'], cfg['network_G'])
    elif cfg['network_G']['name'] == 'RRDB_CIPLAB':
        model = RRDB_Model_16x(None, cfg['ch_size'], cfg['network_G'])
    elif cfg['network_G']['name'] == 'RFB_ESRGAN':
        model = RFB_Model_16x(None, cfg['ch_size'], cfg['network_G'])
    else:
        raise NotImplementedError(
            'network {} is not supported'.format(cfg['network_G']['name']))
    model.summary(line_length=80)
# load dataset
train_dataset = load_dataset(cfg, 'train_dataset', shuffle=True)
set5_dataset = load_val_dataset(cfg, 'set5')
set14_dataset = load_val_dataset(cfg, 'set14')
if 'DIV8K' in cfg['test_dataset']:
DIV8K_val = load_val_dataset(cfg, 'DIV8K', crop_centor=cfg['test_dataset']['DIV8K_crop_centor'])
# define optimizer
learning_rate = MultiStepLR(cfg['lr'], cfg['lr_steps'], cfg['lr_rate'])
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
beta_1=cfg['adam_beta1_G'],
beta_2=cfg['adam_beta2_G'])
# define losses function
if cfg['cycle_mse']:
pixel_loss_fn = PixelLossDown(criterion=cfg['pixel_criterion'], scale=cfg['scale'])
else:
pixel_loss_fn = PixelLoss(criterion=cfg['pixel_criterion'])
# load checkpoint
checkpoint_dir = cfg['log_dir'] + '/checkpoints'
checkpoint = tf.train.Checkpoint(step=tf.Variable(0, name='step'),
optimizer=optimizer,
model=model)
manager = tf.train.CheckpointManager(checkpoint=checkpoint,
directory=checkpoint_dir,
max_to_keep=3)
if manager.latest_checkpoint:
checkpoint.restore(manager.latest_checkpoint)
print('[*] load ckpt from {} at step {}.'.format(
manager.latest_checkpoint, checkpoint.step.numpy()))
else:
print("[*] training from scratch.")
# define training step function
@tf.function
def train_step(lr, hr):
with tf.GradientTape() as tape:
sr = model(lr, training=True)
losses = {}
losses['reg'] = tf.reduce_sum(model.losses)
losses['pixel'] = cfg['w_pixel'] * pixel_loss_fn(hr, sr)
            total_loss = tf.add_n(list(losses.values()))
grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return total_loss, losses
# training loop
summary_writer = tf.summary.create_file_writer(cfg['log_dir']+'/logs')
prog_bar = ProgressBar(cfg['niter'], checkpoint.step.numpy())
remain_steps = max(cfg['niter'] - checkpoint.step.numpy(), 0)
for _ in range(remain_steps):
lr, hr = train_dataset()
checkpoint.step.assign_add(1)
steps = checkpoint.step.numpy()
total_loss, losses = train_step(lr, hr)
prog_bar.update("loss={:.4f}, lr={:.1e}".format(
total_loss.numpy(), optimizer.lr(steps).numpy()))
if steps % 10 == 0:
with summary_writer.as_default():
tf.summary.scalar(
'loss/total_loss', total_loss, step=steps)
for k, l in losses.items():
tf.summary.scalar('loss/{}'.format(k), l, step=steps)
tf.summary.scalar(
'learning_rate', optimizer.lr(steps), step=steps)
if steps % cfg['save_steps'] == 0:
manager.save()
print("\n[*] save ckpt file at {}".format(
manager.latest_checkpoint))
# log results on test data
set5_logs = evaluate_dataset(set5_dataset, model, cfg)
set14_logs = evaluate_dataset(set14_dataset, model, cfg)
if 'DIV8K' in cfg['test_dataset']:
DIV8K_logs = evaluate_dataset(DIV8K_val, model, cfg)
with summary_writer.as_default():
if cfg['logging']['psnr']:
tf.summary.scalar('set5/psnr', set5_logs['psnr'], step=steps)
tf.summary.scalar('set14/psnr', set14_logs['psnr'], step=steps)
if 'DIV8K' in cfg['test_dataset']:
tf.summary.scalar('DIV8K/psnr', DIV8K_logs['psnr'], step=steps)
if cfg['logging']['ssim']:
tf.summary.scalar('set5/ssim', set5_logs['ssim'], step=steps)
tf.summary.scalar('set14/ssim', set14_logs['ssim'], step=steps)
if 'DIV8K' in cfg['test_dataset']:
                        tf.summary.scalar('DIV8K/ssim', DIV8K_logs['ssim'], step=steps)
if cfg['logging']['lpips']:
tf.summary.scalar('set5/lpips', set5_logs['lpips'], step=steps)
tf.summary.scalar('set14/lpips', set14_logs['lpips'], step=steps)
if 'DIV8K' in cfg['test_dataset']:
tf.summary.scalar('DIV8K/lpips', DIV8K_logs['lpips'], step=steps)
if cfg['logging']['plot_samples']:
tf.summary.image("set5/samples", [set5_logs['samples']], step=steps)
tf.summary.image("set14/samples", [set14_logs['samples']], step=steps)
if 'DIV8K' in cfg['test_dataset']:
tf.summary.image("DIV8K/samples", [DIV8K_logs['samples']], step=steps)
print("\n[*] training done!")
if __name__ == '__main__':
app.run(main)
|
[
"evaluate.evaluate_dataset"
] |
[((435, 509), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""cfg_path"""', '"""./configs/psnr.yaml"""', '"""config file path"""'], {}), "('cfg_path', './configs/psnr.yaml', 'config file path')\n", (454, 509), False, 'from absl import app, flags, logging\n'), ((510, 561), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""gpu"""', '"""0"""', '"""which gpu to use"""'], {}), "('gpu', '0', 'which gpu to use')\n", (529, 561), False, 'from absl import app, flags, logging\n'), ((698, 713), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (711, 713), True, 'import tensorflow as tf\n'), ((780, 799), 'modules.utils.set_memory_growth', 'set_memory_growth', ([], {}), '()\n', (797, 799), False, 'from modules.utils import load_yaml, load_dataset, load_val_dataset, ProgressBar, set_memory_growth\n'), ((811, 836), 'modules.utils.load_yaml', 'load_yaml', (['FLAGS.cfg_path'], {}), '(FLAGS.cfg_path)\n', (820, 836), False, 'from modules.utils import load_yaml, load_dataset, load_val_dataset, ProgressBar, set_memory_growth\n'), ((1296, 1344), 'modules.utils.load_dataset', 'load_dataset', (['cfg', '"""train_dataset"""'], {'shuffle': '(True)'}), "(cfg, 'train_dataset', shuffle=True)\n", (1308, 1344), False, 'from modules.utils import load_yaml, load_dataset, load_val_dataset, ProgressBar, set_memory_growth\n'), ((1364, 1393), 'modules.utils.load_val_dataset', 'load_val_dataset', (['cfg', '"""set5"""'], {}), "(cfg, 'set5')\n", (1380, 1393), False, 'from modules.utils import load_yaml, load_dataset, load_val_dataset, ProgressBar, set_memory_growth\n'), ((1414, 1444), 'modules.utils.load_val_dataset', 'load_val_dataset', (['cfg', '"""set14"""'], {}), "(cfg, 'set14')\n", (1430, 1444), False, 'from modules.utils import load_yaml, load_dataset, load_val_dataset, ProgressBar, set_memory_growth\n'), ((1632, 1687), 'modules.lr_scheduler.MultiStepLR', 'MultiStepLR', (["cfg['lr']", "cfg['lr_steps']", "cfg['lr_rate']"], {}), "(cfg['lr'], cfg['lr_steps'], cfg['lr_rate'])\n", (1643, 1687), False, 'from modules.lr_scheduler import MultiStepLR\n'), ((1704, 1818), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate', 'beta_1': "cfg['adam_beta1_G']", 'beta_2': "cfg['adam_beta2_G']"}), "(learning_rate=learning_rate, beta_1=cfg[\n 'adam_beta1_G'], beta_2=cfg['adam_beta2_G'])\n", (1728, 1818), True, 'import tensorflow as tf\n'), ((2389, 2483), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', ([], {'checkpoint': 'checkpoint', 'directory': 'checkpoint_dir', 'max_to_keep': '(3)'}), '(checkpoint=checkpoint, directory=checkpoint_dir,\n max_to_keep=3)\n', (2415, 2483), True, 'import tensorflow as tf\n'), ((3425, 3480), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (["(cfg['log_dir'] + '/logs')"], {}), "(cfg['log_dir'] + '/logs')\n", (3454, 3480), True, 'import tensorflow as tf\n'), ((6347, 6360), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (6354, 6360), False, 'from absl import app, flags, logging\n'), ((931, 981), 'modules.models.RRDB_Model', 'RRDB_Model', (['None', "cfg['ch_size']", "cfg['network_G']"], {}), "(None, cfg['ch_size'], cfg['network_G'])\n", (941, 981), False, 'from modules.models import RRDB_Model, RRDB_Model_16x, RFB_Model_16x\n'), ((1504, 1593), 'modules.utils.load_val_dataset', 'load_val_dataset', (['cfg', '"""DIV8K"""'], {'crop_centor': "cfg['test_dataset']['DIV8K_crop_centor']"}), "(cfg, 'DIV8K', crop_centor=cfg['test_dataset'][\n 'DIV8K_crop_centor'])\n", (1520, 1593), False, 'from 
modules.utils import load_yaml, load_dataset, load_val_dataset, ProgressBar, set_memory_growth\n'), ((1975, 2042), 'modules.losses.PixelLossDown', 'PixelLossDown', ([], {'criterion': "cfg['pixel_criterion']", 'scale': "cfg['scale']"}), "(criterion=cfg['pixel_criterion'], scale=cfg['scale'])\n", (1988, 2042), False, 'from modules.losses import PixelLoss, PixelLossDown\n'), ((2077, 2120), 'modules.losses.PixelLoss', 'PixelLoss', ([], {'criterion': "cfg['pixel_criterion']"}), "(criterion=cfg['pixel_criterion'])\n", (2086, 2120), False, 'from modules.losses import PixelLoss, PixelLossDown\n'), ((1048, 1102), 'modules.models.RRDB_Model_16x', 'RRDB_Model_16x', (['None', "cfg['ch_size']", "cfg['network_G']"], {}), "(None, cfg['ch_size'], cfg['network_G'])\n", (1062, 1102), False, 'from modules.models import RRDB_Model, RRDB_Model_16x, RFB_Model_16x\n'), ((2238, 2265), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""step"""'}), "(0, name='step')\n", (2249, 2265), True, 'import tensorflow as tf\n'), ((2922, 2939), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2937, 2939), True, 'import tensorflow as tf\n'), ((3044, 3071), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['model.losses'], {}), '(model.losses)\n', (3057, 3071), True, 'import tensorflow as tf\n'), ((4557, 4599), 'evaluate.evaluate_dataset', 'evaluate_dataset', (['set5_dataset', 'model', 'cfg'], {}), '(set5_dataset, model, cfg)\n', (4573, 4599), False, 'from evaluate import evaluate_dataset\n'), ((4625, 4668), 'evaluate.evaluate_dataset', 'evaluate_dataset', (['set14_dataset', 'model', 'cfg'], {}), '(set14_dataset, model, cfg)\n', (4641, 4668), False, 'from evaluate import evaluate_dataset\n'), ((1168, 1221), 'modules.models.RFB_Model_16x', 'RFB_Model_16x', (['None', "cfg['ch_size']", "cfg['network_G']"], {}), "(None, cfg['ch_size'], cfg['network_G'])\n", (1181, 1221), False, 'from modules.models import RRDB_Model, RRDB_Model_16x, RFB_Model_16x\n'), ((4018, 4078), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/total_loss"""', 'total_loss'], {'step': 'steps'}), "('loss/total_loss', total_loss, step=steps)\n", (4035, 4078), True, 'import tensorflow as tf\n'), ((4745, 4784), 'evaluate.evaluate_dataset', 'evaluate_dataset', (['DIV8K_val', 'model', 'cfg'], {}), '(DIV8K_val, model, cfg)\n', (4761, 4784), False, 'from evaluate import evaluate_dataset\n'), ((4895, 4956), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""set5/psnr"""', "set5_logs['psnr']"], {'step': 'steps'}), "('set5/psnr', set5_logs['psnr'], step=steps)\n", (4912, 4956), True, 'import tensorflow as tf\n'), ((4977, 5040), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""set14/psnr"""', "set14_logs['psnr']"], {'step': 'steps'}), "('set14/psnr', set14_logs['psnr'], step=steps)\n", (4994, 5040), True, 'import tensorflow as tf\n'), ((5248, 5309), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""set5/ssim"""', "set5_logs['ssim']"], {'step': 'steps'}), "('set5/ssim', set5_logs['ssim'], step=steps)\n", (5265, 5309), True, 'import tensorflow as tf\n'), ((5330, 5393), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""set14/ssim"""', "set14_logs['ssim']"], {'step': 'steps'}), "('set14/ssim', set14_logs['ssim'], step=steps)\n", (5347, 5393), True, 'import tensorflow as tf\n'), ((5602, 5665), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""set5/lpips"""', "set5_logs['lpips']"], {'step': 'steps'}), "('set5/lpips', set5_logs['lpips'], step=steps)\n", (5619, 5665), True, 'import tensorflow as tf\n'), ((5686, 5751), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""set14/lpips"""', "set14_logs['lpips']"], {'step': 'steps'}), "('set14/lpips', set14_logs['lpips'], step=steps)\n", (5703, 5751), True, 'import tensorflow as tf\n'), ((5969, 6037), 'tensorflow.summary.image', 'tf.summary.image', (['"""set5/samples"""', "[set5_logs['samples']]"], {'step': 'steps'}), "('set5/samples', [set5_logs['samples']], step=steps)\n", (5985, 6037), True, 'import tensorflow as tf\n'), ((6058, 6128), 'tensorflow.summary.image', 'tf.summary.image', (['"""set14/samples"""', "[set14_logs['samples']]"], {'step': 'steps'}), "('set14/samples', [set14_logs['samples']], step=steps)\n", (6074, 6128), True, 'import tensorflow as tf\n'), ((5120, 5183), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""DIV8K/psnr"""', "DIV8K_logs['psnr']"], {'step': 'steps'}), "('DIV8K/psnr', DIV8K_logs['psnr'], step=steps)\n", (5137, 5183), True, 'import tensorflow as tf\n'), ((5473, 5536), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""DIV8K/psnr"""', "DIV8K_logs['psnr']"], {'step': 'steps'}), "('DIV8K/psnr', DIV8K_logs['psnr'], step=steps)\n", (5490, 5536), True, 'import tensorflow as tf\n'), ((5831, 5896), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""DIV8K/lpips"""', "DIV8K_logs['lpips']"], {'step': 'steps'}), "('DIV8K/lpips', DIV8K_logs['lpips'], step=steps)\n", (5848, 5896), True, 'import tensorflow as tf\n'), ((6208, 6278), 'tensorflow.summary.image', 'tf.summary.image', (['"""DIV8K/samples"""', "[DIV8K_logs['samples']]"], {'step': 'steps'}), "('DIV8K/samples', [DIV8K_logs['samples']], step=steps)\n", (6224, 6278), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Distributed training boilerplate using PyTorch.
"""
import os
import logging
import random
import argparse
import warnings
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import resnet
from train import train
from evaluate import evaluate
from datasets import get_dataloaders
from utils import experiment_config, print_network
warnings.filterwarnings("ignore")
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--dataset', default='cifar10',
help='e.g. cifar10, svhn, fashionmnist, mnist')
PARSER.add_argument('--n_epochs', type=int, default=1000,
help='number of epochs to train for.')
PARSER.add_argument('--batch_size', type=int, default=128,
help='number of images used to approx. gradient.')
PARSER.add_argument('--learning_rate', type=float, default=.1,
help='step size.')
PARSER.add_argument('--weight_decay', type=float, default=5e-4,
help='weight decay regularisation factor.')
PARSER.add_argument('--decay_rate', type=float, default=0.1,
help='factor to multiply with learning rate.')
PARSER.add_argument('--decay_steps', type=int, default=0,
help='decay learning rate every n steps.')
PARSER.add_argument('--optimiser', default='sgd',
help='e.g. sgd, adam')
PARSER.add_argument('--decay_milestones', nargs='+', type=int, default=[0],
help='epochs at which to multiply learning rate with decay rate.')
PARSER.add_argument('--padding', type=int, default=4,
help='padding augmentation factor.')
PARSER.add_argument('--brightness', type=float, default=0,
help='brightness augmentation factor.')
PARSER.add_argument('--contrast', type=float, default=0,
help='contrast augmentation factor.')
PARSER.add_argument('--patience', type=int, default=60,
                    help='number of epochs to wait for improvement.')
PARSER.add_argument('--crop_dim', type=int, default=32,
help='height and width of input cropping.')
PARSER.add_argument('--load_checkpoint_dir', default=None,
help='directory to load a checkpoint from.')
PARSER.add_argument('--no_distributed', dest='distributed', action='store_false',
help='choose whether or not to use distributed training.')
PARSER.set_defaults(distributed=True)
PARSER.add_argument('--inference', dest='inference', action='store_true',
help='infer from checkpoint rather than training.')
PARSER.set_defaults(inference=False)
PARSER.add_argument('--half_precision', dest='half_precision', action='store_true',
help='train using fp16.')
PARSER.set_defaults(half_precision=False)
def setup(distributed):
""" Sets up for optional distributed training.
For distributed training run as:
python -m torch.distributed.launch --nnodes=1 --node_rank=0 --nproc_per_node=2 --use_env main.py
To kill zombie processes use:
kill $(ps aux | grep "main.py" | grep -v grep | awk '{print $2}')
For data parallel training on GPUs or CPU training run as:
python main.py --no_distributed
"""
if distributed:
torch.distributed.init_process_group(backend='nccl', init_method='env://')
local_rank = int(os.environ.get('LOCAL_RANK'))
device = torch.device(f'cuda:{local_rank}') # unique on individual node
print('World size: {} ; Rank: {} ; LocalRank: {} ; Master: {}:{}'.format(
os.environ.get('WORLD_SIZE'),
os.environ.get('RANK'),
os.environ.get('LOCAL_RANK'),
os.environ.get('MASTER_ADDR'), os.environ.get('MASTER_PORT')))
else:
local_rank = None
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
seed = 8 # 666
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False # True
return device, local_rank
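# Editor's sketch (an assumption about datasets.get_dataloaders, not its actual
# code): DistributedDataParallel is normally paired with a DistributedSampler
# so that each rank sees a distinct shard of the training set, e.g.
#   sampler = torch.utils.data.distributed.DistributedSampler(dataset)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=128, sampler=sampler)
# with sampler.set_epoch(epoch) called once per epoch to reshuffle.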
def main():
""" Main method. """
args = PARSER.parse_known_args()[0]
# sets up the backend for distributed training (optional)
device, local_rank = setup(distributed=args.distributed)
# retrieve the dataloaders for the chosen dataset
dataloaders, args = get_dataloaders(args)
# make dirs for current experiment logs, summaries etc
args = experiment_config(args)
# initialise the model
model = resnet.resnet20(args)
# place model onto GPU(s)
if args.distributed:
torch.cuda.set_device(device)
torch.set_num_threads(1) # n cpu threads / n processes per node
model = DistributedDataParallel(model.cuda(),
device_ids=[local_rank], output_device=local_rank)
# only print stuff from process (rank) 0
        args.print_progress = int(os.environ.get('RANK')) == 0
else:
if args.half_precision:
model.half() # convert to half precision
for layer in model.modules():
# keep batchnorm in 32 for convergence reasons
if isinstance(layer, nn.BatchNorm2d):
layer.float()
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
print('\nUsing', torch.cuda.device_count(), 'GPU(s).\n')
model.to(device)
args.print_progress = True
if args.print_progress:
print_network(model, args) # prints out the network architecture etc
logging.info('\ntrain: {} - valid: {} - test: {}'.format(
len(dataloaders['train'].dataset), len(dataloaders['valid'].dataset),
len(dataloaders['test'].dataset)))
# launch model training or inference
if not args.inference:
train(model, dataloaders, args)
if args.distributed: # cleanup
torch.distributed.destroy_process_group()
else:
model.load_state_dict(torch.load(args.load_checkpoint_dir))
test_loss, test_acc = evaluate(model, args, dataloaders['test'])
print('[Test] loss {:.4f} - acc {:.4f} - acc_topk {:.4f}'.format(
test_loss, test_acc[0], test_acc[1]))
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate"
] |
[((444, 477), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (467, 477), False, 'import warnings\n'), ((488, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (511, 513), False, 'import argparse\n'), ((3973, 3990), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3984, 3990), False, 'import random\n'), ((3995, 4015), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4009, 4015), True, 'import numpy as np\n'), ((4020, 4043), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4037, 4043), False, 'import torch\n'), ((4048, 4076), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (4070, 4076), False, 'import torch\n'), ((4081, 4113), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (4107, 4113), False, 'import torch\n'), ((4565, 4586), 'datasets.get_dataloaders', 'get_dataloaders', (['args'], {}), '(args)\n', (4580, 4586), False, 'from datasets import get_dataloaders\n'), ((4658, 4681), 'utils.experiment_config', 'experiment_config', (['args'], {}), '(args)\n', (4675, 4681), False, 'from utils import experiment_config, print_network\n'), ((4722, 4743), 'resnet.resnet20', 'resnet.resnet20', (['args'], {}), '(args)\n', (4737, 4743), False, 'import resnet\n'), ((3343, 3417), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (3379, 3417), False, 'import torch\n'), ((3490, 3524), 'torch.device', 'torch.device', (['f"""cuda:{local_rank}"""'], {}), "(f'cuda:{local_rank}')\n", (3502, 3524), False, 'import torch\n'), ((4808, 4837), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (4829, 4837), False, 'import torch\n'), ((4846, 4870), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (4867, 4870), False, 'import torch\n'), ((5724, 5750), 'utils.print_network', 'print_network', (['model', 'args'], {}), '(model, args)\n', (5737, 5750), False, 'from utils import experiment_config, print_network\n'), ((6065, 6096), 'train.train', 'train', (['model', 'dataloaders', 'args'], {}), '(model, dataloaders, args)\n', (6070, 6096), False, 'from train import train\n'), ((6300, 6342), 'evaluate.evaluate', 'evaluate', (['model', 'args', "dataloaders['test']"], {}), "(model, args, dataloaders['test'])\n", (6308, 6342), False, 'from evaluate import evaluate\n'), ((3443, 3471), 'os.environ.get', 'os.environ.get', (['"""LOCAL_RANK"""'], {}), "('LOCAL_RANK')\n", (3457, 3471), False, 'import os\n'), ((5488, 5513), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5511, 5513), False, 'import torch\n'), ((5539, 5561), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (5554, 5561), True, 'import torch.nn as nn\n'), ((5587, 5612), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5610, 5612), False, 'import torch\n'), ((6150, 6191), 'torch.distributed.destroy_process_group', 'torch.distributed.destroy_process_group', ([], {}), '()\n', (6189, 6191), False, 'import torch\n'), ((6232, 6268), 'torch.load', 'torch.load', (['args.load_checkpoint_dir'], {}), '(args.load_checkpoint_dir)\n', (6242, 6268), False, 'import torch\n'), ((3649, 3677), 'os.environ.get', 'os.environ.get', (['"""WORLD_SIZE"""'], {}), "('WORLD_SIZE')\n", (3663, 3677), False, 'import os\n'), ((3691, 3713), 
'os.environ.get', 'os.environ.get', (['"""RANK"""'], {}), "('RANK')\n", (3705, 3713), False, 'import os\n'), ((3727, 3755), 'os.environ.get', 'os.environ.get', (['"""LOCAL_RANK"""'], {}), "('LOCAL_RANK')\n", (3741, 3755), False, 'import os\n'), ((3769, 3798), 'os.environ.get', 'os.environ.get', (['"""MASTER_ADDR"""'], {}), "('MASTER_ADDR')\n", (3783, 3798), False, 'import os\n'), ((3800, 3829), 'os.environ.get', 'os.environ.get', (['"""MASTER_PORT"""'], {}), "('MASTER_PORT')\n", (3814, 3829), False, 'import os\n'), ((3910, 3935), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3933, 3935), False, 'import torch\n'), ((5147, 5169), 'os.environ.get', 'os.environ.get', (['"""RANK"""'], {}), "('RANK')\n", (5161, 5169), False, 'import os\n')]
|
import os
import time
import math
import argparse
import torch
import torch.nn as nn
from torch import optim
import matplotlib.pyplot as plt
from lazy_dataset import LazyDataset
from lstm import Seq2Seq
from train import train_model
from evaluate import evaluate_model
from utils import *
from bucket_sampler import BucketBatchSampler
def make_loss_plot(model_history):
ax = plt.subplot(111)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.plot(
list(range(1, len(model_history) + 1)), model_history, label="training loss"
)
plt.xlabel("batch", fontsize=16)
plt.ylabel("training loss", fontsize=14)
ax.set_title("Training Loss", fontsize=20, pad=40)
plt.xticks(list(range(100, len(model_history) + 1, 100)))
plt.legend()
plt.show()
def main(args):
# use cuda if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# create directory for saving models if it doesn't already exist
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
SRC = torch.load(os.path.join(args.data_path, "src_vocab.pt"))
TRG = torch.load(os.path.join(args.data_path, "trg_vocab.pt"))
# gather parameters from the vocabulary
input_dim = len(SRC.vocab)
output_dim = len(TRG.vocab)
src_pad_idx = SRC.vocab.stoi[SRC.pad_token]
# create lazydataset and data loader
train_path = os.path.join(args.data_path, "train.tsv")
training_set = LazyDataset(train_path, SRC, TRG, "translation")
train_batch_sampler = BucketBatchSampler(train_path, args.batch_size)
# number of batches comes from the sampler, not the iterator
num_batches = train_batch_sampler.num_batches
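    # Editor's note (an assumption about bucket_sampler.BucketBatchSampler): a
    # bucketing sampler groups sequences of similar length into the same batch
    # to minimise padding, yielding one list of dataset indices per batch;
    # that is why it also owns the batch count used below.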
    # build dictionary of parameters for the DataLoader; batching is fully
    # delegated to the bucket sampler, so batch_size, shuffle and drop_last
    # must be left at their defaults (PyTorch forbids combining them with a
    # batch_sampler)
    train_loader_params = {
        # sort_batch reverse sorts for pack_pad_seq
        "collate_fn": sort_batch,
        "batch_sampler": train_batch_sampler,
        "num_workers": args.num_workers,
        "pin_memory": True,
    }
train_iterator = torch.utils.data.DataLoader(training_set, **train_loader_params)
if not args.continue_model:
# create model
model = Seq2Seq(
input_dim,
args.embedding_dim,
args.hidden_size,
output_dim,
args.num_layers,
args.dropout,
args.bidirectional,
src_pad_idx,
device,
).to(device)
# optionally randomly initialize weights
if args.random_init:
model.apply(random_init_weights)
# optionally freeze pretrained embeddings
if args.freeze_embeddings:
try:
src_pretrained_embeddings = SRC.vocab.vectors
model.encoder.enc_embedding.weight.data.copy_(src_pretrained_embeddings)
model.encoder.enc_embedding.weight.requires_grad = False
except TypeError:
print(
"Cannot freeze embedding layer without pretrained embeddings. Rerun make_vocab with source vectors"
)
start_epoch = 1
optimizer = make_muliti_optim(model.named_parameters(), args.learning_rate)
best_valid_loss = float("inf")
else:
model_dict = torch.load(args.continue_model, map_location=torch.device("cpu"))
prev_state_dict = model_dict["model_state_dict"]
prev_param_dict = get_prev_params(prev_state_dict)
dropout = model_dict["dropout"]
model = Seq2Seq(
input_dim,
prev_param_dict["emb_dim"],
prev_param_dict["enc_hid_dim"],
output_dim,
prev_param_dict["enc_layers"],
dropout,
prev_param_dict["bidirectional"],
src_pad_idx,
device,
).to(device)
if args.freeze_embeddings:
model.encoder.enc_embedding.weight.requires_grad = False
start_epoch = model_dict["epoch"]
# restart optimizer at training point
optimizer = make_muliti_optim(
model.named_parameters(),
args.learning_rate,
model_dict["adam_state_dict"],
model_dict["sparse_adam_state_dict"],
)
model.load_state_dict(prev_state_dict)
# assumes loading from starting point with best loss
# TODO: possibly fix this
best_valid_loss = model_dict["loss"]
# free up memory
del model_dict, prev_state_dict
print(model)
print(f"The model has {count_parameters(model):,} trainable parameters")
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)
# training
loss_history = []
for epoch in range(start_epoch, args.epochs + 1):
start_time = time.time()
train_loss, batch_loss = train_model(
model,
train_iterator,
task="translation",
optimizer=optimizer,
criterion=criterion,
clip=args.clip,
device=device,
epoch=epoch,
start_time=start_time,
save_path=args.save_path,
dropout=args.dropout,
pad_indices=(SRC_PAD_IDX, TRG_PAD_IDX),
teacher_forcing=args.teacher_forcing,
checkpoint=args.checkpoint,
num_batches=num_batches,
)
loss_history += batch_loss
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
model_filename = os.path.join(args.save_path, f"model_epoch_{epoch}.pt")
adam, sparse_adam = optimizer.return_optimizers()
torch.save(
{
"epoch": epoch,
"model_state_dict": model.state_dict(),
"adam_state_dict": adam.state_dict(),
"sparse_adam_state_dict": sparse_adam.state_dict(),
"loss": train_loss,
"dropout": args.dropout,
},
model_filename,
)
# optionally validate
if not args.skip_validate:
valid_path = os.path.join(args.data_path, "valid.tsv")
valid_set = LazyDataset(valid_path, SRC, TRG, "translation")
valid_batch_sampler = BucketBatchSampler(valid_path, args.batch_size)
            valid_num_batches = valid_batch_sampler.num_batches
            valid_loader_params = {
                # as above, batching is delegated to the bucket sampler
                "collate_fn": sort_batch,
                "batch_sampler": valid_batch_sampler,
                "num_workers": args.num_workers,
                "pin_memory": True,
            }
valid_iterator = torch.utils.data.DataLoader(
valid_set, **valid_loader_params
)
valid_loss = evaluate_model(
model,
valid_iterator,
num_batches=valid_num_batches,
task="translation",
optimizer=optimizer,
criterion=criterion,
teacher_forcing=args.teacher_forcing,
device=device,
pad_indices=(SRC_PAD_IDX, TRG_PAD_IDX),
)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_filename = os.path.join(args.save_path, f"best_model.pt")
torch.save(
{
"epoch": epoch,
"model_state_dict": model.state_dict(),
"adam_state_dict": adam.state_dict(),
"sparse_adam_state_dict": sparse_adam.state_dict(),
"loss": valid_loss,
"dropout": args.dropout,
},
best_filename,
)
print(f"Epoch: {epoch:02} | Time: {epoch_mins}m {epoch_secs}s")
print(
f"\t Train Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}"
)
print(
f"\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}"
)
else:
print(f"Epoch: {epoch:02} | Time: {epoch_mins}m {epoch_secs}s")
print(
f"\t Train Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}"
)
if args.loss_plot:
make_loss_plot(loss_history)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-path", help="folder where data and dictionaries are stored"
)
parser.add_argument(
"--save-path", help="folder for saving model and/or checkpoints"
)
parser.add_argument("--epochs", default=10, type=int)
parser.add_argument("--batch-size", default=64, type=int)
parser.add_argument("--num-workers", default=0, type=int)
parser.add_argument("--shuffle", default=False, action="store_true")
parser.add_argument(
"--random-init",
default=False,
action="store_true",
help="randomly initialize weights",
)
parser.add_argument("--embedding-dim", default=300, type=int)
parser.add_argument("--hidden-size", default=512, type=int)
parser.add_argument("--num-layers", default=1, type=int)
parser.add_argument("--dropout", default=0.1, type=float)
parser.add_argument("--bidirectional", default=False, action="store_true")
parser.add_argument("--teacher-forcing", default=0.5, type=float)
parser.add_argument("--clip", default=1.0, type=float)
parser.add_argument(
"--learning-rate", type=float, default=1e-3, help="learning rate for optimizer"
)
parser.add_argument("--checkpoint", type=int, help="save model every N batches")
parser.add_argument(
"--skip-validate",
default=False,
action="store_true",
help="set to False to skip validation",
)
parser.add_argument(
"--freeze-embeddings",
default=False,
action="store_true",
help="freeze source embedding layer",
)
parser.add_argument(
"--continue-model",
default=None,
type=str,
help="model for restarting training from a saved checkpoint",
)
parser.add_argument(
"--loss-plot", default=False, action="store_true", help="create a loss plot"
)
main(parser.parse_args())
|
[
"evaluate.evaluate_model"
] |
[((384, 400), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (395, 400), True, 'import matplotlib.pyplot as plt\n'), ((627, 659), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""batch"""'], {'fontsize': '(16)'}), "('batch', fontsize=16)\n", (637, 659), True, 'import matplotlib.pyplot as plt\n'), ((664, 704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""training loss"""'], {'fontsize': '(14)'}), "('training loss', fontsize=14)\n", (674, 704), True, 'import matplotlib.pyplot as plt\n'), ((826, 838), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (836, 838), True, 'import matplotlib.pyplot as plt\n'), ((843, 853), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (851, 853), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1512), 'os.path.join', 'os.path.join', (['args.data_path', '"""train.tsv"""'], {}), "(args.data_path, 'train.tsv')\n", (1483, 1512), False, 'import os\n'), ((1532, 1580), 'lazy_dataset.LazyDataset', 'LazyDataset', (['train_path', 'SRC', 'TRG', '"""translation"""'], {}), "(train_path, SRC, TRG, 'translation')\n", (1543, 1580), False, 'from lazy_dataset import LazyDataset\n'), ((1608, 1655), 'bucket_sampler.BucketBatchSampler', 'BucketBatchSampler', (['train_path', 'args.batch_size'], {}), '(train_path, args.batch_size)\n', (1626, 1655), False, 'from bucket_sampler import BucketBatchSampler\n'), ((2233, 2297), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['training_set'], {}), '(training_set, **train_loader_params)\n', (2260, 2297), False, 'import torch\n'), ((4884, 4929), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'TRG_PAD_IDX'}), '(ignore_index=TRG_PAD_IDX)\n', (4903, 4929), True, 'import torch.nn as nn\n'), ((8894, 8919), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8917, 8919), False, 'import argparse\n'), ((1056, 1086), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (1070, 1086), False, 'import os\n'), ((1096, 1120), 'os.mkdir', 'os.mkdir', (['args.save_path'], {}), '(args.save_path)\n', (1104, 1120), False, 'import os\n'), ((1143, 1187), 'os.path.join', 'os.path.join', (['args.data_path', '"""src_vocab.pt"""'], {}), "(args.data_path, 'src_vocab.pt')\n", (1155, 1187), False, 'import os\n'), ((1210, 1254), 'os.path.join', 'os.path.join', (['args.data_path', '"""trg_vocab.pt"""'], {}), "(args.data_path, 'trg_vocab.pt')\n", (1222, 1254), False, 'import os\n'), ((5043, 5054), 'time.time', 'time.time', ([], {}), '()\n', (5052, 5054), False, 'import time\n'), ((5088, 5447), 'train.train_model', 'train_model', (['model', 'train_iterator'], {'task': '"""translation"""', 'optimizer': 'optimizer', 'criterion': 'criterion', 'clip': 'args.clip', 'device': 'device', 'epoch': 'epoch', 'start_time': 'start_time', 'save_path': 'args.save_path', 'dropout': 'args.dropout', 'pad_indices': '(SRC_PAD_IDX, TRG_PAD_IDX)', 'teacher_forcing': 'args.teacher_forcing', 'checkpoint': 'args.checkpoint', 'num_batches': 'num_batches'}), "(model, train_iterator, task='translation', optimizer=optimizer,\n criterion=criterion, clip=args.clip, device=device, epoch=epoch,\n start_time=start_time, save_path=args.save_path, dropout=args.dropout,\n pad_indices=(SRC_PAD_IDX, TRG_PAD_IDX), teacher_forcing=args.\n teacher_forcing, checkpoint=args.checkpoint, num_batches=num_batches)\n", (5099, 5447), False, 'from train import train_model\n'), ((5676, 5687), 'time.time', 'time.time', ([], {}), '()\n', (5685, 5687), False, 'import time\n'), ((5780, 
5835), 'os.path.join', 'os.path.join', (['args.save_path', 'f"""model_epoch_{epoch}.pt"""'], {}), "(args.save_path, f'model_epoch_{epoch}.pt')\n", (5792, 5835), False, 'import os\n'), ((937, 962), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (960, 962), False, 'import torch\n'), ((6360, 6401), 'os.path.join', 'os.path.join', (['args.data_path', '"""valid.tsv"""'], {}), "(args.data_path, 'valid.tsv')\n", (6372, 6401), False, 'import os\n'), ((6426, 6474), 'lazy_dataset.LazyDataset', 'LazyDataset', (['valid_path', 'SRC', 'TRG', '"""translation"""'], {}), "(valid_path, SRC, TRG, 'translation')\n", (6437, 6474), False, 'from lazy_dataset import LazyDataset\n'), ((6509, 6556), 'bucket_sampler.BucketBatchSampler', 'BucketBatchSampler', (['valid_path', 'args.batch_size'], {}), '(valid_path, args.batch_size)\n', (6527, 6556), False, 'from bucket_sampler import BucketBatchSampler\n'), ((7116, 7177), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_set'], {}), '(valid_set, **valid_loader_params)\n', (7143, 7177), False, 'import torch\n'), ((7234, 7471), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'valid_iterator'], {'num_batches': 'valid_num_batches', 'task': '"""translation"""', 'optimizer': 'optimizer', 'criterion': 'criterion', 'teacher_forcing': 'args.teacher_forcing', 'device': 'device', 'pad_indices': '(SRC_PAD_IDX, TRG_PAD_IDX)'}), "(model, valid_iterator, num_batches=valid_num_batches, task=\n 'translation', optimizer=optimizer, criterion=criterion,\n teacher_forcing=args.teacher_forcing, device=device, pad_indices=(\n SRC_PAD_IDX, TRG_PAD_IDX))\n", (7248, 7471), False, 'from evaluate import evaluate_model\n'), ((2370, 2515), 'lstm.Seq2Seq', 'Seq2Seq', (['input_dim', 'args.embedding_dim', 'args.hidden_size', 'output_dim', 'args.num_layers', 'args.dropout', 'args.bidirectional', 'src_pad_idx', 'device'], {}), '(input_dim, args.embedding_dim, args.hidden_size, output_dim, args.\n num_layers, args.dropout, args.bidirectional, src_pad_idx, device)\n', (2377, 2515), False, 'from lstm import Seq2Seq\n'), ((3507, 3526), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3519, 3526), False, 'import torch\n'), ((3700, 3894), 'lstm.Seq2Seq', 'Seq2Seq', (['input_dim', "prev_param_dict['emb_dim']", "prev_param_dict['enc_hid_dim']", 'output_dim', "prev_param_dict['enc_layers']", 'dropout', "prev_param_dict['bidirectional']", 'src_pad_idx', 'device'], {}), "(input_dim, prev_param_dict['emb_dim'], prev_param_dict[\n 'enc_hid_dim'], output_dim, prev_param_dict['enc_layers'], dropout,\n prev_param_dict['bidirectional'], src_pad_idx, device)\n", (3707, 3894), False, 'from lstm import Seq2Seq\n'), ((7741, 7787), 'os.path.join', 'os.path.join', (['args.save_path', 'f"""best_model.pt"""'], {}), "(args.save_path, f'best_model.pt')\n", (7753, 7787), False, 'import os\n'), ((8409, 8429), 'math.exp', 'math.exp', (['train_loss'], {}), '(train_loss)\n', (8417, 8429), False, 'import math\n'), ((8533, 8553), 'math.exp', 'math.exp', (['valid_loss'], {}), '(valid_loss)\n', (8541, 8553), False, 'import math\n'), ((8749, 8769), 'math.exp', 'math.exp', (['train_loss'], {}), '(train_loss)\n', (8757, 8769), False, 'import math\n')]
|
import torch
from tqdm import tqdm
from evaluate import evaluate
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sn
import numpy as np
import matplotlib.pyplot as plt
def test(model, criterion, dataloader, device, run):
run['test-config/device'] = device
model.to(device)
model.eval()
val_acc, val_loss = evaluate(model=model, criterion=criterion, dataloader=dataloader, device=device)
run['test/loss'] = val_loss
run['test/accuracy'] = val_acc
print("Test complete! \n Test results: \n\tAccuracy: {} \n\t Loss: {}".format(val_acc, val_loss))
print("Creating confusion matrix...")
y_pred = []
y_true = []
for i, (input_ids, attention_mask, image, labels) in enumerate(tqdm(iterable=dataloader, desc='Testing')):
input_ids, attention_mask, image, labels = input_ids.to(device), attention_mask.to(device), image.to(device, dtype=torch.float), labels.to(device)
predicted_class = model(text=input_ids, text_input_mask=attention_mask, image=image)
        # labels are one-hot, so the argmax along dim 1 recovers the class
        # index (exp() is monotonic and does not change the argmax)
        desired_output = torch.max(labels, 1)[1].cpu().numpy()
        output = torch.max(predicted_class, 1)[1].cpu().numpy()
y_pred.extend(output)
y_true.extend(desired_output)
classes = ('News', 'Fake News')
cf_matrix = confusion_matrix(y_true, y_pred, normalize='true')
    # cf_matrix is already row-normalised (normalize='true'), so no further
    # division is needed
    df_cm = pd.DataFrame(cf_matrix, index=list(classes), columns=list(classes))
plt.figure()
sn.heatmap(df_cm, annot=True)
plt.savefig('./confusion_matrix_normalized.png')
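    # Editor's addition (a sketch, not part of the original pipeline): sklearn's
    # classification_report prints per-class precision/recall/F1 from the same
    # y_true / y_pred lists; `classes` is assumed to be ordered by label index.
    from sklearn.metrics import classification_report
    print(classification_report(y_true, y_pred, target_names=classes))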
|
[
"evaluate.evaluate"
] |
[((388, 473), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'criterion': 'criterion', 'dataloader': 'dataloader', 'device': 'device'}), '(model=model, criterion=criterion, dataloader=dataloader, device=device\n )\n', (396, 473), False, 'from evaluate import evaluate\n'), ((1511, 1561), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {'normalize': '"""true"""'}), "(y_true, y_pred, normalize='true')\n", (1527, 1561), False, 'from sklearn.metrics import confusion_matrix, plot_confusion_matrix\n'), ((1680, 1692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1690, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1726), 'seaborn.heatmap', 'sn.heatmap', (['df_cm'], {'annot': '(True)'}), '(df_cm, annot=True)\n', (1707, 1726), True, 'import seaborn as sn\n'), ((1731, 1779), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./confusion_matrix_normalized.png"""'], {}), "('./confusion_matrix_normalized.png')\n", (1742, 1779), True, 'import matplotlib.pyplot as plt\n'), ((781, 822), 'tqdm.tqdm', 'tqdm', ([], {'iterable': 'dataloader', 'desc': '"""Testing"""'}), "(iterable=dataloader, desc='Testing')\n", (785, 822), False, 'from tqdm import tqdm, trange\n'), ((1598, 1615), 'numpy.sum', 'np.sum', (['cf_matrix'], {}), '(cf_matrix)\n', (1604, 1615), True, 'import numpy as np\n'), ((1175, 1192), 'torch.exp', 'torch.exp', (['labels'], {}), '(labels)\n', (1184, 1192), False, 'import torch\n'), ((1334, 1360), 'torch.exp', 'torch.exp', (['predicted_class'], {}), '(predicted_class)\n', (1343, 1360), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 17:57:09 2015
@author: Paco
"""
from utils import Utils
from evaluate import Evaluate
from metrics import Metrics
from gradient import Gradient
import numpy as np
# Load data
u = Utils()
train_facile = u.load_matrix('data/data_train_facile.mat')
#generate pairs
pairs_idx, pairs_label = u.generate_pairs(train_facile['label'], 1000, 0.1)
newX,newY = u.select_pairs_data(pairs_idx,train_facile['X'],train_facile['label'],c=700)
feat_idx = u._feat_idx
#test gradient
g = Gradient()
M_ini = g.generate_I(newX.shape[1])
M = g.sgd_metric_learning(newX, newY, 0.002, 50000, 0, M_ini)
# Calculate distance
m = Metrics()
X = u.select_features(train_facile['X'],feat_idx)
X -= X.mean(axis=0)
X /= X.std(axis=0)
X[np.isnan(X)] = 0.
dist = m.mahalanobis_dist(X, pairs_idx, M)
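# Editor's note (an assumption about Metrics.mahalanobis_dist): for each pair
# (i, j) it presumably computes d_M(x_i, x_j) = sqrt((x_i - x_j)^T M (x_i - x_j)),
# which in vectorised numpy form would be
#   diff = X[pairs_idx[:, 0]] - X[pairs_idx[:, 1]]
#   dist = np.sqrt(np.einsum('ij,jk,ik->i', diff, M, diff))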
#dist[np.isnan(dist)] = 50.
## Evaluate model
e = Evaluate()
e.evaluation(pairs_label,dist)
## display results
e.display_roc()
e.easy_score()
# Evaluate test dataset and save it
test_facile = u.load_matrix('data/data_test_facile.mat')
#X2 = u.select_features(test_facile['X'],feat_idx)
#X2 -= X2.mean(axis=0)
#X2 /= X2.std(axis=0)
#X2[np.isnan(X2)] = 0.
#dist_test = m.mahalanobis_dist(X2, test_facile['pairs'],M)
#dist_test[np.isnan(dist_test)] = 1.
#u.save_test(dist_test)
|
[
"evaluate.Evaluate"
] |
[((232, 239), 'utils.Utils', 'Utils', ([], {}), '()\n', (237, 239), False, 'from utils import Utils\n'), ((524, 534), 'gradient.Gradient', 'Gradient', ([], {}), '()\n', (532, 534), False, 'from gradient import Gradient\n'), ((659, 668), 'metrics.Metrics', 'Metrics', ([], {}), '()\n', (666, 668), False, 'from metrics import Metrics\n'), ((870, 880), 'evaluate.Evaluate', 'Evaluate', ([], {}), '()\n', (878, 880), False, 'from evaluate import Evaluate\n'), ((760, 771), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (768, 771), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[1]:
import logging
import os
import numpy as np
import torch
import torch.optim as optim
from tqdm import trange
from tqdm import tqdm_notebook as tqdm
import utils
import model.net as net
from model.data_loader import DataLoader
from evaluate import evaluate, f_score_simple
# In[2]:
# data_dir = 'data/coNLL/eng/'
# model_dir = 'experiments/coNLL/base_model/'
data_dir = 'data/kaggle/'
model_dir = 'experiments/kaggle/base_model/'
json_path = os.path.join(model_dir, 'params.json')
params = utils.Params(json_path)
# In[3]:
# use GPU if available
params.cuda = torch.cuda.is_available()
params.dict
# In[4]:
# load data
data_loader = DataLoader(data_dir, params)
data = data_loader.load_data(['train', 'val', 'test'])
train_data = data['train']
val_data = data['val']
test_data = data['test']
# specify the train and val dataset sizes
params.train_size = train_data['size']
params.val_size = val_data['size']
params.test_size = test_data['size']
params.pad_tag_ind = data_loader.tag_map[params.pad_tag]
# In[5]:
data_loader.dataset_params.dict
# In[6]:
# specify the train and val dataset sizes
params.train_size = train_data['size']
params.val_size = val_data['size']
params.pad_tag_ind = data_loader.tag_map[params.pad_tag]
# In[7]:
# Define the model and optimizer
model = net.Net(params).cuda() if params.cuda else net.Net(params)
# In[8]:
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
# fetch loss function and metrics
loss_fn = net.loss_fn
metrics = net.metrics
# In[9]:
# Set the logger
utils.set_logger(os.path.join(model_dir, 'train.log'))
# In[10]:
def train(model, optimizer, loss_fn, data_iterator, metrics, params, num_steps):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
data_iterator: (generator) a generator that generates batches of data and labels
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
num_steps: (int) number of batches to train on, each of size params.batch_size
"""
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
# Use tqdm for progress bar
t = trange(num_steps)
for i in t:
# fetch the next training batch
train_batch, labels_batch = next(data_iterator)
# compute model output and loss
output_batch = model(train_batch)
loss = loss_fn(output_batch, labels_batch)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
# compute all metrics on this batch
summary_batch = {metric:metrics[metric](output_batch, labels_batch, params) for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# print('Evaluate called')
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
# compute mean of all metrics in summary
metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
# In[11]:
best_val_acc = 0.0
for epoch in range(params.num_epochs):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
num_steps = (params.train_size + 1) // params.batch_size
train_data_iterator = data_loader.data_iterator(train_data, params, shuffle=True)
train(model, optimizer, loss_fn, train_data_iterator, metrics, params, num_steps)
# Evaluate for one epoch on validation set
num_steps = (params.val_size + 1) // params.batch_size
val_data_iterator = data_loader.data_iterator(val_data, params, shuffle=False)
val_metrics = evaluate(model, loss_fn, val_data, metrics, data_loader, params, num_steps)
val_acc = val_metrics['accuracy']
is_best = val_acc >= best_val_acc
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict' : optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best accuracy")
best_val_acc = val_acc
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
utils.save_dict_to_json(val_metrics, last_json_path)
# In[12]:
num_steps = (params.val_size + 1) // params.batch_size
f_score_simple(model, val_data, data_loader, params, num_steps)
# ## Evaluate
# In[14]:
# Define the model
model = net.Net(params).cuda() if params.cuda else net.Net(params)
loss_fn = net.loss_fn
metrics = net.metrics
logging.info("Starting evaluation")
restore_file = 'best'
# Reload weights from the saved file
r = utils.load_checkpoint(os.path.join(model_dir, restore_file + '.pth.tar'), model)
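# Editor's note (an assumption about utils.load_checkpoint): it presumably
# wraps torch.load + load_state_dict, roughly
#   checkpoint = torch.load(path)
#   model.load_state_dict(checkpoint['state_dict'])
# and returns the checkpoint dict so callers can inspect epoch/optimizer state.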
# In[15]:
# Evaluate
num_steps = (params.test_size + 1) // params.batch_size
test_metrics = evaluate(model, loss_fn, test_data, metrics, data_loader, params, num_steps)
|
[
"evaluate.evaluate",
"evaluate.f_score_simple"
] |
[((476, 514), 'os.path.join', 'os.path.join', (['model_dir', '"""params.json"""'], {}), "(model_dir, 'params.json')\n", (488, 514), False, 'import os\n'), ((524, 547), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (536, 547), False, 'import utils\n'), ((598, 623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (621, 623), False, 'import torch\n'), ((675, 703), 'model.data_loader.DataLoader', 'DataLoader', (['data_dir', 'params'], {}), '(data_dir, params)\n', (685, 703), False, 'from model.data_loader import DataLoader\n'), ((5814, 5877), 'evaluate.f_score_simple', 'f_score_simple', (['model', 'val_data', 'data_loader', 'params', 'num_steps'], {}), '(model, val_data, data_loader, params, num_steps)\n', (5828, 5877), False, 'from evaluate import evaluate, f_score_simple\n'), ((6039, 6074), 'logging.info', 'logging.info', (['"""Starting evaluation"""'], {}), "('Starting evaluation')\n", (6051, 6074), False, 'import logging\n'), ((6316, 6392), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'test_data', 'metrics', 'data_loader', 'params', 'num_steps'], {}), '(model, loss_fn, test_data, metrics, data_loader, params, num_steps)\n', (6324, 6392), False, 'from evaluate import evaluate, f_score_simple\n'), ((1560, 1596), 'os.path.join', 'os.path.join', (['model_dir', '"""train.log"""'], {}), "(model_dir, 'train.log')\n", (1572, 1596), False, 'import os\n'), ((2472, 2494), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (2492, 2494), False, 'import utils\n'), ((2540, 2557), 'tqdm.trange', 'trange', (['num_steps'], {}), '(num_steps)\n', (2546, 2557), False, 'from tqdm import trange\n'), ((4006, 4056), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (4018, 4056), False, 'import logging\n'), ((4744, 4819), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'val_data', 'metrics', 'data_loader', 'params', 'num_steps'], {}), '(model, loss_fn, val_data, metrics, data_loader, params, num_steps)\n', (4752, 4819), False, 'from evaluate import evaluate, f_score_simple\n'), ((5631, 5687), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_last_weights.json"""'], {}), "(model_dir, 'metrics_val_last_weights.json')\n", (5643, 5687), False, 'import os\n'), ((5692, 5744), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'last_json_path'], {}), '(val_metrics, last_json_path)\n', (5715, 5744), False, 'import utils\n'), ((5977, 5992), 'model.net.Net', 'net.Net', (['params'], {}), '(params)\n', (5984, 5992), True, 'import model.net as net\n'), ((6161, 6211), 'os.path.join', 'os.path.join', (['model_dir', "(restore_file + '.pth.tar')"], {}), "(model_dir, restore_file + '.pth.tar')\n", (6173, 6211), False, 'import os\n'), ((1331, 1346), 'model.net.Net', 'net.Net', (['params'], {}), '(params)\n', (1338, 1346), True, 'import model.net as net\n'), ((3849, 3883), 'numpy.mean', 'np.mean', (['[x[metric] for x in summ]'], {}), '([x[metric] for x in summ])\n', (3856, 3883), True, 'import numpy as np\n'), ((5254, 5295), 'logging.info', 'logging.info', (['"""- Found new best accuracy"""'], {}), "('- Found new best accuracy')\n", (5266, 5295), False, 'import logging\n'), ((5423, 5479), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_best_weights.json"""'], {}), "(model_dir, 'metrics_val_best_weights.json')\n", (5435, 5479), False, 'import os\n'), ((5488, 5540), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 
'best_json_path'], {}), '(val_metrics, best_json_path)\n', (5511, 5540), False, 'import utils\n'), ((5934, 5949), 'model.net.Net', 'net.Net', (['params'], {}), '(params)\n', (5941, 5949), True, 'import model.net as net\n')]
|
#!/usr/bin/env python3
"""Main script to run things"""
from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string
from model import Seq2Seq, Seq2SeqAttention, Seq2SeqFastAttention
from criterions.matrixBLEU import mBLEU
from utils import onehot_initialization
from evaluate import evaluate_model
import math
import numpy as np
import logging
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
help="path to json config",
required=True
)
args = parser.parse_args()
config_file_path = args.config
config = read_config(config_file_path)
experiment_name = hyperparam_string(config)
save_dir = config['data']['save_dir']
load_dir = config['data']['load_dir']
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
filename='log/%s' % (experiment_name),
filemode='w'
)
# define a new Handler to log to console as well
console = logging.StreamHandler()
# optional, set the logging level
console.setLevel(logging.INFO)
# set a format which is the same for console use
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
print('Reading data ...')
src, trg = read_nmt_data(
src=config['data']['src'],
config=config,
trg=config['data']['trg']
)
src_test, trg_test = read_nmt_data(
src=config['data']['test_src'],
config=config,
trg=config['data']['test_trg']
)
batch_size = config['data']['batch_size']
max_length = config['data']['max_src_length']
src_vocab_size = len(src['word2id'])
trg_vocab_size = len(trg['word2id'])
logging.info('Model Parameters:')
logging.info('Task: %s' % (config['data']['task']))
logging.info('Model: %s' % (config['model']['seq2seq']))
logging.info('Source Language: %s' % (config['model']['src_lang']))
logging.info('Target Language: %s' % (config['model']['trg_lang']))
logging.info('Source Word Embedding Dim: %s' % (config['model']['dim_word_src']))
logging.info('Target Word Embedding Dim: %s' % (config['model']['dim_word_trg']))
logging.info('Source RNN Hidden Dim: %s' % (config['model']['dim']))
logging.info('Target RNN Hidden Dim: %s' % (config['model']['dim']))
logging.info('Source RNN Depth: %d' % (config['model']['n_layers_src']))
logging.info('Target RNN Depth: %d' % (config['model']['n_layers_trg']))
logging.info('Source RNN Bidirectional: %s' % (config['model']['bidirectional']))
logging.info('Batch Size: %d' % (config['data']['batch_size']))
logging.info('Valid Batch Size: %d' % (config['data']['valid_batch_size']))
logging.info('Optimizer: %s' % (config['training']['optimizer']))
logging.info('Learning Rate: %f' % (config['training']['lrate']))
logging.info('Found %d words in src' % (src_vocab_size))
logging.info('Found %d words in trg' % (trg_vocab_size))
weight_mask = torch.ones(trg_vocab_size).cuda()
weight_mask[trg['word2id']['<pad>']] = 0
criterion_cross_entropy = nn.CrossEntropyLoss(weight=weight_mask).cuda()
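# The zero weight on '<pad>' above makes CrossEntropyLoss ignore padded target
# positions, so the loss is averaged over real tokens only.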
criterion_bleu = mBLEU(4)
if config['model']['seq2seq'] == 'vanilla':
model = Seq2Seq(
src_emb_dim=config['model']['dim_word_src'],
trg_emb_dim=config['model']['dim_word_trg'],
src_vocab_size=src_vocab_size,
trg_vocab_size=trg_vocab_size,
src_hidden_dim=config['model']['dim'],
trg_hidden_dim=config['model']['dim'],
batch_size=batch_size,
bidirectional=config['model']['bidirectional'],
pad_token_src=src['word2id']['<pad>'],
pad_token_trg=trg['word2id']['<pad>'],
nlayers=config['model']['n_layers_src'],
nlayers_trg=config['model']['n_layers_trg'],
dropout=0.,
).cuda()
elif config['model']['seq2seq'] == 'attention':
model = Seq2SeqAttention(
src_emb_dim=config['model']['dim_word_src'],
trg_emb_dim=config['model']['dim_word_trg'],
src_vocab_size=src_vocab_size,
trg_vocab_size=trg_vocab_size,
src_hidden_dim=config['model']['dim'],
trg_hidden_dim=config['model']['dim'],
ctx_hidden_dim=config['model']['dim'],
attention_mode='dot',
batch_size=batch_size,
bidirectional=config['model']['bidirectional'],
pad_token_src=src['word2id']['<pad>'],
pad_token_trg=trg['word2id']['<pad>'],
nlayers=config['model']['n_layers_src'],
nlayers_trg=config['model']['n_layers_trg'],
dropout=0.,
).cuda()
elif config['model']['seq2seq'] == 'fastattention':
model = Seq2SeqFastAttention(
src_emb_dim=config['model']['dim_word_src'],
trg_emb_dim=config['model']['dim_word_trg'],
src_vocab_size=src_vocab_size,
trg_vocab_size=trg_vocab_size,
src_hidden_dim=config['model']['dim'],
trg_hidden_dim=config['model']['dim'],
batch_size=batch_size,
bidirectional=config['model']['bidirectional'],
pad_token_src=src['word2id']['<pad>'],
pad_token_trg=trg['word2id']['<pad>'],
nlayers=config['model']['n_layers_src'],
nlayers_trg=config['model']['n_layers_trg'],
dropout=0.,
).cuda()
# __TODO__ Make this more flexible for other learning methods.
if config['training']['optimizer'] == 'adam':
lr = config['training']['lrate']
optimizer = optim.Adam(model.parameters(), lr=lr)
elif config['training']['optimizer'] == 'adadelta':
optimizer = optim.Adadelta(model.parameters())
elif config['training']['optimizer'] == 'sgd':
lr = config['training']['lrate']
optimizer = optim.SGD(model.parameters(), lr=lr)
else:
raise NotImplementedError("Learning method not recommend for task")
class LossLogger(object):
def __init__(self, names, path):
self.names = names
if os.path.exists(path):
with open(path, 'r') as f:
names_ = tuple(f.readline().strip().split())
assert self.names == names_, "given names: {} prev names: {}".format("\t".join(self.names), "\t".join(names_))
self.a = [list(map(float, line.strip().split())) for line in f]
else:
with open(path, 'w') as f:
print('\t'.join(names), file=f)
self.a = []
self.f = open(path, 'a', 1)
def append(self, e):
self.a.append(e)
print('\t'.join(map(lambda x: "{:.6f}".format(x), e)), file=self.f)
def recent(self, k):
k = min(k, len(self.a))
return list(map(np.mean, zip(*self.a[-k:])))
def recent_repr(self, k):
v = self.recent(k)
return "\t".join("{}: {:.3f}".format(name, val) for name, val in zip(self.names, v))
losses = LossLogger(("loss", "cel", "mbl", "bll"), os.path.join("log", "{}.loss".format(experiment_name)))
bleus = LossLogger(("bleu",), os.path.join("log", "{}.bleu".format(experiment_name)))
pretrain_epochs = config["data"]["pretrain_epochs"]
for epoch_i in range(config['data']['last_epoch'], 1000):
if load_dir:
logging.info('loading model from {} ...'.format(load_dir))
model.load_state_dict(torch.load(load_dir))
for batch_i in range(0, len(src['data']), batch_size):
if batch_i >= 0:
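            # NOTE: this condition is always true, so the training loop below
            # is skipped entirely; apparently a leftover debugging switch.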
break
verbose = (batch_i % config['management']['print_samples'] == 0)
input_lines_src, _, lens_src, mask_src = get_minibatch(
src['data'], src['word2id'], batch_i,
batch_size, max_length, add_start=True, add_end=True
)
input_lines_trg, output_lines_trg, lens_trg, mask_trg = get_minibatch(
trg['data'], trg['word2id'], batch_i,
batch_size, max_length, add_start=True, add_end=True
)
decoder_logit = model(input_lines_src, input_lines_trg)
optimizer.zero_grad()
X = torch.nn.functional.softmax(decoder_logit, dim=-1)
Y = torch.tensor(
onehot_initialization(output_lines_trg, trg_vocab_size),
dtype=torch.float,
device='cuda')
eos_id = trg['word2id']['</s>']
def length_mask(X):
l = X.shape[1]
mask = [torch.ones(X.shape[0], device='cuda')]
for t in range(l):
mask.append(mask[-1] * (1 - X[:, t, eos_id]))
mask = torch.stack(mask, dim=1)
lenX = torch.sum(mask, dim=1)
return mask, lenX
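        # length_mask builds a soft (differentiable) length mask from the
        # softmax outputs: mask[:, t] = prod_{s<t} (1 - P(</s> at step s)),
        # the probability that the sequence is still "alive" at step t;
        # summing the mask over t gives the expected sequence length.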
maskY, lenY = length_mask(Y)
maskX, lenX = maskY, lenY
mbl, mbl_ = criterion_bleu(Y, X, lenY, lenX, maskY, maskX, device='cuda', verbose=verbose)
bll = torch.exp(-mbl)
mbl = mbl.mean()
mbl_ = mbl_.mean(0)
bll = bll.mean()
cel = criterion_cross_entropy(
decoder_logit.contiguous().view(-1, trg_vocab_size),
output_lines_trg.view(-1)
)
bleu_w = config['model']['bleu_w']
if bleu_w == 0.:
loss = cel
elif bleu_w == 1.:
loss = mbl
else:
loss = cel * (1. - bleu_w) + mbl * bleu_w
if epoch_i < pretrain_epochs:
loss = cel
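        # bleu_w linearly interpolates between cross-entropy (cel) and the
        # differentiable mBLEU loss (mbl); the first pretrain_epochs epochs
        # fall back to pure cross-entropy as a warm-up.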
losses.append(list(map(lambda x: x.data.cpu().numpy(), (loss, cel, mbl, bll))))
X.retain_grad()
if verbose:
for order in range(1, 5):
optimizer.zero_grad()
mbl_[order-1].backward(retain_graph=True)
g = (X.grad[:, :, :Y.shape[-1]] * Y).sum(-1)
print('grad({}):'.format(order))
print(g[:5])
print('grad({}) argmax:'.format(order))
gw = X.grad[:, :, :Y.shape[-1]].min(-1)[1].cpu().numpy()
gw = [[trg['id2word'][word_id] for word_id in sent] for sent in gw]
for sent in gw[:5]:
print(' '.join(sent))
optimizer.zero_grad()
loss.backward(retain_graph=True)
if verbose:
t = X.grad[:, :, :Y.shape[-1]]
g = (t * Y).sum(-1)
print('grad:')
print(g[:5])
print('grad argmax:')
gw = t.min(-1)[1].cpu().numpy()
gw = [[trg['id2word'][word_id] for word_id in sent] for sent in gw]
for sent in gw[:5]:
print(' '.join(sent))
w = torch.tensor(onehot_initialization(X.max(-1)[1], trg_vocab_size)[:, :, :Y.shape[-1]], dtype=torch.float, device='cuda')
print('grad_:')
print((t * w).sum(-1)[:5])
monitor_loss_freq = config['management']['monitor_loss']
if batch_i % monitor_loss_freq == 0:
logging.info('epoch#{} batch{} {}'.format(
epoch_i, batch_i, losses.recent_repr(monitor_loss_freq)))
if (
config['management']['print_samples'] and
batch_i % config['management']['print_samples'] == 0
):
word_probs = model.decode(
decoder_logit
).data.cpu().numpy().argmax(axis=-1)
output_lines_trg = output_lines_trg.data.cpu().numpy()
samples = min(5, config['data']['batch_size'])
for sentence_pred, sentence_real in zip(
word_probs[:samples], output_lines_trg[:samples]
):
sentence_pred = [trg['id2word'][x] for x in sentence_pred]
sentence_real = [trg['id2word'][x] for x in sentence_real]
if '</s>' in sentence_real:
index = sentence_real.index('</s>')
sentence_real = sentence_real[:index]
sentence_pred = sentence_pred[:index]
#if '</s>' in sentence_pred:
# sentence_pred = sentence_pred[:sentence_pred.index('</s>')]
logging.info('Pred : %s ' % (' '.join(sentence_pred)))
logging.info('-----------------------------------------------')
logging.info('Real : %s ' % (' '.join(sentence_real)))
logging.info('===============================================')
if batch_i % config['management']['checkpoint_freq'] == 0:
logging.info('Evaluating model when batch_i = {} ...'.format(batch_i))
bleu = evaluate_model(
model, src, src_test, trg,
trg_test, config, verbose=False,
metric='bleu',
)
bleus.append((bleu,))
logging.info('Epoch#%d batch%d BLEU: %.5f' % (epoch_i, batch_i, bleu))
if save_dir:
dir = os.path.join(
save_dir,
experiment_name + '__epoch_%d__minibatch_%d' % (epoch_i, batch_i) + '.model')
logging.info('saving model into {} ...'.format(dir))
torch.save(model.state_dict(), dir)
load_dir = dir
optimizer.step()
print('epoch #{} eval...'.format(epoch_i))
bleu = evaluate_model(
model, src, src_test, trg,
trg_test, config, verbose=False,
metric='bleu',
)
bleus.append((bleu,))
logging.info('Epoch#%d BLEU: %.5f' % (epoch_i, bleu))
if save_dir:
dir = os.path.join(
save_dir,
experiment_name + '__epoch_%d' % (epoch_i) + '.model')
logging.info('saving model into {} ...'.format(dir))
torch.save(model.state_dict(), dir)
load_dir = dir
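        # note: `dir` shadows the builtin of the same name; updating load_dir
        # here makes the next epoch reload the checkpoint that was just saved
        # (see the top of the epoch loop).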
|
[
"evaluate.evaluate_model"
] |
[((535, 560), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (558, 560), False, 'import argparse\n'), ((717, 746), 'data_utils.read_config', 'read_config', (['config_file_path'], {}), '(config_file_path)\n', (728, 746), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string\n'), ((765, 790), 'data_utils.hyperparam_string', 'hyperparam_string', (['config'], {}), '(config)\n', (782, 790), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string\n'), ((867, 1018), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'filename': "('log/%s' % experiment_name)", 'filemode': '"""w"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s', filename='log/%s' %\n experiment_name, filemode='w')\n", (886, 1018), False, 'import logging\n'), ((1090, 1113), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1111, 1113), False, 'import logging\n'), ((1240, 1302), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (1257, 1302), False, 'import logging\n'), ((1492, 1579), 'data_utils.read_nmt_data', 'read_nmt_data', ([], {'src': "config['data']['src']", 'config': 'config', 'trg': "config['data']['trg']"}), "(src=config['data']['src'], config=config, trg=config['data'][\n 'trg'])\n", (1505, 1579), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string\n'), ((1611, 1708), 'data_utils.read_nmt_data', 'read_nmt_data', ([], {'src': "config['data']['test_src']", 'config': 'config', 'trg': "config['data']['test_trg']"}), "(src=config['data']['test_src'], config=config, trg=config[\n 'data']['test_trg'])\n", (1624, 1708), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string\n'), ((1882, 1915), 'logging.info', 'logging.info', (['"""Model Parameters:"""'], {}), "('Model Parameters:')\n", (1894, 1915), False, 'import logging\n'), ((1916, 1965), 'logging.info', 'logging.info', (["('Task: %s' % config['data']['task'])"], {}), "('Task: %s' % config['data']['task'])\n", (1928, 1965), False, 'import logging\n'), ((1968, 2022), 'logging.info', 'logging.info', (["('Model: %s' % config['model']['seq2seq'])"], {}), "('Model: %s' % config['model']['seq2seq'])\n", (1980, 2022), False, 'import logging\n'), ((2025, 2090), 'logging.info', 'logging.info', (["('Source Language: %s' % config['model']['src_lang'])"], {}), "('Source Language: %s' % config['model']['src_lang'])\n", (2037, 2090), False, 'import logging\n'), ((2093, 2158), 'logging.info', 'logging.info', (["('Target Language: %s' % config['model']['trg_lang'])"], {}), "('Target Language: %s' % config['model']['trg_lang'])\n", (2105, 2158), False, 'import logging\n'), ((2161, 2240), 'logging.info', 'logging.info', (["('Source Word Embedding Dim: %s' % config['model']['dim_word_src'])"], {}), "('Source Word Embedding Dim: %s' % config['model']['dim_word_src'])\n", (2173, 2240), False, 'import logging\n'), ((2243, 2322), 'logging.info', 'logging.info', (["('Target Word Embedding Dim: %s' % config['model']['dim_word_trg'])"], {}), "('Target Word Embedding Dim: %s' % config['model']['dim_word_trg'])\n", (2255, 2322), False, 'import logging\n'), ((2325, 2391), 'logging.info', 'logging.info', (["('Source RNN Hidden Dim: %s' % config['model']['dim'])"], {}), "('Source RNN Hidden Dim: %s' 
% config['model']['dim'])\n", (2337, 2391), False, 'import logging\n'), ((2394, 2460), 'logging.info', 'logging.info', (["('Target RNN Hidden Dim: %s' % config['model']['dim'])"], {}), "('Target RNN Hidden Dim: %s' % config['model']['dim'])\n", (2406, 2460), False, 'import logging\n'), ((2463, 2533), 'logging.info', 'logging.info', (["('Source RNN Depth: %d' % config['model']['n_layers_src'])"], {}), "('Source RNN Depth: %d' % config['model']['n_layers_src'])\n", (2475, 2533), False, 'import logging\n'), ((2536, 2606), 'logging.info', 'logging.info', (["('Target RNN Depth: %d' % config['model']['n_layers_trg'])"], {}), "('Target RNN Depth: %d' % config['model']['n_layers_trg'])\n", (2548, 2606), False, 'import logging\n'), ((2609, 2688), 'logging.info', 'logging.info', (["('Source RNN Bidirectional: %s' % config['model']['bidirectional'])"], {}), "('Source RNN Bidirectional: %s' % config['model']['bidirectional'])\n", (2621, 2688), False, 'import logging\n'), ((2691, 2752), 'logging.info', 'logging.info', (["('Batch Size: %d' % config['data']['batch_size'])"], {}), "('Batch Size: %d' % config['data']['batch_size'])\n", (2703, 2752), False, 'import logging\n'), ((2755, 2828), 'logging.info', 'logging.info', (["('Valid Batch Size: %d' % config['data']['valid_batch_size'])"], {}), "('Valid Batch Size: %d' % config['data']['valid_batch_size'])\n", (2767, 2828), False, 'import logging\n'), ((2831, 2894), 'logging.info', 'logging.info', (["('Optimizer: %s' % config['training']['optimizer'])"], {}), "('Optimizer: %s' % config['training']['optimizer'])\n", (2843, 2894), False, 'import logging\n'), ((2897, 2960), 'logging.info', 'logging.info', (["('Learning Rate: %f' % config['training']['lrate'])"], {}), "('Learning Rate: %f' % config['training']['lrate'])\n", (2909, 2960), False, 'import logging\n'), ((2964, 3018), 'logging.info', 'logging.info', (["('Found %d words in src' % src_vocab_size)"], {}), "('Found %d words in src' % src_vocab_size)\n", (2976, 3018), False, 'import logging\n'), ((3021, 3075), 'logging.info', 'logging.info', (["('Found %d words in trg' % trg_vocab_size)"], {}), "('Found %d words in trg' % trg_vocab_size)\n", (3033, 3075), False, 'import logging\n'), ((3258, 3266), 'criterions.matrixBLEU.mBLEU', 'mBLEU', (['(4)'], {}), '(4)\n', (3263, 3266), False, 'from criterions.matrixBLEU import mBLEU\n'), ((13038, 13131), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'src', 'src_test', 'trg', 'trg_test', 'config'], {'verbose': '(False)', 'metric': '"""bleu"""'}), "(model, src, src_test, trg, trg_test, config, verbose=False,\n metric='bleu')\n", (13052, 13131), False, 'from evaluate import evaluate_model\n'), ((13190, 13243), 'logging.info', 'logging.info', (["('Epoch#%d BLEU: %.5f' % (epoch_i, bleu))"], {}), "('Epoch#%d BLEU: %.5f' % (epoch_i, bleu))\n", (13202, 13243), False, 'import logging\n'), ((1410, 1431), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (1427, 1431), False, 'import logging\n'), ((3093, 3119), 'torch.ones', 'torch.ones', (['trg_vocab_size'], {}), '(trg_vocab_size)\n', (3103, 3119), False, 'import torch\n'), ((3194, 3233), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight_mask'}), '(weight=weight_mask)\n', (3213, 3233), True, 'import torch.nn as nn\n'), ((5982, 6002), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5996, 6002), False, 'import os\n'), ((7526, 7635), 'data_utils.get_minibatch', 'get_minibatch', (["src['data']", "src['word2id']", 'batch_i', 'batch_size', 'max_length'], 
{'add_start': '(True)', 'add_end': '(True)'}), "(src['data'], src['word2id'], batch_i, batch_size, max_length,\n add_start=True, add_end=True)\n", (7539, 7635), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string\n'), ((7730, 7839), 'data_utils.get_minibatch', 'get_minibatch', (["trg['data']", "trg['word2id']", 'batch_i', 'batch_size', 'max_length'], {'add_start': '(True)', 'add_end': '(True)'}), "(trg['data'], trg['word2id'], batch_i, batch_size, max_length,\n add_start=True, add_end=True)\n", (7743, 7839), False, 'from data_utils import read_nmt_data, get_minibatch, read_config, hyperparam_string\n'), ((7978, 8028), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['decoder_logit'], {'dim': '(-1)'}), '(decoder_logit, dim=-1)\n', (8005, 8028), False, 'import torch\n'), ((8731, 8746), 'torch.exp', 'torch.exp', (['(-mbl)'], {}), '(-mbl)\n', (8740, 8746), False, 'import torch\n'), ((13276, 13351), 'os.path.join', 'os.path.join', (['save_dir', "(experiment_name + '__epoch_%d' % epoch_i + '.model')"], {}), "(save_dir, experiment_name + '__epoch_%d' % epoch_i + '.model')\n", (13288, 13351), False, 'import os\n'), ((3325, 3839), 'model.Seq2Seq', 'Seq2Seq', ([], {'src_emb_dim': "config['model']['dim_word_src']", 'trg_emb_dim': "config['model']['dim_word_trg']", 'src_vocab_size': 'src_vocab_size', 'trg_vocab_size': 'trg_vocab_size', 'src_hidden_dim': "config['model']['dim']", 'trg_hidden_dim': "config['model']['dim']", 'batch_size': 'batch_size', 'bidirectional': "config['model']['bidirectional']", 'pad_token_src': "src['word2id']['<pad>']", 'pad_token_trg': "trg['word2id']['<pad>']", 'nlayers': "config['model']['n_layers_src']", 'nlayers_trg': "config['model']['n_layers_trg']", 'dropout': '(0.0)'}), "(src_emb_dim=config['model']['dim_word_src'], trg_emb_dim=config[\n 'model']['dim_word_trg'], src_vocab_size=src_vocab_size, trg_vocab_size\n =trg_vocab_size, src_hidden_dim=config['model']['dim'], trg_hidden_dim=\n config['model']['dim'], batch_size=batch_size, bidirectional=config[\n 'model']['bidirectional'], pad_token_src=src['word2id']['<pad>'],\n pad_token_trg=trg['word2id']['<pad>'], nlayers=config['model'][\n 'n_layers_src'], nlayers_trg=config['model']['n_layers_trg'], dropout=0.0)\n", (3332, 3839), False, 'from model import Seq2Seq, Seq2SeqAttention, Seq2SeqFastAttention\n'), ((7279, 7299), 'torch.load', 'torch.load', (['load_dir'], {}), '(load_dir)\n', (7289, 7299), False, 'import torch\n'), ((8067, 8122), 'utils.onehot_initialization', 'onehot_initialization', (['output_lines_trg', 'trg_vocab_size'], {}), '(output_lines_trg, trg_vocab_size)\n', (8088, 8122), False, 'from utils import onehot_initialization\n'), ((8449, 8473), 'torch.stack', 'torch.stack', (['mask'], {'dim': '(1)'}), '(mask, dim=1)\n', (8460, 8473), False, 'import torch\n'), ((8493, 8515), 'torch.sum', 'torch.sum', (['mask'], {'dim': '(1)'}), '(mask, dim=1)\n', (8502, 8515), False, 'import torch\n'), ((12335, 12428), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'src', 'src_test', 'trg', 'trg_test', 'config'], {'verbose': '(False)', 'metric': '"""bleu"""'}), "(model, src, src_test, trg, trg_test, config, verbose=False,\n metric='bleu')\n", (12349, 12428), False, 'from evaluate import evaluate_model\n'), ((12535, 12605), 'logging.info', 'logging.info', (["('Epoch#%d batch%d BLEU: %.5f' % (epoch_i, batch_i, bleu))"], {}), "('Epoch#%d batch%d BLEU: %.5f' % (epoch_i, batch_i, bleu))\n", (12547, 12605), False, 'import logging\n'), ((3990, 4577), 
'model.Seq2SeqAttention', 'Seq2SeqAttention', ([], {'src_emb_dim': "config['model']['dim_word_src']", 'trg_emb_dim': "config['model']['dim_word_trg']", 'src_vocab_size': 'src_vocab_size', 'trg_vocab_size': 'trg_vocab_size', 'src_hidden_dim': "config['model']['dim']", 'trg_hidden_dim': "config['model']['dim']", 'ctx_hidden_dim': "config['model']['dim']", 'attention_mode': '"""dot"""', 'batch_size': 'batch_size', 'bidirectional': "config['model']['bidirectional']", 'pad_token_src': "src['word2id']['<pad>']", 'pad_token_trg': "trg['word2id']['<pad>']", 'nlayers': "config['model']['n_layers_src']", 'nlayers_trg': "config['model']['n_layers_trg']", 'dropout': '(0.0)'}), "(src_emb_dim=config['model']['dim_word_src'], trg_emb_dim=\n config['model']['dim_word_trg'], src_vocab_size=src_vocab_size,\n trg_vocab_size=trg_vocab_size, src_hidden_dim=config['model']['dim'],\n trg_hidden_dim=config['model']['dim'], ctx_hidden_dim=config['model'][\n 'dim'], attention_mode='dot', batch_size=batch_size, bidirectional=\n config['model']['bidirectional'], pad_token_src=src['word2id']['<pad>'],\n pad_token_trg=trg['word2id']['<pad>'], nlayers=config['model'][\n 'n_layers_src'], nlayers_trg=config['model']['n_layers_trg'], dropout=0.0)\n", (4006, 4577), False, 'from model import Seq2Seq, Seq2SeqAttention, Seq2SeqFastAttention\n'), ((8298, 8335), 'torch.ones', 'torch.ones', (['X.shape[0]'], {'device': '"""cuda"""'}), "(X.shape[0], device='cuda')\n", (8308, 8335), False, 'import torch\n'), ((11949, 12012), 'logging.info', 'logging.info', (['"""-----------------------------------------------"""'], {}), "('-----------------------------------------------')\n", (11961, 12012), False, 'import logging\n'), ((12100, 12163), 'logging.info', 'logging.info', (['"""==============================================="""'], {}), "('===============================================')\n", (12112, 12163), False, 'import logging\n'), ((12654, 12759), 'os.path.join', 'os.path.join', (['save_dir', "(experiment_name + '__epoch_%d__minibatch_%d' % (epoch_i, batch_i) + '.model')"], {}), "(save_dir, experiment_name + '__epoch_%d__minibatch_%d' % (\n epoch_i, batch_i) + '.model')\n", (12666, 12759), False, 'import os\n'), ((4745, 5276), 'model.Seq2SeqFastAttention', 'Seq2SeqFastAttention', ([], {'src_emb_dim': "config['model']['dim_word_src']", 'trg_emb_dim': "config['model']['dim_word_trg']", 'src_vocab_size': 'src_vocab_size', 'trg_vocab_size': 'trg_vocab_size', 'src_hidden_dim': "config['model']['dim']", 'trg_hidden_dim': "config['model']['dim']", 'batch_size': 'batch_size', 'bidirectional': "config['model']['bidirectional']", 'pad_token_src': "src['word2id']['<pad>']", 'pad_token_trg': "trg['word2id']['<pad>']", 'nlayers': "config['model']['n_layers_src']", 'nlayers_trg': "config['model']['n_layers_trg']", 'dropout': '(0.0)'}), "(src_emb_dim=config['model']['dim_word_src'],\n trg_emb_dim=config['model']['dim_word_trg'], src_vocab_size=\n src_vocab_size, trg_vocab_size=trg_vocab_size, src_hidden_dim=config[\n 'model']['dim'], trg_hidden_dim=config['model']['dim'], batch_size=\n batch_size, bidirectional=config['model']['bidirectional'],\n pad_token_src=src['word2id']['<pad>'], pad_token_trg=trg['word2id'][\n '<pad>'], nlayers=config['model']['n_layers_src'], nlayers_trg=config[\n 'model']['n_layers_trg'], dropout=0.0)\n", (4765, 5276), False, 'from model import Seq2Seq, Seq2SeqAttention, Seq2SeqFastAttention\n')]
|
import time
import datetime
import pytz
import argparse
import numpy as np
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import os
from PIL import Image
import torchvision.utils as vutils
from model.net import MonocularDepthModel
from model.loss import LossNetwork, combined_loss, mean_l2_loss
from model.metrics import evaluate_predictions
from model.dataloader import DataLoaders, get_test_dataloader
from utils import *
from evaluate import infer_depth, evaluate
class Trainer():
def __init__(self, data_path, test_data_path):
self.dataloaders = DataLoaders(data_path)
self.test_data_path = test_data_path
def train_and_evaluate(self, config):
batch_size = config['batch_size']
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
train_dataloader = self.dataloaders.get_train_dataloader(batch_size = batch_size)
num_batches = len(train_dataloader)
test_dataloader = get_test_dataloader(self.test_data_path, config['test_batch_size'], shuffle=True)
model = MonocularDepthModel(backbone = config['backbone'])
model = model.to(device)
    params = [param for param in model.parameters() if param.requires_grad]
optimizer = torch.optim.Adam(params, config['lr'])
loss_model = LossNetwork().to(device)
if config['checkpoint']:
load_checkpoint(config['checkpoint'], model, optimizer)
print('Training...')
for epoch in range(config['epochs']):
accumulated_per_pixel_loss = RunningAverage()
accumulated_feature_loss = RunningAverage()
accumulated_iteration_time = RunningAverage()
epoch_start_time = time.time()
for iteration, batch in enumerate(train_dataloader):
model.train()
time_start = time.time()
optimizer.zero_grad()
images, depths = batch['img'], batch['depth']
images = normalize_batch(torch.autograd.Variable(images.to(device)))
depths = torch.autograd.Variable(depths.to(device))
predictions = model(images)
predictions_normalized = normalize_batch(predictions)
depths_normalized = normalize_batch(depths)
feature_losses_predictions = loss_model(predictions_normalized)
feature_losses_depths = loss_model(depths_normalized)
per_pixel_loss = combined_loss(predictions, depths)
accumulated_per_pixel_loss.update(per_pixel_loss, images.shape[0])
feature_loss = config['perceptual_weight'] * mean_l2_loss(feature_losses_predictions.res1, feature_losses_depths.res1)
accumulated_feature_loss.update(feature_loss, images.shape[0])
total_loss = per_pixel_loss + feature_loss
total_loss.backward()
optimizer.step()
time_end = time.time()
accumulated_iteration_time.update(time_end - time_start)
eta = str(datetime.timedelta(seconds = int(accumulated_iteration_time() * (num_batches - iteration))))
if iteration % config['log_interval'] == 0:
print(datetime.datetime.now(pytz.timezone('Asia/Kolkata')), end = ': ')
print('Epoch: %d [%d / %d] ; it_time: %f (%f) ; eta: %s' % (epoch, iteration, num_batches, time_end - time_start, accumulated_iteration_time(), eta))
print('Average per-pixel loss: %f; Average feature loss: %f' % (accumulated_per_pixel_loss(), accumulated_feature_loss()))
metrics = evaluate_predictions(predictions, depths)
epoch_end_time = time.time()
print('Epoch %d complete, time taken: %s' % (epoch, str(datetime.timedelta(seconds = int(epoch_end_time - epoch_start_time)))))
torch.cuda.empty_cache()
save_checkpoint({
'iteration': num_batches * epoch + iteration,
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict()}, config['checkpoint_dir'])
print('Epoch %d saved\n\n' % (epoch))
# EVALUATE ON TEST DATA:
test_metrics = evaluate(model, test_dataloader, model_upsample = True)
random_test_batch = next(iter(test_dataloader))
log_images = random_test_batch[0]['img']
log_depths = random_test_batch[0]['depth']
log_preds = torch.cat([infer_depth(img, model, upsample = True)[0].unsqueeze(0) for img in log_images], dim = 0)
def get_with_colormap(self, plots):
images = []
for plot in plots:
plt.imsave('_.png', plot, cmap='jet')
img = Image.open('_.png')
os.remove('_.png')
images.append(img)
return images
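  # get_with_colormap round-trips each depth map through a temporary '_.png'
  # so that matplotlib applies the 'jet' colormap, then deletes the file; the
  # returned PIL images can be logged or arranged in a grid (e.g. via vutils).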
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Training of depth estimation model')
'''REQUIRED ARGUMENTS'''
parser.add_argument('--train_dir', help='Train directory path - should contain the \'data\' folder', required = True)
parser.add_argument('--test_dir', help='Test directory path - should contain 3 files', required = True)
parser.add_argument('--batch_size', type=int, help='Batch size to process the train data', required = True)
parser.add_argument('--checkpoint_dir', help='Directory to save checkpoints in', required = True)
parser.add_argument('--epochs', type = int, help = 'Number of epochs', required = True)
'''OPTIONAL ARGUMENTS'''
parser.add_argument('--checkpoint', help='Model checkpoint path', default = None)
  parser.add_argument('--lr', type=float, help = 'Learning rate', default = 3e-4)
  parser.add_argument('--log_interval', type=int, help = 'Interval to print the avg. loss and metrics', default = 50)
parser.add_argument('--backbone', type=str, help = 'Model backbone: densenet161 or densenet121', default = 'densenet161')
parser.add_argument('--test_batch_size', type=int, help='Batch size for frequent testing', default = 2)
  parser.add_argument('--perceptual_weight', type=float, help='Weight for the perceptual loss', default = 0.5)
args = parser.parse_args()
if args.backbone not in ('densenet161', 'densenet121'):
raise Exception('Invalid backbone specified!')
if not os.path.isdir(args.checkpoint_dir):
os.mkdir(args.checkpoint_dir)
trainer = Trainer(args.train_dir, args.test_dir)
trainer.train_and_evaluate(vars(args))
|
[
"evaluate.evaluate",
"evaluate.infer_depth"
] |
[((4688, 4761), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training of depth estimation model"""'}), "(description='Training of depth estimation model')\n", (4711, 4761), False, 'import argparse\n'), ((614, 636), 'model.dataloader.DataLoaders', 'DataLoaders', (['data_path'], {}), '(data_path)\n', (625, 636), False, 'from model.dataloader import DataLoaders, get_test_dataloader\n'), ((998, 1084), 'model.dataloader.get_test_dataloader', 'get_test_dataloader', (['self.test_data_path', "config['test_batch_size']"], {'shuffle': '(True)'}), "(self.test_data_path, config['test_batch_size'], shuffle\n =True)\n", (1017, 1084), False, 'from model.dataloader import DataLoaders, get_test_dataloader\n'), ((1092, 1140), 'model.net.MonocularDepthModel', 'MonocularDepthModel', ([], {'backbone': "config['backbone']"}), "(backbone=config['backbone'])\n", (1111, 1140), False, 'from model.net import MonocularDepthModel\n'), ((1273, 1311), 'torch.optim.Adam', 'torch.optim.Adam', (['params', "config['lr']"], {}), "(params, config['lr'])\n", (1289, 1311), False, 'import torch\n'), ((6093, 6127), 'os.path.isdir', 'os.path.isdir', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (6106, 6127), False, 'import os\n'), ((6133, 6162), 'os.mkdir', 'os.mkdir', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (6141, 6162), False, 'import os\n'), ((796, 821), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (819, 821), False, 'import torch\n'), ((772, 792), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (784, 792), False, 'import torch\n'), ((827, 846), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (839, 846), False, 'import torch\n'), ((1716, 1727), 'time.time', 'time.time', ([], {}), '()\n', (1725, 1727), False, 'import time\n'), ((3587, 3598), 'time.time', 'time.time', ([], {}), '()\n', (3596, 3598), False, 'import time\n'), ((3739, 3763), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3761, 3763), False, 'import torch\n'), ((4089, 4142), 'evaluate.evaluate', 'evaluate', (['model', 'test_dataloader'], {'model_upsample': '(True)'}), '(model, test_dataloader, model_upsample=True)\n', (4097, 4142), False, 'from evaluate import infer_depth, evaluate\n'), ((4507, 4544), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""_.png"""', 'plot'], {'cmap': '"""jet"""'}), "('_.png', plot, cmap='jet')\n", (4517, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4576), 'PIL.Image.open', 'Image.open', (['"""_.png"""'], {}), "('_.png')\n", (4567, 4576), False, 'from PIL import Image\n'), ((4583, 4601), 'os.remove', 'os.remove', (['"""_.png"""'], {}), "('_.png')\n", (4592, 4601), False, 'import os\n'), ((1334, 1347), 'model.loss.LossNetwork', 'LossNetwork', ([], {}), '()\n', (1345, 1347), False, 'from model.loss import LossNetwork, combined_loss, mean_l2_loss\n'), ((1832, 1843), 'time.time', 'time.time', ([], {}), '()\n', (1841, 1843), False, 'import time\n'), ((2387, 2421), 'model.loss.combined_loss', 'combined_loss', (['predictions', 'depths'], {}), '(predictions, depths)\n', (2400, 2421), False, 'from model.loss import LossNetwork, combined_loss, mean_l2_loss\n'), ((2823, 2834), 'time.time', 'time.time', ([], {}), '()\n', (2832, 2834), False, 'import time\n'), ((2551, 2624), 'model.loss.mean_l2_loss', 'mean_l2_loss', (['feature_losses_predictions.res1', 'feature_losses_depths.res1'], {}), '(feature_losses_predictions.res1, feature_losses_depths.res1)\n', (2563, 2624), False, 'from 
model.loss import LossNetwork, combined_loss, mean_l2_loss\n'), ((3460, 3501), 'model.metrics.evaluate_predictions', 'evaluate_predictions', (['predictions', 'depths'], {}), '(predictions, depths)\n', (3480, 3501), False, 'from model.metrics import evaluate_predictions\n'), ((3103, 3132), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (3116, 3132), False, 'import pytz\n'), ((4325, 4363), 'evaluate.infer_depth', 'infer_depth', (['img', 'model'], {'upsample': '(True)'}), '(img, model, upsample=True)\n', (4336, 4363), False, 'from evaluate import infer_depth, evaluate\n')]
|
'''
@author Waldinsamkeit
@email <EMAIL>
@create date 2020-10-20 15:58:45
@desc [description]
'''
from pathlib import Path
import os
from sklearn import metrics
from dataloader import DataSet
from datetime import datetime
import torch.nn as nn
import torch
import numpy as np
from log import logger
from typing import Any, Callable, Dict, List, Optional, Sequence
from copy import deepcopy
from evaluate import EvalUnit,binary_confusion_matrix_evaluate,cluster_metrics_eval
from base.basewrapper import BaseWrapper
from base.basedataloader import BaseDataLoader
from utils import set_padding
import pickle
class ModelWrapper(BaseWrapper):
"""Class to wrapper training and testing of deeplearning model
"""
def __init__(self, model:nn.Module, config:Dict):
super(ModelWrapper,self).__init__(model,config)
self.append_require_mode('threshold')
self.threshold = self.getconfigattr('threshold',config=config)
def __check_before_work(self,keys:List[str]):
for key in keys:
if key not in self.__dict__:
                raise ValueError('{} is missing in Wrapper instance'.format(key))
def __trans_np_to_tensor(self,item:Sequence[Any]):
wordset, attention_mask, waiting_word, labels = item
F = lambda x: torch.tensor(x).long().to(self.device)
wordset = F(wordset)
attention_mask = F(attention_mask)
waiting_word = F(waiting_word)
tensor_labels = torch.tensor(labels).float().to(self.device)
return wordset, attention_mask, waiting_word, labels, tensor_labels
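    # __trans_np_to_tensor moves a numpy minibatch onto the device: the id
    # arrays become long tensors, and the raw `labels` array is returned
    # alongside the float `tensor_labels` so that metrics can be computed in
    # numpy while the loss is computed on torch tensors.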
def train_(self):
self.__check_before_work(['dev_dataloader','train_dataloader'])
self.model.to(self.device)
t = range(self.start_epoch, self.epoches)
all_step = len(self.train_dataloader)
validation_flag = True if self.dev_dataloader is not None else False
for epoch in t:
self.model.train()
epoch_unit = EvalUnit(0,0,0,0,'Training')
ep_loss = 0.0
for step,item in enumerate(self.train_dataloader):
batch_word_set, attention_mask, waiting_word, labels , tensor_labels = self.__trans_np_to_tensor(item)
pred_ans = self.model(batch_word_set, attention_mask, waiting_word)
step_loss = self.loss_fn(pred_ans,tensor_labels) / tensor_labels.shape[0]
ep_loss += step_loss.item()
self.optimizer.zero_grad()
step_loss.backward()
self.optimizer.step()
pred_labels = np.where(pred_ans.cpu().detach().numpy() > self.threshold,1,0)
unit = binary_confusion_matrix_evaluate(np.array(labels), pred_labels)
epoch_unit += unit
if (step + 1) % self.print_step == 0 or step + 1 == all_step:
logger.info('Training Epoch: {} step {}/{} Loss:{:.6f}'.format(
epoch,step + 1,all_step,step_loss
))
#Train Evaluation
logger.info('Evaluation Training Epoch:{}'.format(epoch))
val_loss = 0.0
val_unit = EvalUnit(0,0,0,0,'validation')
cluster_unit = {'FMI':0.0, 'NMI':0.0, 'ARI':0.0}
score = 0
if validation_flag:
# it will update model in validation method
val_loss, val_unit = self.validation()
if epoch > self.epoches / 10:
cluster_unit = self.validation_cluster_metrics()
score = cluster_unit['ARI']
else:
            # no validation mode:
            # update the best model according to its performance on the training set
score = epoch_unit.f1_score()
# using f1_score to update standard line
if self.best_score < score:
self.best_score = score
self.best_model = deepcopy(self.model)
self.save_check_point()
logger.info("Update BEST MODEL !")
logger.info("Training Evaluation Epoch :{}".format(epoch))
logger.info(epoch_unit)
# if (epoch + 1) % self.checkpoint_epoch == 0 or epoch + 1 == self.epoches:
# self.save_check_point(epoch=epoch)
yield (
ep_loss/all_step,
epoch_unit.accuracy(),
epoch_unit.precision(),
epoch_unit.recall(),
epoch_unit.f1_score(),
val_loss,
val_unit.accuracy(),
val_unit.precision(),
val_unit.recall(),
val_unit.f1_score(),
cluster_unit,
self.best_score
)
def train(self,train_dataloader:BaseDataLoader, dev_dataloader:Optional[BaseDataLoader]):
self.train_dataloader = train_dataloader
self.dev_dataloader = dev_dataloader
vocab = self.dev_dataloader.data.vocab
word2id = self.dev_dataloader.word2id
target_wordset_list = {}
for k,v in vocab.items():
if v not in target_wordset_list:
target_wordset_list[v] = [word2id[k]]
else:
target_wordset_list[v].append(word2id[k])
self.target_wordset_list = list(target_wordset_list.values())
for item in self.train_():
yield item
def validation(self):
self.model.eval()
all_step = len(self.dev_dataloader)
validation_unit = EvalUnit(0,0,0,0,'Evaluation')
loss = 0.0
for step, item in enumerate(self.dev_dataloader):
batch_word_set, attention_mask, waiting_word, labels , tensor_labels = self.__trans_np_to_tensor(item)
pred_ans = self.model(batch_word_set, attention_mask, waiting_word)
step_loss = self.loss_fn(pred_ans,tensor_labels) / tensor_labels.shape[0]
loss += step_loss.item()
pred_labels = np.where(pred_ans.cpu().detach().numpy() > self.threshold,1,0)
unit = binary_confusion_matrix_evaluate(np.array(labels), pred_labels)
validation_unit += unit
if (step + 1) % self.print_step == 0 or step + 1 == all_step:
logger.info('Validation {}/{} Loss: {:.6f}'.format(step, all_step,step_loss))
logger.info("Validation Evaluation:")
logger.info(validation_unit)
return loss/all_step, validation_unit
def validation_cluster_metrics(self):
self.model.eval()
vocab = self.dev_dataloader.data.vocab
word2id = self.dev_dataloader.word2id
pred_wordset_list = self.__cluster_predict(self.model,vocab=vocab,word2id=word2id)
ans = self.__evaluate_cluster(pred_wordset_list,self.target_wordset_list)
return { i:j for i,j in ans}
def test_performance_(self):
self.__check_before_work(['test_dataloader'])
'''
Test Performance
'''
self.best_model.eval()
test_unit = EvalUnit(0,0,0,0,'Test')
for step,item in enumerate(self.test_dataloader):
            batch_word_set, attention_mask, waiting_word, labels, _ = self.__trans_np_to_tensor(item)
            pred_ans = self.best_model(batch_word_set, attention_mask, waiting_word)
pred_labels = np.where(pred_ans.cpu().detach().numpy() > self.threshold,1,0)
unit = binary_confusion_matrix_evaluate(np.array(labels), pred_labels)
test_unit += unit
logger.info("Test Performance Evaluation:")
logger.info(test_unit)
return test_unit.metrics2dict()
def test_performance(self,test_dataloader:BaseDataLoader):
self.test_dataloader = test_dataloader
return self.test_performance_()
def __cluster_predict(self,model:nn.Module,vocab:Dict, word2id:Dict)->Sequence[Any]:
model.eval()
words = vocab.keys()
wordset_list = []
for word in words:
wordid = word2id[word]
#Empty List
if not wordset_list:
wordset_list.append([wordid])
continue
itemsum = len(wordset_list)
tmp_best_scores = 0
index = 0
for ix in range(0,itemsum, self.batch_size):
batch_word_set = wordset_list[ix:ix+self.batch_size]
batch_waiting_word = [wordid] * len(batch_word_set)
batch_word_set, attention_mask = set_padding(batch_word_set)
batch_word_set, attention_mask, batch_waiting_word, _, _ = self.__trans_np_to_tensor(
[batch_word_set, attention_mask, batch_waiting_word, 0]
)
scores = model(batch_word_set, attention_mask, batch_waiting_word)
best_scores = torch.max(scores).item()
if best_scores >= tmp_best_scores:
tmp_best_scores = best_scores
index = ix + torch.argmax(scores).item()
if tmp_best_scores > self.threshold:
wordset_list[index].append(wordid)
else:
wordset_list.append([wordid])
return wordset_list
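    # __cluster_predict is a greedy single-pass clustering: each new word is
    # scored (in minibatches) against every existing word set, joins the
    # best-scoring set when that score exceeds self.threshold, and otherwise
    # opens a new singleton set.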
def __evaluate_cluster(self,pred_wordset_list:Sequence[Any], target_wordset_list:Sequence[Any]):
pred_cluster = {}
# import pdb;pdb.set_trace()
for idx,pred_word_set in enumerate(pred_wordset_list):
for word in pred_word_set:
pred_cluster[word] = idx
target_cluster = {}
for idx, target_word_set in enumerate(target_wordset_list):
for word in target_word_set:
target_cluster[word] = idx
return cluster_metrics_eval(pred_cluster,target_cluster)
'''Public Method'''
def cluster_predict(self,dataset:DataSet,word2id:Dict,outputfile:Optional[Path]) -> Sequence[Any]:
"""Using Binary Classifer to cluster wordset
Args:
dataset: it's self defined class, in DataSet, we use vocab to get all words and true cluster result
word2id: it is got from embedding file, translate word to embedding index
outputfile: outputfile path
Returns:
List of word sets
"""
self.best_model.eval()
vocab = dataset.vocab
wordset_list = self.__cluster_predict(self.best_model,vocab=vocab,word2id=word2id)
#id2word
# import pdb;pdb.set_trace()
id2word = { j:i for i,j in word2id.items()}
F = lambda x:[ id2word[i] for i in x]
pred_word_sets = [ F(wordset) for wordset in wordset_list]
if outputfile is not None:
with open(outputfile, 'w', encoding='utf-8') as f:
for pred_word_set in pred_word_sets:
for word in pred_word_set:
f.write(word+' ')
f.write('\n')
return pred_word_sets
def evaluate(self, dataset:DataSet, pred_word_sets:Sequence[Any])->Sequence[Any]:
""" Use Evaluating Function to Evaluate the final result
Args:
dataset: it's self defined class, we use vocab attribute to get true cluster result
pred_word_set: the output of cluster_predict method | List of word sets
function_list: the list of evaluating function which have two input pred_cluster and target_cluster
"""
#trans datatype
clusters = set(dataset.vocab.values())
cluster2id = {cluster:idx for idx,cluster in enumerate(clusters)}
target_cluster = {key:cluster2id[value] for key,value in dataset.vocab.items()}
pred_cluster = {}
# import pdb;pdb.set_trace()
for idx,pred_word_set in enumerate(pred_word_sets):
for word in pred_word_set:
pred_cluster[word] = idx
# import pdb;pdb.set_trace()
return cluster_metrics_eval(pred_cluster,target_cluster)
"""
    The methods below are used for testing
"""
def Test_predict_wordset_attention(self,word_set:List[str],word2id:Dict):
        self.best_model.eval()
word_set_ = [ word2id[i] for i in word_set]
word_set_, attention_mask = set_padding([word_set_])
word_set_tensor = torch.tensor(word_set_).long().to(self.device)
attention_mask = torch.tensor(attention_mask).long().to(self.device)
attention_weight = self.best_model.test_predict_attention_weights(word_set_tensor, attention_mask)
attention_weight = attention_weight.cpu().detach().numpy()
d = {i:j.item() for i,j in zip(word_set,attention_weight)}
return d
def Test_predict_is_wordset(self,word_set:List[str],waiting_word:str,word2id:Dict):
        self.best_model.eval()
word_set_ = [ word2id[i] for i in word_set]
waiting_word_ = word2id[waiting_word]
        word_set_, attention_mask = set_padding([word_set_])
        word_set_tensor = torch.tensor(word_set_).long().to(self.device)
        attention_mask = torch.tensor(attention_mask).long().to(self.device)
waiting_word_tensor = torch.tensor([waiting_word_]).long().to(self.device)
y = self.best_model(word_set_tensor,attention_mask,waiting_word_tensor)
# batch_size , 1
y = y.squeeze(0).cpu().detach().numpy()
return y
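    # Hypothetical usage (the words must already exist in word2id):
    #   score = wrapper.Test_predict_is_wordset(['apple', 'pear'], 'peach', word2id)
    # returns the model's score for adding 'peach' to the set {apple, pear}.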
""" ---------------- OverWrite No Writer -----------------"""
def save(self,dir_path:Path):
""" save this wrapper
using pickle to save this wrapper
        It is convenient for us to get the entire wrapper without setting config
"""
if os.path.isdir(dir_path):
name = self.model.name
version = self.model.version
filename = name + "_" + version +"_wrapper.pkl"
filepath = dir_path.joinpath(filename)
else:
filepath = dir_path
d = self.__dict__
with open(filepath, 'wb') as f:
pickle.dump(d, f)
@classmethod
def load(cls,dir_path:Path):
""" load this wrapper
using pickle to load this wrapper
        It is convenient for us to get the entire wrapper without setting config
"""
# f = open(self.filename, 'rb')
# tmp_dict = cPickle.load(f)
# f.close()
# self.__dict__.update(tmp_dict)
if os.path.isdir(dir_path):
flist = os.listdir(dir_path)
if not flist:
msg = 'No wrapper pickle file'
                raise ValueError(msg)
filepath = Path.joinpath(dir_path,max(flist))
if os.path.isfile(dir_path):
filepath = dir_path
        with open(filepath, 'rb') as f:
tmp_dict = pickle.load(f)
return cls(tmp_dict['model'],tmp_dict)
|
[
"evaluate.EvalUnit",
"evaluate.cluster_metrics_eval"
] |
[((5616, 5650), 'evaluate.EvalUnit', 'EvalUnit', (['(0)', '(0)', '(0)', '(0)', '"""Evaluation"""'], {}), "(0, 0, 0, 0, 'Evaluation')\n", (5624, 5650), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((6428, 6465), 'log.logger.info', 'logger.info', (['"""Validation Evaluation:"""'], {}), "('Validation Evaluation:')\n", (6439, 6465), False, 'from log import logger\n'), ((6474, 6502), 'log.logger.info', 'logger.info', (['validation_unit'], {}), '(validation_unit)\n', (6485, 6502), False, 'from log import logger\n'), ((7113, 7141), 'evaluate.EvalUnit', 'EvalUnit', (['(0)', '(0)', '(0)', '(0)', '"""Test"""'], {}), "(0, 0, 0, 0, 'Test')\n", (7121, 7141), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((7603, 7646), 'log.logger.info', 'logger.info', (['"""Test Performance Evaluation:"""'], {}), "('Test Performance Evaluation:')\n", (7614, 7646), False, 'from log import logger\n'), ((7655, 7677), 'log.logger.info', 'logger.info', (['test_unit'], {}), '(test_unit)\n', (7666, 7677), False, 'from log import logger\n'), ((9797, 9847), 'evaluate.cluster_metrics_eval', 'cluster_metrics_eval', (['pred_cluster', 'target_cluster'], {}), '(pred_cluster, target_cluster)\n', (9817, 9847), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((11986, 12036), 'evaluate.cluster_metrics_eval', 'cluster_metrics_eval', (['pred_cluster', 'target_cluster'], {}), '(pred_cluster, target_cluster)\n', (12006, 12036), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((12288, 12312), 'utils.set_padding', 'set_padding', (['[word_set_]'], {}), '([word_set_])\n', (12299, 12312), False, 'from utils import set_padding\n'), ((12981, 13005), 'utils.set_padding', 'set_padding', (['[word_set_]'], {}), '([word_set_])\n', (12992, 13005), False, 'from utils import set_padding\n'), ((13688, 13711), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (13701, 13711), False, 'import os\n'), ((14421, 14444), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (14434, 14444), False, 'import os\n'), ((14671, 14695), 'os.path.isfile', 'os.path.isfile', (['dir_path'], {}), '(dir_path)\n', (14685, 14695), False, 'import os\n'), ((1958, 1990), 'evaluate.EvalUnit', 'EvalUnit', (['(0)', '(0)', '(0)', '(0)', '"""Training"""'], {}), "(0, 0, 0, 0, 'Training')\n", (1966, 1990), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((3141, 3175), 'evaluate.EvalUnit', 'EvalUnit', (['(0)', '(0)', '(0)', '(0)', '"""validation"""'], {}), "(0, 0, 0, 0, 'validation')\n", (3149, 3175), False, 'from evaluate import EvalUnit, binary_confusion_matrix_evaluate, cluster_metrics_eval\n'), ((4141, 4164), 'log.logger.info', 'logger.info', (['epoch_unit'], {}), '(epoch_unit)\n', (4152, 4164), False, 'from log import logger\n'), ((14026, 14043), 'pickle.dump', 'pickle.dump', (['d', 'f'], {}), '(d, f)\n', (14037, 14043), False, 'import pickle\n'), ((14466, 14486), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (14476, 14486), False, 'import os\n'), ((14793, 14807), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14804, 14807), False, 'import pickle\n'), ((3933, 3953), 'copy.deepcopy', 'deepcopy', (['self.model'], {}), '(self.model)\n', (3941, 3953), False, 'from copy import deepcopy\n'), ((4010, 4044), 'log.logger.info', 'logger.info', (['"""Update BEST MODEL !"""'], {}), 
"('Update BEST MODEL !')\n", (4021, 4044), False, 'from log import logger\n'), ((6183, 6199), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6191, 6199), True, 'import numpy as np\n'), ((7534, 7550), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7542, 7550), True, 'import numpy as np\n'), ((8567, 8594), 'utils.set_padding', 'set_padding', (['batch_word_set'], {}), '(batch_word_set)\n', (8578, 8594), False, 'from utils import set_padding\n'), ((2681, 2697), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2689, 2697), True, 'import numpy as np\n'), ((1450, 1470), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (1462, 1470), False, 'import torch\n'), ((8904, 8921), 'torch.max', 'torch.max', (['scores'], {}), '(scores)\n', (8913, 8921), False, 'import torch\n'), ((12339, 12362), 'torch.tensor', 'torch.tensor', (['word_set_'], {}), '(word_set_)\n', (12351, 12362), False, 'import torch\n'), ((12411, 12439), 'torch.tensor', 'torch.tensor', (['attention_mask'], {}), '(attention_mask)\n', (12423, 12439), False, 'import torch\n'), ((13032, 13055), 'torch.tensor', 'torch.tensor', (['word_set_'], {}), '(word_set_)\n', (13044, 13055), False, 'import torch\n'), ((13104, 13133), 'torch.tensor', 'torch.tensor', (['attentionn_mask'], {}), '(attentionn_mask)\n', (13116, 13133), False, 'import torch\n'), ((13187, 13216), 'torch.tensor', 'torch.tensor', (['[waiting_word_]'], {}), '([waiting_word_])\n', (13199, 13216), False, 'import torch\n'), ((1276, 1291), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (1288, 1291), False, 'import torch\n'), ((9064, 9084), 'torch.argmax', 'torch.argmax', (['scores'], {}), '(scores)\n', (9076, 9084), False, 'import torch\n')]
|
import pyarrow
import argparse
import os
from typing import Any, Dict, List, Optional, Union
#import soundfile as sf
#import librosa
import torch
#from transformers import Wav2Vec2CTCTokenizer
#from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2Processor
from transformers import Wav2Vec2ForCTC
#from datasets import load_metric, Dataset, concatenate_datasets, load_dataset
#from datasets import ClassLabel
#from dataclasses import dataclass, field
# custom script
import sys
#sys.path.append('/content/drive/MyDrive/w2v2_project/src') # Colab
sys.path.append('/home/prsull/scratch/l2asr/src') # Compute Canada
import asr4l2_utils as ut
import evaluate as ev
def main(args):
"The pipeline for testing one or more models."
evaluator = ev.Evaluator()
evaluator.setup(args)
evaluator.start()
if __name__ == "__main__":
if (
int(pyarrow.__version__.split(".")[1]) < 16
and int(pyarrow.__version__.split(".")[0]) == 0
):
os.kill(os.getpid(), 9)
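    # presumably the usual notebook workaround: the `datasets` stack needs
    # pyarrow >= 0.16, and killing the process forces the runtime to restart
    # with an upgraded pyarrow.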
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description="Parse input args")
parser.add_argument(
"--PROCESSOR_PATH", required=True,
help="Path to the processor",
)
parser.add_argument(
"--SPLIT_PATH", required=False, # required for ARCTIC
help="Path to the test dataset",
)
parser.add_argument(
"--L1", required=False,
help="L1 to be evaluated on"
)
parser.add_argument(
"--REMOVED_IDS", required=False, default=[], nargs="*",
help="Zero or more speaker IDs to be removed from dev set",
)
parser.add_argument(
"--MODEL_PATHS", required=True, default=[], nargs="+",
help="One or more paths to the checkpoint or model name",
)
parser.add_argument(
"--EXPERIMENT_NAMES", required=True, default=[], nargs="+",
help="One or more experiment names (to be used for the pred-ref text file and the scoreboard)",
)
parser.add_argument(
"--SAVE_PATH", required=True,
help="Path for saving the prediction-reference pair files"
)
parser.add_argument(
"--CORPUS", required=True,
help="ARC=L1- or L2-ARCTIC, LS=LibriSpeech"
)
parser.add_argument(
"--LM_PATH", required=False, default=None,
help="Path to KenLM arpa"
)
parser.add_argument(
"--VOCAB_PATH", required=False, default=None,
help="Path to unigram_list from OpenSLR"
)
parser.add_argument(
"--ALPHA", required=False, type=float, default=0.5,
help="ALPHA value for pyctcdecode"
)
parser.add_argument(
"--BETA", required=False, type=float, default=1.5,
help="BETA value for pyctcdecode"
)
parser.add_argument(
"--BEAM", required=False, type=int, default=100,
help="BEAM value for pyctcdecode"
)
parser.add_argument(
"--DEV_ONLY", required=False, type=bool, default=False,
help="For doing hyperparameter tuning"
)
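    # note: argparse's type=bool treats any non-empty string (even "False")
    # as True, so --DEV_ONLY can effectively only be switched on from the CLI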
args = parser.parse_args()
main(args)
|
[
"evaluate.Evaluator"
] |
[((581, 630), 'sys.path.append', 'sys.path.append', (['"""/home/prsull/scratch/l2asr/src"""'], {}), "('/home/prsull/scratch/l2asr/src')\n", (596, 630), False, 'import sys\n'), ((783, 797), 'evaluate.Evaluator', 'ev.Evaluator', ([], {}), '()\n', (795, 797), True, 'import evaluate as ev\n'), ((1122, 1177), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse input args"""'}), "(description='Parse input args')\n", (1145, 1177), False, 'import argparse\n'), ((1017, 1028), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1026, 1028), False, 'import os\n'), ((1070, 1095), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1093, 1095), False, 'import torch\n'), ((897, 927), 'pyarrow.__version__.split', 'pyarrow.__version__.split', (['"""."""'], {}), "('.')\n", (922, 927), False, 'import pyarrow\n'), ((953, 983), 'pyarrow.__version__.split', 'pyarrow.__version__.split', (['"""."""'], {}), "('.')\n", (978, 983), False, 'import pyarrow\n')]
|
"""Train and evaluate the model"""
import argparse
import logging
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from tqdm import trange
import tools.utils as utils
import model.net as net
from tools.data_loader import DataLoader
from evaluate import evaluate
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/SemEval2010_task8', help="Directory containing the dataset")
parser.add_argument('--embedding_file', default='data/embeddings/vector_50d.txt', help="Path to embeddings file.")
parser.add_argument('--model_dir', default='experiments/base_model', help="Directory containing params.json")
parser.add_argument('--gpu', default=-1, help="GPU device number; -1 (the default) means CPU.")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before training")
def train(model, data_iterator, optimizer, scheduler, params, steps_num):
"""Train the model on `steps_num` batches"""
# set model to training mode
model.train()
# scheduler.step()
# a running average object for loss
loss_avg = utils.RunningAverage()
# Use tqdm for progress bar
t = trange(steps_num)
for _ in t:
# fetch the next training batch
batch_data, batch_labels = next(data_iterator)
# compute model output and loss
batch_output = model(batch_data)
loss = model.loss(batch_output, batch_labels)
# clear previous gradients, compute gradients of all variables wrt loss
model.zero_grad()
# optimizer.zero_grad()
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), params.clip_grad)
# performs updates using calculated gradients
optimizer.step()
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
return loss_avg()
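# tools.utils.RunningAverage is assumed to behave roughly like:
#   avg = utils.RunningAverage(); avg.update(2.0); avg.update(4.0); avg() -> 3.0
# i.e. update() accumulates values and calling the object returns their mean.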
def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, metric_labels, model_dir, restore_file=None):
"""Train the model and evaluate every epoch."""
# reload weights from restore_file if specified
if restore_file is not None:
        restore_path = os.path.join(model_dir, restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
best_val_f1 = 0.0
patience_counter = 0
for epoch in range(1, params.epoch_num + 1):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch, params.epoch_num))
# Compute number of batches in one epoch
train_steps_num = params.train_size // params.batch_size
val_steps_num = params.val_size // params.batch_size
# data iterator for training
        train_data_iterator = data_loader.data_iterator(train_data, params.batch_size, shuffle=True)
# Train for one epoch on training set
train_loss = train(model, train_data_iterator, optimizer, scheduler, params, train_steps_num)
# data iterator for training and validation
train_data_iterator = data_loader.data_iterator(train_data, params.batch_size)
val_data_iterator = data_loader.data_iterator(val_data, params.batch_size)
# Evaluate for one epoch on training set and validation set
train_metrics = evaluate(model, train_data_iterator, train_steps_num, metric_labels)
train_metrics['loss'] = train_loss
train_metrics_str = "; ".join("{}: {:05.2f}".format(k, v) for k, v in train_metrics.items())
logging.info("- Train metrics: " + train_metrics_str)
val_metrics = evaluate(model, val_data_iterator, val_steps_num, metric_labels)
val_metrics_str = "; ".join("{}: {:05.2f}".format(k, v) for k, v in val_metrics.items())
logging.info("- Eval metrics: " + val_metrics_str)
val_f1 = val_metrics['f1']
improve_f1 = val_f1 - best_val_f1
        # Save weights of the network
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict' : optimizer.state_dict()},
is_best=improve_f1>0,
checkpoint=model_dir)
if improve_f1 > 0:
logging.info("- Found new best F1")
best_val_f1 = val_f1
if improve_f1 < params.patience:
patience_counter += 1
else:
patience_counter = 0
else:
patience_counter += 1
# Early stopping and logging best f1
if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:
logging.info("best val f1: {:05.2f}".format(best_val_f1))
break
def CNN(data_loader,params):
# Define the model and optimizer
model = net.CNN(data_loader, params)
if params.optim_method == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=params.learning_rate, momentum=0.9, weight_decay=params.weight_decay)
elif params.optim_method == 'adam':
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate, betas=(0.9, 0.999), weight_decay=params.weight_decay)
elif params.optim_method == 'adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, weight_decay=params.weight_decay)
else:
raise ValueError("Unknown optimizer, must be one of 'sgd'/'adam'/'adadelta'.")
    scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1/(1 + 0.05*epoch))  # dynamically decay the lr: lr_t = lr_0 / (1 + 0.05 * epoch)
# Train and evaluate the model
logging.info("Starting training for {} epoch(s)".format(params.epoch_num))
train_and_evaluate(model=model,
train_data=train_data,
val_data=val_data,
optimizer=optimizer,
scheduler=scheduler,
params=params,
metric_labels=metric_labels,
model_dir=args.model_dir,
restore_file=args.restore_file)
def BiLSTM_Att(data_loader, params):
# Define the model and optimizer
model = net.BiLSTM_Att(data_loader, params)
if params.optim_method == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=params.learning_rate, momentum=0.9, weight_decay=params.weight_decay)
elif params.optim_method == 'adam':
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate, betas=(0.9, 0.999), weight_decay=params.weight_decay)
elif params.optim_method == 'adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, weight_decay=params.weight_decay)
else:
raise ValueError("Unknown optimizer, must be one of 'sgd'/'adam'.")
    scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1/(1 + 0.05*epoch))  # dynamically decay the lr: lr_t = lr_0 / (1 + 0.05 * epoch)
# Train and evaluate the model
logging.info("Starting training for {} epoch(s)".format(params.epoch_num))
train_and_evaluate(model=model,
train_data=train_data,
val_data=val_data,
optimizer=optimizer,
scheduler=scheduler,
# scheduler=None,
params=params,
metric_labels=metric_labels,
model_dir=args.model_dir,
restore_file=args.restore_file)
def BiLSTM_MaxPooling(data_loader, params):
# Define the model and optimizer
model = net.BiLSTM_MaxPooling(data_loader, params)
if params.optim_method == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=params.learning_rate, momentum=0.9, weight_decay=params.weight_decay)
elif params.optim_method == 'adam':
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate, betas=(0.9, 0.999), weight_decay=params.weight_decay)
elif params.optim_method == 'adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, weight_decay=params.weight_decay)
else:
raise ValueError("Unknown optimizer, must be one of 'sgd'/'adam'.")
    # scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1/(1 + 0.05*epoch))  # dynamically decay the learning rate
# Train and evaluate the model
logging.info("Starting training for {} epoch(s)".format(params.epoch_num))
train_and_evaluate(model=model,
train_data=train_data,
val_data=val_data,
optimizer=optimizer,
# scheduler=scheduler,
scheduler=None,
params=params,
metric_labels=metric_labels,
model_dir=args.model_dir,
restore_file=args.restore_file)
if __name__ == '__main__':
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# Use GPU if available
if torch.cuda.is_available():
params.gpu = args.gpu
else:
params.gpu = -1
# Set the random seed for reproducible experiments
torch.manual_seed(230)
if params.gpu >= 0:
torch.cuda.set_device(params.gpu)
torch.cuda.manual_seed(230)
# Set the logger
utils.set_logger(os.path.join(args.model_dir, 'train.log'))
# Create the input data pipeline
logging.info("Loading the datasets...")
# Initialize the DataLoader
data_loader = DataLoader(data_dir=args.data_dir,
embedding_file=args.embedding_file,
word_emb_dim=params.word_emb_dim,
max_len=params.max_len,
pos_dis_limit=params.pos_dis_limit,
pad_word='<pad>',
unk_word='<unk>',
other_label='Other',
gpu=params.gpu)
    # Load word embeddings
data_loader.load_embeddings_from_file_and_unique_words(emb_path=args.embedding_file,
emb_delimiter=' ',
verbose=True)
metric_labels = data_loader.metric_labels # relation labels to be evaluated
# Load data
train_data = data_loader.load_data('train')
# Due to the small dataset, the test data is used as validation data!
val_data = data_loader.load_data('test')
# Specify the train and val dataset sizes
params.train_size = train_data['size']
params.val_size = val_data['size']
logging.info("- done.")
# train with CNN model
    CNN(data_loader, params)
    # train with BiLSTM + attention
    """
    train: Adadelta with a dynamically adjusted learning rate
    batch_size: 10
    1. dynamic max_len
     - Eval metrics: precision: 79.13; recall: 82.29; f1: 80.68
    2. fixed max_len = 98
    """
    # BiLSTM_Att(data_loader, params)
    # BiLSTM_MaxPooling(data_loader, params)
|
[
"evaluate.evaluate"
] |
[((345, 370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (368, 370), False, 'import argparse\n'), ((1222, 1244), 'tools.utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1242, 1244), True, 'import tools.utils as utils\n'), ((1290, 1307), 'tqdm.trange', 'trange', (['steps_num'], {}), '(steps_num)\n', (1296, 1307), False, 'from tqdm import trange\n'), ((5099, 5127), 'model.net.CNN', 'net.CNN', (['data_loader', 'params'], {}), '(data_loader, params)\n', (5106, 5127), True, 'import model.net as net\n'), ((5719, 5786), 'torch.optim.lr_scheduler.LambdaLR', 'LambdaLR', (['optimizer'], {'lr_lambda': '(lambda epoch: 1 / (1 + 0.05 * epoch))'}), '(optimizer, lr_lambda=lambda epoch: 1 / (1 + 0.05 * epoch))\n', (5727, 5786), False, 'from torch.optim.lr_scheduler import LambdaLR\n'), ((6400, 6435), 'model.net.BiLSTM_Att', 'net.BiLSTM_Att', (['data_loader', 'params'], {}), '(data_loader, params)\n', (6414, 6435), True, 'import model.net as net\n'), ((7016, 7083), 'torch.optim.lr_scheduler.LambdaLR', 'LambdaLR', (['optimizer'], {'lr_lambda': '(lambda epoch: 1 / (1 + 0.05 * epoch))'}), '(optimizer, lr_lambda=lambda epoch: 1 / (1 + 0.05 * epoch))\n', (7024, 7083), False, 'from torch.optim.lr_scheduler import LambdaLR\n'), ((7747, 7789), 'model.net.BiLSTM_MaxPooling', 'net.BiLSTM_MaxPooling', (['data_loader', 'params'], {}), '(data_loader, params)\n', (7768, 7789), True, 'import model.net as net\n'), ((9125, 9168), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (9137, 9168), False, 'import os\n'), ((9180, 9205), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (9194, 9205), False, 'import os\n'), ((9279, 9302), 'tools.utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (9291, 9302), True, 'import tools.utils as utils\n'), ((9338, 9363), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9361, 9363), False, 'import torch\n'), ((9493, 9515), 'torch.manual_seed', 'torch.manual_seed', (['(230)'], {}), '(230)\n', (9510, 9515), False, 'import torch\n'), ((9750, 9789), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (9762, 9789), False, 'import logging\n'), ((9845, 10096), 'tools.data_loader.DataLoader', 'DataLoader', ([], {'data_dir': 'args.data_dir', 'embedding_file': 'args.embedding_file', 'word_emb_dim': 'params.word_emb_dim', 'max_len': 'params.max_len', 'pos_dis_limit': 'params.pos_dis_limit', 'pad_word': '"""<pad>"""', 'unk_word': '"""<unk>"""', 'other_label': '"""Other"""', 'gpu': 'params.gpu'}), "(data_dir=args.data_dir, embedding_file=args.embedding_file,\n word_emb_dim=params.word_emb_dim, max_len=params.max_len, pos_dis_limit\n =params.pos_dis_limit, pad_word='<pad>', unk_word='<unk>', other_label=\n 'Other', gpu=params.gpu)\n", (9855, 10096), False, 'from tools.data_loader import DataLoader\n'), ((10983, 11006), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (10995, 11006), False, 'import logging\n'), ((2345, 2405), 'os.path.join', 'os.path.join', (['args.model_dir', "(args.restore_file + '.pth.tar')"], {}), "(args.model_dir, args.restore_file + '.pth.tar')\n", (2357, 2405), False, 'import os\n'), ((2488, 2541), 'tools.utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (2509, 2541), True, 'import tools.utils as utils\n'), ((3521, 3589), 'evaluate.evaluate', 
'evaluate', (['model', 'train_data_iterator', 'train_steps_num', 'metric_labels'], {}), '(model, train_data_iterator, train_steps_num, metric_labels)\n', (3529, 3589), False, 'from evaluate import evaluate\n'), ((3742, 3795), 'logging.info', 'logging.info', (["('- Train metrics: ' + train_metrics_str)"], {}), "('- Train metrics: ' + train_metrics_str)\n", (3754, 3795), False, 'import logging\n'), ((3827, 3891), 'evaluate.evaluate', 'evaluate', (['model', 'val_data_iterator', 'val_steps_num', 'metric_labels'], {}), '(model, val_data_iterator, val_steps_num, metric_labels)\n', (3835, 3891), False, 'from evaluate import evaluate\n'), ((3997, 4047), 'logging.info', 'logging.info', (["('- Eval metrics: ' + val_metrics_str)"], {}), "('- Eval metrics: ' + val_metrics_str)\n", (4009, 4047), False, 'import logging\n'), ((9548, 9581), 'torch.cuda.set_device', 'torch.cuda.set_device', (['params.gpu'], {}), '(params.gpu)\n', (9569, 9581), False, 'import torch\n'), ((9590, 9617), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(230)'], {}), '(230)\n', (9612, 9617), False, 'import torch\n'), ((9665, 9706), 'os.path.join', 'os.path.join', (['args.model_dir', '"""train.log"""'], {}), "(args.model_dir, 'train.log')\n", (9677, 9706), False, 'import os\n'), ((4506, 4541), 'logging.info', 'logging.info', (['"""- Found new best F1"""'], {}), "('- Found new best F1')\n", (4518, 4541), False, 'import logging\n')]
|
from flask import Flask, render_template, request, abort
from utils.downloader import download_img, download_thumbnail
import sys
from evaluate import evaluate, mobilenet, nasnet
import os
import json
app = Flask(__name__)
fn = "temp/flask_test.jpg"
model = mobilenet()
model._make_predict_function()
#model2 = nasnet()
#model2._make_predict_function()
test = "teeest"
@app.route('/',methods=['GET'])
def index():
test = "teeest"
return render_template('index.html')
@app.route('/', methods=['POST'])
def predict():
_url = request.form['imgUrl']
print(_url)
try:
        download_img(_url, fn)
        #download_thumbnail(url,fn+"thumbnail.jpg")
        _fn, _mean, _std = evaluate(model, [fn])[0]
        print(_fn, _mean, _std)
    except ValueError as err:
        print(err)
        # abort(400) raises immediately, so the JSON body after it was dead
        # code; return the error payload with an explicit 400 status instead
        return json.dumps({'error': str(err)}), 400
    return json.dumps({'filename': _fn, 'mean': _mean, 'std': _std})  # render_template('index.html', foo=False, test="123")
@app.route('/url=<path:url>')
def result(url):
    download_img(url, fn)
    download_thumbnail(url, fn + "thumbnail.jpg")
    pred = evaluate(model, [fn, fn + "thumbnail.jpg"])
#pred2 = evaluate(model2,[fn,fn+"thumbnail.jpg"])
return 'Mobilenet: ' + str(pred) #+ '| Nasnet: ' + str(pred2)
if __name__ == '__main__':
app.run(debug=True)
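# Example request against the POST route above (a usage sketch; the URL and
# image address are assumptions, not from the original app):
#   curl -X POST -F "imgUrl=https://example.com/some.jpg" http://127.0.0.1:5000/
# which responds with JSON of the form {"filename": ..., "mean": ..., "std": ...}.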
|
[
"evaluate.evaluate",
"evaluate.mobilenet"
] |
[((207, 222), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'from flask import Flask, render_template, request, abort\n'), ((258, 269), 'evaluate.mobilenet', 'mobilenet', ([], {}), '()\n', (267, 269), False, 'from evaluate import evaluate, mobilenet, nasnet\n'), ((446, 475), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (461, 475), False, 'from flask import Flask, render_template, request, abort\n'), ((885, 942), 'json.dumps', 'json.dumps', (["{'filename': _fn, 'mean': _mean, 'std': _std}"], {}), "({'filename': _fn, 'mean': _mean, 'std': _std})\n", (895, 942), False, 'import json\n'), ((1046, 1067), 'utils.downloader.download_img', 'download_img', (['url', 'fn'], {}), '(url, fn)\n', (1058, 1067), False, 'from utils.downloader import download_img, download_thumbnail\n'), ((1071, 1116), 'utils.downloader.download_thumbnail', 'download_thumbnail', (['url', "(fn + 'thumbnail.jpg')"], {}), "(url, fn + 'thumbnail.jpg')\n", (1089, 1116), False, 'from utils.downloader import download_img, download_thumbnail\n'), ((1126, 1169), 'evaluate.evaluate', 'evaluate', (['model', "[fn, fn + 'thumbnail.jpg']"], {}), "(model, [fn, fn + 'thumbnail.jpg'])\n", (1134, 1169), False, 'from evaluate import evaluate, mobilenet, nasnet\n'), ((594, 616), 'utils.downloader.download_img', 'download_img', (['_url', 'fn'], {}), '(_url, fn)\n', (606, 616), False, 'from utils.downloader import download_img, download_thumbnail\n'), ((693, 714), 'evaluate.evaluate', 'evaluate', (['model', '[fn]'], {}), '(model, [fn])\n', (701, 714), False, 'from evaluate import evaluate, mobilenet, nasnet\n'), ((812, 822), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (817, 822), False, 'from flask import Flask, render_template, request, abort\n')]
|
import argparse
import itertools
import os.path
import time, timeit
import sys
import dynet as dy
import numpy as np
import evaluate
import parse
import trees
import vocabulary
import gc
from collections import defaultdict
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = "{}h{:02}m{:02}s".format(hours, minutes, seconds)
if days > 0:
elapsed_string = "{}d{}".format(days, elapsed_string)
return elapsed_string
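# e.g. an elapsed span of 93784 seconds divmods into 1 day, 2 hours, 3 minutes
# and 4 seconds, so format_elapsed would return "1d2h03m04s".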
def run_train(args):
if args.numpy_seed is not None:
print("Setting numpy random seed to {}...".format(args.numpy_seed))
np.random.seed(args.numpy_seed)
sys.setrecursionlimit(10000)
print("Loading training trees from {}...".format(args.train_path))
train_treebank = trees.load_trees(args.train_path)
print("Loaded {:,} training examples.".format(len(train_treebank)))
print("Loading development trees from {}...".format(args.dev_path))
dev_treebank = trees.load_trees(args.dev_path)
print("Loaded {:,} development examples.".format(len(dev_treebank)))
print("Processing trees for training...")
train_parse = [tree.convert() for tree in train_treebank]
print("Constructing vocabularies...")
tag_vocab = vocabulary.Vocabulary()
tag_vocab.index(parse.START)
tag_vocab.index(parse.STOP)
word_vocab = vocabulary.Vocabulary()
word_vocab.index(parse.START)
word_vocab.index(parse.STOP)
word_vocab.index(parse.UNK)
label_vocab = vocabulary.Vocabulary()
label_vocab.index(())
for tree in train_parse:
nodes = [tree]
while nodes:
node = nodes.pop()
if isinstance(node, trees.InternalParseNode):
label_vocab.index(node.label)
nodes.extend(reversed(node.children))
else:
tag_vocab.index(node.tag)
word_vocab.index(node.word)
tag_vocab.freeze()
word_vocab.freeze()
label_vocab.freeze()
def print_vocabulary(name, vocab):
special = {parse.START, parse.STOP, parse.UNK}
print("{} ({:,}): {}".format(
name, vocab.size,
sorted(value for value in vocab.values if value in special) +
sorted(value for value in vocab.values if value not in special)))
if args.print_vocabs:
print_vocabulary("Tag", tag_vocab)
print_vocabulary("Word", word_vocab)
print_vocabulary("Label", label_vocab)
print("Initializing model...")
model = dy.ParameterCollection()
if os.path.exists(args.model_path_base + ".meta") and \
os.path.exists(args.model_path_base + ".data"):
[parser] = dy.load(args.model_path_base, model)
args.model_path_base = args.model_path_base.split("_dev")[0] + "-cont"
elif args.parser_type == "beam-parse":
parser = parse.BeamParser(
model,
tag_vocab,
word_vocab,
label_vocab,
args.tag_embedding_dim,
args.word_embedding_dim,
args.lstm_layers,
args.lstm_dim,
args.label_hidden_dim,
args.dropout,
args.beamsize,
)
else:
parser = parse.ChartParser(
model,
tag_vocab,
word_vocab,
label_vocab,
args.tag_embedding_dim,
args.word_embedding_dim,
args.lstm_layers,
args.lstm_dim,
args.label_hidden_dim,
args.dropout,
)
parser.cross_span = args.cross_span
    parser.cubepruning = not args.nocubepruning
trainer = dy.AdamTrainer(model)
total_processed = 0
current_processed = 0
check_every = len(train_parse) / args.checks_per_epoch
best_dev_fscore = -np.inf
best_dev_model_path = None
start_time = time.time()
def check_dev():
nonlocal best_dev_fscore
nonlocal best_dev_model_path
dev_start_time = time.time()
dev_predicted = []
for tree in dev_treebank:
dy.renew_cg()
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
predicted, _ = parser.parse(sentence)
dev_predicted.append(predicted.convert())
dev_fscore = evaluate.evalb(dev_treebank, dev_predicted)
print(
"dev-fscore {} "
"dev-elapsed {} "
"total-elapsed {}".format(
dev_fscore,
format_elapsed(dev_start_time),
format_elapsed(start_time),
)
)
if dev_fscore.fscore() > best_dev_fscore:
if best_dev_model_path is not None:
for ext in [".data", ".meta"]:
path = best_dev_model_path + ext
if os.path.exists(path):
print("Removing previous model file {}...".format(path))
os.remove(path)
best_dev_fscore = dev_fscore.fscore()
best_dev_model_path = "{}_dev={:.2f}".format(
args.model_path_base, dev_fscore.fscore())
print("Saving new best model to {}...".format(best_dev_model_path))
dy.save(best_dev_model_path, [parser])
for epoch in itertools.count(start=1):
if args.epochs is not None and epoch > args.epochs:
break
np.random.shuffle(train_parse)
epoch_start_time = time.time()
for start_index in range(0, len(train_parse), args.batch_size):
dy.renew_cg()
batch_losses = []
for tree in train_parse[start_index:start_index + args.batch_size]:
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
_, loss = parser.parse(sentence, tree)
batch_losses.append(loss)
total_processed += 1
current_processed += 1
batch_loss = dy.average(batch_losses)
batch_loss_value = batch_loss.scalar_value()
batch_loss.backward()
trainer.update()
print(
"epoch {:,} "
"batch {:,}/{:,} "
"processed {:,} "
"batch-loss {:.4f} "
"epoch-elapsed {} "
"total-elapsed {}".format(
epoch,
start_index // args.batch_size + 1,
int(np.ceil(len(train_parse) / args.batch_size)),
total_processed,
batch_loss_value,
format_elapsed(epoch_start_time),
format_elapsed(start_time),
)
)
if current_processed >= check_every:
current_processed -= check_every
check_dev()
def run_test(args):
print("Loading test trees from {}...".format(args.test_path))
test_treebank = trees.load_trees(args.test_path)
print("Loaded {:,} test examples.".format(len(test_treebank)))
print("Loading model from {}...".format(args.model_path_base))
model = dy.ParameterCollection()
[parser] = dy.load(args.model_path_base, model)
if args.beamsize is not None:
parser.beamsize = args.beamsize
if args.log:
beamsize = ""
if isinstance(parser, parse.BeamParser):
parsertype = "beam"
beamsize = args.beamsize if args.beamsize is not None else parser.beamsize
elif isinstance(parser, parse.ChartParser):
parsertype = "chart"
beamsize = None
log = open("log/{}_b{}.log".format(parsertype, beamsize), "w")
    parser.cubepruning = not args.nocubepruning
test_predicted = []
score_sum = 0.0
print("Parsing test sentences...")
start_time = time.time()
for i, tree in enumerate(test_treebank):
#sys.stderr.write("{}\r".format(i))
dy.renew_cg()
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
if args.log:
gc.disable()
before = time.time()
predicted, score = parser.parse(sentence)
if args.log:
elapsed = time.time() - before
log_string = "len {} model {:10.10} time {}\n".format(len(sentence), score.value(), elapsed)
log.write(log_string)
log.flush()
gc.enable()
test_predicted.append(predicted.convert())
score_sum += score.value()
total_elapsed = float(time.time() - start_time)
test_fscore = evaluate.evalb(test_treebank, test_predicted)
print(
"test-fscore {} "
"test-elapsed {} "
"\ntotal model score {:10.10} "
"\nspeed: {}/{:5.5} = {:5.5} => {:5.5}".format(
test_fscore,
format_elapsed(start_time),
score_sum,
len(test_treebank), total_elapsed, float(len(test_treebank)) / total_elapsed, float(total_elapsed) / len(test_treebank)
)
)
def main():
dynet_args = [
"--dynet-mem",
"--dynet-weight-decay",
"--dynet-autobatch",
"--dynet-gpus",
"--dynet-gpu",
"--dynet-devices",
"--dynet-seed",
]
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser("train")
subparser.set_defaults(callback=run_train)
for arg in dynet_args:
subparser.add_argument(arg)
subparser.add_argument("--numpy-seed", type=int)
subparser.add_argument("--parser-type", choices=["chart", "beam-parse"], required=True)
subparser.add_argument("--tag-embedding-dim", type=int, default=50)
subparser.add_argument("--word-embedding-dim", type=int, default=100)
subparser.add_argument("--lstm-layers", type=int, default=2)
subparser.add_argument("--lstm-dim", type=int, default=250)
subparser.add_argument("--label-hidden-dim", type=int, default=250)
subparser.add_argument("--dropout", type=float, default=0.4)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--train-path", default="data/02-21.10way.clean")
subparser.add_argument("--dev-path", default="data/22.auto.clean")
subparser.add_argument("--batch-size", type=int, default=10)
subparser.add_argument("--epochs", type=int)
subparser.add_argument("--checks-per-epoch", type=int, default=4)
subparser.add_argument("--print-vocabs", action="store_true")
subparser.add_argument("--beamsize", type=int, default=None)
subparser.add_argument("--cross-span", default=False, action="store_true")
subparser.add_argument("--nocubepruning", default=False, action="store_true")
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
for arg in dynet_args:
subparser.add_argument(arg)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--test-path", default="data/23.auto.clean")
subparser.add_argument("--beamsize", type=int, default=None)
subparser.add_argument("--log", default=False, action="store_true")
subparser.add_argument("--nocubepruning", default=False, action="store_true")
args = parser.parse_args()
args.callback(args)
if __name__ == "__main__":
main()
|
[
"evaluate.evalb"
] |
[((787, 815), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (808, 815), False, 'import sys\n'), ((909, 942), 'trees.load_trees', 'trees.load_trees', (['args.train_path'], {}), '(args.train_path)\n', (925, 942), False, 'import trees\n'), ((1106, 1137), 'trees.load_trees', 'trees.load_trees', (['args.dev_path'], {}), '(args.dev_path)\n', (1122, 1137), False, 'import trees\n'), ((1378, 1401), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (1399, 1401), False, 'import vocabulary\n'), ((1485, 1508), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (1506, 1508), False, 'import vocabulary\n'), ((1627, 1650), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (1648, 1650), False, 'import vocabulary\n'), ((2640, 2664), 'dynet.ParameterCollection', 'dy.ParameterCollection', ([], {}), '()\n', (2662, 2664), True, 'import dynet as dy\n'), ((3768, 3789), 'dynet.AdamTrainer', 'dy.AdamTrainer', (['model'], {}), '(model)\n', (3782, 3789), True, 'import dynet as dy\n'), ((3979, 3990), 'time.time', 'time.time', ([], {}), '()\n', (3988, 3990), False, 'import time, timeit\n'), ((5397, 5421), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (5412, 5421), False, 'import itertools\n'), ((7058, 7090), 'trees.load_trees', 'trees.load_trees', (['args.test_path'], {}), '(args.test_path)\n', (7074, 7090), False, 'import trees\n'), ((7238, 7262), 'dynet.ParameterCollection', 'dy.ParameterCollection', ([], {}), '()\n', (7260, 7262), True, 'import dynet as dy\n'), ((7278, 7314), 'dynet.load', 'dy.load', (['args.model_path_base', 'model'], {}), '(args.model_path_base, model)\n', (7285, 7314), True, 'import dynet as dy\n'), ((7945, 7956), 'time.time', 'time.time', ([], {}), '()\n', (7954, 7956), False, 'import time, timeit\n'), ((8687, 8732), 'evaluate.evalb', 'evaluate.evalb', (['test_treebank', 'test_predicted'], {}), '(test_treebank, test_predicted)\n', (8701, 8732), False, 'import evaluate\n'), ((9365, 9390), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9388, 9390), False, 'import argparse\n'), ((750, 781), 'numpy.random.seed', 'np.random.seed', (['args.numpy_seed'], {}), '(args.numpy_seed)\n', (764, 781), True, 'import numpy as np\n'), ((2799, 2835), 'dynet.load', 'dy.load', (['args.model_path_base', 'model'], {}), '(args.model_path_base, model)\n', (2806, 2835), True, 'import dynet as dy\n'), ((4109, 4120), 'time.time', 'time.time', ([], {}), '()\n', (4118, 4120), False, 'import time, timeit\n'), ((4408, 4451), 'evaluate.evalb', 'evaluate.evalb', (['dev_treebank', 'dev_predicted'], {}), '(dev_treebank, dev_predicted)\n', (4422, 4451), False, 'import evaluate\n'), ((5510, 5540), 'numpy.random.shuffle', 'np.random.shuffle', (['train_parse'], {}), '(train_parse)\n', (5527, 5540), True, 'import numpy as np\n'), ((5568, 5579), 'time.time', 'time.time', ([], {}), '()\n', (5577, 5579), False, 'import time, timeit\n'), ((8055, 8068), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (8066, 8068), True, 'import dynet as dy\n'), ((281, 292), 'time.time', 'time.time', ([], {}), '()\n', (290, 292), False, 'import time, timeit\n'), ((2975, 3178), 'parse.BeamParser', 'parse.BeamParser', (['model', 'tag_vocab', 'word_vocab', 'label_vocab', 'args.tag_embedding_dim', 'args.word_embedding_dim', 'args.lstm_layers', 'args.lstm_dim', 'args.label_hidden_dim', 'args.dropout', 'args.beamsize'], {}), '(model, tag_vocab, word_vocab, label_vocab, args.\n tag_embedding_dim, 
args.word_embedding_dim, args.lstm_layers, args.\n lstm_dim, args.label_hidden_dim, args.dropout, args.beamsize)\n', (2991, 3178), False, 'import parse\n'), ((3339, 3528), 'parse.ChartParser', 'parse.ChartParser', (['model', 'tag_vocab', 'word_vocab', 'label_vocab', 'args.tag_embedding_dim', 'args.word_embedding_dim', 'args.lstm_layers', 'args.lstm_dim', 'args.label_hidden_dim', 'args.dropout'], {}), '(model, tag_vocab, word_vocab, label_vocab, args.\n tag_embedding_dim, args.word_embedding_dim, args.lstm_layers, args.\n lstm_dim, args.label_hidden_dim, args.dropout)\n', (3356, 3528), False, 'import parse\n'), ((4195, 4208), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (4206, 4208), True, 'import dynet as dy\n'), ((5335, 5373), 'dynet.save', 'dy.save', (['best_dev_model_path', '[parser]'], {}), '(best_dev_model_path, [parser])\n', (5342, 5373), True, 'import dynet as dy\n'), ((5665, 5678), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (5676, 5678), True, 'import dynet as dy\n'), ((6082, 6106), 'dynet.average', 'dy.average', (['batch_losses'], {}), '(batch_losses)\n', (6092, 6106), True, 'import dynet as dy\n'), ((8172, 8184), 'gc.disable', 'gc.disable', ([], {}), '()\n', (8182, 8184), False, 'import gc\n'), ((8206, 8217), 'time.time', 'time.time', ([], {}), '()\n', (8215, 8217), False, 'import time, timeit\n'), ((8517, 8528), 'gc.enable', 'gc.enable', ([], {}), '()\n', (8526, 8528), False, 'import gc\n'), ((8643, 8654), 'time.time', 'time.time', ([], {}), '()\n', (8652, 8654), False, 'import time, timeit\n'), ((8321, 8332), 'time.time', 'time.time', ([], {}), '()\n', (8330, 8332), False, 'import time, timeit\n')]
|
"""Train the models"""
import logging
import os
import mlflow
import torch
from torch.autograd import Variable
from tqdm import tqdm
from datasets import create_data_loaders
from evaluate import evaluate
from evaluators import collect_metrics, collect_losses
from models import create_model
from models.net import collect_scheduler, collect_optimizer
from options.train_options import TrainOptions
from seg_utils import utils
from seg_utils.visualizer import Visualizer, get_visuals
def train(model, optimizer, losses, data_loader, metrics, opt, epoch, visualizer=None, prefix="train"):
"""Train the models on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of models
losses: a class that takes batch_output and batch_labels and computes the loss for the batch
data_loader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (Metrics) a class to keep track of metrics
opt: (BaseOptions) parameters
epoch: (int) current epoch number
visualizer: (Visualizer) visualizer object for plotting results
prefix: (str) prefix to use for metrics - default is 'train'
"""
use_cuda = len(opt.gpu_ids) > 0
    # set the model to training mode
model.train()
if metrics:
metrics.epoch_reset(prefix) # clear values from previous epoch
# Use tqdm for progress bar
with tqdm(total=int(len(data_loader) / opt.batch_size) + 1) as t:
for i, data in enumerate(data_loader):
input, labels = data_loader.process_data(data, use_cuda)
# convert to torch Variables
input = Variable(input)
            # compute the model output and loss
output = model(input)
loss = losses(output, labels)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# calculate metrics
if metrics:
metrics(output, labels)
if i % 10 == 5 and visualizer is not None:
bbox_dict = dict(labels=labels["bboxes"], output=output["bboxes"]) if opt.include_bbox else None
pseudos = data["pseudos"] if opt.include_pseudo else None
viz_labels = labels["segs"] if "segs" in labels else labels["adversarial_segs"]
visuals = \
get_visuals(
dict(images=data["images"], labels=viz_labels, output=output["segs"], pseudos=pseudos),
mean_and_std=data_loader.dataset.mean_and_std, bboxes=bbox_dict)[0]
visualizer.display_current_results(visuals, epoch, True, prefix)
if metrics:
loss_vals = metrics.to_dict(prefix=prefix)
# visualizer.print_current_losses(epoch, i*opt.batch_size / len(data_loader), epoch_metrics, "train")
visualizer.plot_current_losses(epoch, i * opt.batch_size / len(data_loader), loss_vals, prefix)
t.set_postfix(loss=loss.item())
t.update()
if metrics:
metrics.check_best(epoch)
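# A minimal call sketch using the names defined above (visualizer omitted, so
# the intermediate plotting branch is skipped):
#   train(model, optimizer, losses, dataloaders["train"], metrics, opt, epoch,
#         visualizer=None, prefix="train")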
def train_and_evaluate(model, dataloaders, optimizer, losses, metrics, opt, scheduler=None):
"""Train the models and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
data_loaders: (dict) contains DataLoader objects for at least "train" and "val"
optimizer: (torch.optim) optimizer for parameters of models
losses: (dict) a dictionary of loss functions
metrics: (dict) a dictionary of functions that compute a metric
opt: (Params) parameters
scheduler: (torch.optim.lr_scheduler) Scheduler for optimizer
NB: keys of output from model should match keys from losses and metrics and should be present in data from data_loader
"""
assert all([t in dataloaders.keys() for t in ["train", "val"]]), "data_loaders must contain train and val"
# reload weights from restore_file if specified
if opt.restore_file is not None:
restore_path = os.path.join(opt.checkpoints_dir, opt.experiment, opt.name, opt.restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
if opt.dont_restore_optimizer:
utils.load_checkpoint(restore_path, model, optimizer=None, loss=losses)
else:
utils.load_checkpoint(restore_path, model, optimizer, losses)
# metrics.restore(opt.name, opt.checkpoints_dir, )
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
epoch_metrics = dict() # {f"best_{k}": 0 for k in metrics.check_best().keys()} # best result is epoch 0 for now
for epoch in range(opt.num_epochs):
# Run one epoch
lr = optimizer.param_groups[0]["lr"]
logging.info(f"Epoch {epoch + 1}/{opt.num_epochs} - lr = {lr}")
if opt.include_adversarial:
if epoch < opt.adversarial_start_epoch:
assert opt.loss[0] in ["dice", "vanilla", "weighted_vanilla"], "might need to change this code section"
logging.info(f"before adversarial start epoch, enabling only {opt.loss[0]}")
losses.enable_only(opt.loss[0])
elif epoch == opt.adversarial_start_epoch:
logging.info("reached adversarial start epoch, enabling all")
losses.enable_all()
else:
losses.enable_all()
train(model, optimizer, losses, dataloaders["train"], metrics, opt, epoch, visualizer)
epoch_metrics.update(metrics.to_dict(prefix="train"))
# Evaluate for one epoch on validation set
evaluate(model, dataloaders["val"], metrics, opt, epoch, visualizer)
epoch_metrics.update(metrics.to_dict(prefix="val", include_best=False))
# Perform adversarial training loop
if opt.include_adversarial and not opt.batch_alternate_adversarial and opt.adversarial_start_epoch <= epoch < opt.num_epochs - 1:
# Evaluate before adversarial training
if "infer" in dataloaders:
evaluate(model, dataloaders["infer"], metrics, opt, epoch, visualizer,
prefix=dataloaders["infer"].dataset.name + "_pre_adv")
epoch_metrics.update(metrics.to_dict(prefix=dataloaders["infer"].dataset.name, include_best=False))
# set adversarial loss to target the 'adversarial_segs' label from dataloader
losses.set_loss_target("adversarial", "adversarial_segs")
losses.enable_only("adversarial")
# Don't pass in metrics or visualizer for adversarial training - they will break
train(model, optimizer, losses, dataloaders["adv_train"], metrics, opt, epoch, visualizer,
prefix="adv_train")
epoch_metrics.update(metrics.to_dict(prefix="adv_train"))
losses.enable_all()
losses.reset_targets() # target 'segs' again
# update schedulers if present
if scheduler is not None:
scheduler.step()
losses.scheduler_step()
# test on inference
if "infer" in dataloaders and epoch >= opt.start_infer_epoch:
evaluate(model, dataloaders["infer"], metrics, opt, epoch, visualizer,
prefix=dataloaders["infer"].dataset.name)
epoch_metrics.update(metrics.to_dict(prefix=dataloaders["infer"].dataset.name))
mlflow.log_metrics(epoch_metrics, step=epoch)
# models can be saved for each metric.
tags = [] # ["latest"] # always save latest model
for k, val in metrics.is_best.items():
# add others here if model should be saved
save_tags = ["val_best_dice"] if opt.output_nc < 4 else ["val_best_lv_endo_dice"]
if "infer" in dataloaders:
save_tags.append(f"{dataloaders['infer'].dataset.name}_best_lv_simplicity")
if k in save_tags and "train" not in k and val == epoch:
logging.info(f"- found new best accuracy for metric {k}: {epoch_metrics[k.replace('best_', '')]}")
if "simplicity" in k:
tags.append("infer_best_simplicity")
elif "dice" in k:
tags.append("val_best_dice")
else:
tags.append(k)
# Save weights
utils.save_checkpoint(os.path.join(opt.checkpoints_dir, opt.experiment, opt.name),
{'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict(),
'loss_dict': losses.state_dict()},
tags=tags,
prefix=opt.output_prefix)
metrics.print_best()
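# Illustration of the key contract noted in the docstring (hypothetical keys):
# if the model returns {"segs": ..., "bboxes": ...}, then `losses` and
# `metrics` must consume those same keys, and each batch from the dataloaders
# must carry matching label entries for them.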
def run_training(opt_str=None, tags=None):
""" Run a training round.
If opt_str is None will process input from command line
"""
# Load the parameters from json file
opt = TrainOptions().parse(opt_str) # get training options
# Set the random seed for reproducible experiments
torch.manual_seed(21)
if len(opt.gpu_ids) > 0:
torch.cuda.manual_seed(21)
# Set the logger
utils.set_logger(os.path.join(opt.checkpoints_dir, opt.experiment, opt.name, 'train.log'))
# Create the input data pipeline
logging.info("Loading the datasets...")
dataloaders = create_data_loaders(opt, ("train", "val"))
logging.info("- done.")
# Define the models and optimizer
model = create_model(opt)
optimizer = collect_optimizer(opt, model)
# fetch loss function and metrics
losses = collect_losses(opt)
metrics = collect_metrics(opt, losses)
# fetch schedulers
scheduler = collect_scheduler(opt, optimizer)
# Initialize mlflow experiment tracker
mlflow.set_experiment(opt.experiment)
# run_id = utils.find_existing_mlflow_run(opt) # returns run_id if found else None
with mlflow.start_run(run_name=opt.name + f"_{opt.phase}"): # run_name is ignored if run_id found
mlflow.set_tag("run_type", "train")
mlflow.set_tag("dataset", dataloaders["train"].dataset.name)
if tags is not None:
for k, v in tags.items():
mlflow.set_tag(k, v)
mlflow.log_params(dict(**vars(opt)))
# Train the models
logging.info("Starting training for {} epoch(s)".format(opt.num_epochs))
train_and_evaluate(model, dataloaders, optimizer, losses, metrics, opt, scheduler)
if __name__ == '__main__':
run_training()
|
[
"evaluate.evaluate"
] |
[((4720, 4735), 'seg_utils.visualizer.Visualizer', 'Visualizer', (['opt'], {}), '(opt)\n', (4730, 4735), False, 'from seg_utils.visualizer import Visualizer, get_visuals\n'), ((9369, 9390), 'torch.manual_seed', 'torch.manual_seed', (['(21)'], {}), '(21)\n', (9386, 9390), False, 'import torch\n'), ((9614, 9653), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (9626, 9653), False, 'import logging\n'), ((9672, 9714), 'datasets.create_data_loaders', 'create_data_loaders', (['opt', "('train', 'val')"], {}), "(opt, ('train', 'val'))\n", (9691, 9714), False, 'from datasets import create_data_loaders\n'), ((9719, 9742), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (9731, 9742), False, 'import logging\n'), ((9794, 9811), 'models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (9806, 9811), False, 'from models import create_model\n'), ((9828, 9857), 'models.net.collect_optimizer', 'collect_optimizer', (['opt', 'model'], {}), '(opt, model)\n', (9845, 9857), False, 'from models.net import collect_scheduler, collect_optimizer\n'), ((9910, 9929), 'evaluators.collect_losses', 'collect_losses', (['opt'], {}), '(opt)\n', (9924, 9929), False, 'from evaluators import collect_metrics, collect_losses\n'), ((9944, 9972), 'evaluators.collect_metrics', 'collect_metrics', (['opt', 'losses'], {}), '(opt, losses)\n', (9959, 9972), False, 'from evaluators import collect_metrics, collect_losses\n'), ((10013, 10046), 'models.net.collect_scheduler', 'collect_scheduler', (['opt', 'optimizer'], {}), '(opt, optimizer)\n', (10030, 10046), False, 'from models.net import collect_scheduler, collect_optimizer\n'), ((10095, 10132), 'mlflow.set_experiment', 'mlflow.set_experiment', (['opt.experiment'], {}), '(opt.experiment)\n', (10116, 10132), False, 'import mlflow\n'), ((4266, 4361), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.experiment', 'opt.name', "(opt.restore_file + '.pth.tar')"], {}), "(opt.checkpoints_dir, opt.experiment, opt.name, opt.\n restore_file + '.pth.tar')\n", (4278, 4361), False, 'import os\n'), ((5030, 5093), 'logging.info', 'logging.info', (['f"""Epoch {epoch + 1}/{opt.num_epochs} - lr = {lr}"""'], {}), "(f'Epoch {epoch + 1}/{opt.num_epochs} - lr = {lr}')\n", (5042, 5093), False, 'import logging\n'), ((5885, 5953), 'evaluate.evaluate', 'evaluate', (['model', "dataloaders['val']", 'metrics', 'opt', 'epoch', 'visualizer'], {}), "(model, dataloaders['val'], metrics, opt, epoch, visualizer)\n", (5893, 5953), False, 'from evaluate import evaluate\n'), ((7672, 7717), 'mlflow.log_metrics', 'mlflow.log_metrics', (['epoch_metrics'], {'step': 'epoch'}), '(epoch_metrics, step=epoch)\n', (7690, 7717), False, 'import mlflow\n'), ((9428, 9454), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(21)'], {}), '(21)\n', (9450, 9454), False, 'import torch\n'), ((9498, 9570), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.experiment', 'opt.name', '"""train.log"""'], {}), "(opt.checkpoints_dir, opt.experiment, opt.name, 'train.log')\n", (9510, 9570), False, 'import os\n'), ((10230, 10283), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': "(opt.name + f'_{opt.phase}')"}), "(run_name=opt.name + f'_{opt.phase}')\n", (10246, 10283), False, 'import mlflow\n'), ((10332, 10367), 'mlflow.set_tag', 'mlflow.set_tag', (['"""run_type"""', '"""train"""'], {}), "('run_type', 'train')\n", (10346, 10367), False, 'import mlflow\n'), ((10376, 10436), 'mlflow.set_tag', 'mlflow.set_tag', (['"""dataset"""', 
"dataloaders['train'].dataset.name"], {}), "('dataset', dataloaders['train'].dataset.name)\n", (10390, 10436), False, 'import mlflow\n'), ((1716, 1731), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (1724, 1731), False, 'from torch.autograd import Variable\n'), ((4482, 4553), 'seg_utils.utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model'], {'optimizer': 'None', 'loss': 'losses'}), '(restore_path, model, optimizer=None, loss=losses)\n', (4503, 4553), False, 'from seg_utils import utils\n'), ((4580, 4641), 'seg_utils.utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model', 'optimizer', 'losses'], {}), '(restore_path, model, optimizer, losses)\n', (4601, 4641), False, 'from seg_utils import utils\n'), ((7437, 7553), 'evaluate.evaluate', 'evaluate', (['model', "dataloaders['infer']", 'metrics', 'opt', 'epoch', 'visualizer'], {'prefix': "dataloaders['infer'].dataset.name"}), "(model, dataloaders['infer'], metrics, opt, epoch, visualizer,\n prefix=dataloaders['infer'].dataset.name)\n", (7445, 7553), False, 'from evaluate import evaluate\n'), ((8626, 8685), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.experiment', 'opt.name'], {}), '(opt.checkpoints_dir, opt.experiment, opt.name)\n', (8638, 8685), False, 'import os\n'), ((9255, 9269), 'options.train_options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (9267, 9269), False, 'from options.train_options import TrainOptions\n'), ((5319, 5395), 'logging.info', 'logging.info', (['f"""before adversarial start epoch, enabling only {opt.loss[0]}"""'], {}), "(f'before adversarial start epoch, enabling only {opt.loss[0]}')\n", (5331, 5395), False, 'import logging\n'), ((6323, 6452), 'evaluate.evaluate', 'evaluate', (['model', "dataloaders['infer']", 'metrics', 'opt', 'epoch', 'visualizer'], {'prefix': "(dataloaders['infer'].dataset.name + '_pre_adv')"}), "(model, dataloaders['infer'], metrics, opt, epoch, visualizer,\n prefix=dataloaders['infer'].dataset.name + '_pre_adv')\n", (6331, 6452), False, 'from evaluate import evaluate\n'), ((10520, 10540), 'mlflow.set_tag', 'mlflow.set_tag', (['k', 'v'], {}), '(k, v)\n', (10534, 10540), False, 'import mlflow\n'), ((5515, 5576), 'logging.info', 'logging.info', (['"""reached adversarial start epoch, enabling all"""'], {}), "('reached adversarial start epoch, enabling all')\n", (5527, 5576), False, 'import logging\n')]
|
"""
测试检测器的精度
"""
import torch
import json
import time
import os, cv2
import tqdm
import numpy as np
from torchvision.transforms import transforms as cvtransforms
from torch.utils.data.dataloader import DataLoader
from lib.models.model_factory import create_model, load_model
from lib.datasets.jde import OriginDetDataset
from lib.transform.train_transform import collate_fn
from lib.utils.common import bbox_iou
from lib.models.utils.decode import mot_decode
from lib.utils.post_process import ctdet_post_process, simple_ctdet_post_process
from evaluate.utils import get_annotations_cache, cache_annotations
from evaluate.voc import voc_ap
from config.exp import config_factory
def post_process(opt, dets_in_one_image, meta, simple_post_map=False):
    """
    Post-process all detection boxes of a single image.
    Args:
        opt: configuration
        dets_in_one_image: all raw detections of one image after decoding and
            non-maximum suppression, shaped 1 * k * (bboxes + scores + clses = 6)
        meta: image metadata; meta['c'] is the original size divided by 2,
            meta['s'] is the longest side of the input size
        simple_post_map: whether to use the simple post-processing path
        The scores are already sorted in descending order.
    Returns:
        dets_mapped_results: a dict mapping each class to its detections for this image
    """
dets_in_one_image = dets_in_one_image.detach().cpu().numpy()
dets_in_one_image = dets_in_one_image.reshape(1, -1, dets_in_one_image.shape[2]) # 1 * k * 6
    # bbox mapping step: map the predicted bboxes back to the original image size
if not simple_post_map:
dets_mapped_results = ctdet_post_process(
dets_in_one_image.copy(),
[meta['c']],
[meta['s']],
meta['out_height'],
meta['out_width'],
opt.num_classes
)
else:
dets_mapped_results = simple_ctdet_post_process(
dets_in_one_image.copy(),
meta['output_shape'],
meta['origin_shape'],
opt.num_classes
)
    # return every class of this image as numpy arrays
for j in range(opt.num_classes):
dets_mapped_results[0][j] = np.array(dets_mapped_results[0][j], dtype=np.float32).reshape(-1, 5)
return dets_mapped_results[0]
def merge_outputs_into_dict(opt, detections):
"""
合并多个输出结果
Args:
detections: 单幅图像对应的字典所构成的列表 [det_dict]
Returns:
results: 将所有结果按照类别归类之后的结果
"""
results = {}
for j in range(opt.num_classes):
        # merge the outputs of all images into a single dict
results[j] = np.concatenate([detection[j] for detection in detections], axis=0).astype(np.float32)
    # Stack the scores of all classes into a single 1-D array, ordered by class
    # scores = np.hstack([results[j][:, 4] for j in range(opt.num_classes)])
    # If there are more than 128 objects in total, filter them down
    # not necessary
    # if len(scores) > 128:
    #     # keep only the top-128 scores over all objects
    #     kth = len(scores) - 128
    #     thresh = np.partition(scores, kth)[kth]
    #     # within each class, only detections above the score threshold survive
    #     for j in range(opt.num_classes):
    #         keep_inds = (results[j][:, 4] >= thresh)
    #         results[j] = results[j][keep_inds]
    # return the result dict keyed by class id
return results
def test_det(
opt,
batch_size,
img_size,
iou_thres,
print_interval=40,
):
# ===================================
    # Data loading
# ===================================
data_cfg = opt.data_cfg
    with open(data_cfg) as f:
        data_cfg_dict = json.load(f)
nC = opt.num_classes
test_path = data_cfg_dict['test']
dataset_root = data_cfg_dict['root']
# Get dataloader
transforms = cvtransforms.Compose([
cvtransforms.ToTensor(),
cvtransforms.Normalize(opt.mean, opt.std)
])
dataset = OriginDetDataset(dataset_root, test_path, augment=False, transforms=transforms)
# Anno Cache
anno_cache_file = "det_annos_cache.pkl"
cls_ref_annos = get_annotations_cache(anno_cache_file)
if cls_ref_annos is None:
annosloader = DataLoader(dataset, 1, shuffle=False, num_workers=1, drop_last=False)
cache_annotations(annosloader, 2, anno_cache_file)
cls_ref_annos = get_annotations_cache(anno_cache_file)
print("=" * 60)
print("Annos Summary:")
# =================================================================
# TODO only for evaluating ball or players. ignore it.
# =================================================================
cls_ref_annos[0] = cls_ref_annos[1]
del cls_ref_annos[1]
for cls_id in range(nC):
print("Class Samples:", cls_id, len(cls_ref_annos[cls_id]), "Total Objs:", cls_ref_annos[cls_id]["gt_total"])
print("=" * 60)
    # ===================================
    # Run inference on the validation set
    # ===================================
    # ===================================
    # Environment setup
    # ===================================
results_folder = "../demos/detections/"
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
# opt.device = torch.device('cpu')
# ===================================
    # Create and load the model
# ===================================
print('Creating model...')
model = create_model("fairmot", opt.arch, opt.heads, opt.head_conv)
model = load_model(model, opt.load_model)
model = model.to(opt.device)
model.eval()
    # initialize the per-class prediction dict
cls_ref_preds = {}
for cls_id in range(nC):
cls_ref_preds[cls_id] = {
"image_id": [], # 保存这个检测对象对应的图像名称 [img1.jpng, img1.jpng,, img1.jpng, img2.jpg, img2.jpg ...]
"conf": [], # 保存这个检测对象对应的置信度 [0.9, 0.9, 0.9, 0.8, 0.7, ...]
"bbox": [], # 保存这个检测对象对应的bbox [[1,2,3,4], [5,6,7,8] ...]
}
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=False, collate_fn=collate_fn)
    # # ==============
    # # old detecting
    # # ==============
    # for i, batch in enumerate(tqdm.tqdm(dataloader, desc="Detect")):
    #     (imgs, _, paths, shapes, _) = batch
    #     # unpack images and labels
    #     imgs = imgs.to(opt.device)
    #     output = model(imgs)[-1]
    #     origin_shape = shapes[0]
    #     width = origin_shape[1]
    #     height = origin_shape[0]
    #     inp_width = img_size[0]
    #     inp_height = img_size[1]
    #     # c is the center point of the original image
    #     c = np.array([width / 2., height / 2.], dtype=np.float32)
    #     # s corresponds to the longest side of the original image
    #     s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
    #     meta = {
    #         'c': c,
    #         's': s,
    #         'out_height': inp_height // opt.down_ratio,
    #         'out_width': inp_width // opt.down_ratio,
    #         'output_shape': (inp_height // opt.down_ratio, inp_width // opt.down_ratio),
    #         'origin_shape': origin_shape,
    #     }
    #     # squash the output heatmap into the 0~1 range
    #     hm = output['hm'][:, 0:1].sigmoid_()
    #     wh = output['wh']
    #     reg = output['reg'] if opt.reg_offset else None
    #     opt.K = 1
    #     # !!! decode detections from the raw network outputs
    #     detections, inds = mot_decode(hm, wh, center_offset_reg=reg, ltrb=opt.ltrb, K=opt.K)
    #     for i, image_path in enumerate(paths):
    #         print("-------cropping-----")
    #         # print(detections[i][0]) # in output-resolution coordinates
    #         # shape: 1 * K * 6 (bbox + score + cls)
    #         detection = detections[i].unsqueeze(0)
    #         dets_in_one_image_dict = post_process(opt, detection, meta, simple_post_map = True) # per-class bboxes of one image
    #         # store the detection results
    #         for cls_id in dets_in_one_image_dict.keys():
    #             cls_ref_det_result = dets_in_one_image_dict[cls_id] # per-class detections of one image, shape: K * 5
    #             objects_in_one_img_confidences = cls_ref_det_result[:, 4] # confidences, already sorted descending
    #             objects_in_one_img_bbox = cls_ref_det_result[:, :4] # bbox
    #             cls_ref_preds[cls_id]['image_id'].extend([image_path] * len(cls_ref_det_result))
    #             cls_ref_preds[cls_id]['conf'].extend(objects_in_one_img_confidences.tolist())
    #             cls_ref_preds[cls_id]['bbox'].extend(objects_in_one_img_bbox.tolist())
# ==================
# tiling mechanism
# ==================
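    # Sliding-window scheme: the image is tiled with tiling_size x tiling_size
    # crops at stride `step`; only the crop whose heatmap peak is highest keeps
    # its single (K=1) detection, whose coordinates are then shifted back into
    # the full image by the crop's (crop_x, crop_y) offset.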
tiling_size = 256
step = 128
for i, batch in enumerate(tqdm.tqdm(dataloader, desc="Detect")):
(imgs, _, paths, shapes, _) = batch
# 1 * 3 * origin_h * origin_w
imgs = imgs.to(opt.device)
origin_shape = shapes[0]
width = origin_shape[1]
height = origin_shape[0]
# TODO special adjust for img smaller than 256 * 256
width_corps = (width - tiling_size) // step + 1
height_corps = (height - tiling_size) // step + 1
# record
best_result = {
"crop_y": 0,
"crop_x": 0,
"hm_score": 0,
"detections": None,
}
# sliding detection
for col in range(height_corps):
for row in range(width_corps):
crop_y = col * step
crop_x = row * step
if crop_y + tiling_size > height:
crop_y = height - tiling_size
if crop_x + tiling_size > width:
crop_x = width - tiling_size
patch = imgs[:, :, crop_y:crop_y + tiling_size, crop_x:crop_x + tiling_size]
# output
output = model({'input':patch})[-1]
# select
hm = output['hm'][:, 0:1].sigmoid_()
if hm.max() > best_result["hm_score"]:
best_result['hm_score'] = hm.max()
else:
continue
# detection
wh = output['wh']
reg = output['reg'] if opt.reg_offset else None
opt.K = 1
                # !!! decode detections from the raw network outputs
detections, inds = mot_decode(hm, wh, center_offset_reg=reg, ltrb=opt.ltrb, K=opt.K)
# record
best_result["crop_y"] = crop_y
best_result["crop_x"] = crop_x
best_result["detections"] = detections
# calc
detection = best_result["detections"].squeeze(0).squeeze(0).cpu().numpy()
detection *= opt.down_ratio
dets_in_one_image_dict = {}
dets_in_one_image_dict[0] = np.asarray([detection[:-1]])
image_path = paths[0]
# patch = origin_img[best_result["crop_y"]:best_result["crop_y"] + tiling_size, best_result["crop_x"]:best_result["crop_x"] + tiling_size]
dets_in_one_image_dict[0][:, 0] += best_result["crop_x"]
dets_in_one_image_dict[0][:, 1] += best_result["crop_y"]
dets_in_one_image_dict[0][:, 2] += best_result["crop_x"]
dets_in_one_image_dict[0][:, 3] += best_result["crop_y"]
# display
# pred and gt
# origin_img = cv2.imread(image_path)
# labels = np.loadtxt(image_path.replace("images", "labels_with_ids").replace(".jpg", ".txt"))
# gt_labels = labels[labels[:, 0] == 1]
# if gt_labels.shape[0] != 0:
# gt_label = gt_labels[0][2:6]
# bbox = gt_label.copy()
# bbox[0] = int((gt_label[0] - gt_label[2] / 2) * origin_shape[1])
# bbox[1] = int((gt_label[1] - gt_label[3] / 2) * origin_shape[0])
# bbox[2] = int((gt_label[0] + gt_label[2] / 2) * origin_shape[1])
# bbox[3] = int((gt_label[1] + gt_label[3] / 2) * origin_shape[0])
# bbox = bbox.astype(np.int32)
# cv2.rectangle(origin_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=1)
# bbox = dets_in_one_image_dict[0][0, :4].astype(np.int32)
# cv2.rectangle(origin_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(255,255,0), thickness=1)
# cv2.imwrite("../demos/detections/det_%d_%.06f_result.jpg" % (i, best_result["score"]), origin_img)
        # store the detection results
for cls_id in dets_in_one_image_dict.keys():
            cls_ref_det_result = dets_in_one_image_dict[cls_id] # per-class detections of one image, shape: K * 5
            objects_in_one_img_confidences = cls_ref_det_result[:, 4] # confidences, already sorted descending
            objects_in_one_img_bbox = cls_ref_det_result[:, :4] # bbox
cls_ref_preds[cls_id]['image_id'].extend([image_path] * len(cls_ref_det_result))
cls_ref_preds[cls_id]['conf'].extend(objects_in_one_img_confidences.tolist())
cls_ref_preds[cls_id]['bbox'].extend(objects_in_one_img_bbox.tolist())
    # summarize all bbox predictions
    for cls_id in cls_ref_preds.keys():
print("Preds for ", cls_id, "images: ", len(cls_ref_preds[cls_id]["image_id"]))
print("Preds for ", cls_id, "conf: ", len(cls_ref_preds[cls_id]["conf"]))
print("Preds for ", cls_id, "bbox: ", len(cls_ref_preds[cls_id]["bbox"]))
# ===================================
    # Compute P, R and mAP
# ===================================
for cls_id in cls_ref_preds.keys():
        # compute the metrics separately for every class
preds = cls_ref_preds[cls_id]
preds["conf"] = np.asarray(preds["conf"])
preds["bbox"] = np.asarray(preds["bbox"])
print("=" * 60)
print("Evaluating for cls:", cls_id, "Conf shape:", preds["conf"].shape, "Bbox shape:", preds["bbox"].shape)
print("=" * 60)
npos = len(preds["image_id"])
        # sort all bboxes by confidence in descending order
desc_conf_ind = np.argsort(-preds["conf"])
desc_conf = np.sort(-preds["conf"])
desc_bbox = preds["bbox"][desc_conf_ind]
desc_image_ids = [preds["image_id"][i] for i in desc_conf_ind]
        # evaluate the detections
        pred_total = len(desc_image_ids) # total number of detections
        tp = np.zeros(pred_total) # true positives (hits)
        fp = np.zeros(pred_total) # false positives (spurious detections)
# GT
cls_ref_gt_records = cls_ref_annos[cls_id]
gt_total = cls_ref_gt_records["gt_total"]
        # sweep the detections from high to low confidence
for i in tqdm.tqdm(range(pred_total), desc="Evaluate"):
gt_records = cls_ref_gt_records[desc_image_ids[i]]
pred_bbox = desc_bbox[i]
gt_bboxes = gt_records["bbox"].copy()
max_iou = -np.inf
max_iou_index = -1
if len(gt_bboxes) > 0:
                # this class is indeed present in the current image
gt_bboxes[:, 0] *= gt_records["width"]
gt_bboxes[:, 2] *= gt_records["width"]
gt_bboxes[:, 1] *= gt_records["height"]
gt_bboxes[:, 3] *= gt_records["height"]
pred_bbox = torch.FloatTensor(pred_bbox).view(1, -1) # 1 * bbox
gt_bboxes = torch.FloatTensor(gt_bboxes) # gt_num * bbox
ious = bbox_iou(pred_bbox, gt_bboxes, x1y1x2y2=True)[0]
max_iou_index = np.argmax(ious)
max_iou = ious[max_iou_index]
if max_iou > iou_thres:
                # an IoU above the threshold counts as a match
if not gt_records["det_flag"][max_iou_index]:
gt_records["det_flag"][max_iou_index] = True
tp[i] = 1
else:
fp[i] = 1
else:
fp[i] = 1
        # compute the AP for this class
        # running sum: number of false positives up to the current confidence
        acc_fp = np.cumsum(fp)
        # running sum: number of true positives up to the current confidence
        acc_tp = np.cumsum(tp)
recall = acc_tp / float(gt_total)
# avoid divide by zero in case the first detection matches a difficult
prec = acc_tp / np.maximum(acc_tp + acc_fp, np.finfo(np.float64).eps)
cls_ap = voc_ap(recall, prec)
cls_prec = np.sum(tp) / float(pred_total)
cls_recall = np.sum(tp) / float(gt_total)
print("AP: %.2f%%, P: %.2f%%, R: %.2f%%" % (cls_ap * 100, cls_prec * 100, cls_recall * 100))
if __name__ == '__main__':
# opt = config_factory.get_config("det_val_resdcn18_4x")()
# opt = config_factory.get_config("det_val_resdcn18_2x")()
# opt = config_factory.get_config("det_val_flynet_tiny")()
# opt = config_factory.get_config("det_val_gridnet_tiny")()
opt = config_factory.get_config("det_val_gridnet_ball")()
opt.recipe = "fairmot"
opt.task = "ball"
opt.load_model = "../models/mnet_det_ball.pth"
with torch.no_grad():
        test_det(opt, batch_size=1, img_size=opt.img_size, iou_thres=0.2)  # prints its metrics; returns None
|
[
"evaluate.utils.cache_annotations",
"evaluate.utils.get_annotations_cache",
"evaluate.voc.voc_ap"
] |
[((3227, 3239), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3236, 3239), False, 'import json\n'), ((3528, 3607), 'lib.datasets.jde.OriginDetDataset', 'OriginDetDataset', (['dataset_root', 'test_path'], {'augment': '(False)', 'transforms': 'transforms'}), '(dataset_root, test_path, augment=False, transforms=transforms)\n', (3544, 3607), False, 'from lib.datasets.jde import OriginDetDataset\n'), ((3690, 3728), 'evaluate.utils.get_annotations_cache', 'get_annotations_cache', (['anno_cache_file'], {}), '(anno_cache_file)\n', (3711, 3728), False, 'from evaluate.utils import get_annotations_cache, cache_annotations\n'), ((5041, 5100), 'lib.models.model_factory.create_model', 'create_model', (['"""fairmot"""', 'opt.arch', 'opt.heads', 'opt.head_conv'], {}), "('fairmot', opt.arch, opt.heads, opt.head_conv)\n", (5053, 5100), False, 'from lib.models.model_factory import create_model, load_model\n'), ((5113, 5146), 'lib.models.model_factory.load_model', 'load_model', (['model', 'opt.load_model'], {}), '(model, opt.load_model)\n', (5123, 5146), False, 'from lib.models.model_factory import create_model, load_model\n'), ((5593, 5701), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=\n False, collate_fn=collate_fn)\n', (5603, 5701), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3781, 3850), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset', '(1)'], {'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)'}), '(dataset, 1, shuffle=False, num_workers=1, drop_last=False)\n', (3791, 3850), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3859, 3909), 'evaluate.utils.cache_annotations', 'cache_annotations', (['annosloader', '(2)', 'anno_cache_file'], {}), '(annosloader, 2, anno_cache_file)\n', (3876, 3909), False, 'from evaluate.utils import get_annotations_cache, cache_annotations\n'), ((3934, 3972), 'evaluate.utils.get_annotations_cache', 'get_annotations_cache', (['anno_cache_file'], {}), '(anno_cache_file)\n', (3955, 3972), False, 'from evaluate.utils import get_annotations_cache, cache_annotations\n'), ((4789, 4809), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4801, 4809), False, 'import torch\n'), ((4841, 4860), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4853, 4860), False, 'import torch\n'), ((8189, 8225), 'tqdm.tqdm', 'tqdm.tqdm', (['dataloader'], {'desc': '"""Detect"""'}), "(dataloader, desc='Detect')\n", (8198, 8225), False, 'import tqdm\n'), ((10264, 10292), 'numpy.asarray', 'np.asarray', (['[detection[:-1]]'], {}), '([detection[:-1]])\n', (10274, 10292), True, 'import numpy as np\n'), ((13031, 13056), 'numpy.asarray', 'np.asarray', (["preds['conf']"], {}), "(preds['conf'])\n", (13041, 13056), True, 'import numpy as np\n'), ((13081, 13106), 'numpy.asarray', 'np.asarray', (["preds['bbox']"], {}), "(preds['bbox'])\n", (13091, 13106), True, 'import numpy as np\n'), ((13377, 13403), 'numpy.argsort', 'np.argsort', (["(-preds['conf'])"], {}), "(-preds['conf'])\n", (13387, 13403), True, 'import numpy as np\n'), ((13424, 13447), 'numpy.sort', 'np.sort', (["(-preds['conf'])"], {}), "(-preds['conf'])\n", (13431, 13447), True, 'import numpy as np\n'), ((13649, 13669), 'numpy.zeros', 'np.zeros', (['pred_total'], {}), '(pred_total)\n', (13657, 13669), True, 'import numpy as np\n'), ((13695, 
13715), 'numpy.zeros', 'np.zeros', (['pred_total'], {}), '(pred_total)\n', (13703, 13715), True, 'import numpy as np\n'), ((15181, 15194), 'numpy.cumsum', 'np.cumsum', (['fp'], {}), '(fp)\n', (15190, 15194), True, 'import numpy as np\n'), ((15239, 15252), 'numpy.cumsum', 'np.cumsum', (['tp'], {}), '(tp)\n', (15248, 15252), True, 'import numpy as np\n'), ((15469, 15489), 'evaluate.voc.voc_ap', 'voc_ap', (['recall', 'prec'], {}), '(recall, prec)\n', (15475, 15489), False, 'from evaluate.voc import voc_ap\n'), ((15982, 16031), 'config.exp.config_factory.get_config', 'config_factory.get_config', (['"""det_val_gridnet_ball"""'], {}), "('det_val_gridnet_ball')\n", (16007, 16031), False, 'from config.exp import config_factory\n'), ((16145, 16160), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16158, 16160), False, 'import torch\n'), ((3428, 3451), 'torchvision.transforms.transforms.ToTensor', 'cvtransforms.ToTensor', ([], {}), '()\n', (3449, 3451), True, 'from torchvision.transforms import transforms as cvtransforms\n'), ((3461, 3502), 'torchvision.transforms.transforms.Normalize', 'cvtransforms.Normalize', (['opt.mean', 'opt.std'], {}), '(opt.mean, opt.std)\n', (3483, 3502), True, 'from torchvision.transforms import transforms as cvtransforms\n'), ((15509, 15519), 'numpy.sum', 'np.sum', (['tp'], {}), '(tp)\n', (15515, 15519), True, 'import numpy as np\n'), ((15561, 15571), 'numpy.sum', 'np.sum', (['tp'], {}), '(tp)\n', (15567, 15571), True, 'import numpy as np\n'), ((1893, 1946), 'numpy.array', 'np.array', (['dets_mapped_results[0][j]'], {'dtype': 'np.float32'}), '(dets_mapped_results[0][j], dtype=np.float32)\n', (1901, 1946), True, 'import numpy as np\n'), ((2281, 2347), 'numpy.concatenate', 'np.concatenate', (['[detection[j] for detection in detections]'], {'axis': '(0)'}), '([detection[j] for detection in detections], axis=0)\n', (2295, 2347), True, 'import numpy as np\n'), ((9816, 9881), 'lib.models.utils.decode.mot_decode', 'mot_decode', (['hm', 'wh'], {'center_offset_reg': 'reg', 'ltrb': 'opt.ltrb', 'K': 'opt.K'}), '(hm, wh, center_offset_reg=reg, ltrb=opt.ltrb, K=opt.K)\n', (9826, 9881), False, 'from lib.models.utils.decode import mot_decode\n'), ((14556, 14584), 'torch.FloatTensor', 'torch.FloatTensor', (['gt_bboxes'], {}), '(gt_bboxes)\n', (14573, 14584), False, 'import torch\n'), ((14720, 14735), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (14729, 14735), True, 'import numpy as np\n'), ((14639, 14684), 'lib.utils.common.bbox_iou', 'bbox_iou', (['pred_bbox', 'gt_bboxes'], {'x1y1x2y2': '(True)'}), '(pred_bbox, gt_bboxes, x1y1x2y2=True)\n', (14647, 14684), False, 'from lib.utils.common import bbox_iou\n'), ((15426, 15446), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (15434, 15446), True, 'import numpy as np\n'), ((14474, 14502), 'torch.FloatTensor', 'torch.FloatTensor', (['pred_bbox'], {}), '(pred_bbox)\n', (14491, 14502), False, 'import torch\n')]
|
import sys
sys.path.append('./')
import os
import pickle
import gc
import faiss
import numpy as np
from transformers import HfArgumentParser, AutoConfig, DPRContextEncoder, DPRQuestionEncoder
from torch.optim import AdamW
from LibVQ.dataset.dataset import load_rel, write_rel
from LibVQ.learnable_index import LearnableIndexWithEncoder
from LibVQ.models import Encoder
from LibVQ.utils import setuplogging
from arguments import IndexArguments, DataArguments, ModelArguments, TrainingArguments
from prepare_data.get_embeddings import DPR_Encoder
from evaluate import validate, load_test_data
faiss.omp_set_num_threads(32)
if __name__ == '__main__':
setuplogging()
parser = HfArgumentParser((IndexArguments, DataArguments, ModelArguments, TrainingArguments))
index_args, data_args, model_args, training_args = parser.parse_args_into_dataclasses()
# Load encoder
doc_encoder = DPR_Encoder(DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base"))
query_encoder = DPR_Encoder(DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base'))
config = AutoConfig.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
emb_size = config.hidden_size
text_encoder = Encoder(query_encoder=query_encoder,
doc_encoder=doc_encoder)
# Load embeddings of queries and docs
doc_embeddings_file = os.path.join(data_args.embeddings_dir, 'docs.memmap')
query_embeddings_file = os.path.join(data_args.embeddings_dir, 'train-queries.memmap')
doc_embeddings = np.memmap(doc_embeddings_file,
dtype=np.float32, mode="r")
doc_embeddings = doc_embeddings.reshape(-1, emb_size)
train_query_embeddings = np.memmap(query_embeddings_file,
dtype=np.float32, mode="r")
train_query_embeddings = train_query_embeddings.reshape(-1, emb_size)
test_query_embeddings = np.memmap(os.path.join(data_args.embeddings_dir, 'test-queries.memmap'),
dtype=np.float32, mode="r")
test_query_embeddings = test_query_embeddings.reshape(-1, emb_size)
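    # Note (general numpy behavior, not repo-specific): np.memmap(mode='r')
    # without an explicit shape returns a flat 1-D view, so all three
    # embedding files are reshaped to (num_vectors, emb_size) above, with
    # emb_size taken from the DPR config.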
    # Create Index
    # if there is a faiss index in init_index_file, it will create a learnable_index based on it;
    # if not, it will create and save a faiss index in init_index_file
init_index_file = os.path.join(data_args.embeddings_dir, f'{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index')
learnable_index = LearnableIndexWithEncoder(index_method=index_args.index_method,
encoder=text_encoder,
init_index_file=init_index_file,
doc_embeddings=doc_embeddings,
ivf_centers_num=index_args.ivf_centers_num,
subvector_num=index_args.subvector_num,
subvector_bits=index_args.subvector_bits)
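    # A note on the IVF-PQ hyperparameters above (standard FAISS/IVF-PQ facts,
    # not specific to this repo): ivf_centers_num is the number of coarse
    # k-means clusters in the inverted file, and each doc embedding is
    # compressed into subvector_num codes of subvector_bits bits each,
    # i.e. subvector_num * subvector_bits / 8 bytes per vector.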
    # By default the class randomly samples negatives from the corpus. You can also assign specified negatives for each query (set --neg_file)
neg_file = os.path.join(data_args.embeddings_dir, f"train-queries_hardneg.pickle")
if not os.path.exists(neg_file):
print('generating hard negatives for train queries ...')
train_ground_truths = load_rel(os.path.join(data_args.preprocess_dir, 'train-rels.tsv'))
trainquery2hardneg = learnable_index.hard_negative(train_query_embeddings,
train_ground_truths,
topk=400,
batch_size=64,
nprobe=index_args.ivf_centers_num)
pickle.dump(trainquery2hardneg, open(neg_file, 'wb'))
del trainquery2hardneg
gc.collect()
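    # At this point neg_file holds the pickled output of hard_negative(),
    # presumably a mapping from each train query to its top-400 retrieved
    # candidates, which the fit_with_multi_gpus calls below consume as hard
    # negatives via their neg_file argument.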
data_args.save_ckpt_dir = f'./saved_ckpts/{training_args.training_mode}_{index_args.index_method}/'
# contrastive learning
if training_args.training_mode == 'contrastive_index-and-query-encoder':
learnable_index.fit_with_multi_gpus(doc_embeddings_file=doc_embeddings_file,
query_data_dir=data_args.preprocess_dir,
max_query_length=data_args.max_query_length,
rel_file=os.path.join(data_args.preprocess_dir, 'train-rels.tsv'),
neg_file=os.path.join(data_args.embeddings_dir,
f"train-queries_hardneg.pickle"),
emb_size=emb_size,
per_query_neg_num=1,
checkpoint_path=data_args.save_ckpt_dir,
logging_steps=training_args.logging_steps,
per_device_train_batch_size=512,
checkpoint_save_steps=training_args.checkpoint_save_steps,
max_grad_norm=training_args.max_grad_norm,
temperature=training_args.temperature,
optimizer_class=AdamW,
loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,
'ivf_weight': 'scaled_to_pqloss'},
lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},
loss_method='contras',
fix_emb='doc',
epochs=8)
# distill learning
if training_args.training_mode == 'distill_index-and-query-encoder':
learnable_index.fit_with_multi_gpus(rel_file=os.path.join(data_args.preprocess_dir, 'train-rels.tsv'),
neg_file=None,
query_data_dir=data_args.preprocess_dir,
max_query_length=data_args.max_query_length,
query_embeddings_file=query_embeddings_file,
doc_embeddings_file=doc_embeddings_file,
emb_size=emb_size,
per_query_neg_num=100,
checkpoint_path=data_args.save_ckpt_dir,
logging_steps=training_args.logging_steps,
per_device_train_batch_size=24,
checkpoint_save_steps=training_args.checkpoint_save_steps,
max_grad_norm=training_args.max_grad_norm,
temperature=training_args.temperature,
optimizer_class=AdamW,
loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,
'ivf_weight':'scaled_to_pqloss'},
lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},
loss_method='distill',
fix_emb='doc',
epochs=10)
    # distill learning, training both the query encoder and the doc encoder; this can only be used when IVF is disabled
if training_args.training_mode == 'distill_index-and-two-encoders':
data_args.save_ckpt_dir = f'./saved_ckpts/{training_args.training_mode}_{index_args.index_method}/'
assert 'ivf' not in index_args.index_method
learnable_index.fit_with_multi_gpus(rel_file=os.path.join(data_args.preprocess_dir, 'train-rels.tsv'),
neg_file=os.path.join(data_args.embeddings_dir,
f"train-queries_hardneg.pickle"),
query_data_dir=data_args.preprocess_dir,
max_query_length=data_args.max_query_length,
doc_data_dir=data_args.preprocess_dir,
max_doc_length=128,
query_embeddings_file=query_embeddings_file,
doc_embeddings_file=doc_embeddings_file,
emb_size=emb_size,
per_query_neg_num=12,
checkpoint_path=data_args.save_ckpt_dir,
logging_steps=training_args.logging_steps,
per_device_train_batch_size=16,
checkpoint_save_steps=training_args.checkpoint_save_steps,
max_grad_norm=training_args.max_grad_norm,
temperature=training_args.temperature,
optimizer_class=AdamW,
loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0, 'ivf_weight': 0.0},
lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 0.0},
loss_method='distill',
fix_emb='',
epochs=20)
if 'nolabel' in training_args.training_mode:
'''
        If there is no relevance data, you can set rel_file/rel_data to None and it will automatically generate the data for training.
        You can also generate the data manually as follows:
'''
        # generate train data by brute force, or by the index, which should have performance similar to brute force
if not os.path.exists(os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv')):
print('generating relevance labels for train queries ...')
# flat_index = FaissIndex(doc_embeddings=doc_embeddings, index_method='flat', dist_mode='ip')
# query2pos, query2neg = flat_index.generate_virtual_traindata(train_query_embeddings,
# topk=400, batch_size=64)
# or
            query2pos, query2neg = learnable_index.generate_virtual_traindata(
                train_query_embeddings, topk=400, batch_size=64, nprobe=index_args.ivf_centers_num)
write_rel(os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv'), query2pos)
pickle.dump(query2neg,
open(os.path.join(data_args.embeddings_dir, f"train-queries-virtual_hardneg.pickle"), 'wb'))
del query2pos, query2neg
gc.collect()
# distill with no label data
if training_args.training_mode == 'distill_index-and-query-encoder_nolabel':
learnable_index.fit_with_multi_gpus(rel_file=os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv'),
neg_file=os.path.join(data_args.embeddings_dir,
f"train-queries-virtual_hardneg.pickle"),
query_data_dir=data_args.preprocess_dir,
max_query_length=data_args.max_query_length,
query_embeddings_file=query_embeddings_file,
doc_embeddings_file=doc_embeddings_file,
emb_size=emb_size,
per_query_neg_num=100,
checkpoint_path=data_args.save_ckpt_dir,
logging_steps=training_args.logging_steps,
per_device_train_batch_size=24, #training_args.per_device_train_batch_size,
checkpoint_save_steps=training_args.checkpoint_save_steps,
max_grad_norm=training_args.max_grad_norm,
temperature=training_args.temperature,
optimizer_class=AdamW,
loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,
'ivf_weight': 'scaled_to_pqloss'},
lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},
loss_method='distill',
fix_emb='doc',
epochs=10)
if training_args.training_mode == 'distill_index-and-two-encoders_nolabel':
learnable_index.fit_with_multi_gpus(rel_file=os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv'),
neg_file=os.path.join(data_args.embeddings_dir,
f"train-queries-virtual_hardneg.pickle"),
query_data_dir=data_args.preprocess_dir,
max_query_length=data_args.max_query_length,
doc_data_dir=data_args.preprocess_dir,
max_doc_length=128,
query_embeddings_file=query_embeddings_file,
doc_embeddings_file=doc_embeddings_file,
emb_size=emb_size,
per_query_neg_num=12,
checkpoint_path=data_args.save_ckpt_dir,
logging_steps=training_args.logging_steps,
per_device_train_batch_size=16, #training_args.per_device_train_batch_size,
checkpoint_save_steps=training_args.checkpoint_save_steps,
max_grad_norm=training_args.max_grad_norm,
temperature=training_args.temperature,
optimizer_class=AdamW,
loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,
'ivf_weight': 'scaled_to_pqloss'},
lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},
loss_method='distill',
fix_emb='',
epochs=20)
# update query embeddings when re-training the query encoder
data_args.output_dir = f'./data/NQ/evaluate/LearnableIndex_{training_args.training_mode}_{index_args.index_method}'
new_query_embeddings = learnable_index.encode(data_dir=data_args.preprocess_dir,
prefix='test-queries',
max_length=data_args.max_query_length,
output_dir=data_args.output_dir,
batch_size=8196,
is_query=True,
return_vecs=True
)
# Test
scores, ann_items = learnable_index.search(new_query_embeddings, topk=100, nprobe=index_args.nprobe)
test_questions, test_answers, collections = load_test_data(
query_andwer_file='./data/NQ/raw_dataset/nq-test.qa.csv',
collections_file='./data/NQ/dataset/collection.tsv')
validate(ann_items, test_questions, test_answers, collections)
# Save and Load
saved_index_file = os.path.join(data_args.output_dir,
f'LibVQ_{training_args.training_mode}_{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index')
learnable_index.save_index(saved_index_file)
learnable_index.load_index(saved_index_file)
# get the faiss index and then you can use the faiss API.
'''
index = learnable_index.index
index = faiss.read_index(saved_index_file)
index = faiss.index_gpu_to_cpu(index)
'''
|
[
"evaluate.load_test_data",
"evaluate.validate"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (26, 32), False, 'import sys\n'), ((595, 624), 'faiss.omp_set_num_threads', 'faiss.omp_set_num_threads', (['(32)'], {}), '(32)\n', (620, 624), False, 'import faiss\n'), ((657, 671), 'LibVQ.utils.setuplogging', 'setuplogging', ([], {}), '()\n', (669, 671), False, 'from LibVQ.utils import setuplogging\n'), ((685, 773), 'transformers.HfArgumentParser', 'HfArgumentParser', (['(IndexArguments, DataArguments, ModelArguments, TrainingArguments)'], {}), '((IndexArguments, DataArguments, ModelArguments,\n TrainingArguments))\n', (701, 773), False, 'from transformers import HfArgumentParser, AutoConfig, DPRContextEncoder, DPRQuestionEncoder\n'), ((1119, 1188), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['"""facebook/dpr-ctx_encoder-single-nq-base"""'], {}), "('facebook/dpr-ctx_encoder-single-nq-base')\n", (1145, 1188), False, 'from transformers import HfArgumentParser, AutoConfig, DPRContextEncoder, DPRQuestionEncoder\n'), ((1243, 1304), 'LibVQ.models.Encoder', 'Encoder', ([], {'query_encoder': 'query_encoder', 'doc_encoder': 'doc_encoder'}), '(query_encoder=query_encoder, doc_encoder=doc_encoder)\n', (1250, 1304), False, 'from LibVQ.models import Encoder\n'), ((1402, 1455), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', '"""docs.memmap"""'], {}), "(data_args.embeddings_dir, 'docs.memmap')\n", (1414, 1455), False, 'import os\n'), ((1484, 1546), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', '"""train-queries.memmap"""'], {}), "(data_args.embeddings_dir, 'train-queries.memmap')\n", (1496, 1546), False, 'import os\n'), ((1569, 1627), 'numpy.memmap', 'np.memmap', (['doc_embeddings_file'], {'dtype': 'np.float32', 'mode': '"""r"""'}), "(doc_embeddings_file, dtype=np.float32, mode='r')\n", (1578, 1627), True, 'import numpy as np\n'), ((1747, 1807), 'numpy.memmap', 'np.memmap', (['query_embeddings_file'], {'dtype': 'np.float32', 'mode': '"""r"""'}), "(query_embeddings_file, dtype=np.float32, mode='r')\n", (1756, 1807), True, 'import numpy as np\n'), ((2363, 2535), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index"""'], {}), "(data_args.embeddings_dir,\n f'{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index'\n )\n", (2375, 2535), False, 'import os\n'), ((2549, 2843), 'LibVQ.learnable_index.LearnableIndexWithEncoder', 'LearnableIndexWithEncoder', ([], {'index_method': 'index_args.index_method', 'encoder': 'text_encoder', 'init_index_file': 'init_index_file', 'doc_embeddings': 'doc_embeddings', 'ivf_centers_num': 'index_args.ivf_centers_num', 'subvector_num': 'index_args.subvector_num', 'subvector_bits': 'index_args.subvector_bits'}), '(index_method=index_args.index_method, encoder=\n text_encoder, init_index_file=init_index_file, doc_embeddings=\n doc_embeddings, ivf_centers_num=index_args.ivf_centers_num,\n subvector_num=index_args.subvector_num, subvector_bits=index_args.\n subvector_bits)\n', (2574, 2843), False, 'from LibVQ.learnable_index import LearnableIndexWithEncoder\n'), ((3262, 3333), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""train-queries_hardneg.pickle"""'], {}), "(data_args.embeddings_dir, f'train-queries_hardneg.pickle')\n", (3274, 3333), False, 'import os\n'), ((16623, 16752), 'evaluate.load_test_data', 'load_test_data', 
([], {'query_andwer_file': '"""./data/NQ/raw_dataset/nq-test.qa.csv"""', 'collections_file': '"""./data/NQ/dataset/collection.tsv"""'}), "(query_andwer_file='./data/NQ/raw_dataset/nq-test.qa.csv',\n collections_file='./data/NQ/dataset/collection.tsv')\n", (16637, 16752), False, 'from evaluate import validate, load_test_data\n'), ((16770, 16832), 'evaluate.validate', 'validate', (['ann_items', 'test_questions', 'test_answers', 'collections'], {}), '(ann_items, test_questions, test_answers, collections)\n', (16778, 16832), False, 'from evaluate import validate, load_test_data\n'), ((16877, 17081), 'os.path.join', 'os.path.join', (['data_args.output_dir', 'f"""LibVQ_{training_args.training_mode}_{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index"""'], {}), "(data_args.output_dir,\n f'LibVQ_{training_args.training_mode}_{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index'\n )\n", (16889, 17081), False, 'import os\n'), ((912, 988), 'transformers.DPRContextEncoder.from_pretrained', 'DPRContextEncoder.from_pretrained', (['"""facebook/dpr-ctx_encoder-single-nq-base"""'], {}), "('facebook/dpr-ctx_encoder-single-nq-base')\n", (945, 988), False, 'from transformers import HfArgumentParser, AutoConfig, DPRContextEncoder, DPRQuestionEncoder\n'), ((1022, 1109), 'transformers.DPRQuestionEncoder.from_pretrained', 'DPRQuestionEncoder.from_pretrained', (['"""facebook/dpr-question_encoder-single-nq-base"""'], {}), "(\n 'facebook/dpr-question_encoder-single-nq-base')\n", (1056, 1109), False, 'from transformers import HfArgumentParser, AutoConfig, DPRContextEncoder, DPRQuestionEncoder\n'), ((1960, 2021), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', '"""test-queries.memmap"""'], {}), "(data_args.embeddings_dir, 'test-queries.memmap')\n", (1972, 2021), False, 'import os\n'), ((3345, 3369), 'os.path.exists', 'os.path.exists', (['neg_file'], {}), '(neg_file)\n', (3359, 3369), False, 'import os\n'), ((4034, 4046), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4044, 4046), False, 'import gc\n'), ((3475, 3531), 'os.path.join', 'os.path.join', (['data_args.preprocess_dir', '"""train-rels.tsv"""'], {}), "(data_args.preprocess_dir, 'train-rels.tsv')\n", (3487, 3531), False, 'import os\n'), ((11516, 11528), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11526, 11528), False, 'import gc\n'), ((4569, 4625), 'os.path.join', 'os.path.join', (['data_args.preprocess_dir', '"""train-rels.tsv"""'], {}), "(data_args.preprocess_dir, 'train-rels.tsv')\n", (4581, 4625), False, 'import os\n'), ((4680, 4751), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""train-queries_hardneg.pickle"""'], {}), "(data_args.embeddings_dir, f'train-queries_hardneg.pickle')\n", (4692, 4751), False, 'import os\n'), ((6163, 6219), 'os.path.join', 'os.path.join', (['data_args.preprocess_dir', '"""train-rels.tsv"""'], {}), "(data_args.preprocess_dir, 'train-rels.tsv')\n", (6175, 6219), False, 'import os\n'), ((8222, 8278), 'os.path.join', 'os.path.join', (['data_args.preprocess_dir', '"""train-rels.tsv"""'], {}), "(data_args.preprocess_dir, 'train-rels.tsv')\n", (8234, 8278), False, 'import os\n'), ((8333, 8404), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""train-queries_hardneg.pickle"""'], {}), "(data_args.embeddings_dir, f'train-queries_hardneg.pickle')\n", (8345, 8404), False, 'import os\n'), ((10530, 10593), 'os.path.join', 'os.path.join', 
(['data_args.embeddings_dir', '"""train-virtual_rel.tsv"""'], {}), "(data_args.embeddings_dir, 'train-virtual_rel.tsv')\n", (10542, 10593), False, 'import os\n'), ((11238, 11301), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', '"""train-virtual_rel.tsv"""'], {}), "(data_args.embeddings_dir, 'train-virtual_rel.tsv')\n", (11250, 11301), False, 'import os\n'), ((11698, 11761), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', '"""train-virtual_rel.tsv"""'], {}), "(data_args.embeddings_dir, 'train-virtual_rel.tsv')\n", (11710, 11761), False, 'import os\n'), ((11816, 11895), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""train-queries-virtual_hardneg.pickle"""'], {}), "(data_args.embeddings_dir, f'train-queries-virtual_hardneg.pickle')\n", (11828, 11895), False, 'import os\n'), ((13689, 13752), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', '"""train-virtual_rel.tsv"""'], {}), "(data_args.embeddings_dir, 'train-virtual_rel.tsv')\n", (13701, 13752), False, 'import os\n'), ((13807, 13886), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""train-queries-virtual_hardneg.pickle"""'], {}), "(data_args.embeddings_dir, f'train-queries-virtual_hardneg.pickle')\n", (13819, 13886), False, 'import os\n'), ((11378, 11457), 'os.path.join', 'os.path.join', (['data_args.embeddings_dir', 'f"""train-queries-virtual_hardneg.pickle"""'], {}), "(data_args.embeddings_dir, f'train-queries-virtual_hardneg.pickle')\n", (11390, 11457), False, 'import os\n')]
|
import visual_visdom
import evaluate
#########################################################
## Callback-functions for evaluating model-performance ##
#########################################################
def _sample_cb(log, config, visdom=None, test_datasets=None, sample_size=64, iters_per_task=None):
'''Initiates function for evaluating samples of generative model.
[test_datasets] None or <list> of <Datasets> (if provided, also reconstructions are shown)'''
def sample_cb(generator, batch, task=1):
'''Callback-function, to evaluate sample (and reconstruction) ability of the model.'''
iteration = batch if task==1 else (task-1)*iters_per_task + batch
if iteration % log == 0:
# Evaluate reconstruction-ability of model on [test_dataset]
if test_datasets is not None:
# Reconstruct samples from current task
evaluate.show_reconstruction(generator, test_datasets[task-1], config, size=int(sample_size/2),
visdom=visdom, task=task)
# Generate samples
evaluate.show_samples(generator, config, visdom=visdom, size=sample_size,
title="Generated images after {} iters in task {}".format(batch, task))
    # Return the callback-function (except if neither visdom nor pdf is selected!)
return sample_cb if (visdom is not None) else None
def _eval_cb(log, test_datasets, visdom=None, iters_per_task=None, test_size=None, classes_per_task=None,
scenario="class", summary_graph=True, with_exemplars=False, otr_exemplars=False):
'''Initiates function for evaluating performance of classifier (in terms of precision).
[test_datasets] <list> of <Datasets>; also if only 1 task, it should be presented as a list!
[classes_per_task] <int> number of "active" classes per task
[scenario] <str> how to decide which classes to include during evaluating precision'''
def eval_cb(classifier, batch, task=1):
'''Callback-function, to evaluate performance of classifier.'''
iteration = batch if task==1 else (task-1)*iters_per_task + batch
# evaluate the solver on multiple tasks (and log to visdom)
if iteration % log == 0:
evaluate.precision(classifier, test_datasets, task, iteration,
classes_per_task=classes_per_task, scenario=scenario, test_size=test_size,
visdom=visdom, summary_graph=summary_graph,
with_exemplars=with_exemplars, otr_exemplars=otr_exemplars)
## Return the callback-function (except if visdom is not selected!)
return eval_cb if (visdom is not None) else None
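# A minimal (hypothetical) usage sketch of the factories above: they return
# closures that a training loop is expected to call every iteration; the
# names `solver`, `task_id` and the loop itself are illustrative only.
#
#   eval_cbs = [_eval_cb(log=500, test_datasets=test_datasets,
#                        iters_per_task=iters, scenario="class")]
#   for batch_index in range(1, iters + 1):
#       # ...train on one batch...
#       for cb in eval_cbs:
#           if cb is not None:
#               cb(solver, batch_index, task=task_id)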
##------------------------------------------------------------------------------------------------------------------##
################################################
## Callback-functions for calculating metrics ##
################################################
def _metric_cb(log, test_datasets, metrics_dict=None, iters_per_task=None, test_size=None, classes_per_task=None,
scenario="class", with_exemplars=False):
'''Initiates function for calculating statistics required for calculating metrics.
[test_datasets] <list> of <Datasets>; also if only 1 task, it should be presented as a list!
[classes_per_task] <int> number of "active" classes per task
[scenario] <str> how to decide which classes to include during evaluating precision'''
def metric_cb(classifier, batch, task=1, otr_exemplars=False):
'''Callback-function, to calculate statistics for metrics.'''
iteration = batch if task==1 else (task-1)*iters_per_task + batch
        # calculate the metric statistics on multiple tasks
if iteration % log == 0:
evaluate.metric_statistics(classifier, test_datasets, task, iteration,
classes_per_task=classes_per_task, scenario=scenario, metrics_dict=metrics_dict,
test_size=test_size, with_exemplars=with_exemplars, otr_exemplars=otr_exemplars)
## Return the callback-function (except if no [metrics_dict] is selected!)
return metric_cb if (metrics_dict is not None) else None
##------------------------------------------------------------------------------------------------------------------##
###############################################################
## Callback-functions for keeping track of training-progress ##
###############################################################
def _solver_loss_cb(log, visdom, model=None, tasks=None, iters_per_task=None, replay=False, progress_bar=True):
'''Initiates function for keeping track of, and reporting on, the progress of the solver's training.'''
def cb(bar, iter, loss_dict, task=1):
'''Callback-function, to call on every iteration to keep track of training progress.'''
iteration = iter if task==1 else (task-1)*iters_per_task + iter
# progress-bar
if progress_bar and bar is not None:
task_stm = "" if (tasks is None) else " Task: {}/{} |".format(task, tasks)
bar.set_description(
' <SOLVER> |{t_stm} training loss: {loss:.3} | training precision: {prec:.3} |'
.format(t_stm=task_stm, loss=loss_dict['loss_total'], prec=loss_dict['precision'])
)
bar.update(1)
# log the loss of the solver (to visdom)
if (iteration % log == 0) and (visdom is not None):
if tasks is None or tasks==1:
plot_data = [loss_dict['pred']]
names = ['prediction']
else:
weight_new_task = 1. / task if replay else 1.
plot_data = [weight_new_task*loss_dict['pred']]
names = ['pred']
if replay:
if model.replay_targets=="hard":
plot_data += [(1-weight_new_task)*loss_dict['pred_r']]
names += ['pred - r']
elif model.replay_targets=="soft":
plot_data += [(1-weight_new_task)*loss_dict['distil_r']]
names += ['distill - r']
if model.ewc_lambda>0:
plot_data += [model.ewc_lambda * loss_dict['ewc']]
names += ['EWC (lambda={})'.format(model.ewc_lambda)]
if model.si_c>0:
plot_data += [model.si_c * loss_dict['si_loss']]
names += ['SI (c={})'.format(model.si_c)]
visual_visdom.visualize_scalars(
scalars=plot_data, names=names, iteration=iteration,
title="SOLVER: loss ({})".format(visdom["graph"]), env=visdom["env"], ylabel="training loss"
)
# Return the callback-function.
return cb
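# Note on the plotting above: when replay is active, the current-task loss is
# shown scaled by 1/task and the replayed losses by (1 - 1/task), matching the
# weights the callback computes; the EWC and SI terms are shown pre-multiplied
# by their respective regularization strengths.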
def _VAE_loss_cb(log, visdom, model, tasks=None, iters_per_task=None, replay=False, progress_bar=True):
'''Initiates functions for keeping track of, and reporting on, the progress of the generator's training.'''
def cb(bar, iter, loss_dict, task=1):
'''Callback-function, to perform on every iteration to keep track of training progress.'''
iteration = iter if task==1 else (task-1)*iters_per_task + iter
# progress-bar
if progress_bar and bar is not None:
task_stm = "" if (tasks is None) else " Task: {}/{} |".format(task, tasks)
bar.set_description(
' <VAE> |{t_stm} training loss: {loss:.3} | training precision: {prec:.3} |'
.format(t_stm=task_stm, loss=loss_dict['loss_total'], prec=loss_dict['precision'])
)
bar.update(1)
        # log the loss of the generator (to visdom)
if (iteration % log == 0) and (visdom is not None):
if tasks is None or tasks==1:
plot_data = [loss_dict['recon'], loss_dict['variat']]
names = ['Recon', 'Variat']
if model.lamda_pl > 0:
plot_data += [loss_dict['pred']]
names += ['Prediction']
else:
weight_new_task = 1. / task if replay else 1.
plot_data = [weight_new_task*loss_dict['recon'], weight_new_task*loss_dict['variat']]
names = ['Recon', 'Variat']
if model.lamda_pl > 0:
plot_data += [weight_new_task*loss_dict['pred']]
names += ['Prediction']
if replay:
plot_data += [(1-weight_new_task)*loss_dict['recon_r'], (1-weight_new_task)*loss_dict['variat_r']]
names += ['Recon - r', 'Variat - r']
if model.lamda_pl>0:
if model.replay_targets=="hard":
plot_data += [(1-weight_new_task)*loss_dict['pred_r']]
names += ['pred - r']
elif model.replay_targets=="soft":
plot_data += [(1-weight_new_task)*loss_dict['distil_r']]
names += ['distill - r']
visual_visdom.visualize_scalars(
scalars=plot_data, names=names, iteration=iteration,
title="VAE: loss ({})".format(visdom["graph"]), env=visdom["env"], ylabel="training loss"
)
# Return the callback-function
return cb
|
[
"evaluate.metric_statistics",
"evaluate.precision"
] |
[((2323, 2578), 'evaluate.precision', 'evaluate.precision', (['classifier', 'test_datasets', 'task', 'iteration'], {'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'test_size': 'test_size', 'visdom': 'visdom', 'summary_graph': 'summary_graph', 'with_exemplars': 'with_exemplars', 'otr_exemplars': 'otr_exemplars'}), '(classifier, test_datasets, task, iteration,\n classes_per_task=classes_per_task, scenario=scenario, test_size=\n test_size, visdom=visdom, summary_graph=summary_graph, with_exemplars=\n with_exemplars, otr_exemplars=otr_exemplars)\n', (2341, 2578), False, 'import evaluate\n'), ((3907, 4152), 'evaluate.metric_statistics', 'evaluate.metric_statistics', (['classifier', 'test_datasets', 'task', 'iteration'], {'classes_per_task': 'classes_per_task', 'scenario': 'scenario', 'metrics_dict': 'metrics_dict', 'test_size': 'test_size', 'with_exemplars': 'with_exemplars', 'otr_exemplars': 'otr_exemplars'}), '(classifier, test_datasets, task, iteration,\n classes_per_task=classes_per_task, scenario=scenario, metrics_dict=\n metrics_dict, test_size=test_size, with_exemplars=with_exemplars,\n otr_exemplars=otr_exemplars)\n', (3933, 4152), False, 'import evaluate\n')]
|
import argparse
import itertools
import logging
import os
import time
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import waitress
import numpy as np
import json
import re
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate, handy_tool, calculate_accuracy_f1
from model import RnnForSentencePairClassification, BertYForClassification, NERNet, NERWNet
from utils import load_torch_model
MODEL_MAP = {
'bert': BertYForClassification,
'rnn': NERNet,
'rnnkv': NERWNet
}
logging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')
logger = logging.getLogger()
cors_allow_all = CORS(allow_all_origins=True,
allow_origins_list=['*'],
allow_all_headers=True,
allow_all_methods=True,
allow_credentials_all_origins=True
)
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', default=58081,
help='falcon server port')
parser.add_argument(
'-c', '--config_file', default='config/rnn_config.json',
help='model config file')
args = parser.parse_args()
model_config=args.config_file
def result_to_json(string, tags):
item = {"string": string, "entities": []}
entity_name = ""
entity_start = 0
idx = 0
i = -1
zipped = zip(string, tags)
listzip = list(zipped)
last = len(listzip)
for char, tag in listzip:
i += 1
if tag == 0:
item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
elif tag == 1:
entity_name += char
entity_start = idx
elif tag == 2:
if (entity_name != "") and (i == last):
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
entity_name = ""
else:
entity_name += char
        elif tag == 3:  # entity end
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
entity_name = ""
else:
entity_name = ""
entity_start = idx
idx += 1
return item
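# Tag scheme implied by the loop above (read directly from the code):
# 0 = single-character entity (type 's'), 1 = entity begin, 2 = entity inside,
# 3 = entity end; multi-character entities are emitted with type 'bms'.
# For example, tags [1, 2, 3] over "三百元" yield one entity
# {"word": "三百元", "start": 0, "end": 3, "type": "bms"}.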
class TorchResource:
def __init__(self):
logger.info("...")
# 0. Load config
with open(model_config) as fin:
self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# 1. Load data
self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),
max_seq_len=self.config.max_seq_len,
model_type=self.config.model_type, config=self.config)
# 2. Load model
self.model = MODEL_MAP[self.config.model_type](self.config)
self.model = load_torch_model(
self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))
self.model.to(self.device)
logger.info("###")
def flatten(self, ll):
return list(itertools.chain(*ll))
def split(self, content):
line = re.findall('(.*?(?:[\n ]|.$))', content)
sublines = []
for l in line:
if len(l) > self.config.max_seq_len:
ll = re.findall('(.*?(?:[。,]|.$))', l)
sublines.extend(ll)
else:
sublines.append(l)
sublines = [l for l in sublines if len(l.strip())> 0]
return sublines
def cleanall(self, content):
return content.replace(" ", "", 10**10)
def filter(self, entity):
ls = re.findall('[1234567890零一二三四五六七八九十百千万亿兆〇壹贰叁肆伍陆柒捌玖拾佰仟萬億1234567890,,\.人民币元角分]', entity)
ratio = len(ls) * 1.0 / len(entity)
return ratio
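    # filter() returns the fraction of characters in `entity` that look like
    # digits, CJK numerals or currency-related symbols; the commented-out line
    # in bert_classification below shows the intended use, keeping only
    # entities with a ratio above 0.5.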
def bert_classification(self, content):
logger.info('1:{}'.format( content))
row = {'content': content}
df = pandas.DataFrame().append(row, ignore_index=True)
filename = "data/{}.csv".format(time.time())
df.to_csv(filename, index=False, columns=['content'])
test_set, sc_list, label_list = self.data.load_file(filename, train=False)
token_list = []
for line in sc_list:
tokens = self.data.tokenizer.convert_ids_to_tokens(line)
token_list.append(tokens)
data_loader_test = DataLoader(
test_set, batch_size=self.config.batch_size, shuffle=False)
# Evaluate
answer_list, length_list = evaluate(self.model, data_loader_test, self.device, isTest=True)
mod_tokens_list = handy_tool(token_list, length_list)
result = [result_to_json(t, s) for t, s in zip(mod_tokens_list, answer_list)]
entities = [item['entities'] for item in result]
entities = self.flatten(entities)
amount_entities = [entity['word'] for entity in entities if entity['type'] == 'bms']
# amount_entities = [amount for amount in amount_entities if self.filter(amount)>0.5]
return {"answer": amount_entities}
def on_get(self, req, resp):
logger.info("...")
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials','true')
content = req.get_param('1', True)
# clean_content =
clean_content = self.cleanall(content)
resp.media = self.bert_classification(clean_content)
logger.info("###")
def on_post(self, req, resp):
"""Handles POST requests"""
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials', 'true')
resp.set_header("Cache-Control", "no-cache")
data = req.stream.read(req.content_length)
data = data.decode('utf-8')
# regex = re.compile(r'\\(?![/u"])')
# data = regex.sub(r"\\", data)
jsondata = json.loads(data)
# clean_title = shortenlines(jsondata['1'])
# clean_content = cleanall(jsondata['2'])
content = jsondata['1']
clean_content = self.cleanall(content)
resp.media = self.bert_classification(clean_content)
logger.info("###")
if __name__=="__main__":
api = falcon.API(middleware=[cors_allow_all.middleware])
api.req_options.auto_parse_form_urlencoded = True
api.add_route('/z', TorchResource())
waitress.serve(api, port=args.port, threads=48, url_scheme='http')
|
[
"evaluate.handy_tool",
"evaluate.evaluate"
] |
[((581, 657), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)-18s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)-18s %(message)s')\n", (600, 657), False, 'import logging\n'), ((667, 686), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (684, 686), False, 'import logging\n'), ((704, 847), 'falcon_cors.CORS', 'CORS', ([], {'allow_all_origins': '(True)', 'allow_origins_list': "['*']", 'allow_all_headers': '(True)', 'allow_all_methods': '(True)', 'allow_credentials_all_origins': '(True)'}), "(allow_all_origins=True, allow_origins_list=['*'], allow_all_headers=\n True, allow_all_methods=True, allow_credentials_all_origins=True)\n", (708, 847), False, 'from falcon_cors import CORS\n'), ((964, 989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (987, 989), False, 'import argparse\n'), ((6672, 6722), 'falcon.API', 'falcon.API', ([], {'middleware': '[cors_allow_all.middleware]'}), '(middleware=[cors_allow_all.middleware])\n', (6682, 6722), False, 'import falcon\n'), ((6822, 6888), 'waitress.serve', 'waitress.serve', (['api'], {'port': 'args.port', 'threads': '(48)', 'url_scheme': '"""http"""'}), "(api, port=args.port, threads=48, url_scheme='http')\n", (6836, 6888), False, 'import waitress\n'), ((2597, 2622), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2620, 2622), False, 'import torch\n'), ((3370, 3410), 're.findall', 're.findall', (['"""(.*?(?:[\n ]|.$))"""', 'content'], {}), "('(.*?(?:[\\n ]|.$))', content)\n", (3380, 3410), False, 'import re\n'), ((3862, 3952), 're.findall', 're.findall', (['"""[1234567890零一二三四五六七八九十百千万亿兆〇壹贰叁肆伍陆柒捌玖拾佰仟萬億1234567890,,\\\\.人民币元角分]"""', 'entity'], {}), "('[1234567890零一二三四五六七八九十百千万亿兆〇壹贰叁肆伍陆柒捌玖拾佰仟萬億1234567890,,\\\\.人民币元角分]',\n entity)\n", (3872, 3952), False, 'import re\n'), ((4589, 4659), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'self.config.batch_size', 'shuffle': '(False)'}), '(test_set, batch_size=self.config.batch_size, shuffle=False)\n', (4599, 4659), False, 'from torch.utils.data import DataLoader\n'), ((4728, 4792), 'evaluate.evaluate', 'evaluate', (['self.model', 'data_loader_test', 'self.device'], {'isTest': '(True)'}), '(self.model, data_loader_test, self.device, isTest=True)\n', (4736, 4792), False, 'from evaluate import evaluate, handy_tool, calculate_accuracy_f1\n'), ((4819, 4854), 'evaluate.handy_tool', 'handy_tool', (['token_list', 'length_list'], {}), '(token_list, length_list)\n', (4829, 4854), False, 'from evaluate import evaluate, handy_tool, calculate_accuracy_f1\n'), ((6350, 6366), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (6360, 6366), False, 'import json\n'), ((2650, 2670), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2662, 2670), False, 'import torch\n'), ((2711, 2730), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2723, 2730), False, 'import torch\n'), ((3301, 3321), 'itertools.chain', 'itertools.chain', (['*ll'], {}), '(*ll)\n', (3316, 3321), False, 'import itertools\n'), ((4242, 4253), 'time.time', 'time.time', ([], {}), '()\n', (4251, 4253), False, 'import time\n'), ((2790, 2839), 'os.path.join', 'os.path.join', (['self.config.model_path', '"""vocab.txt"""'], {}), "(self.config.model_path, 'vocab.txt')\n", (2802, 2839), False, 'import os\n'), ((3140, 3189), 'os.path.join', 'os.path.join', (['self.config.model_path', '"""model.bin"""'], {}), "(self.config.model_path, 'model.bin')\n", (3152, 3189), False, 'import 
os\n'), ((3526, 3559), 're.findall', 're.findall', (['"""(.*?(?:[。,]|.$))"""', 'l'], {}), "('(.*?(?:[。,]|.$))', l)\n", (3536, 3559), False, 'import re\n'), ((4152, 4170), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (4168, 4170), False, 'import pandas\n'), ((2564, 2584), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**d)\n', (2579, 2584), False, 'from types import SimpleNamespace\n')]
|
import torch
import torch.nn.functional as F
from settings import config
import numpy
import evaluate
from collections import defaultdict
import time
class Model(torch.nn.Module):
def __init__(self, data):
super(Model, self).__init__()
# Performance score
self.score = 0
self.best_score = 0
# Filename
self.input_name = "best"
self.output_name = "best"
# number of words in dictionary
num_words = len(data.word_to_index)
# Sentence
self.embedding = torch.nn.Embedding(num_words, config['word_dimension'])
self.lstm = torch.nn.LSTM(config['word_dimension'], config['model_dimension'], 1)
# Image - Assume image feature is already extracted from pre-trained CNN
self.linear = torch.nn.Linear(config['image_dimension'], config['model_dimension'])
# Initialize weights for linear layer
torch.nn.init.xavier_uniform_(self.linear.weight)
self.linear.bias.data.fill_(0)
if torch.cuda.is_available() and config["cuda"] == True:
self.embedding.cuda()
self.lstm.cuda()
self.linear.cuda()
def forward(self, sentence, image):
return self.forward_caption(sentence), self.forward_image(image)
def forward_image(self, image):
# Pass image through model
image_embedding = self.linear(image)
# Normalize
norm_image_embedding = F.normalize(image_embedding, p=2, dim=1)
return norm_image_embedding
def forward_caption(self, sentence):
# Pass caption through model
sentence_embedding = self.embedding(sentence)
_, (sentence_embedding, _) = self.lstm(sentence_embedding)
x_sentence_embedding = sentence_embedding.squeeze(0)
# Normalize vectors
norm_sentence_embedding = F.normalize(x_sentence_embedding, p=2, dim=1)
return norm_sentence_embedding
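    # Both caption and image embeddings are L2-normalized, so a plain dot
    # product between them equals their cosine similarity, which is presumably
    # what evaluate.image_to_text / evaluate.text_to_image rank by.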
def average_i2t_and_t2i(self, i2t, t2i):
i_r1, i_r5, i_r10, i_medr, t_r1, t_r5, t_r10, t_medr = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
for x in i2t:
i_r1 += x[0]
i_r5 += x[1]
i_r10 += x[2]
i_medr += x[3]
for x in t2i:
t_r1 += x[0]
t_r5 += x[1]
t_r10 += x[2]
t_medr += x[3]
i_r1 = i_r1/len(i2t)
i_r5 = i_r5/len(i2t)
i_r10 = i_r10/len(i2t)
i_medr = i_medr/len(i2t)
t_r1 = t_r1/len(i2t)
t_r5 = t_r5/len(i2t)
t_r10 = t_r10/len(i2t)
t_medr = t_medr/len(i2t)
print(" * Image to text scores: R@1: %.1f, R@5: %.1f, R@10: %.1f, Medr: %.1f" % (i_r1, i_r5, i_r10, i_medr))
print(" * Text to image scores: R@1: %.1f, R@5: %.1f, R@10: %.1f, Medr: %.1f" % (t_r1, t_r5, t_r10, t_medr))
return
def evaluate(self, data, verbose=False, save_if_better=False):
"""
        If using k-fold cross-validation in the data module,
        the data class will handle updating the self.train and self.test
        datasets, which is why the data.test_set(True) call matters here.
        However, a raw initialization of the data class will result in
        the loaded data being assigned to both test and train, so we can
        still evaluate the results.
"""
print(" * Validating", end="", flush=True)
data.test_set(True) # very important | swaps to iterating over the test set for validation
score = 0
i2t, t2i = [], []
for caption, image_feature in data:
x, y = self.forward(caption, image_feature)
score_1, i2t_result = evaluate.image_to_text(x, y, verbose=verbose)
score_2, t2i_result = evaluate.text_to_image(x, y, verbose=verbose)
score += (score_1 + score_2)
i2t.append(i2t_result)
t2i.append(t2i_result)
print(".", end="", flush=True)
print("[DONE]", end="", flush=True)
print("")
data.test_set(False) # also very important | swaps BACK to using the TRAIN set
self.average_i2t_and_t2i(i2t, t2i)
if save_if_better and score > self.best_score:
self.save()
data.save_dictionaries()
self.best_score = score
return score
def save(self):
print(' * Saving model...')
torch.save(self.state_dict(), self.output_name+'.pkl')
print(' * Done!')
return
def load(self):
self.load_state_dict(torch.load(self.input_name+".pkl"))
print("[LOADED]", self.input_name+".pkl", "\n")
return
|
[
"evaluate.image_to_text",
"evaluate.text_to_image"
] |
[((477, 532), 'torch.nn.Embedding', 'torch.nn.Embedding', (['num_words', "config['word_dimension']"], {}), "(num_words, config['word_dimension'])\n", (495, 532), False, 'import torch\n'), ((547, 616), 'torch.nn.LSTM', 'torch.nn.LSTM', (["config['word_dimension']", "config['model_dimension']", '(1)'], {}), "(config['word_dimension'], config['model_dimension'], 1)\n", (560, 616), False, 'import torch\n'), ((709, 778), 'torch.nn.Linear', 'torch.nn.Linear', (["config['image_dimension']", "config['model_dimension']"], {}), "(config['image_dimension'], config['model_dimension'])\n", (724, 778), False, 'import torch\n'), ((822, 871), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.linear.weight'], {}), '(self.linear.weight)\n', (851, 871), False, 'import torch\n'), ((1287, 1327), 'torch.nn.functional.normalize', 'F.normalize', (['image_embedding'], {'p': '(2)', 'dim': '(1)'}), '(image_embedding, p=2, dim=1)\n', (1298, 1327), True, 'import torch.nn.functional as F\n'), ((1647, 1692), 'torch.nn.functional.normalize', 'F.normalize', (['x_sentence_embedding'], {'p': '(2)', 'dim': '(1)'}), '(x_sentence_embedding, p=2, dim=1)\n', (1658, 1692), True, 'import torch.nn.functional as F\n'), ((915, 940), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (938, 940), False, 'import torch\n'), ((3178, 3223), 'evaluate.image_to_text', 'evaluate.image_to_text', (['x', 'y'], {'verbose': 'verbose'}), '(x, y, verbose=verbose)\n', (3200, 3223), False, 'import evaluate\n'), ((3249, 3294), 'evaluate.text_to_image', 'evaluate.text_to_image', (['x', 'y'], {'verbose': 'verbose'}), '(x, y, verbose=verbose)\n', (3271, 3294), False, 'import evaluate\n'), ((3907, 3943), 'torch.load', 'torch.load', (["(self.input_name + '.pkl')"], {}), "(self.input_name + '.pkl')\n", (3917, 3943), False, 'import torch\n')]
|
import _init_path
from pgd import PGD
from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger
from evaluate import evaluate
from functools import reduce
from pyquaternion import Quaternion
import tqdm
import re
from datetime import datetime
import lib.utils.iou3d.iou3d_utils as iou3d_utils
import lib.utils.kitti_utils as kitti_utils
import argparse
from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list
# from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.utils.bbox_transform import decode_bbox_target
import tools.train_utils.train_utils as train_utils
from lib.net.point_rcnn_attack import AttackPointRCNN_RPN, AttackPointRCNN_RCNN
from lib.net.point_rcnn import PointRCNN
import torch.nn.functional as F
import torch
import numpy as np
import math
import os
np.random.seed(1024) # set the same seed
def parse_args():
parser = argparse.ArgumentParser()
# FLAT args
parser.add_argument('--split', default='val_1000',
help='The data split for evaluation')
parser.add_argument('--stage', default='1',
help='Attack stage of Point RCNN. Options: "1" for RPN stage, "2" for RCNN stage')
parser.add_argument('--nb_iter', default=20, type=int,
help='Number of attack iterations in PGD')
parser.add_argument('--task', default='cls',
help='Task of attacking. Options: "cls" for classification, "reg" for regression')
parser.add_argument('--attack_type', default='all',
help='Specify attack type. Options: "all", "translation", "rotation"')
parser.add_argument('--iter_eps', default=0.1, type=float,
help='Primary PGD attack step size for each iteration, in translation only/rotation only attacks, this parameter is used.')
parser.add_argument('--iter_eps2', default=0.01, type=float,
help='Secondary PGD attack step size for each iteration, only effective when attack_type is "all" and poly mode is disabled.')
"""
    In our code, iter_eps2 has no effect in translation-only or rotation-only attacks.
    For the translation-only attack, we set iter_eps to 0.1.
    For the rotation-only attack, we set iter_eps to 0.01.
    For attacking the full trajectory (translation + rotation), we set iter_eps to 0.01 and iter_eps2 to 0.1.
"""
parser.add_argument('--poly', action='store_true', default=False,
                        help='Polynomial trajectory perturbation option. Notice: if true, attack_type is fixed to "translation"')
# PointRCNN args
parser.add_argument('--cfg_file', type=str, default='./PointRCNN/tools/cfgs/default.yaml',
help='specify the config for evaluation')
parser.add_argument("--eval_mode", type=str, default='rcnn',
help="specify the evaluation mode")
parser.add_argument('--eval_all', action='store_true',
default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--test', action='store_true',
default=False, help='evaluate without ground truth')
parser.add_argument("--ckpt", type=str, default='checkpoint_epoch_70.pth',
help="specify a checkpoint to be evaluated")
parser.add_argument("--rpn_ckpt", type=str, default=None,
help="specify the checkpoint of rpn if trained separated")
parser.add_argument("--rcnn_ckpt", type=str, default=None,
help="specify the checkpoint of rcnn if trained separated")
parser.add_argument('--batch_size', type=int, default=1,
help='batch size for evaluation')
parser.add_argument('--workers', type=int, default=4,
help='number of workers for dataloader')
parser.add_argument("--extra_tag", type=str, default='nuscenes',
help="extra tag for multiple evaluation")
parser.add_argument('--output_dir', type=str, default=None,
help='specify an output directory if needed')
parser.add_argument("--ckpt_dir", type=str, default=None,
help="specify a ckpt directory to be evaluated if needed")
parser.add_argument('--save_result', action='store_true',
default=False, help='save evaluation results to files')
parser.add_argument('--save_rpn_feature', action='store_true', default=False,
help='save features for separately rcnn training and evaluation')
parser.add_argument('--random_select', action='store_true',
default=True, help='sample to the same number of points')
parser.add_argument('--start_epoch', default=0, type=int,
help='ignore the checkpoint smaller than this epoch')
parser.add_argument("--rcnn_eval_roi_dir", type=str, default=None,
help='specify the saved rois for rcnn evaluation when using rcnn_offline mode')
parser.add_argument("--rcnn_eval_feature_dir", type=str, default=None,
help='specify the saved features for rcnn evaluation when using rcnn_offline mode')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
args = parser.parse_args()
return args
args = parse_args()
def load_ckpt_based_on_args(model, logger):
if args.ckpt is not None:
train_utils.load_checkpoint(model, filename=args.ckpt, logger=logger)
total_keys = model.state_dict().keys().__len__()
if cfg.RPN.ENABLED and args.rpn_ckpt is not None:
load_part_ckpt(model, filename=args.rpn_ckpt,
logger=logger, total_keys=total_keys)
if cfg.RCNN.ENABLED and args.rcnn_ckpt is not None:
load_part_ckpt(model, filename=args.rcnn_ckpt,
logger=logger, total_keys=total_keys)
def load_part_ckpt(model, filename, logger, total_keys=-1):
if os.path.isfile(filename):
logger.info(
"==> Loading part model from checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model_state = checkpoint['model_state']
update_model_state = {
key: val for key, val in model_state.items() if key in model.state_dict()}
state_dict = model.state_dict()
state_dict.update(update_model_state)
model.load_state_dict(state_dict)
update_keys = update_model_state.keys().__len__()
if update_keys == 0:
raise RuntimeError
logger.info("==> Done (loaded %d/%d)" % (update_keys, total_keys))
else:
raise FileNotFoundError
def eval_one_epoch_joint(model, model_attack, dataloader, epoch_id, result_dir, logger):
np.random.seed(666)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
# print(MEAN_SIZE)
mode = 'TEST' if args.test else 'EVAL'
poly = args.poly
task = args.task
stage = args.stage
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if args.save_result:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
os.makedirs(rpn_output_dir, exist_ok=True)
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
logger.info('---- EPOCH %s JOINT EVALUATION ----' % epoch_id)
logger.info('==> Output file: %s' % result_dir)
for k, v in model.named_parameters():
v.requires_grad = False # fix parameters
model.eval()
for k, v in model_attack.named_parameters():
v.requires_grad = False # fix parameters
model_attack.eval()
for k, v in model_attack.named_parameters():
if v.requires_grad:
logger.info('PARAM %s NOT FIXED!', k)
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
pgd_attack = PGD(model_attack, iter_eps=args.iter_eps, iter_eps2=args.iter_eps2,
nb_iter=args.nb_iter, poly=args.poly)
num_step = 100
kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)
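    # Quaternion(axis=(0, 0, 1), angle=pi/2) is the 90-degree yaw rotation
    # between the KITTI and nuScenes lidar frames; the nuScenes devkit uses
    # the same constant (kitti_to_nu_lidar) for its KITTI conversion.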
for data in dataloader:
cnt += 1
sample_id, pts_rect, pts_features, pts_input, pts_pose = data['sample_id'], data[
'pts_rect'], data['pts_features'], data['pts_input'], data['pose_lidar']
batch_size = len(sample_id)
pose_matrix = np.squeeze(pts_pose)
# print(pose_matrix)
#plt.scatter(pts_input[0, :, 0], pts_input[0, :, 2], s=0.2, c='g', alpha=1)
if not pose_matrix.shape[0] == 2:
            # we first transform the point cloud from the KITTI coordinate frame to the nuScenes coordinate frame
start_pc = np.squeeze(pts_input).T
            # <-- be careful here! we need to convert to the nuScenes format
start_pc = np.dot(
kitti_to_nu_lidar.rotation_matrix, start_pc[:3, :])
# change to polar coordinate
polar_points = np.arctan2(
start_pc[1, :], start_pc[0, :]) * 180 / np.pi + 180 # in degrees (0, 360]
polar_points_min = np.floor(np.min(polar_points)-0.1)
polar_points_max = np.ceil(np.max(polar_points))
start_pose_rec_translation = [
pose_matrix[0, 0], pose_matrix[0, 1], pose_matrix[0, 2]]
start_pose_rec_rotation = [
pose_matrix[0, 3], pose_matrix[0, 4], pose_matrix[0, 5], pose_matrix[0, 6]]
start_cs_rec_translation = [
pose_matrix[1, 0], pose_matrix[1, 1], pose_matrix[1, 2]]
start_cs_rec_rotation = [
pose_matrix[1, 3], pose_matrix[1, 4], pose_matrix[1, 5], pose_matrix[1, 6]]
end_pose_rec_translation = [
pose_matrix[2, 0], pose_matrix[2, 1], pose_matrix[2, 2]]
end_pose_rec_rotation = [
pose_matrix[2, 3], pose_matrix[2, 4], pose_matrix[2, 5], pose_matrix[2, 6]]
# enable motion distortion
# Init
sensor_from_vehicle = transform_matrix(
start_cs_rec_translation, Quaternion(start_cs_rec_rotation), inverse=True)
vehicle_from_global = transform_matrix(
start_pose_rec_translation, Quaternion(start_pose_rec_rotation), inverse=True)
global_from_car = transform_matrix(
start_pose_rec_translation, Quaternion(start_pose_rec_rotation), inverse=False)
car_from_current = transform_matrix(
start_cs_rec_translation, Quaternion(start_cs_rec_rotation), inverse=False)
# find the next sample data
translation_step = (np.array(
end_pose_rec_translation) - np.array(start_pose_rec_translation))/num_step
p_start = start_pose_rec_rotation
q_end = end_pose_rec_rotation
# trans_matrix_gps_list = list()
pc_timestap_list = list()
for t in range(num_step):
t_current = start_pose_rec_translation + t * translation_step
q_current = []
cosa = p_start[0]*q_end[0] + p_start[1]*q_end[1] + \
p_start[2]*q_end[2] + p_start[3]*q_end[3]
                # If the dot product is negative, the quaternions have opposite handedness and slerp won't take
# the shorter path. Fix by reversing one quaternion.
if cosa < 0.0:
q_end[0] = -q_end[0]
q_end[1] = -q_end[1]
q_end[2] = -q_end[2]
q_end[3] = -q_end[3]
cosa = -cosa
# If the inputs are too close for comfort, linearly interpolate
if cosa > 0.9995:
k0 = 1.0 - t/num_step
k1 = t/num_step
else:
sina = np.sqrt(1.0 - cosa*cosa)
a = math.atan2(sina, cosa)
k0 = math.sin((1.0 - t/num_step)*a) / sina
k1 = math.sin(t*a/num_step) / sina
q_current.append(p_start[0]*k0 + q_end[0]*k1)
q_current.append(p_start[1]*k0 + q_end[1]*k1)
q_current.append(p_start[2]*k0 + q_end[2]*k1)
q_current.append(p_start[3]*k0 + q_end[3]*k1)
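# The block above is standard quaternion slerp between the start and end poses:
# q(u) = sin((1-u)*a)/sin(a) * q0 + sin(u*a)/sin(a) * q1, with cos(a) = <q0, q1>
# and u = t/num_step; the dot-product sign flip keeps the shorter arc, and the
# linear fallback avoids dividing by sin(a) ~ 0 for nearly identical rotations.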
ref_from_car = transform_matrix(
start_cs_rec_translation, Quaternion(start_cs_rec_rotation), inverse=True)
car_from_global = transform_matrix(
t_current, Quaternion(q_current), inverse=True)
# select the points in a small scan area
small_delta = (polar_points_max-polar_points_min)/num_step
scan_start = polar_points > small_delta*t + polar_points_min
scan_end = polar_points <= small_delta*(t+1) + polar_points_min
scan_area = np.logical_and(scan_start, scan_end)
current_pc = start_pc[:, scan_area]
# transform point cloud at start timestep into the interpolation step t
trans_matrix = reduce(
np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
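# Read right to left, the chain maps sensor@start -> vehicle@start -> global ->
# vehicle@t -> sensor@t, i.e. it re-expresses the sector-t points in the sensor
# frame at interpolation step t, simulating a spinning lidar's motion distortion.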
current_pc = trans_matrix.dot(
np.vstack((current_pc, np.ones(current_pc.shape[1]))))
pc_timestap_list.append(current_pc)
'''
Now calculate GPS compensation transformation
'''
vehicle_from_sensor = transform_matrix(
start_cs_rec_translation, Quaternion(start_cs_rec_rotation), inverse=False)
global_from_vehicle = transform_matrix(
t_current, Quaternion(q_current), inverse=False)
# can also calculate the inverse matrix of trans_matrix
trans_matrix_gps = reduce(np.dot, [
sensor_from_vehicle, vehicle_from_global, global_from_vehicle, vehicle_from_sensor])
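# trans_matrix_gps is the opposite chain (sensor@t back to sensor@start), i.e.
# the GPS/IMU-style motion compensation; stacking one matrix per step lets the
# PGD attack below perturb the compensation trajectory rather than the raw points.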
trans_matrix_gps = np.expand_dims(trans_matrix_gps, 0)
if t == 0:
trans_matrix_gps_tensor = trans_matrix_gps
else:
trans_matrix_gps_tensor = np.concatenate(
[trans_matrix_gps_tensor, trans_matrix_gps], 0) # [1000, 4, 4]
rpn_cls_label, rpn_reg_label, gt_boxes3d = data[
'rpn_cls_label'], data['rpn_reg_label'], data['gt_boxes3d']
rpn_cls_label[rpn_cls_label > -1] = 1 - \
rpn_cls_label[rpn_cls_label > -1]
adv_pc = pgd_attack.attack(args.attack_type, pc_timestap_list, trans_matrix_gps_tensor, poly,
stage, task, rpn_cls_label=rpn_cls_label, rpn_reg_label=rpn_reg_label, gt_boxes3d=gt_boxes3d)
inputs = torch.from_numpy(adv_pc).cuda(non_blocking=True).float()
else:
inputs = torch.from_numpy(pts_input).cuda(
non_blocking=True).float()
# model inference
input_data = {'pts_input': inputs}
ret_dict = model(input_data)
roi_scores_raw = ret_dict['roi_scores_raw'] # (B, M)
roi_boxes3d = ret_dict['rois'] # (B, M, 7)
seg_result = ret_dict['seg_result'].long() # (B, N)
rcnn_cls = ret_dict['rcnn_cls'].view(
batch_size, -1, ret_dict['rcnn_cls'].shape[1])
rcnn_reg = ret_dict['rcnn_reg'].view(
batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
assert False
pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True).view(batch_size, -1, 7)
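# decode_bbox_target converts the bin-based RCNN regression (x/z location bins
# plus residuals, heading bins, size residuals w.r.t. the mean anchor size)
# back into absolute 3D boxes of shape (B, M, 7).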
# scoring
if rcnn_cls.shape[2] == 1:
raw_scores = rcnn_cls # (B, M, 1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
recalled_num = gt_num = rpn_iou = 0
if not args.test:
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(
rpn_cls_label).cuda(non_blocking=True).long()
gt_boxes3d = data['gt_boxes3d']
for k in range(batch_size):
# calculate recall
cur_gt_boxes3d = gt_boxes3d[k]
tmp_idx = len(cur_gt_boxes3d) - 1
while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
tmp_idx -= 1
if tmp_idx >= 0:
cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
cur_gt_boxes3d = torch.from_numpy(
cur_gt_boxes3d).cuda(non_blocking=True).float()
iou3d = iou3d_utils.boxes_iou3d_gpu(
pred_boxes3d[k], cur_gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (
gt_max_iou > thresh).sum().item()
recalled_num += (gt_max_iou > 0.7).sum().item()
gt_num += cur_gt_boxes3d.shape[0]
total_gt_bbox += cur_gt_boxes3d.shape[0]
# original recall
iou3d_in = iou3d_utils.boxes_iou3d_gpu(
roi_boxes3d[k], cur_gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (
gt_max_iou_in > thresh).sum().item()
if not cfg.RPN.FIXED:
fg_mask = rpn_cls_label > 0
correct = ((seg_result == rpn_cls_label)
& fg_mask).sum().float()
union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
total_rpn_iou += rpn_iou.item()
disp_dict = {
'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if args.save_result:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
roi_scores_raw_np = roi_scores_raw.cpu().numpy()
raw_scores_np = raw_scores.cpu().numpy()
rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
seg_result_np = seg_result.cpu().numpy()
output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
seg_result_np.reshape(batch_size, -1, 1)), axis=2)
for k in range(batch_size):
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
roi_scores_raw_np[k], image_shape, cfg)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
raw_scores_np[k], image_shape, cfg)
output_file = os.path.join(
rpn_output_dir, '%06d.npy' % cur_sample_id)
np.save(output_file, output_data.astype(np.float32))
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
for k in range(batch_size):
cur_inds = inds[k].view(-1)
if cur_inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
raw_scores_selected = raw_scores[k, cur_inds]
norm_scores_selected = norm_scores[k, cur_inds]
# NMS thresh
# rotated nms
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(
pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(
boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu(
).detach().numpy(), scores_selected.cpu().detach().numpy()
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
final_total += pred_boxes3d_selected.shape[0]
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,
final_output_dir, scores_selected, image_shape, cfg)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir,
'..', dataset.split, 'ImageSets', dataset.split + '.txt')
# print('split_file---', split_file)
split_file = os.path.abspath(split_file)
# print('split_file---', split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
# print('image_idx_list---', image_idx_list)
empty_cnt = 0
for k in range(len(image_idx_list)):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
logger.info('empty_cnt=%d: dump empty file %s' %
(empty_cnt, cur_file))
ret_dict = {'empty_cnt': empty_cnt}
logger.info(
'-------------------performance of epoch %s---------------------' % epoch_id)
logger.info(str(datetime.now()))
avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(len(dataset), 1.0))
logger.info('final average detections: %.3f' % avg_det_num)
logger.info('final average rpn_iou refined: %.3f' % avg_rpn_iou)
logger.info('final average cls acc: %.3f' % avg_cls_acc)
logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
ret_dict['rpn_iou'] = avg_rpn_iou
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(
total_gt_bbox, 1.0)
logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
total_gt_bbox, cur_roi_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
logger.info('Average Precision:')
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
# print(dataset.label_dir, final_output_dir,
# split_file, name_to_class[cfg.CLASSES])
# old eval
# ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
# current_class=name_to_class[cfg.CLASSES])
# logger.info('old eval:'+ap_result_str)
# new eval
ap_result_str, ap_dict = evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
logger.info('new eval:'+ap_result_str)
ret_dict.update(ap_dict)
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch(model, model_attack, dataloader, epoch_id, result_dir, logger):
assert cfg.RPN.ENABLED and cfg.RCNN.ENABLED, 'RPN and RCNN module should be both enabled'
ret_dict = eval_one_epoch_joint(
model, model_attack, dataloader, epoch_id, result_dir, logger)
return ret_dict
if __name__ == '__main__':
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
# poly mode only available for translation-only attack
if args.poly:
args.attack_type = 'translation'
cfg.TAG = os.path.splitext(os.path.basename(args.cfg_file))[0]
outputdir = "FLAT_stage{stage}_{task}_{poly}{nb_iter}_{iter_eps}_{iter_eps2}".format(stage=str(args.stage), task=str(args.task), poly=(
'poly_' if args.poly else ''), nb_iter=str(args.nb_iter), iter_eps=str(args.iter_eps), iter_eps2=str(args.iter_eps2))
if args.eval_mode == 'rcnn':
cfg.RCNN.ENABLED = True
cfg.RPN.ENABLED = cfg.RPN.FIXED = True
root_result_dir = os.path.join(
'output', args.split, args.attack_type, outputdir)
ckpt_dir = os.path.join('../', 'output', args.split,
args.attack_type, outputdir, 'ckpt')
else:
raise NotImplementedError
if args.ckpt_dir is not None:
ckpt_dir = args.ckpt_dir
if args.output_dir is not None:
root_result_dir = args.output_dir
os.makedirs(root_result_dir, exist_ok=True)
num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
logger = create_logger(log_file)
logger.info('**********************Start logging**********************')
# for key, val in vars(args).items():
# logger.info("{:16} {}".format(key, val))
save_config_to_file(cfg, logger=logger)
# create dataloader & network
test_loader = create_dataloader(logger, args, cfg)
model = PointRCNN(num_classes=test_loader.dataset.num_class,
use_xyz=True, mode='TEST')
model.cuda()
if args.stage == '1':
model_attack = AttackPointRCNN_RPN(
num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
elif args.stage == '2':
model_attack = AttackPointRCNN_RCNN(
num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
model_attack.cuda()
# load checkpoint
load_ckpt_based_on_args(model, logger)
# start evaluation
eval_one_epoch(model, model_attack, test_loader,
epoch_id, root_result_dir, logger)
|
[
"evaluate.evaluate"
] |
[((859, 879), 'numpy.random.seed', 'np.random.seed', (['(1024)'], {}), '(1024)\n', (873, 879), True, 'import numpy as np\n'), ((934, 959), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (957, 959), False, 'import argparse\n'), ((6129, 6153), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (6143, 6153), False, 'import os\n'), ((6920, 6939), 'numpy.random.seed', 'np.random.seed', (['(666)'], {}), '(666)\n', (6934, 6939), True, 'import numpy as np\n'), ((7156, 7204), 'os.path.join', 'os.path.join', (['result_dir', '"""final_result"""', '"""data"""'], {}), "(result_dir, 'final_result', 'data')\n", (7168, 7204), False, 'import os\n'), ((7209, 7253), 'os.makedirs', 'os.makedirs', (['final_output_dir'], {'exist_ok': '(True)'}), '(final_output_dir, exist_ok=True)\n', (7220, 7253), False, 'import os\n'), ((8492, 8602), 'pgd.PGD', 'PGD', (['model_attack'], {'iter_eps': 'args.iter_eps', 'iter_eps2': 'args.iter_eps2', 'nb_iter': 'args.nb_iter', 'poly': 'args.poly'}), '(model_attack, iter_eps=args.iter_eps, iter_eps2=args.iter_eps2, nb_iter\n =args.nb_iter, poly=args.poly)\n', (8495, 8602), False, 'from pgd import PGD\n'), ((8662, 8705), 'pyquaternion.Quaternion', 'Quaternion', ([], {'axis': '(0, 0, 1)', 'angle': '(np.pi / 2)'}), '(axis=(0, 0, 1), angle=np.pi / 2)\n', (8672, 8705), False, 'from pyquaternion import Quaternion\n'), ((22684, 22781), 'os.path.join', 'os.path.join', (['dataset.imageset_dir', '""".."""', 'dataset.split', '"""ImageSets"""', "(dataset.split + '.txt')"], {}), "(dataset.imageset_dir, '..', dataset.split, 'ImageSets', \n dataset.split + '.txt')\n", (22696, 22781), False, 'import os\n'), ((22865, 22892), 'os.path.abspath', 'os.path.abspath', (['split_file'], {}), '(split_file)\n', (22880, 22892), False, 'import os\n'), ((27510, 27553), 'os.makedirs', 'os.makedirs', (['root_result_dir'], {'exist_ok': '(True)'}), '(root_result_dir, exist_ok=True)\n', (27521, 27553), False, 'import os\n'), ((27720, 27769), 'os.path.join', 'os.path.join', (['root_result_dir', '"""log_eval_one.txt"""'], {}), "(root_result_dir, 'log_eval_one.txt')\n", (27732, 27769), False, 'import os\n'), ((27783, 27806), 'utils.create_logger', 'create_logger', (['log_file'], {}), '(log_file)\n', (27796, 27806), False, 'from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger\n'), ((27981, 28020), 'lib.config.save_config_to_file', 'save_config_to_file', (['cfg'], {'logger': 'logger'}), '(cfg, logger=logger)\n', (28000, 28020), False, 'from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list\n'), ((28074, 28110), 'utils.create_dataloader', 'create_dataloader', (['logger', 'args', 'cfg'], {}), '(logger, args, cfg)\n', (28091, 28110), False, 'from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger\n'), ((28123, 28202), 'lib.net.point_rcnn.PointRCNN', 'PointRCNN', ([], {'num_classes': 'test_loader.dataset.num_class', 'use_xyz': '(True)', 'mode': '"""TEST"""'}), "(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')\n", (28132, 28202), False, 'from lib.net.point_rcnn import PointRCNN\n'), ((5594, 5663), 'tools.train_utils.train_utils.load_checkpoint', 'train_utils.load_checkpoint', (['model'], {'filename': 'args.ckpt', 'logger': 'logger'}), '(model, filename=args.ckpt, logger=logger)\n', (5621, 5663), True, 'import tools.train_utils.train_utils as train_utils\n'), ((6273, 6293), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (6283, 6293), False, 'import 
torch\n'), ((7305, 7351), 'os.path.join', 'os.path.join', (['result_dir', '"""roi_result"""', '"""data"""'], {}), "(result_dir, 'roi_result', 'data')\n", (7317, 7351), False, 'import os\n'), ((7380, 7429), 'os.path.join', 'os.path.join', (['result_dir', '"""refine_result"""', '"""data"""'], {}), "(result_dir, 'refine_result', 'data')\n", (7392, 7429), False, 'import os\n'), ((7455, 7501), 'os.path.join', 'os.path.join', (['result_dir', '"""rpn_result"""', '"""data"""'], {}), "(result_dir, 'rpn_result', 'data')\n", (7467, 7501), False, 'import os\n'), ((7510, 7552), 'os.makedirs', 'os.makedirs', (['rpn_output_dir'], {'exist_ok': '(True)'}), '(rpn_output_dir, exist_ok=True)\n', (7521, 7552), False, 'import os\n'), ((7561, 7603), 'os.makedirs', 'os.makedirs', (['roi_output_dir'], {'exist_ok': '(True)'}), '(roi_output_dir, exist_ok=True)\n', (7572, 7603), False, 'import os\n'), ((7612, 7657), 'os.makedirs', 'os.makedirs', (['refine_output_dir'], {'exist_ok': '(True)'}), '(refine_output_dir, exist_ok=True)\n', (7623, 7657), False, 'import os\n'), ((8985, 9005), 'numpy.squeeze', 'np.squeeze', (['pts_pose'], {}), '(pts_pose)\n', (8995, 9005), True, 'import numpy as np\n'), ((23137, 23197), 'os.path.join', 'os.path.join', (['final_output_dir', "('%s.txt' % image_idx_list[k])"], {}), "(final_output_dir, '%s.txt' % image_idx_list[k])\n", (23149, 23197), False, 'import os\n'), ((25720, 25840), 'evaluate.evaluate', 'evaluate', (['dataset.label_dir', 'final_output_dir'], {'label_split_file': 'split_file', 'current_class': 'name_to_class[cfg.CLASSES]'}), '(dataset.label_dir, final_output_dir, label_split_file=split_file,\n current_class=name_to_class[cfg.CLASSES])\n', (25728, 25840), False, 'from evaluate import evaluate\n'), ((26413, 26441), 'lib.config.cfg_from_file', 'cfg_from_file', (['args.cfg_file'], {}), '(args.cfg_file)\n', (26426, 26441), False, 'from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list\n'), ((26484, 26512), 'lib.config.cfg_from_list', 'cfg_from_list', (['args.set_cfgs'], {}), '(args.set_cfgs)\n', (26497, 26512), False, 'from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list\n'), ((27106, 27169), 'os.path.join', 'os.path.join', (['"""output"""', 'args.split', 'args.attack_type', 'outputdir'], {}), "('output', args.split, args.attack_type, outputdir)\n", (27118, 27169), False, 'import os\n'), ((27202, 27280), 'os.path.join', 'os.path.join', (['"""../"""', '"""output"""', 'args.split', 'args.attack_type', 'outputdir', '"""ckpt"""'], {}), "('../', 'output', args.split, args.attack_type, outputdir, 'ckpt')\n", (27214, 27280), False, 'import os\n'), ((27570, 27599), 're.findall', 're.findall', (['"""\\\\d+"""', 'args.ckpt'], {}), "('\\\\d+', args.ckpt)\n", (27580, 27599), False, 'import re\n'), ((28292, 28385), 'lib.net.point_rcnn_attack.AttackPointRCNN_RPN', 'AttackPointRCNN_RPN', ([], {'num_classes': 'test_loader.dataset.num_class', 'use_xyz': '(True)', 'mode': '"""TEST"""'}), "(num_classes=test_loader.dataset.num_class, use_xyz=True,\n mode='TEST')\n", (28311, 28385), False, 'from lib.net.point_rcnn_attack import AttackPointRCNN_RPN, AttackPointRCNN_RCNN\n'), ((6956, 6994), 'torch.from_numpy', 'torch.from_numpy', (['cfg.CLS_MEAN_SIZE[0]'], {}), '(cfg.CLS_MEAN_SIZE[0])\n', (6972, 6994), False, 'import torch\n'), ((9389, 9447), 'numpy.dot', 'np.dot', (['kitti_to_nu_lidar.rotation_matrix', 'start_pc[:3, :]'], {}), '(kitti_to_nu_lidar.rotation_matrix, start_pc[:3, :])\n', (9395, 9447), True, 'import numpy as np\n'), ((17058, 17083), 
'torch.sigmoid', 'torch.sigmoid', (['raw_scores'], {}), '(raw_scores)\n', (17071, 17083), False, 'import torch\n'), ((17266, 17292), 'torch.nn.functional.softmax', 'F.softmax', (['rcnn_cls'], {'dim': '(1)'}), '(rcnn_cls, dim=1)\n', (17275, 17292), True, 'import torch.nn.functional as F\n'), ((21759, 21814), 'lib.utils.kitti_utils.boxes3d_to_bev_torch', 'kitti_utils.boxes3d_to_bev_torch', (['pred_boxes3d_selected'], {}), '(pred_boxes3d_selected)\n', (21791, 21814), True, 'import lib.utils.kitti_utils as kitti_utils\n'), ((22472, 22591), 'utils.save_kitti_format', 'save_kitti_format', (['cur_sample_id', 'calib', 'pred_boxes3d_selected', 'final_output_dir', 'scores_selected', 'image_shape', 'cfg'], {}), '(cur_sample_id, calib, pred_boxes3d_selected,\n final_output_dir, scores_selected, image_shape, cfg)\n', (22489, 22591), False, 'from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger\n'), ((23213, 23237), 'os.path.exists', 'os.path.exists', (['cur_file'], {}), '(cur_file)\n', (23227, 23237), False, 'import os\n'), ((23608, 23622), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (23620, 23622), False, 'from datetime import datetime\n'), ((26664, 26695), 'os.path.basename', 'os.path.basename', (['args.cfg_file'], {}), '(args.cfg_file)\n', (26680, 26695), False, 'import os\n'), ((28446, 28541), 'lib.net.point_rcnn_attack.AttackPointRCNN_RCNN', 'AttackPointRCNN_RCNN', ([], {'num_classes': 'test_loader.dataset.num_class', 'use_xyz': '(True)', 'mode': '"""TEST"""'}), "(num_classes=test_loader.dataset.num_class, use_xyz=\n True, mode='TEST')\n", (28466, 28541), False, 'from lib.net.point_rcnn_attack import AttackPointRCNN_RPN, AttackPointRCNN_RCNN\n'), ((9271, 9292), 'numpy.squeeze', 'np.squeeze', (['pts_input'], {}), '(pts_input)\n', (9281, 9292), True, 'import numpy as np\n'), ((9743, 9763), 'numpy.max', 'np.max', (['polar_points'], {}), '(polar_points)\n', (9749, 9763), True, 'import numpy as np\n'), ((10657, 10690), 'pyquaternion.Quaternion', 'Quaternion', (['start_cs_rec_rotation'], {}), '(start_cs_rec_rotation)\n', (10667, 10690), False, 'from pyquaternion import Quaternion\n'), ((10802, 10837), 'pyquaternion.Quaternion', 'Quaternion', (['start_pose_rec_rotation'], {}), '(start_pose_rec_rotation)\n', (10812, 10837), False, 'from pyquaternion import Quaternion\n'), ((10946, 10981), 'pyquaternion.Quaternion', 'Quaternion', (['start_pose_rec_rotation'], {}), '(start_pose_rec_rotation)\n', (10956, 10981), False, 'from pyquaternion import Quaternion\n'), ((11089, 11122), 'pyquaternion.Quaternion', 'Quaternion', (['start_cs_rec_rotation'], {}), '(start_cs_rec_rotation)\n', (11099, 11122), False, 'from pyquaternion import Quaternion\n'), ((13442, 13478), 'numpy.logical_and', 'np.logical_and', (['scan_start', 'scan_end'], {}), '(scan_start, scan_end)\n', (13456, 13478), True, 'import numpy as np\n'), ((13653, 13739), 'functools.reduce', 'reduce', (['np.dot', '[ref_from_car, car_from_global, global_from_car, car_from_current]'], {}), '(np.dot, [ref_from_car, car_from_global, global_from_car,\n car_from_current])\n', (13659, 13739), False, 'from functools import reduce\n'), ((14418, 14522), 'functools.reduce', 'reduce', (['np.dot', '[sensor_from_vehicle, vehicle_from_global, global_from_vehicle,\n vehicle_from_sensor]'], {}), '(np.dot, [sensor_from_vehicle, vehicle_from_global,\n global_from_vehicle, vehicle_from_sensor])\n', (14424, 14522), False, 'from functools import reduce\n'), ((14598, 14633), 'numpy.expand_dims', 'np.expand_dims', (['trans_matrix_gps', 
'(0)'], {}), '(trans_matrix_gps, 0)\n', (14612, 14633), True, 'import numpy as np\n'), ((20787, 20905), 'utils.save_kitti_format', 'save_kitti_format', (['cur_sample_id', 'calib', 'roi_boxes3d_np[k]', 'roi_output_dir', 'roi_scores_raw_np[k]', 'image_shape', 'cfg'], {}), '(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,\n roi_scores_raw_np[k], image_shape, cfg)\n', (20804, 20905), False, 'from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger\n'), ((20952, 21070), 'utils.save_kitti_format', 'save_kitti_format', (['cur_sample_id', 'calib', 'pred_boxes3d_np[k]', 'refine_output_dir', 'raw_scores_np[k]', 'image_shape', 'cfg'], {}), '(cur_sample_id, calib, pred_boxes3d_np[k],\n refine_output_dir, raw_scores_np[k], image_shape, cfg)\n', (20969, 21070), False, 'from utils import save_kitti_format, transform_matrix, create_dataloader, create_logger\n'), ((21132, 21188), 'os.path.join', 'os.path.join', (['rpn_output_dir', "('%06d.npy' % cur_sample_id)"], {}), "(rpn_output_dir, '%06d.npy' % cur_sample_id)\n", (21144, 21188), False, 'import os\n'), ((9678, 9698), 'numpy.min', 'np.min', (['polar_points'], {}), '(polar_points)\n', (9684, 9698), True, 'import numpy as np\n'), ((11212, 11246), 'numpy.array', 'np.array', (['end_pose_rec_translation'], {}), '(end_pose_rec_translation)\n', (11220, 11246), True, 'import numpy as np\n'), ((11266, 11302), 'numpy.array', 'np.array', (['start_pose_rec_translation'], {}), '(start_pose_rec_translation)\n', (11274, 11302), True, 'import numpy as np\n'), ((12419, 12445), 'numpy.sqrt', 'np.sqrt', (['(1.0 - cosa * cosa)'], {}), '(1.0 - cosa * cosa)\n', (12426, 12445), True, 'import numpy as np\n'), ((12468, 12490), 'math.atan2', 'math.atan2', (['sina', 'cosa'], {}), '(sina, cosa)\n', (12478, 12490), False, 'import math\n'), ((12954, 12987), 'pyquaternion.Quaternion', 'Quaternion', (['start_cs_rec_rotation'], {}), '(start_cs_rec_rotation)\n', (12964, 12987), False, 'from pyquaternion import Quaternion\n'), ((13086, 13107), 'pyquaternion.Quaternion', 'Quaternion', (['q_current'], {}), '(q_current)\n', (13096, 13107), False, 'from pyquaternion import Quaternion\n'), ((14136, 14169), 'pyquaternion.Quaternion', 'Quaternion', (['start_cs_rec_rotation'], {}), '(start_cs_rec_rotation)\n', (14146, 14169), False, 'from pyquaternion import Quaternion\n'), ((14273, 14294), 'pyquaternion.Quaternion', 'Quaternion', (['q_current'], {}), '(q_current)\n', (14283, 14294), False, 'from pyquaternion import Quaternion\n'), ((14793, 14855), 'numpy.concatenate', 'np.concatenate', (['[trans_matrix_gps_tensor, trans_matrix_gps]', '(0)'], {}), '([trans_matrix_gps_tensor, trans_matrix_gps], 0)\n', (14807, 14855), True, 'import numpy as np\n'), ((17197, 17226), 'torch.argmax', 'torch.argmax', (['rcnn_cls'], {'dim': '(1)'}), '(rcnn_cls, dim=1)\n', (17209, 17226), False, 'import torch\n'), ((18325, 18385), 'lib.utils.iou3d.iou3d_utils.boxes_iou3d_gpu', 'iou3d_utils.boxes_iou3d_gpu', (['pred_boxes3d[k]', 'cur_gt_boxes3d'], {}), '(pred_boxes3d[k], cur_gt_boxes3d)\n', (18352, 18385), True, 'import lib.utils.iou3d.iou3d_utils as iou3d_utils\n'), ((18956, 19015), 'lib.utils.iou3d.iou3d_utils.boxes_iou3d_gpu', 'iou3d_utils.boxes_iou3d_gpu', (['roi_boxes3d[k]', 'cur_gt_boxes3d'], {}), '(roi_boxes3d[k], cur_gt_boxes3d)\n', (18983, 19015), True, 'import lib.utils.iou3d.iou3d_utils as iou3d_utils\n'), ((21855, 21941), 'lib.utils.iou3d.iou3d_utils.nms_gpu', 'iou3d_utils.nms_gpu', (['boxes_bev_selected', 'raw_scores_selected', 'cfg.RCNN.NMS_THRESH'], {}), 
'(boxes_bev_selected, raw_scores_selected, cfg.RCNN.\n NMS_THRESH)\n', (21874, 21941), True, 'import lib.utils.iou3d.iou3d_utils as iou3d_utils\n'), ((9534, 9576), 'numpy.arctan2', 'np.arctan2', (['start_pc[1, :]', 'start_pc[0, :]'], {}), '(start_pc[1, :], start_pc[0, :])\n', (9544, 9576), True, 'import numpy as np\n'), ((12516, 12550), 'math.sin', 'math.sin', (['((1.0 - t / num_step) * a)'], {}), '((1.0 - t / num_step) * a)\n', (12524, 12550), False, 'import math\n'), ((12579, 12605), 'math.sin', 'math.sin', (['(t * a / num_step)'], {}), '(t * a / num_step)\n', (12587, 12605), False, 'import math\n'), ((19629, 19656), 'torch.clamp', 'torch.clamp', (['union'], {'min': '(1.0)'}), '(union, min=1.0)\n', (19640, 19656), False, 'import torch\n'), ((13847, 13875), 'numpy.ones', 'np.ones', (['current_pc.shape[1]'], {}), '(current_pc.shape[1])\n', (13854, 13875), True, 'import numpy as np\n'), ((15400, 15424), 'torch.from_numpy', 'torch.from_numpy', (['adv_pc'], {}), '(adv_pc)\n', (15416, 15424), False, 'import torch\n'), ((15492, 15519), 'torch.from_numpy', 'torch.from_numpy', (['pts_input'], {}), '(pts_input)\n', (15508, 15519), False, 'import torch\n'), ((17653, 17684), 'torch.from_numpy', 'torch.from_numpy', (['rpn_cls_label'], {}), '(rpn_cls_label)\n', (17669, 17684), False, 'import torch\n'), ((18207, 18239), 'torch.from_numpy', 'torch.from_numpy', (['cur_gt_boxes3d'], {}), '(cur_gt_boxes3d)\n', (18223, 18239), False, 'import torch\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import os
import ast
import argparse
import warnings
import numpy as np
from functools import partial
from data import read, load_dict, convert_example_to_feature
from model import JointModel
from utils import set_seed
from metric import SeqEntityScore, MultiLabelClassificationScore
from evaluate import evaluate
import paddle.nn.functional as F
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup
from paddlenlp.data import Stack, Pad, Tuple
warnings.filterwarnings("ignore")
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
parser.add_argument("--slot_dict_path", type=str, default=None, help="slot dict path")
parser.add_argument("--intent_dict_path", type=str, default=None, help="intent dict path")
parser.add_argument("--train_path", type=str, default=None, help="train data")
parser.add_argument("--dev_path", type=str, default=None, help="dev data")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
parser.add_argument("--warmup_proportion", type=float, default=0.1, help="Warmup proportion params for warmup strategy")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
parser.add_argument("--max_grad_norm", type=float, default=1.0, help="max grad norm to clip gradient.")
parser.add_argument("--eval_step", type=int, default=500, help="evaluation step")
parser.add_argument("--log_step", type=int, default=50, help="log step")
parser.add_argument("--batch_size", type=int, default=32, help="Total examples' number in batch for training.")
parser.add_argument("--checkpoint", type=str, default=None, help="Directory to model checkpoint")
parser.add_argument("--use_history", type=bool, default=False, help="Use history in dataset or not")
parser.add_argument("--intent_weight", type=bool, default=True, help="Use intent weight strategy")
parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
args = parser.parse_args()
# yapf: enable
class JointLoss(paddle.nn.Layer):
def __init__(self, intent_weight=None):
super(JointLoss, self).__init__()
self.intent_criterion = paddle.nn.BCEWithLogitsLoss(weight=intent_weight)
self.slot_criterion = paddle.nn.CrossEntropyLoss()
def forward(self, intent_logits, slot_logits, intent_labels, slot_labels):
intent_loss = self.intent_criterion(intent_logits, intent_labels)
slot_loss = self.slot_criterion(slot_logits, slot_labels)
loss = intent_loss + slot_loss
return loss
def train():
# set running envir
paddle.set_device(args.device)
set_seed(args.seed)
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
model_name = "ernie-1.0"
# load and process data
intent2id, id2intent = load_dict(args.intent_dict_path)
slot2id, id2slot = load_dict(args.slot_dict_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
# compute intent weight
if args.intent_weight:
intent_weight = [1] * len(intent2id)
for example in train_ds:
for intent in example["intent_labels"]:
intent_weight[intent2id[intent]] += 1
for intent, intent_id in intent2id.items():
neg_pos = (len(train_ds) - intent_weight[intent_id]) / intent_weight[intent_id]
intent_weight[intent_id] = np.log10(neg_pos)
intent_weight = paddle.to_tensor(intent_weight)
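# BCEWithLogitsLoss accepts a per-class weight tensor; log10(neg/pos) gently
# up-weights rare intents without letting the rarest ones dominate the loss.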
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_feature, tokenizer=tokenizer, slot2id=slot2id, intent2id=intent2id, use_history=args.use_history, pad_default_tag="O", max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id),
Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
Stack(dtype="float32"),
Pad(axis=0, pad_val=slot2id["O"], dtype="int64"),
Pad(axis=0, pad_val=tokenizer.pad_token_id)
):fn(samples)
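# The five collated fields match the batch unpacking in the training loop below:
# input_ids, token_type_ids, intent_labels (multi-hot float32), slot tag_ids
# (padded with the "O" tag), and history_ids.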
train_batch_sampler = paddle.io.BatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
dev_batch_sampler = paddle.io.BatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
train_loader = paddle.io.DataLoader(dataset=train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn, return_list=True)
dev_loader = paddle.io.DataLoader(dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn, return_list=True)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
joint_model = JointModel(ernie, len(slot2id), len(intent2id), use_history=args.use_history, dropout=0.1)
num_training_steps = len(train_loader) * args.num_epoch
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
decay_params = [p.name for n, p in joint_model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=joint_model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params, grad_clip=grad_clip)
if args.intent_weight:
joint_loss = JointLoss(intent_weight)
else:
joint_loss = JointLoss()
intent_metric = MultiLabelClassificationScore(id2intent)
slot_metric = SeqEntityScore(id2slot)
# start to train joint_model
global_step, intent_best_f1, slot_best_f1 = 0, 0., 0.
joint_model.train()
for epoch in range(1, args.num_epoch+1):
for batch_data in train_loader:
input_ids, token_type_ids, intent_labels, tag_ids, history_ids = batch_data
intent_logits, slot_logits = joint_model(input_ids, token_type_ids=token_type_ids, history_ids=history_ids)
loss = joint_loss(intent_logits, slot_logits, intent_labels, tag_ids)
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0:
print(f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
if global_step > 0 and global_step % args.eval_step == 0:
intent_results, slot_results = evaluate(joint_model, dev_loader, intent_metric, slot_metric)
intent_result, slot_result = intent_results["Total"], slot_results["Total"]
joint_model.train()
intent_f1, slot_f1 = intent_result["F1"], slot_result["F1"]
if intent_f1 > intent_best_f1 or slot_f1 > slot_best_f1:
paddle.save(joint_model.state_dict(), f"{args.checkpoint}/best.pdparams")
if intent_f1 > intent_best_f1:
print(f"intent best F1 performence has been updated: {intent_best_f1:.5f} --> {intent_f1:.5f}")
intent_best_f1 = intent_f1
if slot_f1 > slot_best_f1:
print(f"slot best F1 performence has been updated: {slot_best_f1:.5f} --> {slot_f1:.5f}")
slot_best_f1 = slot_f1
print(f'intent evaluation result: precision: {intent_result["Precision"]:.5f}, recall: {intent_result["Recall"]:.5f}, F1: {intent_result["F1"]:.5f}, current best {intent_best_f1:.5f}')
print(f'slot evaluation result: precision: {slot_result["Precision"]:.5f}, recall: {slot_result["Recall"]:.5f}, F1: {slot_result["F1"]:.5f}, current best {slot_best_f1:.5f}\n')
global_step += 1
if __name__=="__main__":
train()
|
[
"evaluate.evaluate"
] |
[((1163, 1196), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1186, 1196), False, 'import warnings\n'), ((1223, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (1246, 1255), False, 'import argparse\n'), ((3662, 3692), 'paddle.set_device', 'paddle.set_device', (['args.device'], {}), '(args.device)\n', (3679, 3692), False, 'import paddle\n'), ((3697, 3716), 'utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (3705, 3716), False, 'from utils import set_seed\n'), ((3882, 3914), 'data.load_dict', 'load_dict', (['args.intent_dict_path'], {}), '(args.intent_dict_path)\n', (3891, 3914), False, 'from data import read, load_dict, convert_example_to_feature\n'), ((3938, 3968), 'data.load_dict', 'load_dict', (['args.slot_dict_path'], {}), '(args.slot_dict_path)\n', (3947, 3968), False, 'from data import read, load_dict, convert_example_to_feature\n'), ((3985, 4042), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['read'], {'data_path': 'args.train_path', 'lazy': '(False)'}), '(read, data_path=args.train_path, lazy=False)\n', (3997, 4042), False, 'from paddlenlp.datasets import load_dataset\n'), ((4056, 4111), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['read'], {'data_path': 'args.dev_path', 'lazy': '(False)'}), '(read, data_path=args.dev_path, lazy=False)\n', (4068, 4111), False, 'from paddlenlp.datasets import load_dataset\n'), ((4626, 4668), 'paddlenlp.transformers.ErnieTokenizer.from_pretrained', 'ErnieTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (4656, 4668), False, 'from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup\n'), ((4686, 4869), 'functools.partial', 'partial', (['convert_example_to_feature'], {'tokenizer': 'tokenizer', 'slot2id': 'slot2id', 'intent2id': 'intent2id', 'use_history': 'args.use_history', 'pad_default_tag': '"""O"""', 'max_seq_len': 'args.max_seq_len'}), "(convert_example_to_feature, tokenizer=tokenizer, slot2id=slot2id,\n intent2id=intent2id, use_history=args.use_history, pad_default_tag='O',\n max_seq_len=args.max_seq_len)\n", (4693, 4869), False, 'from functools import partial\n'), ((5307, 5381), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['train_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_ds, batch_size=args.batch_size, shuffle=True)\n', (5329, 5381), False, 'import paddle\n'), ((5406, 5479), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['dev_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dev_ds, batch_size=args.batch_size, shuffle=False)\n', (5428, 5479), False, 'import paddle\n'), ((5499, 5618), 'paddle.io.DataLoader', 'paddle.io.DataLoader', ([], {'dataset': 'train_ds', 'batch_sampler': 'train_batch_sampler', 'collate_fn': 'batchify_fn', 'return_list': '(True)'}), '(dataset=train_ds, batch_sampler=train_batch_sampler,\n collate_fn=batchify_fn, return_list=True)\n', (5519, 5618), False, 'import paddle\n'), ((5632, 5747), 'paddle.io.DataLoader', 'paddle.io.DataLoader', ([], {'dataset': 'dev_ds', 'batch_sampler': 'dev_batch_sampler', 'collate_fn': 'batchify_fn', 'return_list': '(True)'}), '(dataset=dev_ds, batch_sampler=dev_batch_sampler,\n collate_fn=batchify_fn, return_list=True)\n', (5652, 5747), False, 'import paddle\n'), ((5789, 5827), 'paddlenlp.transformers.ErnieModel.from_pretrained', 'ErnieModel.from_pretrained', (['model_name'], {}), '(model_name)\n', (5815, 5827), False, 'from paddlenlp.transformers import 
ErnieTokenizer, ErnieModel, LinearDecayWithWarmup\n'), ((6017, 6107), 'paddlenlp.transformers.LinearDecayWithWarmup', 'LinearDecayWithWarmup', (['args.learning_rate', 'num_training_steps', 'args.warmup_proportion'], {}), '(args.learning_rate, num_training_steps, args.\n warmup_proportion)\n', (6038, 6107), False, 'from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup\n'), ((6237, 6287), 'paddle.nn.ClipGradByGlobalNorm', 'paddle.nn.ClipGradByGlobalNorm', (['args.max_grad_norm'], {}), '(args.max_grad_norm)\n', (6267, 6287), False, 'import paddle\n'), ((6634, 6674), 'metric.MultiLabelClassificationScore', 'MultiLabelClassificationScore', (['id2intent'], {}), '(id2intent)\n', (6663, 6674), False, 'from metric import SeqEntityScore, MultiLabelClassificationScore\n'), ((6693, 6716), 'metric.SeqEntityScore', 'SeqEntityScore', (['id2slot'], {}), '(id2slot)\n', (6707, 6716), False, 'from metric import SeqEntityScore, MultiLabelClassificationScore\n'), ((3230, 3279), 'paddle.nn.BCEWithLogitsLoss', 'paddle.nn.BCEWithLogitsLoss', ([], {'weight': 'intent_weight'}), '(weight=intent_weight)\n', (3257, 3279), False, 'import paddle\n'), ((3310, 3338), 'paddle.nn.CrossEntropyLoss', 'paddle.nn.CrossEntropyLoss', ([], {}), '()\n', (3336, 3338), False, 'import paddle\n'), ((3729, 3760), 'os.path.exists', 'os.path.exists', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3743, 3760), False, 'import os\n'), ((3770, 3795), 'os.mkdir', 'os.mkdir', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3778, 3795), False, 'import os\n'), ((4577, 4608), 'paddle.to_tensor', 'paddle.to_tensor', (['intent_weight'], {}), '(intent_weight)\n', (4593, 4608), False, 'import paddle\n'), ((4535, 4552), 'numpy.log10', 'np.log10', (['neg_pos'], {}), '(neg_pos)\n', (4543, 4552), True, 'import numpy as np\n'), ((5016, 5059), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (5019, 5059), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((5070, 5118), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id'}), '(axis=0, pad_val=tokenizer.pad_token_type_id)\n', (5073, 5118), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((5128, 5150), 'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (5133, 5150), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((5160, 5208), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': "slot2id['O']", 'dtype': '"""int64"""'}), "(axis=0, pad_val=slot2id['O'], dtype='int64')\n", (5163, 5208), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((5218, 5261), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (5221, 5261), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((7645, 7706), 'evaluate.evaluate', 'evaluate', (['joint_model', 'dev_loader', 'intent_metric', 'slot_metric'], {}), '(joint_model, dev_loader, intent_metric, slot_metric)\n', (7653, 7706), False, 'from evaluate import evaluate\n')]
|
import argparse
import torch
from models import ConvNet_sem_seg
import load_data
import evaluate
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def add_args(parser):
# general options
parser.add_argument("--epochs", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--learning_rate", type=float, default=1e-3)
parser.add_argument("--bandwidth", type=int, default=30)
parser.add_argument("--sem_seg_threshold", type=int, default=150)
parser.add_argument("--train_data", default="non_rot", choices=["rot", "non_rot"])
parser.add_argument("--test_data", default="rot", choices=["rot", "non_rot"])
parser.add_argument("--len_train_data", type=int, default=60000)
parser.add_argument("--len_test_data", type=int, default=10000)
# model specific options
parser.add_argument("--feature_numbers", type=int, default=[20, 40, 20], nargs="*")
parser.add_argument("--kernel_sizes", type=int, default=[5, 5, 5], nargs="*")
parser.add_argument("--use_skips", action="store_true")
parser.add_argument("--strides", type=int, default=[3, 3, 3], nargs="*")
return parser
def main():
parser = add_args(argparse.ArgumentParser())
args = parser.parse_args()
segmenter = ConvNet_sem_seg(
dim_in=2 * args.bandwidth,
f_in=1,
fs=args.feature_numbers,
f_out=11,
k_sizes=args.kernel_sizes,
strides=args.strides,
use_skips=args.use_skips,
)
segmenter.to(DEVICE)
optimizer = torch.optim.Adam(segmenter.parameters(), lr=args.learning_rate)
criterion = torch.nn.CrossEntropyLoss()
criterion.to(DEVICE)
train_loader, test_loader = load_data.get_dataloader(
batch_size=args.batch_size,
bandwidth=args.bandwidth,
train_set_rotated=(args.train_data == "rot"),
test_set_rotated=(args.test_data == "rot"),
len_train_data=args.len_train_data,
len_test_data=args.len_test_data,
)
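# With the defaults (train_data="non_rot", test_data="rot") this run measures
# how well segmentation learned on unrotated spheres transfers to rotated inputs.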
n_batches = args.len_train_data // args.batch_size
for epoch in range(args.epochs):
print(f"Training Epoch {epoch + 1}/{args.epochs}...")
for batch_idx, (imgs, masks) in enumerate(train_loader):
imgs = imgs.to(DEVICE)
masks = masks.to(DEVICE, dtype=torch.long)
optimizer.zero_grad()
outputs = segmenter(imgs)
loss = criterion(outputs, masks)
loss.backward()
optimizer.step()
if n_batches > 20 and batch_idx % (n_batches // 20) == 0:
print(f"Batch {batch_idx + 1}/{n_batches}, train loss: {loss.item():.3f}")
print(f"Epoch {epoch+1}/{args.epochs} finished, train loss: {loss.item():.3f}")
print("Evaluating on test data...")
metrics = evaluate.get_metrics(segmenter, test_loader)
for metric, value in metrics.items():
print(metric + ":")
print(value)
if __name__ == "__main__":
main()
print("Done.")
|
[
"evaluate.get_metrics"
] |
[((1311, 1480), 'models.ConvNet_sem_seg', 'ConvNet_sem_seg', ([], {'dim_in': '(2 * args.bandwidth)', 'f_in': '(1)', 'fs': 'args.feature_numbers', 'f_out': '(11)', 'k_sizes': 'args.kernel_sizes', 'strides': 'args.strides', 'use_skips': 'args.use_skips'}), '(dim_in=2 * args.bandwidth, f_in=1, fs=args.feature_numbers,\n f_out=11, k_sizes=args.kernel_sizes, strides=args.strides, use_skips=\n args.use_skips)\n', (1326, 1480), False, 'from models import ConvNet_sem_seg\n'), ((1658, 1685), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1683, 1685), False, 'import torch\n'), ((1744, 1992), 'load_data.get_dataloader', 'load_data.get_dataloader', ([], {'batch_size': 'args.batch_size', 'bandwidth': 'args.bandwidth', 'train_set_rotated': "(args.train_data == 'rot')", 'test_set_rotated': "(args.test_data == 'rot')", 'len_train_data': 'args.len_train_data', 'len_test_data': 'args.len_test_data'}), "(batch_size=args.batch_size, bandwidth=args.\n bandwidth, train_set_rotated=args.train_data == 'rot', test_set_rotated\n =args.test_data == 'rot', len_train_data=args.len_train_data,\n len_test_data=args.len_test_data)\n", (1768, 1992), False, 'import load_data\n'), ((135, 160), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (158, 160), False, 'import torch\n'), ((1236, 1261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1259, 1261), False, 'import argparse\n'), ((2836, 2880), 'evaluate.get_metrics', 'evaluate.get_metrics', (['segmenter', 'test_loader'], {}), '(segmenter, test_loader)\n', (2856, 2880), False, 'import evaluate\n')]
|
"""Train the model"""
import argparse
import logging
import os
import os.path
import pickle
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import utils
import model.net as net
import model.data_loader as data_loader
from evaluate import evaluate
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='all', help="The Model Name")
parser.add_argument('--md', default='experiments', help="The Model Directory")
parser.add_argument('--data_dir', default='data', help="Directory containing the dataset")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training")
def train(model, optimizer, loss_fn, dataloader, metrics, params):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
"""
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
# Use tqdm for progress bar
with tqdm(total=len(dataloader), ascii=True) as t:
for i, (train_batch, labels_batch) in enumerate(dataloader):
# move to GPU if available
if params.cuda:
train_batch, labels_batch = train_batch.float().cuda(), labels_batch.float().cuda()
# convert to torch Variables
train_batch, labels_batch = Variable(train_batch), Variable(labels_batch)
# compute model output and loss
output_batch = model(train_batch)
loss = loss_fn(output_batch, labels_batch)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
# compute all metrics on this batch
summary_batch = {metric:metrics[metric](output_batch, labels_batch)
for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
t.update()
# compute mean of all metrics in summary
metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
def train_and_evaluate(model, optimizer, scheduler, loss_fn, metrics, params, model_dir,
restore_file=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
scheduler: learning rate scheduler (accepted but currently unused)
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
"""
# reload weights from restore_file if specified
if restore_file is not None:
restore_path = os.path.join(model_dir, restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
best_val_loss = 1e10
best_test_loss = 1e10
for epoch in range(params.num_epochs):
logging.info("Generate the train and test datasets...")
# fetch dataloaders for every epoch
dataloaders = data_loader.fetch_dataloader(['train', 'val', 'test'], args.data_dir, params)
logging.info("- done.")
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
train(model, optimizer, loss_fn, dataloaders['train'], metrics, params)
# Evaluate for one epoch on validation set
val_metrics = evaluate(model, loss_fn, dataloaders['val'], metrics, params, 'Val')
val_loss = val_metrics['rmse']
is_best_val = val_loss<=best_val_loss
# Evaluate for one epoch on test set
test_metrics = evaluate(model, loss_fn, dataloaders['test'], metrics, params, 'Test')
test_loss = test_metrics['rmse']
is_best_test = test_loss<=best_test_loss
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict' : optimizer.state_dict()},
is_best=is_best_val,
checkpoint=model_dir)
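# Only the validation criterion decides which weights are checkpointed as best;
# the test branch below records a metrics json only, never a second weight file.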
# If best_eval, best_save_path
if is_best_val:
logging.info("- Found new best val result")
best_val_loss = val_loss
# Save metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
if is_best_test:
logging.info("- Found new best test result")
best_test_loss = test_loss
# Save metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_test_best_weights.json")
utils.save_dict_to_json(test_metrics, best_json_path)
def run(args=None):
# Load the parameters from json file
args.model_dir = args.md + '/' + args.model
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
# Set the random seed for reproducible experiments
torch.manual_seed(230)
if params.cuda: torch.cuda.manual_seed(230)
# Set the logger
print(os.path.join(args.model_dir, '_train.log'))
utils.set_logger(os.path.join(args.model_dir, '_train.log'))
# Define the model and optimizer
model = getattr(net, args.model)(params).cuda() if params.cuda else getattr(net, args.model)(params)
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate, weight_decay=1e-4)
# optimizer = optim.RSMprop(model.parameters(), lr=params.learning_rate)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
# scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
scheduler = None
# fetch loss function and metrics
loss_fn = nn.MSELoss()
metrics = net.metrics
# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, optimizer, scheduler, loss_fn, metrics, params, args.model_dir,
args.restore_file)
if __name__ == '__main__':
args = parser.parse_args()
if args.model == 'all':
MODELS = [f for f in os.listdir(args.md) if not os.path.isfile(os.path.join(args.md, f))]
for m in MODELS:
args.model = m
run(args)
else:
run(args)
|
[
"evaluate.evaluate"
] |
[((370, 395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (393, 395), False, 'import argparse\n'), ((1680, 1702), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1700, 1702), False, 'import utils\n'), ((3567, 3617), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (3579, 3617), False, 'import logging\n'), ((7100, 7143), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (7112, 7143), False, 'import os\n'), ((7155, 7180), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (7169, 7180), False, 'import os\n'), ((7254, 7277), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (7266, 7277), False, 'import utils\n'), ((7324, 7349), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7347, 7349), False, 'import torch\n'), ((7410, 7432), 'torch.manual_seed', 'torch.manual_seed', (['(230)'], {}), '(230)\n', (7427, 7432), False, 'import torch\n'), ((8160, 8172), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (8170, 8172), True, 'import torch.nn as nn\n'), ((3411, 3445), 'numpy.mean', 'np.mean', (['[x[metric] for x in summ]'], {}), '([x[metric] for x in summ])\n', (3418, 3445), True, 'import numpy as np\n'), ((4688, 4748), 'os.path.join', 'os.path.join', (['args.model_dir', "(args.restore_file + '.pth.tar')"], {}), "(args.model_dir, args.restore_file + '.pth.tar')\n", (4700, 4748), False, 'import os\n'), ((4831, 4884), 'utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (4852, 4884), False, 'import utils\n'), ((4989, 5044), 'logging.info', 'logging.info', (['"""Generate the train and test datasets..."""'], {}), "('Generate the train and test datasets...')\n", (5001, 5044), False, 'import logging\n'), ((5111, 5188), 'model.data_loader.fetch_dataloader', 'data_loader.fetch_dataloader', (["['train', 'val', 'test']", 'args.data_dir', 'params'], {}), "(['train', 'val', 'test'], args.data_dir, params)\n", (5139, 5188), True, 'import model.data_loader as data_loader\n'), ((5197, 5220), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (5209, 5220), False, 'import logging\n'), ((5561, 5629), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', "dataloaders['val']", 'metrics', 'params', '"""Val"""'], {}), "(model, loss_fn, dataloaders['val'], metrics, params, 'Val')\n", (5569, 5629), False, 'from evaluate import evaluate\n'), ((5784, 5854), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', "dataloaders['test']", 'metrics', 'params', '"""Test"""'], {}), "(model, loss_fn, dataloaders['test'], metrics, params, 'Test')\n", (5792, 5854), False, 'from evaluate import evaluate\n'), ((7453, 7480), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(230)'], {}), '(230)\n', (7475, 7480), False, 'import torch\n'), ((7513, 7555), 'os.path.join', 'os.path.join', (['args.model_dir', '"""_train.log"""'], {}), "(args.model_dir, '_train.log')\n", (7525, 7555), False, 'import os\n'), ((7578, 7620), 'os.path.join', 'os.path.join', (['args.model_dir', '"""_train.log"""'], {}), "(args.model_dir, '_train.log')\n", (7590, 7620), False, 'import os\n'), ((6337, 6380), 'logging.info', 'logging.info', (['"""- Found new best val result"""'], {}), "('- Found new best val result')\n", (6349, 6380), False, 'import logging\n'), ((6512, 6568), 'os.path.join', 'os.path.join', 
(['model_dir', '"""metrics_val_best_weights.json"""'], {}), "(model_dir, 'metrics_val_best_weights.json')\n", (6524, 6568), False, 'import os\n'), ((6581, 6633), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'best_json_path'], {}), '(val_metrics, best_json_path)\n', (6604, 6633), False, 'import utils\n'), ((6672, 6716), 'logging.info', 'logging.info', (['"""- Found new best test result"""'], {}), "('- Found new best test result')\n", (6684, 6716), False, 'import logging\n'), ((6850, 6907), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_test_best_weights.json"""'], {}), "(model_dir, 'metrics_test_best_weights.json')\n", (6862, 6907), False, 'import os\n'), ((6920, 6973), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['test_metrics', 'best_json_path'], {}), '(test_metrics, best_json_path)\n', (6943, 6973), False, 'import utils\n'), ((2121, 2142), 'torch.autograd.Variable', 'Variable', (['train_batch'], {}), '(train_batch)\n', (2129, 2142), False, 'from torch.autograd import Variable\n'), ((2144, 2166), 'torch.autograd.Variable', 'Variable', (['labels_batch'], {}), '(labels_batch)\n', (2152, 2166), False, 'from torch.autograd import Variable\n'), ((8555, 8574), 'os.listdir', 'os.listdir', (['args.md'], {}), '(args.md)\n', (8565, 8574), False, 'import os\n'), ((8597, 8621), 'os.path.join', 'os.path.join', (['args.md', 'f'], {}), '(args.md, f)\n', (8609, 8621), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
last mod 7/2/18
what are last two features in MonoGRnet output?
usage for new detector:
first disable metrics check
min_sensor_prob to <<0
use simple scoreToProb
use the plots to figure out a good scoreToProb function
then you can run metrics check
current avg precisions:
.5 iou -- .855, .783, .774 --> .863, .834, .837
.7 iou -- .538, .597, .619
n ground truths: 2608 easy, 6890 moderate, 8307 hard?
monogrnet at .3 IoU:: .815, .72, .653
voxeljones at .3: .91, .79, .76
"""
import numpy as np
min_sensor_prob_to_report = 0.  # .05
#dataformat = '/home/m2/Data/kitti/estimates/detectionsBT630/{:02d}f{:04d}.npy'
dataformat = '/home/m2/Data/kitti/estimates/detectionsMGR/{:02d}f{:04d}.npy'
#def scoreToProb(score): return score ### default before you've checked performance
def scoreToProb(score): # monogrnet
out = np.where(score < .7, score*.2/.7+.05, score*.75/.3 + 1-.75/.3)
return np.maximum(np.minimum(out, .995), .005)
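# quick sanity check of the piecewise mapping above (worked by hand):
# scoreToProb(.3) is about .136 on the low branch and scoreToProb(.9) is .75
# on the high branch, before clipping to [.005, .995]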
def getMsmts(sceneidx, fileidx):
data = np.load(dataformat.format(sceneidx, fileidx))
if data.shape == (0,):
data = np.zeros((0,6))
data[data[:,2]>np.pi, 2] -= 2*np.pi
data[:,5] = scoreToProb(data[:,5])
data = data[data[:,5] > min_sensor_prob_to_report]
data = data[(data[:,0] < 57) & (data[:,0] > -3) & (abs(data[:,1])<48)]
# MonoGRnet returns some very unrealistic values for length and width
# easy to crop
data[:,3] = np.minimum(np.maximum(data[:,3], 1.45), 2.45)
data[:,4] = np.minimum(np.maximum(data[:,4], .65), 1.2)
return data
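# note: the crops above suggest the measurement columns are
# [x, y, heading, length, width, score]; this layout is inferred from the
# code, not documented upstream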
if __name__ == '__main__':
# analyze score distribution for true and false detections
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from sklearn.linear_model import LinearRegression
from evaluate import MetricAvgPrec, soMetricIoU
from analyzeGT import readGroundTruthFileTracking
from trackinginfo import sceneranges
gt_files = '/home/m2/Data/kitti/tracking_gt/{:04d}.txt'
scene_idxs = list(range(10))
scoresmatch = []
scorescrop = []
scoresmiss = []
nmissed = 0
nmissedcrop = 0
metric = MetricAvgPrec()
for scene_idx in scene_idxs:
startfileidx, endfileidx = sceneranges[scene_idx]
with open(gt_files.format(scene_idx), 'r') as fd: gtfilestr = fd.read()
gt_all, gtdontcares = readGroundTruthFileTracking(gtfilestr, ('Car', 'Van'))
selfposT = None # isn't actually used
for fileidx in range(startfileidx, endfileidx):
gt = gt_all[fileidx]
gtscored = np.array([gtobj['scored'] for gtobj in gt])
gtboxes = np.array([gtobj['box'] for gtobj in gt])
gtdiff = np.array([gtobj['difficulty'] for gtobj in gt])
msmts = getMsmts(scene_idx, fileidx)
ngt = gtscored.shape[0]
nmsmts = msmts.shape[0]
matches = np.zeros((ngt, nmsmts))
for gtidx, msmtidx in np.ndindex(ngt, nmsmts):
gtbox = gtboxes[gtidx]
msmt = msmts[msmtidx]
##closeness = np.hypot(*(gtbox[:2]-msmt[:2])) * .4
##closeness += ((gtbox[2]-msmt[2]+np.pi)%(2*np.pi)-np.pi) * 1.
##closeness += np.hypot(*(gtbox[3:]-msmt[3:5])) * .3
##closeness -= 1
##closeness = np.hypot(*(gtbox[:2]-msmt[:2])) - 1.5
closeness = soMetricIoU(gtbox, msmt, cutoff=.1)
closeness2 = np.hypot(gtbox[0]-msmt[0],gtbox[1]-msmt[1])-3.
closeness = np.minimum(closeness, closeness2)
matches[gtidx, msmtidx] = min(closeness, 0)
            matchesnonmiss = matches < 0
            matches[:] = -msmts[:, 5]  # cost: favor higher-score measurements
            matches[~matchesnonmiss] = 0
rowpairs, colpairs = linear_sum_assignment(matches)
msmtsmissed = np.ones(nmsmts, dtype=bool)
for rowidx, colidx in zip(rowpairs, colpairs):
nonmiss = matchesnonmiss[rowidx, colidx]
noncrop = gtscored[rowidx]
if nonmiss:
msmtsmissed[colidx] = False
if noncrop:
scoresmatch.append(msmts[colidx,5])
else:
scorescrop.append(msmts[colidx,5])
else:
nmissed += 1
if noncrop:
nmissedcrop += 1
for msmtidx in range(nmsmts):
if msmtsmissed[msmtidx]:
scoresmiss.append(msmts[msmtidx,5])
metric.add(gtboxes, gtscored, gtdiff, msmts[:,:5], msmts[:,5])
scoresmatch.sort()
scorescrop.sort()
scoresmiss.sort()
nmatches = len(scoresmatch)
nmisses = len(scoresmiss)
relmatches = float(nmatches) / (nmatches + nmisses)
allscores = scoresmatch + scorescrop + scoresmiss
minscore = np.percentile(allscores, .5)
maxscore = np.percentile(allscores, 99.5)
scorearray = np.linspace(minscore, maxscore, 100)
kd = KernelDensity(bandwidth = (maxscore-minscore)/50, kernel='gaussian')
scoreT = kd.fit(np.array(scoresmatch)[:,None]).score_samples(
scorearray[:,None])
scoreT = np.exp(scoreT) * relmatches
scoreF = kd.fit(np.array(scoresmiss)[:,None]).score_samples(
scorearray[:,None])
scoreF = np.exp(scoreF) * (1-relmatches)
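    # Bayes' rule with KDE class-conditionals: ratio estimates
    # P(true | score) = p(s|T)P(T) / (p(s|T)P(T) + p(s|F)P(F))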
ratio = scoreT / np.maximum(scoreT + scoreF, 1e-8)
# fit a quadratic model to the ratio of true to false
X = np.column_stack((scorearray, scorearray**2))
lm = LinearRegression(fit_intercept=True, normalize=True).fit(X, ratio)
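    # note: LinearRegression's `normalize` flag was deprecated in scikit-learn
    # 1.0 and removed in 1.2; for plain least squares, dropping it gives the
    # same fitted curve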
coefs = (lm.intercept_, lm.coef_[0], lm.coef_[1])
print(coefs)
ests = coefs[0] + coefs[1]*scorearray + coefs[2]*scorearray**2
plt.plot(scorearray, ratio, 'b', scorearray, ests, 'g--',
scorearray, scorearray, 'y--')
avgprec = metric.calc()
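    # a minimal sketch (assumption, not part of the original pipeline): once
    # the quadratic fit looks good in the plot, its coefficients can be pasted
    # back into scoreToProb at the top of this file, e.g.
    #   def scoreToProb(score):
    #       out = coefs[0] + coefs[1]*score + coefs[2]*score**2
    #       return np.maximum(np.minimum(out, .995), .005)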
|
[
"evaluate.MetricAvgPrec",
"evaluate.soMetricIoU"
] |
[((903, 992), 'numpy.where', 'np.where', (['(score < 0.7)', '(score * 0.2 / 0.7 + 0.05)', '(score * 0.75 / 0.3 + 1 - 0.75 / 0.3)'], {}), '(score < 0.7, score * 0.2 / 0.7 + 0.05, score * 0.75 / 0.3 + 1 - \n 0.75 / 0.3)\n', (911, 992), True, 'import numpy as np\n'), ((2254, 2269), 'evaluate.MetricAvgPrec', 'MetricAvgPrec', ([], {}), '()\n', (2267, 2269), False, 'from evaluate import MetricAvgPrec, soMetricIoU\n'), ((5051, 5080), 'numpy.percentile', 'np.percentile', (['allscores', '(0.5)'], {}), '(allscores, 0.5)\n', (5064, 5080), True, 'import numpy as np\n'), ((5095, 5125), 'numpy.percentile', 'np.percentile', (['allscores', '(99.5)'], {}), '(allscores, 99.5)\n', (5108, 5125), True, 'import numpy as np\n'), ((5143, 5179), 'numpy.linspace', 'np.linspace', (['minscore', 'maxscore', '(100)'], {}), '(minscore, maxscore, 100)\n', (5154, 5179), True, 'import numpy as np\n'), ((5189, 5259), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': '((maxscore - minscore) / 50)', 'kernel': '"""gaussian"""'}), "(bandwidth=(maxscore - minscore) / 50, kernel='gaussian')\n", (5202, 5259), False, 'from sklearn.neighbors import KernelDensity\n'), ((5684, 5730), 'numpy.column_stack', 'np.column_stack', (['(scorearray, scorearray ** 2)'], {}), '((scorearray, scorearray ** 2))\n', (5699, 5730), True, 'import numpy as np\n'), ((5947, 6039), 'matplotlib.pyplot.plot', 'plt.plot', (['scorearray', 'ratio', '"""b"""', 'scorearray', 'ests', '"""g--"""', 'scorearray', 'scorearray', '"""y--"""'], {}), "(scorearray, ratio, 'b', scorearray, ests, 'g--', scorearray,\n scorearray, 'y--')\n", (5955, 6039), True, 'import matplotlib.pyplot as plt\n'), ((988, 1010), 'numpy.minimum', 'np.minimum', (['out', '(0.995)'], {}), '(out, 0.995)\n', (998, 1010), True, 'import numpy as np\n'), ((1150, 1166), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {}), '((0, 6))\n', (1158, 1166), True, 'import numpy as np\n'), ((1495, 1523), 'numpy.maximum', 'np.maximum', (['data[:, 3]', '(1.45)'], {}), '(data[:, 3], 1.45)\n', (1505, 1523), True, 'import numpy as np\n'), ((1557, 1585), 'numpy.maximum', 'np.maximum', (['data[:, 4]', '(0.65)'], {}), '(data[:, 4], 0.65)\n', (1567, 1585), True, 'import numpy as np\n'), ((2485, 2539), 'analyzeGT.readGroundTruthFileTracking', 'readGroundTruthFileTracking', (['gtfilestr', "('Car', 'Van')"], {}), "(gtfilestr, ('Car', 'Van'))\n", (2512, 2539), False, 'from analyzeGT import readGroundTruthFileTracking\n'), ((5381, 5395), 'numpy.exp', 'np.exp', (['scoreT'], {}), '(scoreT)\n', (5387, 5395), True, 'import numpy as np\n'), ((5531, 5545), 'numpy.exp', 'np.exp', (['scoreF'], {}), '(scoreF)\n', (5537, 5545), True, 'import numpy as np\n'), ((5584, 5618), 'numpy.maximum', 'np.maximum', (['(scoreT + scoreF)', '(1e-08)'], {}), '(scoreT + scoreF, 1e-08)\n', (5594, 5618), True, 'import numpy as np\n'), ((2707, 2750), 'numpy.array', 'np.array', (["[gtobj['scored'] for gtobj in gt]"], {}), "([gtobj['scored'] for gtobj in gt])\n", (2715, 2750), True, 'import numpy as np\n'), ((2773, 2813), 'numpy.array', 'np.array', (["[gtobj['box'] for gtobj in gt]"], {}), "([gtobj['box'] for gtobj in gt])\n", (2781, 2813), True, 'import numpy as np\n'), ((2835, 2882), 'numpy.array', 'np.array', (["[gtobj['difficulty'] for gtobj in gt]"], {}), "([gtobj['difficulty'] for gtobj in gt])\n", (2843, 2882), True, 'import numpy as np\n'), ((3039, 3062), 'numpy.zeros', 'np.zeros', (['(ngt, nmsmts)'], {}), '((ngt, nmsmts))\n', (3047, 3062), True, 'import numpy as np\n'), ((3097, 3120), 'numpy.ndindex', 'np.ndindex', (['ngt', 'nmsmts'], 
{}), '(ngt, nmsmts)\n', (3107, 3120), True, 'import numpy as np\n'), ((3932, 3962), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['matches'], {}), '(matches)\n', (3953, 3962), False, 'from scipy.optimize import linear_sum_assignment\n'), ((3989, 4016), 'numpy.ones', 'np.ones', (['nmsmts'], {'dtype': 'bool'}), '(nmsmts, dtype=bool)\n', (3996, 4016), True, 'import numpy as np\n'), ((5738, 5790), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)', 'normalize': '(True)'}), '(fit_intercept=True, normalize=True)\n', (5754, 5790), False, 'from sklearn.linear_model import LinearRegression\n'), ((3543, 3579), 'evaluate.soMetricIoU', 'soMetricIoU', (['gtbox', 'msmt'], {'cutoff': '(0.1)'}), '(gtbox, msmt, cutoff=0.1)\n', (3554, 3579), False, 'from evaluate import MetricAvgPrec, soMetricIoU\n'), ((3683, 3716), 'numpy.minimum', 'np.minimum', (['closeness', 'closeness2'], {}), '(closeness, closeness2)\n', (3693, 3716), True, 'import numpy as np\n'), ((3608, 3656), 'numpy.hypot', 'np.hypot', (['(gtbox[0] - msmt[0])', '(gtbox[1] - msmt[1])'], {}), '(gtbox[0] - msmt[0], gtbox[1] - msmt[1])\n', (3616, 3656), True, 'import numpy as np\n'), ((5278, 5299), 'numpy.array', 'np.array', (['scoresmatch'], {}), '(scoresmatch)\n', (5286, 5299), True, 'import numpy as np\n'), ((5429, 5449), 'numpy.array', 'np.array', (['scoresmiss'], {}), '(scoresmiss)\n', (5437, 5449), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import json
from models.StyleSpeech import StyleSpeech
from dataloader import prepare_dataloader
from optimizer import ScheduledOptim
from evaluate import evaluate
import utils
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path)
if 'model' in checkpoint_dict:
model.load_state_dict(checkpoint_dict['model'])
print('Model is loaded!')
if 'optimizer' in checkpoint_dict:
optimizer.load_state_dict(checkpoint_dict['optimizer'])
print('Optimizer is loaded!')
current_step = checkpoint_dict['step'] + 1
return model, optimizer, current_step
def main(args, c):
# Define model
model = StyleSpeech(c).cuda()
print("StyleSpeech Has Been Defined")
num_param = utils.get_param_num(model)
print('Number of StyleSpeech Parameters:', num_param)
with open(os.path.join(args.save_path, "model.txt"), "w") as f_log:
f_log.write(str(model))
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), betas=c.betas, eps=c.eps)
# Loss
Loss = model.get_criterion()
print("Optimizer and Loss Function Defined.")
# Get dataset
data_loader = prepare_dataloader(args.data_path, "train.txt", shuffle=True, batch_size=c.batch_size)
print("Data Loader is Prepared.")
# Load checkpoint if exists
if args.checkpoint_path is not None:
assert os.path.exists(args.checkpoint_path)
        model, optimizer, current_step = load_checkpoint(args.checkpoint_path, model, optimizer)
print("\n---Model Restored at Step {}---\n".format(current_step))
else:
print("\n---Start New Training---\n")
current_step = 0
checkpoint_path = os.path.join(args.save_path, 'ckpt')
os.makedirs(checkpoint_path, exist_ok=True)
# Scheduled optimizer
scheduled_optim = ScheduledOptim(optimizer, c.decoder_hidden, c.n_warm_up_step, current_step)
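    # ScheduledOptim presumably implements the Transformer-style schedule:
    # lr ramps up for n_warm_up_step steps, then decays with the step count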
# Init logger
log_path = os.path.join(args.save_path, 'log')
logger = SummaryWriter(os.path.join(log_path, 'board'))
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write("Dataset :{}\n Number of Parameters: {}\n".format(c.dataset, num_param))
# Init synthesis directory
synth_path = os.path.join(args.save_path, 'synth')
os.makedirs(synth_path, exist_ok=True)
# Training
model.train()
while current_step < args.max_iter:
# Get Training Loader
for idx, batch in enumerate(data_loader):
if current_step == args.max_iter:
break
# Get Data
sid, text, mel_target, D, log_D, f0, energy, \
src_len, mel_len, max_src_len, max_mel_len = model.parse_batch(batch)
# Forward
scheduled_optim.zero_grad()
mel_output, src_output, style_vector, log_duration_output, f0_output, energy_output, src_mask, mel_mask, _ = model(
text, src_len, mel_target, mel_len, D, f0, energy, max_src_len, max_mel_len)
mel_loss, d_loss, f_loss, e_loss = Loss(mel_output, mel_target,
log_duration_output, log_D, f0_output, f0, energy_output, energy, src_len, mel_len)
# Total loss
total_loss = mel_loss + d_loss + f_loss + e_loss
# Backward
total_loss.backward()
# Clipping gradients to avoid gradient explosion
nn.utils.clip_grad_norm_(model.parameters(), c.grad_clip_thresh)
# Update weights
scheduled_optim.step_and_update_lr()
# Print log
if current_step % args.log_step == 0 and current_step != 0:
t_l = total_loss.item()
m_l = mel_loss.item()
d_l = d_loss.item()
f_l = f_loss.item()
e_l = e_loss.item()
str1 = "Step [{}/{}]:".format(current_step, args.max_iter)
str2 = "Total Loss: {:.4f}\nMel Loss: {:.4f},\n" \
"Duration Loss: {:.4f}, F0 Loss: {:.4f}, Energy Loss: {:.4f} ;" \
.format(t_l, m_l, d_l, f_l, e_l)
print(str1 + "\n" + str2 +"\n")
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write(str1 + "\n" + str2 +"\n")
logger.add_scalar('Train/total_loss', t_l, current_step)
logger.add_scalar('Train/mel_loss', m_l, current_step)
logger.add_scalar('Train/duration_loss', d_l, current_step)
logger.add_scalar('Train/f0_loss', f_l, current_step)
logger.add_scalar('Train/energy_loss', e_l, current_step)
# Save Checkpoint
if current_step % args.save_step == 0 and current_step != 0:
torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'step': current_step},
os.path.join(checkpoint_path, 'checkpoint_{}.pth.tar'.format(current_step)))
print("*** Save Checkpoint ***")
print("Save model at step {}...\n".format(current_step))
if current_step % args.synth_step == 0 and current_step != 0:
length = mel_len[0].item()
mel_target = mel_target[0, :length].detach().cpu().transpose(0, 1)
mel = mel_output[0, :length].detach().cpu().transpose(0, 1)
# plotting
utils.plot_data([mel.numpy(), mel_target.numpy()],
['Synthesized Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(synth_path, 'step_{}.png'.format(current_step)))
print("Synth spectrograms at step {}...\n".format(current_step))
if current_step % args.eval_step == 0 and current_step != 0:
model.eval()
with torch.no_grad():
m_l, d_l, f_l, e_l = evaluate(args, model, current_step)
str_v = "*** Validation ***\n" \
"StyleSpeech Step {},\n" \
"Mel Loss: {}\nDuration Loss:{}\nF0 Loss: {}\nEnergy Loss: {}" \
.format(current_step, m_l, d_l, f_l, e_l)
print(str_v + "\n" )
with open(os.path.join(log_path, "eval.txt"), "a") as f_log:
f_log.write(str_v + "\n")
logger.add_scalar('Validation/mel_loss', m_l, current_step)
logger.add_scalar('Validation/duration_loss', d_l, current_step)
logger.add_scalar('Validation/f0_loss', f_l, current_step)
logger.add_scalar('Validation/energy_loss', e_l, current_step)
model.train()
current_step += 1
print("Training Done at Step : {}".format(current_step))
torch.save({'model': model.state_dict(), 'optimizer': scheduled_optim.state_dict(), 'step': current_step},
os.path.join(checkpoint_path, 'checkpoint_last_{}.pth.tar'.format(current_step)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='dataset/LibriTTS/preprocessed')
parser.add_argument('--save_path', default='exp_stylespeech')
parser.add_argument('--config', default='configs/config.json')
parser.add_argument('--max_iter', default=100000, type=int)
parser.add_argument('--save_step', default=5000, type=int)
parser.add_argument('--synth_step', default=1000, type=int)
parser.add_argument('--eval_step', default=5000, type=int)
parser.add_argument('--log_step', default=100, type=int)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the pretrained model')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
with open(args.config) as f:
data = f.read()
json_config = json.loads(data)
config = utils.AttrDict(json_config)
utils.build_env(args.config, 'config.json', args.save_path)
main(args, config)
|
[
"evaluate.evaluate"
] |
[((369, 400), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (383, 400), False, 'import os\n'), ((498, 525), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (508, 525), False, 'import torch\n'), ((1030, 1056), 'utils.get_param_num', 'utils.get_param_num', (['model'], {}), '(model)\n', (1049, 1056), False, 'import utils\n'), ((1458, 1549), 'dataloader.prepare_dataloader', 'prepare_dataloader', (['args.data_path', '"""train.txt"""'], {'shuffle': '(True)', 'batch_size': 'c.batch_size'}), "(args.data_path, 'train.txt', shuffle=True, batch_size=c.\n batch_size)\n", (1476, 1549), False, 'from dataloader import prepare_dataloader\n'), ((1994, 2030), 'os.path.join', 'os.path.join', (['args.save_path', '"""ckpt"""'], {}), "(args.save_path, 'ckpt')\n", (2006, 2030), False, 'import os\n'), ((2036, 2079), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {'exist_ok': '(True)'}), '(checkpoint_path, exist_ok=True)\n', (2047, 2079), False, 'import os\n'), ((2136, 2211), 'optimizer.ScheduledOptim', 'ScheduledOptim', (['optimizer', 'c.decoder_hidden', 'c.n_warm_up_step', 'current_step'], {}), '(optimizer, c.decoder_hidden, c.n_warm_up_step, current_step)\n', (2150, 2211), False, 'from optimizer import ScheduledOptim\n'), ((2249, 2284), 'os.path.join', 'os.path.join', (['args.save_path', '"""log"""'], {}), "(args.save_path, 'log')\n", (2261, 2284), False, 'import os\n'), ((2557, 2594), 'os.path.join', 'os.path.join', (['args.save_path', '"""synth"""'], {}), "(args.save_path, 'synth')\n", (2569, 2594), False, 'import os\n'), ((2600, 2638), 'os.makedirs', 'os.makedirs', (['synth_path'], {'exist_ok': '(True)'}), '(synth_path, exist_ok=True)\n', (2611, 2638), False, 'import os\n'), ((7555, 7580), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7578, 7580), False, 'import argparse\n'), ((8382, 8398), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (8392, 8398), False, 'import json\n'), ((8413, 8440), 'utils.AttrDict', 'utils.AttrDict', (['json_config'], {}), '(json_config)\n', (8427, 8440), False, 'import utils\n'), ((8446, 8505), 'utils.build_env', 'utils.build_env', (['args.config', '"""config.json"""', 'args.save_path'], {}), "(args.config, 'config.json', args.save_path)\n", (8461, 8505), False, 'import utils\n'), ((1678, 1714), 'os.path.exists', 'os.path.exists', (['args.checkpoint_path'], {}), '(args.checkpoint_path)\n', (1692, 1714), False, 'import os\n'), ((2313, 2344), 'os.path.join', 'os.path.join', (['log_path', '"""board"""'], {}), "(log_path, 'board')\n", (2325, 2344), False, 'import os\n'), ((948, 962), 'models.StyleSpeech.StyleSpeech', 'StyleSpeech', (['c'], {}), '(c)\n', (959, 962), False, 'from models.StyleSpeech import StyleSpeech\n'), ((1131, 1172), 'os.path.join', 'os.path.join', (['args.save_path', '"""model.txt"""'], {}), "(args.save_path, 'model.txt')\n", (1143, 1172), False, 'import os\n'), ((2361, 2394), 'os.path.join', 'os.path.join', (['log_path', '"""log.txt"""'], {}), "(log_path, 'log.txt')\n", (2373, 2394), False, 'import os\n'), ((6292, 6307), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6305, 6307), False, 'import torch\n'), ((6351, 6386), 'evaluate.evaluate', 'evaluate', (['args', 'model', 'current_step'], {}), '(args, model, current_step)\n', (6359, 6386), False, 'from evaluate import evaluate\n'), ((4602, 4635), 'os.path.join', 'os.path.join', (['log_path', '"""log.txt"""'], {}), "(log_path, 'log.txt')\n", (4614, 4635), False, 'import os\n'), ((6735, 6769), 
'os.path.join', 'os.path.join', (['log_path', '"""eval.txt"""'], {}), "(log_path, 'eval.txt')\n", (6747, 6769), False, 'import os\n')]
|
import os
import logging
import time
import random
import re
import json
from copy import deepcopy
import numpy as np
import torch
from torch.optim import Adam
from tqdm import tqdm
from transformers import BertTokenizer
from dora import DORA
from config import Config
from reader import Reader
import ontology
from db import DB
from evaluate import MultiWozEvaluator
def init_process(config):
logger = logging.getLogger("DORA")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
torch.cuda.set_device(config.cuda_device)
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if not os.path.exists("save"):
os.mkdir("save")
save_path = "save/model_{}.pt".format(re.sub("\s+", "_", time.asctime()))
    db = DB(config.db_path) # not used right now, but kept for end-to-end
reader = Reader(db, config)
start = time.time()
logger.info("Loading data...")
reader.load_data("train")
end = time.time()
logger.info("Loaded. {} secs".format(end-start))
evaluator = MultiWozEvaluator("valid", config.data_path, config.db_path, config.assets_path)
lr = config.lr
    model = DORA(db, config).cuda() # this needs to be changed again..
optimizer = Adam(model.parameters(), lr=lr)
global_epoch = 0
max_score = 0
early_stop_count = config.early_stop_count
# load saved model, optimizer
if config.save_path is not None:
global_epoch, max_score = load(model, optimizer, config.save_path, config.cuda_device)
train.max_iter = len(list(reader.make_batch(reader.train)))
validate.max_iter = len(list(reader.make_batch(reader.dev)))
# gate_acc, joint_acc, slot_acc, domain_acc, inform_rate, success_rate, bleu_score = validate(model, reader, evaluator, config)
# logger.info("accuracy(domain/gate/joint/slot): {:.2f}, {:.2f}, {:.2f}, {:.2f}, inform: {:.2f}, success: {:.2f}, bleu: {:.2f}"\
# .format(domain_acc, gate_acc, joint_acc, slot_acc, inform_rate, success_rate, bleu_score))
for epoch in range(global_epoch, global_epoch + config.max_epochs):
logger.info("Train...")
start = time.time()
train(model, reader, optimizer, config)
end = time.time()
logger.info("epoch: {}, {:.4f} secs".format(epoch+1, end-start))
logger.info("Validate...")
gate_acc, joint_acc, slot_acc, domain_acc, inform_rate, success_rate, bleu_score = validate(model, reader, evaluator, config)
logger.info("accuracy(domain/gate/joint/slot): {:.2f}, {:.2f}, {:.2f}, {:.2f}, inform: {:.2f}, success: {:.2f}, bleu: {:.2f}"\
.format(domain_acc, gate_acc, joint_acc, slot_acc, inform_rate, success_rate, bleu_score))
score = inform_rate + success_rate + bleu_score
if score > max_score: # save model
save(model, optimizer, save_path, epoch, score)
logger.info("Saved to {}.".format(os.path.abspath(save_path)))
max_score = score
early_stop_count = config.early_stop_count
        else: # early stopping
if early_stop_count == 0:
if epoch < config.min_epochs:
early_stop_count += 1
logger.info("Too early to stop training.")
logger.info("early stop count: {}".format(early_stop_count))
else:
logger.info("Early stopped.")
break
elif early_stop_count == 2:
lr = lr / 2
logger.info("learning rate schedule: {}".format(lr))
for param in optimizer.param_groups:
param["lr"] = lr
early_stop_count -= 1
logger.info("early stop count: {}".format(early_stop_count))
logger.info("Training finished.")
def train(model, reader, optimizer, config):
iterator = reader.make_batch(reader.train)
t = tqdm(enumerate(iterator), total=train.max_iter, ncols=250, position=0, leave=True, dynamic_ncols=config.dynamic_tqdm)
for batch_idx, batch in t:
inputs, contexts, segments, dial_ids = reader.make_input(batch, mode="train")
batch_size = len(contexts[0])
turns = len(inputs)
gate_loss = 0
value_loss = 0
action_loss = 0
response_loss = 0
slot_acc = 0
joint_acc = 0
gate_acc = 0
domain_loss = 0
domain_acc = 0
batch_count = 0 # number of batches
for turn_idx in range(turns):
context_len = contexts[turn_idx].size(1)
model.zero_grad()
domain_loss_, domain_acc_, gate_loss_, gate_acc_, value_loss_, belief_acc, action_loss_, response_loss_ = \
model(inputs[turn_idx], contexts[turn_idx], segments[turn_idx])
loss = domain_loss_ + gate_loss_ + value_loss_ + action_loss_ + response_loss_
domain_loss += domain_loss_ * batch_size
domain_acc += domain_acc_ * batch_size
gate_loss += gate_loss_ * batch_size
gate_acc += gate_acc_ * batch_size
value_loss += value_loss_ * batch_size
slot_acc += belief_acc.sum(dim=1).sum(dim=0)
joint_acc += (belief_acc.mean(dim=1) == 1).sum(dim=0).float()
action_loss += action_loss_ * batch_size
response_loss += response_loss_ * batch_size
batch_count += batch_size
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.gradient_clipping)
optimizer.step()
domain_loss = domain_loss / batch_count
domain_acc = domain_acc / batch_count * 100
gate_loss = gate_loss / batch_count
gate_acc = gate_acc / batch_count * 100
value_loss = value_loss / batch_count
action_loss = action_loss / batch_count
response_loss = response_loss / batch_count
slot_acc = slot_acc / batch_count / len(ontology.all_info_slots) * 100
joint_acc = joint_acc / batch_count * 100
t.set_description("iter: {}, loss(domain/gate/value/action/response): {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, "
"accuracy(domain/gate/joint/slot): {:.2f}, {:.2f}, {:.2f}, {:.2f}"
.format(batch_idx+1, domain_loss.item(), gate_loss.item(), value_loss.item(), action_loss.item(), response_loss.item(), \
domain_acc.item(), gate_acc.item(), joint_acc.item(), slot_acc.item()))
time.sleep(1)
del domain_loss, domain_acc, gate_loss, gate_acc, value_loss, action_loss, response_loss, slot_acc, joint_acc
torch.cuda.empty_cache()
def validate(model, reader, evaluator, config):
model.eval()
val_loss = 0
slot_acc = 0
joint_acc = 0
batch_count = 0
gate_acc = 0
domain_acc = 0
with open(os.path.join(config.assets_path, "never_split.txt"), "r") as f:
never_split = f.read().split("\n")
tokenizer = BertTokenizer(os.path.join(config.assets_path, "vocab.txt"), never_split=never_split)
val_dial_gens = {}
val_dial_gens_decoded = {}
with torch.no_grad():
iterator = reader.make_batch(reader.dev)
t = tqdm(enumerate(iterator), total=validate.max_iter, ncols=150, position=0, leave=True, dynamic_ncols=config.dynamic_tqdm)
for batch_idx, batch in t:
# don't shuffle slot order nor use true previous domain state and belief
inputs, contexts, segments, dial_ids = reader.make_input(batch, mode="test")
batch_size = len(contexts[0])
turns = len(inputs)
# all slot-values are none
inputs[0]["prev_belief"] = inputs[0]["prev_belief"].tolist()
# for evaluation
dial_gens = [[] for i in range(batch_size)]
dial_gens_decoded = [[] for i in range(batch_size)]
belief_gens = [[] for i in range(batch_size)]
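            # rebuild each turn's BERT context from the raw dialogue plus the
            # *generated* domain state and belief of the previous turn, so
            # validation never sees ground-truth state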
for turn_idx in range(turns):
turn_context = torch.zeros(batch_size, config.max_context_len, dtype=torch.int64).cuda()
turn_segment = torch.zeros(batch_size, config.max_context_len, dtype=torch.int64).cuda()
max_context_len = 0
for idx in range(len(contexts[turn_idx])):
turn_context_ = contexts[turn_idx][idx].tolist()
turn_segment_ = segments[turn_idx][idx].tolist()
                    try:
                        turn_context_ = turn_context_[:turn_context_.index(config.pad_idx)]
                    except ValueError:  # context has no padding to strip
                        pass
turn_segment_ = turn_segment_[:len(turn_context_)]
# add previous domain state to context
domain_state = []
prev_domain_state = inputs[turn_idx]["prev_domain_state"]
for domain_idx, domain in enumerate(ontology.all_domains):
domain_state.append("[DOMAIN]")
domain_state.append("[{}]".format(domain))
if prev_domain_state[idx, domain_idx] == 1:
domain_state.append("[ON]")
else:
domain_state.append("[OFF]")
domain_state = " ".join(domain_state)
turn_context_ += tokenizer.encode(domain_state, add_special_tokens=False)
# add previous belief state to context
for slot_idx in range(len(ontology.all_info_slots)):
slot = ontology.all_info_slots[slot_idx]
domain, slot = slot.split("-")
slot = "[{}] - {}".format(domain, slot)
value = inputs[turn_idx]["prev_belief"][idx][slot_idx]
if config.slot_idx in value:
value = tokenizer.encode("none")[1:]
turn_context_ += tokenizer.convert_tokens_to_ids(["[SLOT]"])
turn_context_ += tokenizer.encode(slot, add_special_tokens=False)
turn_context_ += tokenizer.convert_tokens_to_ids(["-"])
turn_context_ += value[:-1] # except [EOS]
turn_context_.append(tokenizer.sep_token_id) # [SEP]
context_len = len(turn_context_)
max_context_len = max(max_context_len, context_len)
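                    # if the context overflows, keep token 0 ([CLS]) plus the
                    # most recent max_context_len-1 tokens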
turn_context[idx, :context_len] = torch.tensor(turn_context_[:1] + turn_context_[-(min(context_len, config.max_context_len)-1):])
turn_segment[idx, :len(turn_segment_)] = torch.tensor(turn_segment_)
turn_context = turn_context[:, :max_context_len]
turn_segment = turn_segment[:, :max_context_len]
domain_acc_, gate_acc_, belief_acc, domain_state, belief_gen, action_gen, response_gen = \
model.forward(inputs[turn_idx], turn_context, turn_segment, "val", config.postprocessing)
if turn_idx < turns-1:
inputs[turn_idx+1]["prev_belief"] = deepcopy(belief_gen) # generated belief, not ground truth
inputs[turn_idx+1]["prev_domain_state"] = domain_state
domain_acc += domain_acc_ * batch_size
gate_acc += gate_acc_ * batch_size
slot_acc += belief_acc.sum(dim=1).sum(dim=0)
joint_acc += (belief_acc.mean(dim=1) == 1).sum(dim=0).float()
batch_count += batch_size
torch.cuda.empty_cache()
# for evaluation
response_gens = [response[:-1] for response in response_gen]
response_gens_decoded = [tokenizer.decode(response[:-1]) for response in response_gen]
for b_idx, belief in enumerate(belief_gen):
belief_gen[b_idx] = [tokenizer.decode(value[:-1]) for value in belief]
for b_idx in range(batch_size):
dial_gens[b_idx].append(response_gens[b_idx])
dial_gens_decoded[b_idx].append(response_gens_decoded[b_idx])
belief = {}
for slot_idx, slot in enumerate(ontology.all_info_slots):
belief[slot] = belief_gen[b_idx][slot_idx]
belief_gens[b_idx].append(belief)
t.set_description("iter: {}".format(batch_idx+1))
time.sleep(1)
for b_idx in range(batch_size):
dial_id = dial_ids[b_idx]
dial_id = "{}.json".format(dial_id)
val_dial_gens[dial_id] = dial_gens[b_idx]
val_dial_gens_decoded[dial_id] = {}
val_dial_gens_decoded[dial_id]["response"] = dial_gens_decoded[b_idx]
val_dial_gens_decoded[dial_id]["belief_state"] = belief_gens[b_idx]
model.train()
gate_acc = gate_acc / batch_count * 100
domain_acc = domain_acc / batch_count * 100
slot_acc = slot_acc / batch_count / len(ontology.all_info_slots) * 100
joint_acc = joint_acc / batch_count * 100
    torch.cuda.empty_cache()
val_dial = json.load(open(os.path.join(config.data_path, "dev_data.json"), "r"))
_, inform_rate, success_rate, bleu = evaluator.evaluateModel(val_dial_gens_decoded, val_dial_gens, val_dial, mode='valid')
return gate_acc.item(), joint_acc.item(), slot_acc.item(), domain_acc.item(), inform_rate, success_rate, bleu
def save(model, optimizer, save_path, epoch, score):
checkpoint = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"score": score
}
torch.save(checkpoint, save_path)
def load(model, optimizer, save_path, cuda_device):
checkpoint = torch.load(save_path, map_location = lambda storage, loc: storage.cuda(cuda_device))
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
return checkpoint["epoch"], checkpoint["score"]
if __name__ == "__main__":
config = Config()
parser = config.parser
config = parser.parse_args()
init_process(config)
|
[
"evaluate.MultiWozEvaluator"
] |
[((411, 436), 'logging.getLogger', 'logging.getLogger', (['"""DORA"""'], {}), "('DORA')\n", (428, 436), False, 'import logging\n'), ((492, 515), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (513, 515), False, 'import logging\n'), ((563, 604), 'torch.cuda.set_device', 'torch.cuda.set_device', (['config.cuda_device'], {}), '(config.cuda_device)\n', (584, 604), False, 'import torch\n'), ((610, 634), 'random.seed', 'random.seed', (['config.seed'], {}), '(config.seed)\n', (621, 634), False, 'import random\n'), ((639, 666), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (653, 666), True, 'import numpy as np\n'), ((671, 701), 'torch.manual_seed', 'torch.manual_seed', (['config.seed'], {}), '(config.seed)\n', (688, 701), False, 'import torch\n'), ((706, 741), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['config.seed'], {}), '(config.seed)\n', (728, 741), False, 'import torch\n'), ((746, 785), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['config.seed'], {}), '(config.seed)\n', (772, 785), False, 'import torch\n'), ((1028, 1046), 'db.DB', 'DB', (['config.db_path'], {}), '(config.db_path)\n', (1030, 1046), False, 'from db import DB\n'), ((1091, 1109), 'reader.Reader', 'Reader', (['db', 'config'], {}), '(db, config)\n', (1097, 1109), False, 'from reader import Reader\n'), ((1122, 1133), 'time.time', 'time.time', ([], {}), '()\n', (1131, 1133), False, 'import time\n'), ((1209, 1220), 'time.time', 'time.time', ([], {}), '()\n', (1218, 1220), False, 'import time\n'), ((1291, 1376), 'evaluate.MultiWozEvaluator', 'MultiWozEvaluator', (['"""valid"""', 'config.data_path', 'config.db_path', 'config.assets_path'], {}), "('valid', config.data_path, config.db_path, config.assets_path\n )\n", (1308, 1376), False, 'from evaluate import MultiWozEvaluator\n'), ((13403, 13427), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13425, 13427), False, 'import torch\n'), ((13969, 14002), 'torch.save', 'torch.save', (['checkpoint', 'save_path'], {}), '(checkpoint, save_path)\n', (13979, 14002), False, 'import torch\n'), ((14354, 14362), 'config.Config', 'Config', ([], {}), '()\n', (14360, 14362), False, 'from config import Config\n'), ((891, 913), 'os.path.exists', 'os.path.exists', (['"""save"""'], {}), "('save')\n", (905, 913), False, 'import os\n'), ((923, 939), 'os.mkdir', 'os.mkdir', (['"""save"""'], {}), "('save')\n", (931, 939), False, 'import os\n'), ((2364, 2375), 'time.time', 'time.time', ([], {}), '()\n', (2373, 2375), False, 'import time\n'), ((2448, 2459), 'time.time', 'time.time', ([], {}), '()\n', (2457, 2459), False, 'import time\n'), ((6694, 6707), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6704, 6707), False, 'import time\n'), ((6843, 6867), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6865, 6867), False, 'import torch\n'), ((7194, 7239), 'os.path.join', 'os.path.join', (['config.assets_path', '"""vocab.txt"""'], {}), "(config.assets_path, 'vocab.txt')\n", (7206, 7239), False, 'import os\n'), ((7331, 7346), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7344, 7346), False, 'import torch\n'), ((1001, 1015), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1013, 1015), False, 'import time\n'), ((1405, 1421), 'dora.DORA', 'DORA', (['db', 'config'], {}), '(db, config)\n', (1409, 1421), False, 'from dora import DORA\n'), ((7057, 7108), 'os.path.join', 'os.path.join', (['config.assets_path', '"""never_split.txt"""'], {}), "(config.assets_path, 
'never_split.txt')\n", (7069, 7108), False, 'import os\n'), ((12683, 12696), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12693, 12696), False, 'import time\n'), ((13459, 13506), 'os.path.join', 'os.path.join', (['config.data_path', '"""dev_data.json"""'], {}), "(config.data_path, 'dev_data.json')\n", (13471, 13506), False, 'import os\n'), ((11791, 11815), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11813, 11815), False, 'import torch\n'), ((3148, 3174), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (3163, 3174), False, 'import os\n'), ((10880, 10907), 'torch.tensor', 'torch.tensor', (['turn_segment_'], {}), '(turn_segment_)\n', (10892, 10907), False, 'import torch\n'), ((11352, 11372), 'copy.deepcopy', 'deepcopy', (['belief_gen'], {}), '(belief_gen)\n', (11360, 11372), False, 'from copy import deepcopy\n'), ((8210, 8276), 'torch.zeros', 'torch.zeros', (['batch_size', 'config.max_context_len'], {'dtype': 'torch.int64'}), '(batch_size, config.max_context_len, dtype=torch.int64)\n', (8221, 8276), False, 'import torch\n'), ((8315, 8381), 'torch.zeros', 'torch.zeros', (['batch_size', 'config.max_context_len'], {'dtype': 'torch.int64'}), '(batch_size, config.max_context_len, dtype=torch.int64)\n', (8326, 8381), False, 'import torch\n')]
|
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, \
    recall_score, confusion_matrix, classification_report, \
    accuracy_score, f1_score
from sklearn.utils.class_weight import compute_class_weight  # used for class weighting below
import transformers
from transformers import AutoModel, AutoTokenizer, BertTokenizerFast
from transformers import AdamW
import matplotlib.pyplot as plt
from underthesea import word_tokenize
import regex as re
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from evaluate import test_evaluate, evaluate
from model.bert_classification import PhoBert_Classification

device = torch.device("cuda")
def train(model):
model.train()
total_loss, total_accuracy = 0, 0
total_preds=[]
for step,batch in enumerate(train_dataloader):
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader)))
batch = [r.to(device) for r in batch]
sent_id, mask, labels = batch
model.zero_grad()
preds = model(sent_id, mask)
loss = cross_entropy(preds, labels)
total_loss = total_loss + loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
preds=preds.detach().cpu().numpy()
total_preds.append(preds)
avg_loss = total_loss / len(train_dataloader)
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds
if __name__ == '__main__':
path_dataset = "/kaggle/input/datasetv1mergev2/Dataset_24_12_version1Mergeversion2.xlsx"
dataframe = pd.read_excel(path_dataset, sheet_name = 'Dataset')
train_text, tmp_text, train_labels, tmp_labels = train_test_split(dataframe['Review'], dataframe['Label'],
random_state=2021,
test_size=0.2,
stratify=dataframe['Label'])
val_text, test_text, val_labels, test_labels = train_test_split(tmp_text, tmp_labels,
random_state=2021,
test_size=0.5,
stratify=tmp_labels)
train_text = train_text.astype(str)
val_text = val_text.astype(str)
test_text = test_text.astype(str)
model = PhoBert_Classification(2)
tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base", use_fast=False)
# tokenize and encode sequences in the training set
MAX_LENGTH = 200
tokens_train = tokenizer.batch_encode_plus(
train_text.tolist(),
max_length = MAX_LENGTH,
pad_to_max_length=True,
truncation=True
)
# tokenize and encode sequences in the validation set
tokens_val = tokenizer.batch_encode_plus(
val_text.tolist(),
max_length = MAX_LENGTH,
pad_to_max_length=True,
truncation=True
)
# # tokenize and encode sequences in the test set
tokens_test = tokenizer.batch_encode_plus(
test_text.tolist(),
max_length = MAX_LENGTH,
pad_to_max_length=True,
truncation=True
)
train_seq = torch.tensor(tokens_train['input_ids'])
train_mask = torch.tensor(tokens_train['attention_mask'])
train_y = torch.tensor(train_labels.tolist())
val_seq = torch.tensor(tokens_val['input_ids'])
val_mask = torch.tensor(tokens_val['attention_mask'])
val_y = torch.tensor(val_labels.tolist())
test_seq = torch.tensor(tokens_test['input_ids'])
test_mask = torch.tensor(tokens_test['attention_mask'])
test_y = torch.tensor(test_labels.tolist())
batch_size = 32
train_data = TensorDataset(train_seq, train_mask, train_y)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
val_data = TensorDataset(val_seq, val_mask, val_y)
val_sampler = SequentialSampler(val_data)
val_dataloader = DataLoader(val_data, sampler = val_sampler, batch_size=batch_size)
test_data = TensorDataset(test_seq, test_mask, test_y)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler = test_sampler, batch_size=batch_size)
device = torch.device("cuda")
model = model.to(device)
optimizer = AdamW(model.parameters(), lr = 1e-5)
    class_weights = compute_class_weight(class_weight='balanced',
                                         classes=np.unique(train_labels), y=train_labels)
    weights = torch.tensor(class_weights, dtype=torch.float)
# push to GPU
weights = weights.to(device)
# define the loss function
cross_entropy = nn.NLLLoss(weight=weights)
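    # nn.NLLLoss expects log-probabilities, so PhoBert_Classification is
    # assumed to end in a log_softmax layer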
# set initial loss to infinite
best_valid_loss = float('inf')
best_valid_f1 = 0
# empty lists to store training and validation loss of each epoch
train_losses=[]
valid_losses=[]
n_epochs = []
d = 0
warmup_nepochs = 10
finetune_nepochs = 150
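    # two-phase schedule: warm up the classifier head with the PhoBERT
    # backbone frozen, then unfreeze the backbone and fine-tune end to end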
for param in model.backbone.parameters():
param.requires_grad = False
#for each epoch
for epoch in range(warmup_nepochs):
print("Start")
print('\n Warmup Epoch {:} / {:}'.format(epoch + 1, warmup_nepochs))
#train model
train_loss, _ = train(model)
print({"Loss train": train_loss})
#evaluate model
valid_loss, _, f1_value = evaluate(model, val_dataloader)
print({"Loss val": valid_loss})
print({"F1 score": f1_value})
#save the best model
if f1_value > best_valid_f1:
best_valid_f1 = f1_value
torch.save(model.state_dict(), '/kaggle/working/Best_weights_f1.pt')
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), '/kaggle/working/Lass_weights_f1.pt')
# append training and validation loss
d+=1
n_epochs.append(d)
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
for param in model.backbone.parameters():
param.requires_grad = True
for epoch in range(finetune_nepochs):
print("Start")
print('\n FineTune Epoch {:} / {:}'.format(epoch + 1, finetune_nepochs))
#train model
train_loss, _ = train(model)
print({"Loss train": train_loss})
#evaluate model
valid_loss, _, f1_value = evaluate(model, val_dataloader)
print({"Loss val": valid_loss})
print({"F1 score": f1_value})
#save the best model
if f1_value > best_valid_f1:
best_valid_f1 = f1_value
torch.save(model.state_dict(), '/kaggle/working/Best_weights_f1.pt')
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), '/kaggle/working/Lass_weights_f1.pt')
# append training and validation loss
d+=1
n_epochs.append(d)
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
|
[
"evaluate.evaluate"
] |
[((350, 370), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (362, 370), False, 'import torch\n'), ((1579, 1614), 'numpy.concatenate', 'np.concatenate', (['total_preds'], {'axis': '(0)'}), '(total_preds, axis=0)\n', (1593, 1614), True, 'import numpy as np\n'), ((1785, 1834), 'pandas.read_excel', 'pd.read_excel', (['path_dataset'], {'sheet_name': '"""Dataset"""'}), "(path_dataset, sheet_name='Dataset')\n", (1798, 1834), True, 'import pandas as pd\n'), ((1890, 2014), 'sklearn.model_selection.train_test_split', 'train_test_split', (["dataframe['Review']", "dataframe['Label']"], {'random_state': '(2021)', 'test_size': '(0.2)', 'stratify': "dataframe['Label']"}), "(dataframe['Review'], dataframe['Label'], random_state=2021,\n test_size=0.2, stratify=dataframe['Label'])\n", (1906, 2014), False, 'from sklearn.model_selection import train_test_split\n'), ((2271, 2368), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tmp_text', 'tmp_labels'], {'random_state': '(2021)', 'test_size': '(0.5)', 'stratify': 'tmp_labels'}), '(tmp_text, tmp_labels, random_state=2021, test_size=0.5,\n stratify=tmp_labels)\n', (2287, 2368), False, 'from sklearn.model_selection import train_test_split\n'), ((2721, 2746), 'model.bert_classification.PhoBert_Classification', 'PhoBert_Classification', (['(2)'], {}), '(2)\n', (2743, 2746), False, 'from model.bert_classification import PhoBert_Classification\n'), ((2763, 2830), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""vinai/phobert-base"""'], {'use_fast': '(False)'}), "('vinai/phobert-base', use_fast=False)\n", (2792, 2830), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((3553, 3592), 'torch.tensor', 'torch.tensor', (["tokens_train['input_ids']"], {}), "(tokens_train['input_ids'])\n", (3565, 3592), False, 'import torch\n'), ((3610, 3654), 'torch.tensor', 'torch.tensor', (["tokens_train['attention_mask']"], {}), "(tokens_train['attention_mask'])\n", (3622, 3654), False, 'import torch\n'), ((3720, 3757), 'torch.tensor', 'torch.tensor', (["tokens_val['input_ids']"], {}), "(tokens_val['input_ids'])\n", (3732, 3757), False, 'import torch\n'), ((3773, 3815), 'torch.tensor', 'torch.tensor', (["tokens_val['attention_mask']"], {}), "(tokens_val['attention_mask'])\n", (3785, 3815), False, 'import torch\n'), ((3878, 3916), 'torch.tensor', 'torch.tensor', (["tokens_test['input_ids']"], {}), "(tokens_test['input_ids'])\n", (3890, 3916), False, 'import torch\n'), ((3933, 3976), 'torch.tensor', 'torch.tensor', (["tokens_test['attention_mask']"], {}), "(tokens_test['attention_mask'])\n", (3945, 3976), False, 'import torch\n'), ((4064, 4109), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_seq', 'train_mask', 'train_y'], {}), '(train_seq, train_mask, train_y)\n', (4077, 4109), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4131, 4156), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (4144, 4156), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4181, 4249), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'batch_size'}), '(train_data, sampler=train_sampler, batch_size=batch_size)\n', (4191, 4249), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4266, 4305), 'torch.utils.data.TensorDataset', 'TensorDataset', (['val_seq', 
'val_mask', 'val_y'], {}), '(val_seq, val_mask, val_y)\n', (4279, 4305), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4325, 4352), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['val_data'], {}), '(val_data)\n', (4342, 4352), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4375, 4439), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'sampler': 'val_sampler', 'batch_size': 'batch_size'}), '(val_data, sampler=val_sampler, batch_size=batch_size)\n', (4385, 4439), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4460, 4502), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_seq', 'test_mask', 'test_y'], {}), '(test_seq, test_mask, test_y)\n', (4473, 4502), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4522, 4550), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['test_data'], {}), '(test_data)\n', (4539, 4550), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4574, 4640), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'sampler': 'test_sampler', 'batch_size': 'batch_size'}), '(test_data, sampler=test_sampler, batch_size=batch_size)\n', (4584, 4640), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4661, 4681), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4673, 4681), False, 'import torch\n'), ((4870, 4916), 'torch.tensor', 'torch.tensor', (['class_weights'], {'dtype': 'torch.float'}), '(class_weights, dtype=torch.float)\n', (4882, 4916), False, 'import torch\n'), ((5021, 5047), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {'weight': 'weights'}), '(weight=weights)\n', (5031, 5047), True, 'import torch.nn as nn\n'), ((4818, 4841), 'numpy.unique', 'np.unique', (['train_labels'], {}), '(train_labels)\n', (4827, 4841), True, 'import numpy as np\n'), ((5764, 5795), 'evaluate.evaluate', 'evaluate', (['model', 'val_dataloader'], {}), '(model, val_dataloader)\n', (5772, 5795), False, 'from evaluate import test_evaluate, evaluate\n'), ((6932, 6963), 'evaluate.evaluate', 'evaluate', (['model', 'val_dataloader'], {}), '(model, val_dataloader)\n', (6940, 6963), False, 'from evaluate import test_evaluate, evaluate\n')]
|
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import argparse
import os
import time
from fastspeech2 import FastSpeech2
from loss import FastSpeech2Loss
from dataset import Dataset
from optimizer import ScheduledOptim
from evaluate import evaluate
import hparams as hp
import utils
import audio as Audio
def main(args):
torch.manual_seed(0)
# Get device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get dataset
dataset = Dataset("train.txt")
loader = DataLoader(dataset, batch_size=hp.batch_size**2, shuffle=True,
collate_fn=dataset.collate_fn, drop_last=True, num_workers=0)
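    # batch_size**2 samples are fetched per DataLoader step; collate_fn is
    # assumed to regroup them into hp.batch_size real batches (hence the inner
    # loop over `batchs` below)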
# Define model
model = nn.DataParallel(FastSpeech2()).to(device)
print("Model Has Been Defined")
num_param = utils.get_param_num(model)
print('Number of FastSpeech2 Parameters:', num_param)
# Optimizer and loss
    optimizer = torch.optim.Adam(model.parameters(), betas=hp.betas, eps=hp.eps, weight_decay=hp.weight_decay)
scheduled_optim = ScheduledOptim(optimizer, hp.decoder_hidden, hp.n_warm_up_step, args.restore_step)
Loss = FastSpeech2Loss().to(device)
print("Optimizer and Loss Function Defined.")
# Load checkpoint if exists
checkpoint_path = os.path.join(hp.checkpoint_path)
try:
checkpoint = torch.load(os.path.join(
checkpoint_path, 'checkpoint_{}.pth.tar'.format(args.restore_step)))
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("\n---Model Restored at Step {}---\n".format(args.restore_step))
    except Exception:
        print("\n---Start New Training---\n")
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
# Load vocoder
if hp.vocoder == 'melgan':
melgan = utils.get_melgan()
elif hp.vocoder == 'waveglow':
waveglow = utils.get_waveglow()
# Init logger
log_path = hp.log_path
if not os.path.exists(log_path):
os.makedirs(log_path)
os.makedirs(os.path.join(log_path, 'train'))
os.makedirs(os.path.join(log_path, 'validation'))
train_logger = SummaryWriter(os.path.join(log_path, 'train'))
val_logger = SummaryWriter(os.path.join(log_path, 'validation'))
# Init synthesis directory
synth_path = hp.synth_path
if not os.path.exists(synth_path):
os.makedirs(synth_path)
# Define Some Information
Time = np.array([])
Start = time.perf_counter()
# Training
model = model.train()
for epoch in range(hp.epochs):
# Get Training Loader
total_step = hp.epochs * len(loader) * hp.batch_size
for i, batchs in enumerate(loader):
for j, data_of_batch in enumerate(batchs):
start_time = time.perf_counter()
current_step = i*hp.batch_size + j + args.restore_step + epoch*len(loader)*hp.batch_size + 1
# Get Data
text = torch.from_numpy(data_of_batch["text"]).long().to(device)
mel_target = torch.from_numpy(data_of_batch["mel_target"]).float().to(device)
D = torch.from_numpy(data_of_batch["D"]).long().to(device)
log_D = torch.from_numpy(data_of_batch["log_D"]).float().to(device)
f0 = torch.from_numpy(data_of_batch["f0"]).float().to(device)
energy = torch.from_numpy(data_of_batch["energy"]).float().to(device)
src_len = torch.from_numpy(data_of_batch["src_len"]).long().to(device)
mel_len = torch.from_numpy(data_of_batch["mel_len"]).long().to(device)
max_src_len = np.max(data_of_batch["src_len"]).astype(np.int32)
max_mel_len = np.max(data_of_batch["mel_len"]).astype(np.int32)
# Forward
mel_output, mel_postnet_output, log_duration_output, f0_output, energy_output, src_mask, mel_mask, _ = model(
text, src_len, mel_len, D, f0, energy, max_src_len, max_mel_len)
# Cal Loss
mel_loss, mel_postnet_loss, d_loss, f_loss, e_loss = Loss(
log_duration_output, log_D, f0_output, f0, energy_output, energy, mel_output, mel_postnet_output, mel_target, ~src_mask, ~mel_mask)
total_loss = mel_loss + mel_postnet_loss + d_loss + f_loss + e_loss
# Logger
t_l = total_loss.item()
m_l = mel_loss.item()
m_p_l = mel_postnet_loss.item()
d_l = d_loss.item()
f_l = f_loss.item()
e_l = e_loss.item()
with open(os.path.join(log_path, "total_loss.txt"), "a") as f_total_loss:
f_total_loss.write(str(t_l)+"\n")
with open(os.path.join(log_path, "mel_loss.txt"), "a") as f_mel_loss:
f_mel_loss.write(str(m_l)+"\n")
with open(os.path.join(log_path, "mel_postnet_loss.txt"), "a") as f_mel_postnet_loss:
f_mel_postnet_loss.write(str(m_p_l)+"\n")
with open(os.path.join(log_path, "duration_loss.txt"), "a") as f_d_loss:
f_d_loss.write(str(d_l)+"\n")
with open(os.path.join(log_path, "f0_loss.txt"), "a") as f_f_loss:
f_f_loss.write(str(f_l)+"\n")
with open(os.path.join(log_path, "energy_loss.txt"), "a") as f_e_loss:
f_e_loss.write(str(e_l)+"\n")
# Backward
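                # gradient accumulation: the loss is scaled by 1/hp.acc_steps
                # and the optimizer only steps every hp.acc_steps mini-batches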
total_loss = total_loss / hp.acc_steps
total_loss.backward()
if current_step % hp.acc_steps != 0:
continue
# Clipping gradients to avoid gradient explosion
nn.utils.clip_grad_norm_(model.parameters(), hp.grad_clip_thresh)
# Update weights
scheduled_optim.step_and_update_lr()
scheduled_optim.zero_grad()
# Print
if current_step % hp.log_step == 0:
Now = time.perf_counter()
str1 = "Epoch [{}/{}], Step [{}/{}]:".format(
epoch+1, hp.epochs, current_step, total_step)
str2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f}, Duration Loss: {:.4f}, F0 Loss: {:.4f}, Energy Loss: {:.4f};".format(
t_l, m_l, m_p_l, d_l, f_l, e_l)
str3 = "Time Used: {:.3f}s, Estimated Time Remaining: {:.3f}s.".format(
(Now-Start), (total_step-current_step)*np.mean(Time))
print("\n" + str1)
print(str2)
print(str3)
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write(str1 + "\n")
f_log.write(str2 + "\n")
f_log.write(str3 + "\n")
f_log.write("\n")
train_logger.add_scalar('Loss/total_loss', t_l, current_step)
train_logger.add_scalar('Loss/mel_loss', m_l, current_step)
train_logger.add_scalar('Loss/mel_postnet_loss', m_p_l, current_step)
train_logger.add_scalar('Loss/duration_loss', d_l, current_step)
train_logger.add_scalar('Loss/F0_loss', f_l, current_step)
train_logger.add_scalar('Loss/energy_loss', e_l, current_step)
if current_step % hp.save_step == 0:
torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(
)}, os.path.join(checkpoint_path, 'checkpoint_{}.pth.tar'.format(current_step)))
print("save model at step {} ...".format(current_step))
if current_step % hp.synth_step == 0:
length = mel_len[0].item()
mel_target_torch = mel_target[0, :length].detach().unsqueeze(0).transpose(1, 2)
mel_target = mel_target[0, :length].detach().cpu().transpose(0, 1)
mel_torch = mel_output[0, :length].detach().unsqueeze(0).transpose(1, 2)
mel = mel_output[0, :length].detach().cpu().transpose(0, 1)
mel_postnet_torch = mel_postnet_output[0, :length].detach().unsqueeze(0).transpose(1, 2)
mel_postnet = mel_postnet_output[0, :length].detach().cpu().transpose(0, 1)
Audio.tools.inv_mel_spec(mel, os.path.join(synth_path, "step_{}_griffin_lim.wav".format(current_step)))
Audio.tools.inv_mel_spec(mel_postnet, os.path.join(synth_path, "step_{}_postnet_griffin_lim.wav".format(current_step)))
if hp.vocoder == 'melgan':
utils.melgan_infer(mel_torch, melgan, os.path.join(hp.synth_path, 'step_{}_{}.wav'.format(current_step, hp.vocoder)))
utils.melgan_infer(mel_postnet_torch, melgan, os.path.join(hp.synth_path, 'step_{}_postnet_{}.wav'.format(current_step, hp.vocoder)))
utils.melgan_infer(mel_target_torch, melgan, os.path.join(hp.synth_path, 'step_{}_ground-truth_{}.wav'.format(current_step, hp.vocoder)))
elif hp.vocoder == 'waveglow':
utils.waveglow_infer(mel_torch, waveglow, os.path.join(hp.synth_path, 'step_{}_{}.wav'.format(current_step, hp.vocoder)))
utils.waveglow_infer(mel_postnet_torch, waveglow, os.path.join(hp.synth_path, 'step_{}_postnet_{}.wav'.format(current_step, hp.vocoder)))
utils.waveglow_infer(mel_target_torch, waveglow, os.path.join(hp.synth_path, 'step_{}_ground-truth_{}.wav'.format(current_step, hp.vocoder)))
f0 = f0[0, :length].detach().cpu().numpy()
energy = energy[0, :length].detach().cpu().numpy()
f0_output = f0_output[0, :length].detach().cpu().numpy()
energy_output = energy_output[0, :length].detach().cpu().numpy()
utils.plot_data([(mel_postnet.numpy(), f0_output, energy_output), (mel_target.numpy(), f0, energy)],
                    ['Synthesized Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(synth_path, 'step_{}.png'.format(current_step)))
if current_step % hp.eval_step == 0:
model.eval()
with torch.no_grad():
d_l, f_l, e_l, m_l, m_p_l = evaluate(model, current_step)
t_l = d_l + f_l + e_l + m_l + m_p_l
val_logger.add_scalar('Loss/total_loss', t_l, current_step)
val_logger.add_scalar('Loss/mel_loss', m_l, current_step)
val_logger.add_scalar('Loss/mel_postnet_loss', m_p_l, current_step)
val_logger.add_scalar('Loss/duration_loss', d_l, current_step)
val_logger.add_scalar('Loss/F0_loss', f_l, current_step)
val_logger.add_scalar('Loss/energy_loss', e_l, current_step)
model.train()
end_time = time.perf_counter()
Time = np.append(Time, end_time - start_time)
if len(Time) == hp.clear_Time:
temp_value = np.mean(Time)
Time = np.delete(
Time, [i for i in range(len(Time))], axis=None)
Time = np.append(Time, temp_value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--restore_step', type=int, default=0)
args = parser.parse_args()
main(args)
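# Hedged sketch: step_and_update_lr() above is assumed to implement the usual
# Transformer "Noam" warm-up schedule (lr = d_model^-0.5 * min(step^-0.5,
# step * warmup^-1.5)) found in common FastSpeech2 implementations; the class
# below is an illustrative stand-in, not this repo's actual ScheduledOptim.
#
# class NoamOptim:
#     def __init__(self, optimizer, d_model, n_warmup_steps, current_step=0):
#         self.optimizer = optimizer
#         self.init_lr = d_model ** -0.5
#         self.n_warmup_steps = n_warmup_steps
#         self.step_num = current_step
#
#     def step_and_update_lr(self):
#         self.step_num += 1
#         lr = self.init_lr * min(self.step_num ** -0.5,
#                                 self.step_num * self.n_warmup_steps ** -1.5)
#         for group in self.optimizer.param_groups:
#             group['lr'] = lr
#         self.optimizer.step()
#
#     def zero_grad(self):
#         self.optimizer.zero_grad()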
|
[
"evaluate.evaluate"
] |
[((445, 465), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (462, 465), False, 'import torch\n'), ((599, 619), 'dataset.Dataset', 'Dataset', (['"""train.txt"""'], {}), "('train.txt')\n", (606, 619), False, 'from dataset import Dataset\n'), ((635, 766), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(hp.batch_size ** 2)', 'shuffle': '(True)', 'collate_fn': 'dataset.collate_fn', 'drop_last': '(True)', 'num_workers': '(0)'}), '(dataset, batch_size=hp.batch_size ** 2, shuffle=True, collate_fn\n =dataset.collate_fn, drop_last=True, num_workers=0)\n', (645, 766), False, 'from torch.utils.data import DataLoader\n'), ((901, 927), 'utils.get_param_num', 'utils.get_param_num', (['model'], {}), '(model)\n', (920, 927), False, 'import utils\n'), ((1152, 1239), 'optimizer.ScheduledOptim', 'ScheduledOptim', (['optimizer', 'hp.decoder_hidden', 'hp.n_warm_up_step', 'args.restore_step'], {}), '(optimizer, hp.decoder_hidden, hp.n_warm_up_step, args.\n restore_step)\n', (1166, 1239), False, 'from optimizer import ScheduledOptim\n'), ((1386, 1418), 'os.path.join', 'os.path.join', (['hp.checkpoint_path'], {}), '(hp.checkpoint_path)\n', (1398, 1418), False, 'import os\n'), ((2621, 2633), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2629, 2633), True, 'import numpy as np\n'), ((2647, 2666), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2664, 2666), False, 'import time\n'), ((12140, 12165), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12163, 12165), False, 'import argparse\n'), ((1973, 1991), 'utils.get_melgan', 'utils.get_melgan', ([], {}), '()\n', (1989, 1991), False, 'import utils\n'), ((2130, 2154), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (2144, 2154), False, 'import os\n'), ((2165, 2186), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (2176, 2186), False, 'import os\n'), ((2334, 2365), 'os.path.join', 'os.path.join', (['log_path', '"""train"""'], {}), "(log_path, 'train')\n", (2346, 2365), False, 'import os\n'), ((2399, 2435), 'os.path.join', 'os.path.join', (['log_path', '"""validation"""'], {}), "(log_path, 'validation')\n", (2411, 2435), False, 'import os\n'), ((2515, 2541), 'os.path.exists', 'os.path.exists', (['synth_path'], {}), '(synth_path)\n', (2529, 2541), False, 'import os\n'), ((2552, 2575), 'os.makedirs', 'os.makedirs', (['synth_path'], {}), '(synth_path)\n', (2563, 2575), False, 'import os\n'), ((522, 547), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (545, 547), False, 'import torch\n'), ((1247, 1264), 'loss.FastSpeech2Loss', 'FastSpeech2Loss', ([], {}), '()\n', (1262, 1264), False, 'from loss import FastSpeech2Loss\n'), ((2048, 2068), 'utils.get_waveglow', 'utils.get_waveglow', ([], {}), '()\n', (2066, 2068), False, 'import utils\n'), ((2208, 2239), 'os.path.join', 'os.path.join', (['log_path', '"""train"""'], {}), "(log_path, 'train')\n", (2220, 2239), False, 'import os\n'), ((2262, 2298), 'os.path.join', 'os.path.join', (['log_path', '"""validation"""'], {}), "(log_path, 'validation')\n", (2274, 2298), False, 'import os\n'), ((821, 834), 'fastspeech2.FastSpeech2', 'FastSpeech2', ([], {}), '()\n', (832, 834), False, 'from fastspeech2 import FastSpeech2\n'), ((1826, 1857), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1840, 1857), False, 'import os\n'), ((1872, 1900), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1883, 1900), False, 'import 
os\n'), ((2978, 2997), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2995, 2997), False, 'import time\n'), ((11747, 11766), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11764, 11766), False, 'import time\n'), ((11791, 11829), 'numpy.append', 'np.append', (['Time', '(end_time - start_time)'], {}), '(Time, end_time - start_time)\n', (11800, 11829), True, 'import numpy as np\n'), ((6407, 6426), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6424, 6426), False, 'import time\n'), ((11912, 11925), 'numpy.mean', 'np.mean', (['Time'], {}), '(Time)\n', (11919, 11925), True, 'import numpy as np\n'), ((12066, 12093), 'numpy.append', 'np.append', (['Time', 'temp_value'], {}), '(Time, temp_value)\n', (12075, 12093), True, 'import numpy as np\n'), ((3867, 3899), 'numpy.max', 'np.max', (["data_of_batch['src_len']"], {}), "(data_of_batch['src_len'])\n", (3873, 3899), True, 'import numpy as np\n'), ((3948, 3980), 'numpy.max', 'np.max', (["data_of_batch['mel_len']"], {}), "(data_of_batch['mel_len'])\n", (3954, 3980), True, 'import numpy as np\n'), ((4932, 4972), 'os.path.join', 'os.path.join', (['log_path', '"""total_loss.txt"""'], {}), "(log_path, 'total_loss.txt')\n", (4944, 4972), False, 'import os\n'), ((5078, 5116), 'os.path.join', 'os.path.join', (['log_path', '"""mel_loss.txt"""'], {}), "(log_path, 'mel_loss.txt')\n", (5090, 5116), False, 'import os\n'), ((5218, 5264), 'os.path.join', 'os.path.join', (['log_path', '"""mel_postnet_loss.txt"""'], {}), "(log_path, 'mel_postnet_loss.txt')\n", (5230, 5264), False, 'import os\n'), ((5384, 5427), 'os.path.join', 'os.path.join', (['log_path', '"""duration_loss.txt"""'], {}), "(log_path, 'duration_loss.txt')\n", (5396, 5427), False, 'import os\n'), ((5525, 5562), 'os.path.join', 'os.path.join', (['log_path', '"""f0_loss.txt"""'], {}), "(log_path, 'f0_loss.txt')\n", (5537, 5562), False, 'import os\n'), ((5660, 5701), 'os.path.join', 'os.path.join', (['log_path', '"""energy_loss.txt"""'], {}), "(log_path, 'energy_loss.txt')\n", (5672, 5701), False, 'import os\n'), ((10976, 10991), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10989, 10991), False, 'import torch\n'), ((11046, 11075), 'evaluate.evaluate', 'evaluate', (['model', 'current_step'], {}), '(model, current_step)\n', (11054, 11075), False, 'from evaluate import evaluate\n'), ((6944, 6957), 'numpy.mean', 'np.mean', (['Time'], {}), '(Time)\n', (6951, 6957), True, 'import numpy as np\n'), ((7120, 7153), 'os.path.join', 'os.path.join', (['log_path', '"""log.txt"""'], {}), "(log_path, 'log.txt')\n", (7132, 7153), False, 'import os\n'), ((3180, 3219), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['text']"], {}), "(data_of_batch['text'])\n", (3196, 3219), False, 'import torch\n'), ((3268, 3313), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['mel_target']"], {}), "(data_of_batch['mel_target'])\n", (3284, 3313), False, 'import torch\n'), ((3354, 3390), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['D']"], {}), "(data_of_batch['D'])\n", (3370, 3390), False, 'import torch\n'), ((3434, 3474), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['log_D']"], {}), "(data_of_batch['log_D'])\n", (3450, 3474), False, 'import torch\n'), ((3516, 3553), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['f0']"], {}), "(data_of_batch['f0'])\n", (3532, 3553), False, 'import torch\n'), ((3599, 3640), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['energy']"], {}), "(data_of_batch['energy'])\n", (3615, 3640), False, 'import 
torch\n'), ((3687, 3729), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['src_len']"], {}), "(data_of_batch['src_len'])\n", (3703, 3729), False, 'import torch\n'), ((3775, 3817), 'torch.from_numpy', 'torch.from_numpy', (["data_of_batch['mel_len']"], {}), "(data_of_batch['mel_len'])\n", (3791, 3817), False, 'import torch\n')]
|
#pylint: disable = redefined-outer-name, invalid-name
# inbuilt lib imports:
from typing import List, Dict, Union
import os
import argparse
import random
import json
# external lib imports:
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, optimizers
# project imports
from lib.dependency_tree import DependencyTree
from lib.parsing_system import ParsingSystem
from lib.vocabulary import Vocabulary
from lib.data import (
read_conll_data,
generate_batches,
load_embeddings,
generate_training_instances,
)
from lib.model import DependencyParser
from predict import predict
from evaluate import evaluate
def train(model: models.Model,
optimizer: optimizers.Optimizer,
train_instances: List[Dict[str, np.ndarray]],
validation_sentences: List[List[str]],
validation_trees: List[DependencyTree],
parsing_system: ParsingSystem,
vocabulary: Vocabulary,
num_epochs: int,
batch_size: int) -> Dict[str, Union[models.Model, str]]:
"""
Trains a model on the given training instances as
configured and returns the trained model.
"""
print("\nGenerating Training batches:")
train_batches = generate_batches(train_instances, batch_size)
train_batches = [(batch["inputs"], batch["labels"]) for batch in train_batches]
for epoch in range(num_epochs):
print(f"\nEpoch {epoch}")
# Training Epoch
total_training_loss = 0
generator_tqdm = tqdm(train_batches)
for index, (batch_inputs, batch_labels) in enumerate(generator_tqdm):
with tf.GradientTape() as tape:
model_outputs = model(inputs=batch_inputs, labels=batch_labels)
loss_value = model_outputs["loss"]
grads = tape.gradient(loss_value, model.trainable_variables)
clipped_grads = [tf.clip_by_norm(grad, 5) for grad in grads]
optimizer.apply_gradients(zip(clipped_grads, model.trainable_variables))
total_training_loss += loss_value
description = ("Average training loss: %.2f " % (total_training_loss/(index+1)))
generator_tqdm.set_description(description, refresh=False)
# Validation evaluation
print("Evaluating validation performance:")
predicted_trees = predict(model, validation_sentences, parsing_system, vocabulary)
evaluation_report = evaluate(validation_sentences, parsing_system,
predicted_trees, validation_trees)
print("\n"+evaluation_report)
training_outputs = {"model": model, "evaluation_report": evaluation_report}
return training_outputs
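# A minimal usage sketch of train() (argument values are illustrative; the
# objects come from the lib.* helpers set up in __main__ below):
#
#   outputs = train(model, optimizers.Adam(), train_instances,
#                   validation_sentences, validation_trees,
#                   parsing_system, vocabulary,
#                   num_epochs=5, batch_size=10000)
#   print(outputs["evaluation_report"])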
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train Dependency Parsing Model')
# General training arguments
parser.add_argument('train_data_file_path', type=str, help='training data file path')
parser.add_argument('validation_data_file_path', type=str, help='validation data file path')
parser.add_argument('--batch-size', type=int, default=10000, help='batch size')
parser.add_argument('--num-epochs', type=int, default=5, help='max num epochs to train for')
parser.add_argument('--cache-processed-data', action="store_true", default=False,
help='if passed, it will cache generated training instances '
                             'at the same path with extension .jsonl. '
                             'You can use --use-cached-data next time to use the cached datasets. '
'Do not use it until you have finalized feature generation code.')
parser.add_argument('--use-cached-data', action="store_true", default=False,
                        help='if passed, use the cached preprocessed data instead of conll files.')
parser.add_argument('--pretrained-embedding-file', type=str,
                        help='if passed, use glove embeddings to initialize '
                             'the embedding matrix.')
parser.add_argument('--experiment-name', type=str, default="default",
help='optional experiment name which determines where to store '
'the model training outputs.')
# Model specific arguments
parser.add_argument('--num-tokens', type=int, help='num_tokens ', default=48)
parser.add_argument('--hidden-dim', type=int, help='hidden_dim of neural network', default=200)
parser.add_argument('--embedding-dim', type=int, help='embedding_dim of word embeddings', default=50)
parser.add_argument('--activation-name', type=str, choices=("cubic", "tanh", "sigmoid"),
help='activation-name', default="cubic")
    parser.add_argument('--trainable-embeddings', action="store_false",
                        help='if passed, freeze the embedding matrix (embeddings are trainable by default)',
                        default=True)
parser.add_argument('--regularization-lambda', type=float,
help='regularization_lambda ', default=1e-8)
args = parser.parse_args()
# Set numpy, tensorflow and python seeds for reproducibility.
tf.random.set_seed(1337)
np.random.seed(1337)
random.seed(13370)
# Print if GPU is available or not.
device_name = tf.test.gpu_device_name()
print(f"GPU found: {device_name == '/device:GPU:0'}")
# Setup Serialization dir
save_serialization_dir = os.path.join("serialization_dirs", args.experiment_name)
if not os.path.exists(save_serialization_dir):
os.makedirs(save_serialization_dir)
# Setup Training / Validation data
print("Reading training data")
train_sentences, train_trees = read_conll_data(args.train_data_file_path)
print("Reading validation data")
validation_sentences, validation_trees = read_conll_data(args.validation_data_file_path)
vocabulary = Vocabulary(train_sentences, train_trees)
sorted_labels = [item[0] for item in
sorted(vocabulary.label_token_to_id.items(), key=lambda e: e[1])]
non_null_sorted_labels = sorted_labels[1:]
parsing_system = ParsingSystem(non_null_sorted_labels)
    # Generating training instances takes ~20 minutes every time. So once you finalize the
# feature generation and want to try different configs for experiments, you can use caching.
if args.use_cached_data:
print("Loading cached training instances")
cache_processed_data_path = args.train_data_file_path.replace("conll", "jsonl")
if not os.path.exists(cache_processed_data_path):
raise Exception(f"You asked to use cached data but {cache_processed_data_path} "
f"is not available.")
with open(cache_processed_data_path, "r") as file:
train_instances = [json.loads(line)
for line in tqdm(file.readlines()) if line.strip()]
else:
print("Generating training instances")
train_instances = generate_training_instances(parsing_system,
train_sentences,
vocabulary, train_trees)
        # Cache the generated training instances if requested.
if args.cache_processed_data:
print("Caching training instances for later use")
cache_processed_data_path = args.train_data_file_path.replace("conll", "jsonl")
with open(cache_processed_data_path, "w") as file:
for instance in tqdm(train_instances):
file.write(json.dumps(instance) + "\n")
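        # (The cache path mirrors the data path, e.g. data/train.conll ->
        #  data/train.jsonl, via the replace("conll", "jsonl") calls above.)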
# Setup Model
config_dict = {"vocab_size": len(vocabulary.id_to_token),
"embedding_dim": args.embedding_dim,
"num_tokens": args.num_tokens,
"hidden_dim": args.hidden_dim,
"num_transitions": parsing_system.num_transitions(),
"regularization_lambda": args.regularization_lambda,
"trainable_embeddings": args.trainable_embeddings,
"activation_name": args.activation_name}
model = DependencyParser(**config_dict)
if args.pretrained_embedding_file:
embedding_matrix = load_embeddings(args.pretrained_embedding_file,
vocabulary, args.embedding_dim)
model.embeddings.assign(embedding_matrix)
# Setup Optimizer
optimizer = optimizers.Adam()
# Train
training_outputs = train(model, optimizer, train_instances,
validation_sentences, validation_trees,
parsing_system, vocabulary, args.num_epochs,
args.batch_size)
# Save the trained model
trained_model = training_outputs["model"]
    trained_model.save_weights(os.path.join(save_serialization_dir, 'model.ckpt'))
# Save the last epoch dev metrics
evaluation_report = training_outputs["evaluation_report"]
metrics_path = os.path.join(save_serialization_dir, "metrics.txt")
with open(metrics_path, "w") as file:
file.write(evaluation_report)
# Save the used vocab
vocab_path = os.path.join(save_serialization_dir, "vocab.pickle")
vocabulary.save(vocab_path)
# Save the used config
config_path = os.path.join(save_serialization_dir, "config.json")
with open(config_path, "w") as file:
json.dump(config_dict, file)
print(f"\nFinal model stored in serialization directory: {save_serialization_dir}")
|
[
"evaluate.evaluate"
] |
[((1252, 1297), 'lib.data.generate_batches', 'generate_batches', (['train_instances', 'batch_size'], {}), '(train_instances, batch_size)\n', (1268, 1297), False, 'from lib.data import read_conll_data, generate_batches, load_embeddings, generate_training_instances\n'), ((2767, 2836), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Dependency Parsing Model"""'}), "(description='Train Dependency Parsing Model')\n", (2790, 2836), False, 'import argparse\n'), ((5128, 5152), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(1337)'], {}), '(1337)\n', (5146, 5152), True, 'import tensorflow as tf\n'), ((5157, 5177), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (5171, 5177), True, 'import numpy as np\n'), ((5182, 5200), 'random.seed', 'random.seed', (['(13370)'], {}), '(13370)\n', (5193, 5200), False, 'import random\n'), ((5260, 5285), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (5283, 5285), True, 'import tensorflow as tf\n'), ((5404, 5460), 'os.path.join', 'os.path.join', (['"""serialization_dirs"""', 'args.experiment_name'], {}), "('serialization_dirs', args.experiment_name)\n", (5416, 5460), False, 'import os\n'), ((5666, 5708), 'lib.data.read_conll_data', 'read_conll_data', (['args.train_data_file_path'], {}), '(args.train_data_file_path)\n', (5681, 5708), False, 'from lib.data import read_conll_data, generate_batches, load_embeddings, generate_training_instances\n'), ((5792, 5839), 'lib.data.read_conll_data', 'read_conll_data', (['args.validation_data_file_path'], {}), '(args.validation_data_file_path)\n', (5807, 5839), False, 'from lib.data import read_conll_data, generate_batches, load_embeddings, generate_training_instances\n'), ((5858, 5898), 'lib.vocabulary.Vocabulary', 'Vocabulary', (['train_sentences', 'train_trees'], {}), '(train_sentences, train_trees)\n', (5868, 5898), False, 'from lib.vocabulary import Vocabulary\n'), ((6097, 6134), 'lib.parsing_system.ParsingSystem', 'ParsingSystem', (['non_null_sorted_labels'], {}), '(non_null_sorted_labels)\n', (6110, 6134), False, 'from lib.parsing_system import ParsingSystem\n'), ((8073, 8104), 'lib.model.DependencyParser', 'DependencyParser', ([], {}), '(**config_dict)\n', (8089, 8104), False, 'from lib.model import DependencyParser\n'), ((8384, 8401), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (8399, 8401), False, 'from tensorflow.keras import models, optimizers\n'), ((8948, 8999), 'os.path.join', 'os.path.join', (['save_serialization_dir', '"""metrics.txt"""'], {}), "(save_serialization_dir, 'metrics.txt')\n", (8960, 8999), False, 'import os\n'), ((9124, 9176), 'os.path.join', 'os.path.join', (['save_serialization_dir', '"""vocab.pickle"""'], {}), "(save_serialization_dir, 'vocab.pickle')\n", (9136, 9176), False, 'import os\n'), ((9255, 9306), 'os.path.join', 'os.path.join', (['save_serialization_dir', '"""config.json"""'], {}), "(save_serialization_dir, 'config.json')\n", (9267, 9306), False, 'import os\n'), ((1535, 1554), 'tqdm.tqdm', 'tqdm', (['train_batches'], {}), '(train_batches)\n', (1539, 1554), False, 'from tqdm import tqdm\n'), ((2365, 2429), 'predict.predict', 'predict', (['model', 'validation_sentences', 'parsing_system', 'vocabulary'], {}), '(model, validation_sentences, parsing_system, vocabulary)\n', (2372, 2429), False, 'from predict import predict\n'), ((2458, 2543), 'evaluate.evaluate', 'evaluate', (['validation_sentences', 'parsing_system', 'predicted_trees', 'validation_trees'], {}), 
'(validation_sentences, parsing_system, predicted_trees,\n validation_trees)\n', (2466, 2543), False, 'from evaluate import evaluate\n'), ((5472, 5510), 'os.path.exists', 'os.path.exists', (['save_serialization_dir'], {}), '(save_serialization_dir)\n', (5486, 5510), False, 'import os\n'), ((5520, 5555), 'os.makedirs', 'os.makedirs', (['save_serialization_dir'], {}), '(save_serialization_dir)\n', (5531, 5555), False, 'import os\n'), ((6965, 7054), 'lib.data.generate_training_instances', 'generate_training_instances', (['parsing_system', 'train_sentences', 'vocabulary', 'train_trees'], {}), '(parsing_system, train_sentences, vocabulary,\n train_trees)\n', (6992, 7054), False, 'from lib.data import read_conll_data, generate_batches, load_embeddings, generate_training_instances\n'), ((8172, 8251), 'lib.data.load_embeddings', 'load_embeddings', (['args.pretrained_embedding_file', 'vocabulary', 'args.embedding_dim'], {}), '(args.pretrained_embedding_file, vocabulary, args.embedding_dim)\n', (8187, 8251), False, 'from lib.data import read_conll_data, generate_batches, load_embeddings, generate_training_instances\n'), ((8775, 8826), 'os.path.join', 'os.path.join', (['save_serialization_dir', 'f"""model.ckpt"""'], {}), "(save_serialization_dir, f'model.ckpt')\n", (8787, 8826), False, 'import os\n'), ((9356, 9384), 'json.dump', 'json.dump', (['config_dict', 'file'], {}), '(config_dict, file)\n', (9365, 9384), False, 'import json\n'), ((6506, 6547), 'os.path.exists', 'os.path.exists', (['cache_processed_data_path'], {}), '(cache_processed_data_path)\n', (6520, 6547), False, 'import os\n'), ((7471, 7492), 'tqdm.tqdm', 'tqdm', (['train_instances'], {}), '(train_instances)\n', (7475, 7492), False, 'from tqdm import tqdm\n'), ((1650, 1667), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1665, 1667), True, 'import tensorflow as tf\n'), ((1915, 1939), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['grad', '(5)'], {}), '(grad, 5)\n', (1930, 1939), True, 'import tensorflow as tf\n'), ((6782, 6798), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6792, 6798), False, 'import json\n'), ((7521, 7541), 'json.dumps', 'json.dumps', (['instance'], {}), '(instance)\n', (7531, 7541), False, 'import json\n')]
|
"""
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import random
import json
import argparse
from loguru import logger
import params
from evaluate import clustering_report_gt
def main(args):
random.seed(args.seed)
with open("dialog-flow-extraction/data/MultiWOZ_2.1/data_single.json",
"r") as f:
data = json.load(f)
for domain in params.domain:
logger.warning(f"Domain: {domain}")
dialogs = data[domain]
logger.warning(f"#dialogs: {len(dialogs)}")
labels_true = []
num_cluster = dialogs[0]["num_label"]
logger.warning(f"#clusters: {num_cluster}")
for dialog in dialogs:
labels_true.extend(dialog["label"])
# logger.debug(len(labels_true))
# Random baseline
labels_pred = random.choices(range(0, num_cluster),
k=len(labels_true))
clustering_report_gt(labels_true, labels_pred)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--seed',
type=int,
default=666,
help="random seed for initialization")
args = parser.parse_args()
print(args)
main(args)
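# For reference, the random baseline above can be sanity-checked with a
# self-contained snippet (sklearn's adjusted_rand_score is an assumption here;
# the exact metrics reported by clustering_report_gt live in this repo's
# evaluate module):
#
#   from sklearn.metrics import adjusted_rand_score
#   labels_true = [0, 0, 1, 1, 2, 2]
#   labels_pred = random.choices(range(3), k=len(labels_true))
#   print(adjusted_rand_score(labels_true, labels_pred))  # ~0 for random labels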
|
[
"evaluate.clustering_report_gt"
] |
[((378, 400), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (389, 400), False, 'import random\n'), ((1230, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1253, 1255), False, 'import argparse\n'), ((517, 529), 'json.load', 'json.load', (['f'], {}), '(f)\n', (526, 529), False, 'import json\n'), ((579, 614), 'loguru.logger.warning', 'logger.warning', (['f"""Domain: {domain}"""'], {}), "(f'Domain: {domain}')\n", (593, 614), False, 'from loguru import logger\n'), ((797, 840), 'loguru.logger.warning', 'logger.warning', (['f"""#clusters: {num_cluster}"""'], {}), "(f'#clusters: {num_cluster}')\n", (811, 840), False, 'from loguru import logger\n'), ((1141, 1187), 'evaluate.clustering_report_gt', 'clustering_report_gt', (['labels_true', 'labels_pred'], {}), '(labels_true, labels_pred)\n', (1161, 1187), False, 'from evaluate import clustering_report_gt\n')]
|
import numpy as np
from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing
from models import Waveform
from utils import load_groundtruth, read_csv, plot_metrics_classification_boxplot
from utils import plot_audio, plot_odf, plot_confusion_matrix, plot_evaluation_report
from evaluate import evaluate_system, evaluate_activity_detection, evaluate_classificator
import pickle
import pandas as pd
"""
This run script performs an off-line simulation of the system in order to
evaluate selected interfaces for performance testing.
"""
# Import model
from utils.load_csv import get_prediction_time_instants, load_annotation
filename = './app/finalized_model_mfccs.sav'
knn_model = pickle.load(open(filename, 'rb'))
model_normalization = './app/model_normalization_mfccs.csv'
normalization_values = pd.read_csv(model_normalization)
path = '../../TrainDataset/LOD_1624992635178/Snare_LOD'
audio = Waveform(path=path + ".wav")
groundtruth = load_annotation(path + ".csv")
# Init system
init_pre_processing()
init_activity_detection(func_type=1)
init_feature_extraction(func_type="mfcc", by_pass=False, n_mfcc_arg=20, norm_file=normalization_values)
init_classificator(knn_model=knn_model, by_pass=False)
buffer_len = 512
# Call system
result = main(audio, buffer_len)
prediction = get_prediction_time_instants(result['ONSET_LOCATIONS'], result['PREDICTION'], audio.sample_rate)
# Plot results
plot_audio(audio.waveform, result['SIGNAL_PROCESSED'], audio.sample_rate)
plot_odf(audio.filename, audio.waveform, result['SIGNAL_PROCESSED'], audio.sample_rate, result['ONSET_LOCATIONS'],
result['HFC'], result['THRESHOLD'])
groundtruth_activity = np.zeros(len(result['ONSET_LOCATIONS']))
# Transform the annotation into the desired format (1 = activity, 0 = non-activity)
for i in range(0, len(groundtruth), 2):
sample_instant_1 = int(float(groundtruth[i][0]) * audio.sample_rate)
sample_instant_2 = int(float(groundtruth[i + 1][0]) * audio.sample_rate)
groundtruth_activity[sample_instant_1:sample_instant_2] = 1
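# (The loop above assumes the annotation CSV alternates event rows: row i
#  holds an onset time in seconds and row i+1 the matching offset, so each
#  pair marks one active region in the sample-level mask.)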
# Evaluation
precision_ad, recall_ad, fscore_ad, accuracy_ad = evaluate_activity_detection(groundtruth_activity,
result['ONSET_LOCATIONS'])
report, cm = evaluate_classificator(groundtruth, prediction)
precision, recall, fscore = evaluate_system(groundtruth, prediction)
print('----------------------------------------------------------------------')
print('Prototype report')
print('----------------------------------------------------------------------')
print('Activity detection evaluation:')
print('\n Precision:' + str(precision_ad) +
'\n Recall:' + str(recall_ad) +
'\n F1-score:' + str(fscore_ad) +
'\n Accuracy:' + str(accuracy_ad)
)
print('----------------------------------------------------------------------')
print('Classification evaluation:')
plot_evaluation_report(report)
print(prediction)
if len(cm) == 3:
plot_confusion_matrix(cm)
print('----------------------------------------------------------------------')
print('System evaluation:')
print('\n Precision:' + str(precision) +
'\n Recall:' + str(recall) +
'\n F1-score:' + str(fscore)
)
print('----------------------------------------------------------------------')
|
[
"evaluate.evaluate_classificator",
"evaluate.evaluate_activity_detection",
"evaluate.evaluate_system"
] |
[((855, 887), 'pandas.read_csv', 'pd.read_csv', (['model_normalization'], {}), '(model_normalization)\n', (866, 887), True, 'import pandas as pd\n'), ((954, 982), 'models.Waveform', 'Waveform', ([], {'path': "(path + '.wav')"}), "(path=path + '.wav')\n", (962, 982), False, 'from models import Waveform\n'), ((997, 1027), 'utils.load_csv.load_annotation', 'load_annotation', (["(path + '.csv')"], {}), "(path + '.csv')\n", (1012, 1027), False, 'from utils.load_csv import get_prediction_time_instants, load_annotation\n'), ((1045, 1066), 'app.init_pre_processing', 'init_pre_processing', ([], {}), '()\n', (1064, 1066), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1067, 1103), 'app.init_activity_detection', 'init_activity_detection', ([], {'func_type': '(1)'}), '(func_type=1)\n', (1090, 1103), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1104, 1211), 'app.init_feature_extraction', 'init_feature_extraction', ([], {'func_type': '"""mfcc"""', 'by_pass': '(False)', 'n_mfcc_arg': '(20)', 'norm_file': 'normalization_values'}), "(func_type='mfcc', by_pass=False, n_mfcc_arg=20,\n norm_file=normalization_values)\n", (1127, 1211), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1208, 1262), 'app.init_classificator', 'init_classificator', ([], {'knn_model': 'knn_model', 'by_pass': '(False)'}), '(knn_model=knn_model, by_pass=False)\n', (1226, 1262), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1304, 1327), 'app.main', 'main', (['audio', 'buffer_len'], {}), '(audio, buffer_len)\n', (1308, 1327), False, 'from app import main, init_activity_detection, init_classificator, init_feature_extraction, init_pre_processing\n'), ((1342, 1443), 'utils.load_csv.get_prediction_time_instants', 'get_prediction_time_instants', (["result['ONSET_LOCATIONS']", "result['PREDICTION']", 'audio.sample_rate'], {}), "(result['ONSET_LOCATIONS'], result['PREDICTION'\n ], audio.sample_rate)\n", (1370, 1443), False, 'from utils.load_csv import get_prediction_time_instants, load_annotation\n'), ((1454, 1527), 'utils.plot_audio', 'plot_audio', (['audio.waveform', "result['SIGNAL_PROCESSED']", 'audio.sample_rate'], {}), "(audio.waveform, result['SIGNAL_PROCESSED'], audio.sample_rate)\n", (1464, 1527), False, 'from utils import plot_audio, plot_odf, plot_confusion_matrix, plot_evaluation_report\n'), ((1528, 1683), 'utils.plot_odf', 'plot_odf', (['audio.filename', 'audio.waveform', "result['SIGNAL_PROCESSED']", 'audio.sample_rate', "result['ONSET_LOCATIONS']", "result['HFC']", "result['THRESHOLD']"], {}), "(audio.filename, audio.waveform, result['SIGNAL_PROCESSED'], audio.\n sample_rate, result['ONSET_LOCATIONS'], result['HFC'], result['THRESHOLD'])\n", (1536, 1683), False, 'from utils import plot_audio, plot_odf, plot_confusion_matrix, plot_evaluation_report\n'), ((2145, 2221), 'evaluate.evaluate_activity_detection', 'evaluate_activity_detection', (['groundtruth_activity', "result['ONSET_LOCATIONS']"], {}), "(groundtruth_activity, result['ONSET_LOCATIONS'])\n", (2172, 2221), False, 'from evaluate import evaluate_system, evaluate_activity_detection, evaluate_classificator\n'), ((2314, 2361), 'evaluate.evaluate_classificator', 'evaluate_classificator', (['groundtruth', 'prediction'], {}), '(groundtruth, prediction)\n', (2336, 
2361), False, 'from evaluate import evaluate_system, evaluate_activity_detection, evaluate_classificator\n'), ((2390, 2430), 'evaluate.evaluate_system', 'evaluate_system', (['groundtruth', 'prediction'], {}), '(groundtruth, prediction)\n', (2405, 2430), False, 'from evaluate import evaluate_system, evaluate_activity_detection, evaluate_classificator\n'), ((2944, 2974), 'utils.plot_evaluation_report', 'plot_evaluation_report', (['report'], {}), '(report)\n', (2966, 2974), False, 'from utils import plot_audio, plot_odf, plot_confusion_matrix, plot_evaluation_report\n'), ((3014, 3039), 'utils.plot_confusion_matrix', 'plot_confusion_matrix', (['cm'], {}), '(cm)\n', (3035, 3039), False, 'from utils import plot_audio, plot_odf, plot_confusion_matrix, plot_evaluation_report\n')]
|
from robust_rcf import robust_rcf
import numpy as np
import pandas as pd
from evaluate import evaluate, anomaly_classification_percentile
from sklearn.metrics import accuracy_score
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def test_rrcf_simon(data, sample = 0.1):
# load / prepare data
df = pd.DataFrame(data, columns = ['Timestamp','Year', 'Month', 'Day of Month', 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])
# shuffle values for SIMON training / testing
df_shuffle = df.sample(frac = 1)
simon_features = np.array(df_shuffle['Simon Features'].values.tolist())
labels = (df_shuffle['file'] != 'enron.jsonl').astype(int)
# break into train / test by oldest / newest
train_split = int(0.6 * df.shape[0])
val_split = int(0.3 * df.shape[0] * sample)
simon_train, y_train = simon_features[:train_split], labels[:train_split]
simon_val, y_val = simon_features[train_split:train_split + val_split], labels[train_split:train_split + val_split]
simon_test, y_test = simon_features[train_split + val_split:], labels[train_split + val_split:]
# print anomalous percentage in train / val / test
print('There are {} ({} %) anomalous examples in the train set'.format(sum(y_train), 100 * sum(y_train) / len(y_train)))
print('There are {} ({} %) anomalous examples in the sampled validation set'.format(sum(y_val), 100 * sum(y_val) / len(y_val)))
print('There are {} ({} %) anomalous examples in the test set'.format(sum(y_test), 100 * sum(y_test) / len(y_test)))
# test batch anomaly detection on SIMON features
# initially set num_samples_per_tree based on ratio of anomalies
tree_size = int((df.shape[0] - sum(labels)) / sum(labels) * 2)
num_trees = 200
start_time = time.time()
print('Fitting batch anomaly detection on training set...')
clf = robust_rcf(num_trees, tree_size)
clf.fit_batch(simon_train)
print('Fitting batch anomaly detection took {} seconds'.format(time.time() - start_time))
'''
print('Scoring training set')
start_time = time.time()
anom_score = clf.batch_anomaly_scores()
print('Scoring batch anomaly detection took {} seconds'.format(time.time() - start_time))
# set threshold as % of anomalies in sample
# TODO = add function that can do percentile or z-score
anom_thresh = (len(labels) - sum(labels)) / len(labels) * 100
anom_pred = anomaly_classification_percentile(anom_score, anom_thresh)
print("Training Set Evaluation")
print(evaluate(y_train, anom_pred))
'''
    # eval on validation set
    # anom_thresh is otherwise only defined inside the commented-out block
    # above, so compute it here: the percentage of non-anomalous examples.
    anom_thresh = (len(labels) - sum(labels)) / len(labels) * 100
    print('Scoring validation set')
    start_time = time.time()
val_anom_score = clf.anomaly_score(simon_val)
print('Scoring batch anomaly detection took {} seconds on ({} %) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(val_anom_score, anom_thresh)
print("Validation Set Evaluation")
print(evaluate(y_val, val_anom_pred))
# test streaming anomaly detection on SIMON features (just validation set)
print('Fitting / scoring streaming anomaly detection on validation set...')
start_time = time.time()
stream_anom_scores = clf.stream_anomaly_scores(simon_val, window_size = 1, new_forest=True)
print('Fitting / Scoring streaming anomaly detection took {} seconds on ({}%) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(stream_anom_scores, anom_thresh)
print("Validation Set Evaluation")
print(evaluate(y_val, val_anom_pred))
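# Percentile thresholding in isolation (pure-numpy illustration; the real
# anomaly_classification_percentile lives in this repo's evaluate module):
#
#   scores = np.array([0.10, 0.20, 0.15, 0.90])
#   thresh = np.percentile(scores, 95)          # anom_thresh = 95
#   pred = (scores > thresh).astype(int)        # 1 = anomalous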
def test_rrcf_enron_times(data, sample = 0.1, anom_thresh = 95):
'''
Test batch and streaming anomaly detection on just Enron email time features
'''
# sort Enron emails by timestamp
df = pd.DataFrame(data, columns = ['Timestamp','Year', 'Month', 'Day of Month', 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])
df = df.loc[df['file'] == 'enron.jsonl'].sort_values(by = 'Timestamp')
# convert timestamp column to timestamp difference
df['Timestamp Difference'] = df['Timestamp'].diff()
# drop non-time columns
df.drop(['Timestamp', 'Simon Features', 'file'], axis=1, inplace=True)
#df = df[['Timestamp Difference']]
# cast to np array of float values and remove initial timestamp (nan time difference)
df = df.values.astype(float)[1:]
# test on sample of training / validation data
train_split = int(0.6 * df.shape[0] * sample)
val_split = int(0.3 * df.shape[0] * sample)
enron_train = df[:train_split]
enron_val = df[train_split:train_split + val_split]
plt.hist(enron_train)
plt.show()
plt.hist(enron_val)
plt.show()
# test batch anomaly detection
tree_size = 100
num_trees = 100
start_time = time.time()
print('Fitting batch anomaly detection on training set...')
clf = robust_rcf(num_trees, tree_size)
clf.fit_batch(enron_train)
print('Fitting batch anomaly detection took {} seconds'.format(time.time() - start_time))
print('Scoring training set')
start_time = time.time()
anom_score = clf.anomaly_score(enron_train)
print('Scoring batch anomaly detection took {} seconds'.format(time.time() - start_time))
# set "true" anomalies just based on frequency
anom_pred = anomaly_classification_percentile(anom_score, anom_thresh)
anom_true = (enron_train[:,-1] < np.percentile(enron_train[:,-1], 100 - anom_thresh)).astype(int)
print("Training Set Evaluation")
print(evaluate(anom_true, anom_pred))
# eval on validation set
print('Scoring validation set')
start_time = time.time()
val_anom_score = clf.anomaly_score(enron_val)
print('Scoring batch anomaly detection took {} seconds on ({} %) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(val_anom_score, anom_thresh)
anom_true = (enron_val[:,-1] < np.percentile(enron_val[:,-1],100 - anom_thresh)).astype(int)
print("Validation Set Evaluation")
print(evaluate(anom_true, val_anom_pred))
# graph results
colors = ('blue', 'red')
targets = ('non-anomalous', 'anomalous')
enron_scaled = MinMaxScaler().fit_transform(enron_train[:,-1].reshape(-1,1)).reshape(-1,)
pred_indices = (np.where(val_anom_pred == 0), np.where(val_anom_pred == 1))
pred_data = (enron_scaled[np.where(val_anom_pred == 0)[0]], enron_scaled[np.where(val_anom_pred == 1)[0]])
plt.subplot(2,1,1)
for index, dat, color, target in zip(pred_indices, pred_data, colors, targets):
plt.scatter(index[0], dat, c = color, label = target, s=10)
plt.legend()
plt.title('Batch Anomaly Detection on Enron Time Series Data')
plt.show()
# test streaming anomaly detection on Enron time features (just validation set)
print('Fitting / scoring streaming anomaly detection on validation set...')
start_time = time.time()
stream_anom_scores = clf.stream_anomaly_scores(enron_val, window_size = 1, new_forest=True)
print('Fitting / Scoring streaming anomaly detection took {} seconds on ({} %) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(stream_anom_scores, anom_thresh)
print("Validation Set Evaluation")
print(evaluate(anom_true, val_anom_pred))
# graph results
colors = ('blue', 'red')
targets = ('non-anomalous', 'anomalous')
pred_indices = (np.where(val_anom_pred == 0), np.where(val_anom_pred == 1))
pred_data = (enron_scaled[np.where(val_anom_pred == 0)[0]], enron_scaled[np.where(val_anom_pred == 1)[0]])
plt.subplot(2,1,2)
for index, dat, color, target in zip(pred_indices, pred_data, colors, targets):
plt.scatter(index[0], dat, c = color, label = target, s=10)
plt.legend()
plt.title('Streaming Anomaly Detection on Enron Time Series Data')
plt.show()
def test_rrcf_enron_jpl_times(data, sample = 0.1):
'''
Test batch and streaming anomaly detection on JPL Abuse emails superimposed on Enron
email distribution over time
'''
# graph JPL / Nigerian timestamps vs Enron timestamps
df = pd.DataFrame(data, columns = ['Timestamp','Year', 'Month', 'Day of Month', 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])
plt.hist(df.loc[df['file'] == 'enron.jsonl']['Timestamp'], label = 'non-anomalous')
plt.hist(df.loc[df['file'] != 'enron.jsonl'][df['file'] != 'nigerian.jsonl']['Timestamp'], label = 'anomalous (JPL)')
plt.legend()
plt.title('Comparison of Enron and JPL Timestamps Data')
plt.show()
    # TODO: resample / superimpose the JPL timestamps on the Enron distribution
# test on sample of training / validation data
# evaluate
def optimize_tree_size(X_val, y_val, sample = 0.1, num_trees = 100,
min = 1, max = 2048, step_size = 1, patience = None):
'''
Find tree_size that produces highest validation set accuracy for fixed num_trees
using batch anomaly detection
Parameters:
X_val validation data features
y_val validation data labels
sample sample of data to take for evaluation
num_trees fixed num_trees HP
min min size for tree_size HP
max optional max size for tree_size HP
step_size step_size for incrementing tree_size HP
patience stop evaluation if val accuracy hasn't improved for this many steps
'''
acc = 0
best_acc = acc
best_index = 0
anom_thresh = (len(y_val) - sum(y_val)) / len(y_val) * 100
    sample = int(sample * X_val.shape[0])
patience_count = 0
for i in range(min, max + step_size, step_size):
        if patience is not None and patience_count >= patience:
            break
clf = robust_rcf(num_trees, i)
clf.fit_batch(X_val[:sample])
anom_score = clf.batch_anomaly_scores()
anom_pred = anomaly_classification_percentile(anom_score, anom_thresh)
acc = accuracy_score(y_val[:sample], anom_pred)
if acc > best_acc:
best_acc = acc
best_index = i
else:
patience_count += 1
print('The best accuracy was {} with tree size {}'.format(best_acc, best_index))
return best_index
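# Hypothetical usage (argument values are illustrative):
#
#   best_tree_size = optimize_tree_size(X_val, y_val, sample=0.1,
#                                       num_trees=100, min=64, max=512,
#                                       step_size=64, patience=5)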
def optimize_num_trees(X_val, y_val, sample = 0.1, tree_size = 256,
min = 50, max=1000, step_size = 1, patience = None):
'''
Find num_trees that produces highest validation set accuracy for fixed tree_size
using batch anomaly detection
Parameters:
X_val validation data features
y_val validation data labels
sample sample of data to take for evaluation
tree_size fixed tree_size HP
min min size for num_trees HP
max optional max size for num_trees HP
step_size step_size for incrementing num_trees HP
patience stop evaluation if val accuracy hasn't improved for this many steps
'''
acc = 0
best_acc = acc
best_index = 0
anom_thresh = (len(y_val) - sum(y_val)) / len(y_val) * 100
    sample = int(sample * X_val.shape[0])
patience_count = 0
for i in range(min, max + step_size, step_size):
        if patience is not None and patience_count >= patience:
            break
clf = robust_rcf(i, tree_size)
clf.fit_batch(X_val[:sample])
anom_score = clf.batch_anomaly_scores()
anom_pred = anomaly_classification_percentile(anom_score, anom_thresh)
acc = accuracy_score(y_val[:sample], anom_pred)
if acc > best_acc:
best_acc = acc
best_index = i
else:
patience_count += 1
print('The best accuracy was {} with number of trees {}'.format(best_acc, best_index))
return best_index
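# The two searches compose naturally: fix num_trees and search tree_size
# first, then search num_trees with the best tree_size found.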
# main method for testing methods
if __name__ == '__main__':
datapath = 'all_emails_parsed.npz'
data = np.load(datapath)['all_emails']
#test_rrcf_simon(data, sample = .1)
test_rrcf_enron_times(data, sample = .05)
|
[
"evaluate.evaluate",
"evaluate.anomaly_classification_percentile"
] |
[((349, 497), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Timestamp', 'Year', 'Month', 'Day of Month', 'Day of Week', 'Hour',\n 'Minute', 'Seconds', 'Simon Features', 'file']"}), "(data, columns=['Timestamp', 'Year', 'Month', 'Day of Month',\n 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])\n", (361, 497), True, 'import pandas as pd\n'), ((1820, 1831), 'time.time', 'time.time', ([], {}), '()\n', (1829, 1831), False, 'import time\n'), ((1906, 1938), 'robust_rcf.robust_rcf', 'robust_rcf', (['num_trees', 'tree_size'], {}), '(num_trees, tree_size)\n', (1916, 1938), False, 'from robust_rcf import robust_rcf\n'), ((2690, 2701), 'time.time', 'time.time', ([], {}), '()\n', (2699, 2701), False, 'import time\n'), ((2912, 2974), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['val_anom_score', 'anom_thresh'], {}), '(val_anom_score, anom_thresh)\n', (2945, 2974), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((3237, 3248), 'time.time', 'time.time', ([], {}), '()\n', (3246, 3248), False, 'import time\n'), ((3518, 3584), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['stream_anom_scores', 'anom_thresh'], {}), '(stream_anom_scores, anom_thresh)\n', (3551, 3584), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((3879, 4027), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Timestamp', 'Year', 'Month', 'Day of Month', 'Day of Week', 'Hour',\n 'Minute', 'Seconds', 'Simon Features', 'file']"}), "(data, columns=['Timestamp', 'Year', 'Month', 'Day of Month',\n 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])\n", (3891, 4027), True, 'import pandas as pd\n'), ((4728, 4749), 'matplotlib.pyplot.hist', 'plt.hist', (['enron_train'], {}), '(enron_train)\n', (4736, 4749), True, 'import matplotlib.pyplot as plt\n'), ((4754, 4764), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4762, 4764), True, 'import matplotlib.pyplot as plt\n'), ((4769, 4788), 'matplotlib.pyplot.hist', 'plt.hist', (['enron_val'], {}), '(enron_val)\n', (4777, 4788), True, 'import matplotlib.pyplot as plt\n'), ((4793, 4803), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4801, 4803), True, 'import matplotlib.pyplot as plt\n'), ((4901, 4912), 'time.time', 'time.time', ([], {}), '()\n', (4910, 4912), False, 'import time\n'), ((4987, 5019), 'robust_rcf.robust_rcf', 'robust_rcf', (['num_trees', 'tree_size'], {}), '(num_trees, tree_size)\n', (4997, 5019), False, 'from robust_rcf import robust_rcf\n'), ((5196, 5207), 'time.time', 'time.time', ([], {}), '()\n', (5205, 5207), False, 'import time\n'), ((5418, 5476), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['anom_score', 'anom_thresh'], {}), '(anom_score, anom_thresh)\n', (5451, 5476), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((5741, 5752), 'time.time', 'time.time', ([], {}), '()\n', (5750, 5752), False, 'import time\n'), ((5963, 6025), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['val_anom_score', 'anom_thresh'], {}), '(val_anom_score, anom_thresh)\n', (5996, 6025), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((6592, 6612), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6603, 6612), True, 'import matplotlib.pyplot as plt\n'), ((6767, 6779), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{}), '()\n', (6777, 6779), True, 'import matplotlib.pyplot as plt\n'), ((6784, 6846), 'matplotlib.pyplot.title', 'plt.title', (['"""Batch Anomaly Detection on Enron Time Series Data"""'], {}), "('Batch Anomaly Detection on Enron Time Series Data')\n", (6793, 6846), True, 'import matplotlib.pyplot as plt\n'), ((6851, 6861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6859, 6861), True, 'import matplotlib.pyplot as plt\n'), ((7044, 7055), 'time.time', 'time.time', ([], {}), '()\n', (7053, 7055), False, 'import time\n'), ((7326, 7392), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['stream_anom_scores', 'anom_thresh'], {}), '(stream_anom_scores, anom_thresh)\n', (7359, 7392), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((7866, 7886), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (7877, 7886), True, 'import matplotlib.pyplot as plt\n'), ((8041, 8053), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8051, 8053), True, 'import matplotlib.pyplot as plt\n'), ((8058, 8120), 'matplotlib.pyplot.title', 'plt.title', (['"""Batch Anomaly Detection on Enron Time Series Data"""'], {}), "('Batch Anomaly Detection on Enron Time Series Data')\n", (8067, 8120), True, 'import matplotlib.pyplot as plt\n'), ((8125, 8135), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8133, 8135), True, 'import matplotlib.pyplot as plt\n'), ((8318, 8329), 'time.time', 'time.time', ([], {}), '()\n', (8327, 8329), False, 'import time\n'), ((8600, 8666), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['stream_anom_scores', 'anom_thresh'], {}), '(stream_anom_scores, anom_thresh)\n', (8633, 8666), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((9042, 9062), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (9053, 9062), True, 'import matplotlib.pyplot as plt\n'), ((9217, 9229), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9227, 9229), True, 'import matplotlib.pyplot as plt\n'), ((9234, 9300), 'matplotlib.pyplot.title', 'plt.title', (['"""Streaming Anomaly Detection on Enron Time Series Data"""'], {}), "('Streaming Anomaly Detection on Enron Time Series Data')\n", (9243, 9300), True, 'import matplotlib.pyplot as plt\n'), ((9305, 9315), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9313, 9315), True, 'import matplotlib.pyplot as plt\n'), ((9582, 9730), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Timestamp', 'Year', 'Month', 'Day of Month', 'Day of Week', 'Hour',\n 'Minute', 'Seconds', 'Simon Features', 'file']"}), "(data, columns=['Timestamp', 'Year', 'Month', 'Day of Month',\n 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])\n", (9594, 9730), True, 'import pandas as pd\n'), ((9732, 9818), 'matplotlib.pyplot.hist', 'plt.hist', (["df.loc[df['file'] == 'enron.jsonl']['Timestamp']"], {'label': '"""non-anomalous"""'}), "(df.loc[df['file'] == 'enron.jsonl']['Timestamp'], label=\n 'non-anomalous')\n", (9740, 9818), True, 'import matplotlib.pyplot as plt\n'), ((9820, 9940), 'matplotlib.pyplot.hist', 'plt.hist', (["df.loc[df['file'] != 'enron.jsonl'][df['file'] != 'nigerian.jsonl']['Timestamp'\n ]"], {'label': '"""anomalous (JPL)"""'}), "(df.loc[df['file'] != 'enron.jsonl'][df['file'] != 'nigerian.jsonl'\n ]['Timestamp'], label='anomalous (JPL)')\n", (9828, 9940), True, 'import matplotlib.pyplot as plt\n'), ((9942, 9954), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9952, 9954), True, 'import matplotlib.pyplot as plt\n'), ((9959, 10015), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison of Enron and JPL Timestamps Data"""'], {}), "('Comparison of Enron and JPL Timestamps Data')\n", (9968, 10015), True, 'import matplotlib.pyplot as plt\n'), ((10020, 10030), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10028, 10030), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3054), 'evaluate.evaluate', 'evaluate', (['y_val', 'val_anom_pred'], {}), '(y_val, val_anom_pred)\n', (3032, 3054), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((3634, 3664), 'evaluate.evaluate', 'evaluate', (['y_val', 'val_anom_pred'], {}), '(y_val, val_anom_pred)\n', (3642, 3664), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((5626, 5656), 'evaluate.evaluate', 'evaluate', (['anom_true', 'anom_pred'], {}), '(anom_true, anom_pred)\n', (5634, 5656), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((6172, 6206), 'evaluate.evaluate', 'evaluate', (['anom_true', 'val_anom_pred'], {}), '(anom_true, val_anom_pred)\n', (6180, 6206), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((6417, 6445), 'numpy.where', 'np.where', (['(val_anom_pred == 0)'], {}), '(val_anom_pred == 0)\n', (6425, 6445), True, 'import numpy as np\n'), ((6447, 6475), 'numpy.where', 'np.where', (['(val_anom_pred == 1)'], {}), '(val_anom_pred == 1)\n', (6455, 6475), True, 'import numpy as np\n'), ((6703, 6758), 'matplotlib.pyplot.scatter', 'plt.scatter', (['index[0]', 'dat'], {'c': 'color', 'label': 'target', 's': '(10)'}), '(index[0], dat, c=color, label=target, s=10)\n', (6714, 6758), True, 'import matplotlib.pyplot as plt\n'), ((7442, 7476), 'evaluate.evaluate', 'evaluate', (['anom_true', 'val_anom_pred'], {}), '(anom_true, val_anom_pred)\n', (7450, 7476), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((7691, 7719), 'numpy.where', 'np.where', (['(val_anom_pred == 0)'], {}), '(val_anom_pred == 0)\n', (7699, 7719), True, 'import numpy as np\n'), ((7721, 7749), 'numpy.where', 'np.where', (['(val_anom_pred == 1)'], {}), '(val_anom_pred == 1)\n', (7729, 7749), True, 'import numpy as np\n'), ((7977, 8032), 'matplotlib.pyplot.scatter', 'plt.scatter', (['index[0]', 'dat'], {'c': 'color', 'label': 'target', 's': '(10)'}), '(index[0], dat, c=color, label=target, s=10)\n', (7988, 8032), True, 'import matplotlib.pyplot as plt\n'), ((8716, 8750), 'evaluate.evaluate', 'evaluate', (['anom_true', 'val_anom_pred'], {}), '(anom_true, val_anom_pred)\n', (8724, 8750), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((8867, 8895), 'numpy.where', 'np.where', (['(val_anom_pred == 0)'], {}), '(val_anom_pred == 0)\n', (8875, 8895), True, 'import numpy as np\n'), ((8897, 8925), 'numpy.where', 'np.where', (['(val_anom_pred == 1)'], {}), '(val_anom_pred == 1)\n', (8905, 8925), True, 'import numpy as np\n'), ((9153, 9208), 'matplotlib.pyplot.scatter', 'plt.scatter', (['index[0]', 'dat'], {'c': 'color', 'label': 'target', 's': '(10)'}), '(index[0], dat, c=color, label=target, s=10)\n', (9164, 9208), True, 'import matplotlib.pyplot as plt\n'), ((11241, 11265), 'robust_rcf.robust_rcf', 'robust_rcf', (['num_trees', 'i'], {}), '(num_trees, i)\n', (11251, 11265), False, 'from robust_rcf import robust_rcf\n'), ((11372, 11430), 'evaluate.anomaly_classification_percentile', 
'anomaly_classification_percentile', (['anom_score', 'anom_thresh'], {}), '(anom_score, anom_thresh)\n', (11405, 11430), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((11445, 11486), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_val[:sample]', 'anom_pred'], {}), '(y_val[:sample], anom_pred)\n', (11459, 11486), False, 'from sklearn.metrics import accuracy_score\n'), ((12826, 12850), 'robust_rcf.robust_rcf', 'robust_rcf', (['i', 'tree_size'], {}), '(i, tree_size)\n', (12836, 12850), False, 'from robust_rcf import robust_rcf\n'), ((12957, 13015), 'evaluate.anomaly_classification_percentile', 'anomaly_classification_percentile', (['anom_score', 'anom_thresh'], {}), '(anom_score, anom_thresh)\n', (12990, 13015), False, 'from evaluate import evaluate, anomaly_classification_percentile\n'), ((13030, 13071), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_val[:sample]', 'anom_pred'], {}), '(y_val[:sample], anom_pred)\n', (13044, 13071), False, 'from sklearn.metrics import accuracy_score\n'), ((13424, 13441), 'numpy.load', 'np.load', (['datapath'], {}), '(datapath)\n', (13431, 13441), True, 'import numpy as np\n'), ((2037, 2048), 'time.time', 'time.time', ([], {}), '()\n', (2046, 2048), False, 'import time\n'), ((2851, 2862), 'time.time', 'time.time', ([], {}), '()\n', (2860, 2862), False, 'import time\n'), ((3457, 3468), 'time.time', 'time.time', ([], {}), '()\n', (3466, 3468), False, 'import time\n'), ((5118, 5129), 'time.time', 'time.time', ([], {}), '()\n', (5127, 5129), False, 'import time\n'), ((5323, 5334), 'time.time', 'time.time', ([], {}), '()\n', (5332, 5334), False, 'import time\n'), ((5514, 5566), 'numpy.percentile', 'np.percentile', (['enron_train[:, -1]', '(100 - anom_thresh)'], {}), '(enron_train[:, -1], 100 - anom_thresh)\n', (5527, 5566), True, 'import numpy as np\n'), ((5902, 5913), 'time.time', 'time.time', ([], {}), '()\n', (5911, 5913), False, 'import time\n'), ((6061, 6111), 'numpy.percentile', 'np.percentile', (['enron_val[:, -1]', '(100 - anom_thresh)'], {}), '(enron_val[:, -1], 100 - anom_thresh)\n', (6074, 6111), True, 'import numpy as np\n'), ((6507, 6535), 'numpy.where', 'np.where', (['(val_anom_pred == 0)'], {}), '(val_anom_pred == 0)\n', (6515, 6535), True, 'import numpy as np\n'), ((6554, 6582), 'numpy.where', 'np.where', (['(val_anom_pred == 1)'], {}), '(val_anom_pred == 1)\n', (6562, 6582), True, 'import numpy as np\n'), ((7265, 7276), 'time.time', 'time.time', ([], {}), '()\n', (7274, 7276), False, 'import time\n'), ((7781, 7809), 'numpy.where', 'np.where', (['(val_anom_pred == 0)'], {}), '(val_anom_pred == 0)\n', (7789, 7809), True, 'import numpy as np\n'), ((7828, 7856), 'numpy.where', 'np.where', (['(val_anom_pred == 1)'], {}), '(val_anom_pred == 1)\n', (7836, 7856), True, 'import numpy as np\n'), ((8539, 8550), 'time.time', 'time.time', ([], {}), '()\n', (8548, 8550), False, 'import time\n'), ((8957, 8985), 'numpy.where', 'np.where', (['(val_anom_pred == 0)'], {}), '(val_anom_pred == 0)\n', (8965, 8985), True, 'import numpy as np\n'), ((9004, 9032), 'numpy.where', 'np.where', (['(val_anom_pred == 1)'], {}), '(val_anom_pred == 1)\n', (9012, 9032), True, 'import numpy as np\n'), ((6322, 6336), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (6334, 6336), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((7596, 7610), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (7608, 7610), False, 'from sklearn.preprocessing import MinMaxScaler\n')]
|
"""
This runs the final configuration as reported in the paper.
"""
from config import base
import evaluate as e
config = base.get_config()
output_path = 'results/final.output.txt'
print("Running configuration: {}".format(config))
predictions = e.evaluate(config)
test_data = e.load_data(config['test_filepath'])
e.output(predictions, test_data, config['classes'],
output_path)
print("Saved output to {}".format(output_path))
|
[
"evaluate.evaluate",
"evaluate.load_data",
"evaluate.output"
] |
[((124, 141), 'config.base.get_config', 'base.get_config', ([], {}), '()\n', (139, 141), False, 'from config import base\n'), ((248, 266), 'evaluate.evaluate', 'e.evaluate', (['config'], {}), '(config)\n', (258, 266), True, 'import evaluate as e\n'), ((279, 315), 'evaluate.load_data', 'e.load_data', (["config['test_filepath']"], {}), "(config['test_filepath'])\n", (290, 315), True, 'import evaluate as e\n'), ((316, 380), 'evaluate.output', 'e.output', (['predictions', 'test_data', "config['classes']", 'output_path'], {}), "(predictions, test_data, config['classes'], output_path)\n", (324, 380), True, 'import evaluate as e\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions.normal import Normal
from torch.utils.data import DataLoader
import numpy as np
from datetime import datetime
import time
from pathlib import Path
import argparse
import math
from collections import defaultdict
from spline_cconv import ContinuousConv1D
import time_series
import flow
from ema import EMA
from utils import count_parameters, mkdir, make_scheduler
from tracker import Tracker
from evaluate import Evaluator
from layers import (
Classifier,
GridDecoder,
GridEncoder,
Decoder,
)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
class Encoder(nn.Module):
def __init__(self, cconv, latent_size, channels, flow_depth=2):
super().__init__()
self.cconv = cconv
if flow_depth > 0:
hidden_size = latent_size * 2
flow_layers = [flow.InverseAutoregressiveFlow(
latent_size, hidden_size, latent_size)
for _ in range(flow_depth)]
flow_layers.append(flow.Reverse(latent_size))
self.q_z_flow = flow.FlowSequential(*flow_layers)
self.enc_chunk = 3
else:
self.q_z_flow = None
self.enc_chunk = 2
self.grid_encoder = GridEncoder(channels, latent_size * self.enc_chunk)
def forward(self, cconv_graph, batch_size, iw_samples=3):
x = self.cconv(*cconv_graph, batch_size)
grid_enc = self.grid_encoder(x).chunk(self.enc_chunk, dim=1)
mu, logvar = grid_enc[:2]
        std = F.softplus(logvar)  # softplus parametrization of the std (despite the name, not exp(0.5 * logvar))
qz_x = Normal(mu, std)
z_0 = qz_x.rsample([iw_samples])
log_q_z_0 = qz_x.log_prob(z_0)
if self.q_z_flow:
z_T, log_q_z_flow = self.q_z_flow(z_0, context=grid_enc[2])
log_q_z = (log_q_z_0 + log_q_z_flow).sum(-1)
else:
z_T, log_q_z = z_0, log_q_z_0.sum(-1)
return z_T, log_q_z
def masked_loss(loss_fn, pred, data, mask):
# return (loss_fn(pred * mask, data * mask,
# reduction='none') * mask).mean()
# Expand data shape from (batch_size, d) to (iw_samples, batch_size, d)
return loss_fn(pred, data.expand_as(pred), reduction='none') * mask
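# Shape sketch (added comment, illustrative): with iw_samples=3, pred has
# shape (3, batch_size, C, L) while data is (batch_size, C, L); expand_as
# broadcasts data across the leading importance-sample axis, and multiplying
# by mask zeroes out the unobserved entries before any reduction.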
class PVAE(nn.Module):
def __init__(self, encoder, decoder, classifier, sigma=.2, cls_weight=100):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.classifier = classifier
self.sigma = sigma
self.cls_weight = cls_weight
def forward(self, data, time, mask, y, cconv_graph, iw_samples=3,
ts_lambda=1, kl_lambda=1):
batch_size = len(data)
z_T, log_q_z = self.encoder(cconv_graph, batch_size, iw_samples)
pz = Normal(torch.zeros_like(z_T), torch.ones_like(z_T))
log_p_z = pz.log_prob(z_T).sum(-1)
# kl_loss: log q(z|x) - log p(z)
kl_loss = log_q_z - log_p_z
var2 = 2 * self.sigma**2
# half_log2pivar: log(2 * pi * sigma^2) / 2
half_log2pivar = .5 * math.log(math.pi * var2)
# Multivariate Gaussian log-likelihood:
# -D/2 * log(2*pi*sigma^2) - 1/2 \sum_{i=1}^D (x_i - mu_i)^2 / sigma^2
def neg_gaussian_logp(pred, data, mask=None):
se = F.mse_loss(pred, data.expand_as(pred), reduction='none')
if mask is None:
return se / var2 + half_log2pivar
return (se / var2 + half_log2pivar) * mask
# Reshape z to accommodate modules with strict input shape
# requirements such as convolutional layers.
# Expected shape of x_recon: (iw_samples * batch_size, C, L)
z_flat = z_T.view(-1, *z_T.shape[2:])
x_recon = self.decoder(
z_flat,
time.repeat((iw_samples, 1, 1)),
mask.repeat((iw_samples, 1, 1)))
# Gaussian noise for time series
# data shape :(batch_size, C, L)
# x_recon shape: (iw_samples * batch_size, C, L)
x_recon = x_recon.view(iw_samples, *data.shape)
neg_logp = neg_gaussian_logp(x_recon, data, mask)
# neg_logp: -log p(x|z)
neg_logp = neg_logp.sum((-1, -2))
y_logit = self.classifier(z_flat).view(iw_samples, -1)
# cls_loss: -log p(y|z)
cls_loss = F.binary_cross_entropy_with_logits(
y_logit, y.expand_as(y_logit), reduction='none')
# elbo_x = log p(x|z) + log p(z) - log q(z|x)
elbo_x = -(neg_logp * ts_lambda + kl_loss * kl_lambda)
with torch.no_grad():
is_weight = F.softmax(elbo_x, 0)
# IWAE loss: -log E[p(x|z) p(z) / q(z|x)]
# Here we ignore the constant shift of -log(k_samples)
loss_x = -elbo_x.logsumexp(0).mean()
loss_y = (is_weight * cls_loss).sum(0).mean()
loss = loss_x + loss_y * self.cls_weight
# For debugging
x_se = masked_loss(F.mse_loss, x_recon, data, mask)
mse = x_se.sum((-1, -2)) / mask.sum((-1, -2)).clamp(min=1)
CE = (is_weight * cls_loss).sum(0).mean().item()
loss_breakdown = {
'loss': loss.item(),
'reconst.': neg_logp.mean().item() * ts_lambda,
'MSE': mse.mean().item(),
'KL': kl_loss.mean().item() * kl_lambda,
'CE': CE,
'classif.': CE * self.cls_weight,
}
return loss, z_T, elbo_x, loss_breakdown
def predict(self, data, time, mask, cconv_graph, iw_samples=50):
dummy_y = data.new_zeros(len(data))
_, z, elbo, _ = self(
data, time, mask, dummy_y, cconv_graph, iw_samples)
z_flat = z.view(-1, *z.shape[2:])
pred_logit = self.classifier(z_flat).view(iw_samples, -1)
is_weight = F.softmax(elbo, 0)
# Importance reweighted predictive probability
# p(y|x) =~ E_{q_IW(z|x)}[p(y|z)]
py_z = torch.sigmoid(pred_logit)
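        # In symbols (added sketch): p(y=1|x) ~= sum_k w_k * sigmoid(f(z_k)),
        # where w_k = softmax_k(ELBO_k) are the self-normalized importance
        # weights computed above and f is the classifier head.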
expected_py_z = (is_weight * py_z).sum(0)
return expected_py_z
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data', default='mimic3.npz',
help='data file')
parser.add_argument('--seed', type=int, default=None,
help='random seed. Randomly set if not specified.')
# training options
parser.add_argument('--nz', type=int, default=32,
help='dimension of latent variable')
parser.add_argument('--epoch', type=int, default=200,
help='number of training epochs')
parser.add_argument('--batch-size', type=int, default=64,
help='batch size')
# Use smaller test batch size to accommodate more importance samples
parser.add_argument('--test-batch-size', type=int, default=32,
help='batch size for validation and test set')
parser.add_argument('--train-k', type=int, default=8,
help='number of importance weights for training')
parser.add_argument('--test-k', type=int, default=50,
help='number of importance weights for evaluation')
parser.add_argument('--flow', type=int, default=2,
help='number of IAF layers')
parser.add_argument('--lr', type=float, default=2e-4,
help='global learning rate')
parser.add_argument('--enc-lr', type=float, default=1e-4,
help='encoder learning rate')
parser.add_argument('--dec-lr', type=float, default=1e-4,
help='decoder learning rate')
parser.add_argument('--min-lr', type=float, default=-1,
help='min learning rate for LR scheduler. '
'-1 to disable annealing')
parser.add_argument('--wd', type=float, default=1e-3,
help='weight decay')
parser.add_argument('--overlap', type=float, default=.5,
help='kernel overlap')
parser.add_argument('--cls', type=float, default=200,
help='classification weight')
parser.add_argument('--clsdep', type=int, default=1,
help='number of layers for classifier')
parser.add_argument('--ts', type=float, default=1,
help='log-likelihood weight for ELBO')
parser.add_argument('--kl', type=float, default=.1,
help='KL weight for ELBO')
parser.add_argument('--eval-interval', type=int, default=1,
help='AUC evaluation interval. '
'0 to disable evaluation.')
parser.add_argument('--save-interval', type=int, default=0,
help='interval to save models. 0 to disable saving.')
parser.add_argument('--prefix', default='pvae',
help='prefix of output directory')
parser.add_argument('--comp', type=int, default=7,
help='continuous convolution kernel size')
parser.add_argument('--sigma', type=float, default=.2,
help='standard deviation for Gaussian likelihood')
parser.add_argument('--dec-ch', default='8-16-16',
help='decoder architecture')
parser.add_argument('--enc-ch', default='64-32-32-16',
help='encoder architecture')
parser.add_argument('--rescale', dest='rescale', action='store_const',
const=True, default=True,
help='if set, rescale time to [-1, 1]')
parser.add_argument('--no-rescale', dest='rescale', action='store_const',
const=False)
parser.add_argument('--cconvnorm', dest='cconv_norm',
action='store_const', const=True, default=True,
help='if set, normalize continuous convolutional '
'layer using mean pooling')
parser.add_argument('--no-cconvnorm', dest='cconv_norm',
action='store_const', const=False)
parser.add_argument('--cconv-ref', type=int, default=98,
help='number of evenly-spaced reference locations '
'for continuous convolutional layer')
parser.add_argument('--dec-ref', type=int, default=128,
help='number of evenly-spaced reference locations '
'for decoder')
parser.add_argument('--ema', dest='ema', type=int, default=0,
help='start epoch of exponential moving average '
'(EMA). -1 to disable EMA')
parser.add_argument('--ema-decay', type=float, default=.9999,
help='EMA decay')
args = parser.parse_args()
nz = args.nz
epochs = args.epoch
eval_interval = args.eval_interval
save_interval = args.save_interval
if args.seed is None:
rnd = np.random.RandomState(None)
random_seed = rnd.randint(np.iinfo(np.uint32).max)
else:
random_seed = args.seed
rnd = np.random.RandomState(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
max_time = 5
cconv_ref = args.cconv_ref
overlap = args.overlap
train_dataset, val_dataset, test_dataset = time_series.split_data(
args.data, rnd, max_time, cconv_ref, overlap, device, args.rescale)
train_loader = DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
drop_last=True, collate_fn=train_dataset.collate_fn)
n_train_batch = len(train_loader)
val_loader = DataLoader(
val_dataset, batch_size=args.test_batch_size, shuffle=False,
collate_fn=val_dataset.collate_fn)
test_loader = DataLoader(
test_dataset, batch_size=args.test_batch_size, shuffle=False,
collate_fn=test_dataset.collate_fn)
in_channels, seq_len = train_dataset.data.shape[1:]
dec_channels = [int(c) for c in args.dec_ch.split('-')] + [in_channels]
enc_channels = [int(c) for c in args.enc_ch.split('-')]
out_channels = enc_channels[0]
squash = torch.sigmoid
if args.rescale:
squash = torch.tanh
dec_ch_up = 2**(len(dec_channels) - 2)
    assert args.dec_ref % dec_ch_up == 0, (
        f'--dec-ref={args.dec_ref} is not divisible by {dec_ch_up}.')
dec_len0 = args.dec_ref // dec_ch_up
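    # Worked example (added comment): with the defaults --dec-ref=128 and
    # --dec-ch='8-16-16' (plus the data channels appended above),
    # len(dec_channels) == 4, so dec_ch_up = 2**2 = 4 and dec_len0 = 128 // 4 = 32.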
grid_decoder = GridDecoder(nz, dec_channels, dec_len0, squash)
decoder = Decoder(
grid_decoder, max_time=max_time, dec_ref=args.dec_ref).to(device)
cconv = ContinuousConv1D(in_channels, out_channels, max_time, cconv_ref,
overlap_rate=overlap, kernel_size=args.comp,
norm=args.cconv_norm).to(device)
encoder = Encoder(cconv, nz, enc_channels, args.flow).to(device)
classifier = Classifier(nz, args.clsdep).to(device)
pvae = PVAE(
encoder, decoder, classifier, args.sigma, args.cls).to(device)
ema = None
if args.ema >= 0:
ema = EMA(pvae, args.ema_decay, args.ema)
other_params = [param for name, param in pvae.named_parameters()
if not (name.startswith('decoder.grid_decoder')
or name.startswith('encoder.grid_encoder'))]
params = [
{'params': decoder.grid_decoder.parameters(), 'lr': args.dec_lr},
{'params': encoder.grid_encoder.parameters(), 'lr': args.enc_lr},
{'params': other_params},
]
optimizer = optim.Adam(
params, lr=args.lr, weight_decay=args.wd)
scheduler = make_scheduler(optimizer, args.lr, args.min_lr, epochs)
path = '{}_{}'.format(
args.prefix, datetime.now().strftime('%m%d.%H%M%S'))
output_dir = Path('results') / 'mimic3-pvae' / path
print(output_dir)
log_dir = mkdir(output_dir / 'log')
model_dir = mkdir(output_dir / 'model')
start_epoch = 0
with (log_dir / 'seed.txt').open('w') as f:
print(random_seed, file=f)
with (log_dir / 'gpu.txt').open('a') as f:
print(torch.cuda.device_count(), start_epoch, file=f)
with (log_dir / 'args.txt').open('w') as f:
for key, val in sorted(vars(args).items()):
print(f'{key}: {val}', file=f)
with (log_dir / 'params.txt').open('w') as f:
def print_params_count(module, name):
try: # sum counts if module is a list
params_count = sum(count_parameters(m) for m in module)
except TypeError:
params_count = count_parameters(module)
print(f'{name} {params_count}', file=f)
print_params_count(grid_decoder, 'grid_decoder')
print_params_count(decoder, 'decoder')
print_params_count(cconv, 'cconv')
print_params_count(encoder, 'encoder')
print_params_count(classifier, 'classifier')
print_params_count(pvae, 'pvae')
print_params_count(pvae, 'total')
tracker = Tracker(log_dir, n_train_batch)
evaluator = Evaluator(pvae, val_loader, test_loader, log_dir,
eval_args={'iw_samples': args.test_k})
start = time.time()
epoch_start = start
for epoch in range(start_epoch, epochs):
loss_breakdown = defaultdict(float)
epoch_start = time.time()
for (val, idx, mask, y, _, cconv_graph) in train_loader:
optimizer.zero_grad()
loss, _, _, loss_info = pvae(
val, idx, mask, y, cconv_graph, args.train_k, args.ts, args.kl)
loss.backward()
optimizer.step()
if ema:
ema.update()
for loss_name, loss_val in loss_info.items():
loss_breakdown[loss_name] += loss_val
if scheduler:
scheduler.step()
cur_time = time.time()
tracker.log(
epoch, loss_breakdown, cur_time - epoch_start, cur_time - start)
if eval_interval > 0 and (epoch + 1) % eval_interval == 0:
if ema:
ema.apply()
evaluator.evaluate(epoch)
ema.restore()
else:
evaluator.evaluate(epoch)
model_dict = {
'pvae': pvae.state_dict(),
'ema': ema.state_dict() if ema else None,
'epoch': epoch + 1,
'args': args,
}
torch.save(model_dict, str(log_dir / 'model.pth'))
if save_interval > 0 and (epoch + 1) % save_interval == 0:
torch.save(model_dict, str(model_dir / f'{epoch:04d}.pth'))
print(output_dir)
if __name__ == '__main__':
main()
|
[
"evaluate.Evaluator"
] |
[((640, 665), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (663, 665), False, 'import torch\n'), ((675, 718), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (687, 718), False, 'import torch\n'), ((6064, 6089), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6087, 6089), False, 'import argparse\n'), ((11079, 11113), 'numpy.random.RandomState', 'np.random.RandomState', (['random_seed'], {}), '(random_seed)\n', (11100, 11113), True, 'import numpy as np\n'), ((11118, 11145), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (11132, 11145), True, 'import numpy as np\n'), ((11150, 11180), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (11167, 11180), False, 'import torch\n'), ((11304, 11398), 'time_series.split_data', 'time_series.split_data', (['args.data', 'rnd', 'max_time', 'cconv_ref', 'overlap', 'device', 'args.rescale'], {}), '(args.data, rnd, max_time, cconv_ref, overlap, device,\n args.rescale)\n', (11326, 11398), False, 'import time_series\n'), ((11424, 11548), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'collate_fn': 'train_dataset.collate_fn'}), '(train_dataset, batch_size=args.batch_size, shuffle=True,\n drop_last=True, collate_fn=train_dataset.collate_fn)\n', (11434, 11548), False, 'from torch.utils.data import DataLoader\n'), ((11618, 11728), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)', 'collate_fn': 'val_dataset.collate_fn'}), '(val_dataset, batch_size=args.test_batch_size, shuffle=False,\n collate_fn=val_dataset.collate_fn)\n', (11628, 11728), False, 'from torch.utils.data import DataLoader\n'), ((11761, 11873), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)', 'collate_fn': 'test_dataset.collate_fn'}), '(test_dataset, batch_size=args.test_batch_size, shuffle=False,\n collate_fn=test_dataset.collate_fn)\n', (11771, 11873), False, 'from torch.utils.data import DataLoader\n'), ((12410, 12457), 'layers.GridDecoder', 'GridDecoder', (['nz', 'dec_channels', 'dec_len0', 'squash'], {}), '(nz, dec_channels, dec_len0, squash)\n', (12421, 12457), False, 'from layers import Classifier, GridDecoder, GridEncoder, Decoder\n'), ((13504, 13556), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'args.lr', 'weight_decay': 'args.wd'}), '(params, lr=args.lr, weight_decay=args.wd)\n', (13514, 13556), True, 'import torch.optim as optim\n'), ((13583, 13638), 'utils.make_scheduler', 'make_scheduler', (['optimizer', 'args.lr', 'args.min_lr', 'epochs'], {}), '(optimizer, args.lr, args.min_lr, epochs)\n', (13597, 13638), False, 'from utils import count_parameters, mkdir, make_scheduler\n'), ((13821, 13846), 'utils.mkdir', 'mkdir', (["(output_dir / 'log')"], {}), "(output_dir / 'log')\n", (13826, 13846), False, 'from utils import count_parameters, mkdir, make_scheduler\n'), ((13863, 13890), 'utils.mkdir', 'mkdir', (["(output_dir / 'model')"], {}), "(output_dir / 'model')\n", (13868, 13890), False, 'from utils import count_parameters, mkdir, make_scheduler\n'), ((14951, 14982), 'tracker.Tracker', 'Tracker', (['log_dir', 'n_train_batch'], {}), '(log_dir, n_train_batch)\n', (14958, 14982), False, 'from tracker import Tracker\n'), ((14999, 15091), 'evaluate.Evaluator', 'Evaluator', (['pvae', 'val_loader', 'test_loader', 'log_dir'], {'eval_args': "{'iw_samples': args.test_k}"}), "(pvae, val_loader, test_loader, log_dir, eval_args={'iw_samples':\n args.test_k})\n", (15008, 15091), False, 'from evaluate import Evaluator\n'), ((15126, 15137), 'time.time', 'time.time', ([], {}), '()\n', (15135, 15137), False, 'import time\n'), ((1356, 1407), 'layers.GridEncoder', 'GridEncoder', (['channels', '(latent_size * self.enc_chunk)'], {}), '(channels, latent_size * self.enc_chunk)\n', (1367, 1407), False, 'from layers import Classifier, GridDecoder, GridEncoder, Decoder\n'), ((1637, 1655), 'torch.nn.functional.softplus', 'F.softplus', (['logvar'], {}), '(logvar)\n', (1647, 1655), True, 'import torch.nn.functional as F\n'), ((1671, 1686), 'torch.distributions.normal.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (1677, 1686), False, 'from torch.distributions.normal import Normal\n'), ((5800, 5818), 'torch.nn.functional.softmax', 'F.softmax', (['elbo', '(0)'], {}), '(elbo, 0)\n', (5809, 5818), True, 'import torch.nn.functional as F\n'), ((5932, 5957), 'torch.sigmoid', 'torch.sigmoid', (['pred_logit'], {}), '(pred_logit)\n', (5945, 5957), False, 'import torch\n'), ((10940, 10967), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (10961, 10967), True, 'import numpy as np\n'), ((13037, 13072), 'ema.EMA', 'EMA', (['pvae', 'args.ema_decay', 'args.ema'], {}), '(pvae, args.ema_decay, args.ema)\n', (13040, 13072), False, 'from ema import EMA\n'), ((15233, 15251), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (15244, 15251), False, 'from collections import defaultdict\n'), ((15274, 15285), 'time.time', 'time.time', ([], {}), '()\n', (15283, 15285), False, 'import time\n'), ((15799, 15810), 'time.time', 'time.time', ([], {}), '()\n', (15808, 15810), False, 'import time\n'), ((1184, 1217), 'flow.FlowSequential', 'flow.FlowSequential', (['*flow_layers'], {}), '(*flow_layers)\n', (1203, 1217), False, 'import flow\n'), ((2845, 2866), 'torch.zeros_like', 'torch.zeros_like', (['z_T'], {}), '(z_T)\n', (2861, 2866), False, 'import torch\n'), ((2868, 2888), 'torch.ones_like', 'torch.ones_like', (['z_T'], {}), '(z_T)\n', (2883, 2888), False, 'import torch\n'), ((3126, 3150), 'math.log', 'math.log', (['(math.pi * var2)'], {}), '(math.pi * var2)\n', (3134, 3150), False, 'import math\n'), ((3841, 3872), 'time.repeat', 'time.repeat', (['(iw_samples, 1, 1)'], {}), '((iw_samples, 1, 1))\n', (3852, 3872), False, 'import time\n'), ((4592, 4607), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4605, 4607), False, 'import torch\n'), ((4633, 4653), 'torch.nn.functional.softmax', 'F.softmax', (['elbo_x', '(0)'], {}), '(elbo_x, 0)\n', (4642, 4653), True, 'import torch.nn.functional as F\n'), ((12473, 12535), 'layers.Decoder', 'Decoder', (['grid_decoder'], {'max_time': 'max_time', 'dec_ref': 'args.dec_ref'}), '(grid_decoder, max_time=max_time, dec_ref=args.dec_ref)\n', (12480, 12535), False, 'from layers import Classifier, GridDecoder, GridEncoder, Decoder\n'), ((12569, 12704), 'spline_cconv.ContinuousConv1D', 'ContinuousConv1D', (['in_channels', 'out_channels', 'max_time', 'cconv_ref'], {'overlap_rate': 'overlap', 'kernel_size': 'args.comp', 'norm': 'args.cconv_norm'}), '(in_channels, out_channels, max_time, cconv_ref,\n overlap_rate=overlap, kernel_size=args.comp, norm=args.cconv_norm)\n', (12585, 12704), False, 'from spline_cconv import ContinuousConv1D\n'), ((12857, 12884), 'layers.Classifier', 'Classifier', (['nz', 'args.clsdep'], {}), '(nz, args.clsdep)\n', (12867, 12884), False, 'from layers import Classifier, GridDecoder, GridEncoder, Decoder\n'), ((13746, 13761), 'pathlib.Path', 'Path', (['"""results"""'], {}), "('results')\n", (13750, 13761), False, 'from pathlib import Path\n'), ((14057, 14082), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14080, 14082), False, 'import torch\n'), ((966, 1035), 'flow.InverseAutoregressiveFlow', 'flow.InverseAutoregressiveFlow', (['latent_size', 'hidden_size', 'latent_size'], {}), '(latent_size, hidden_size, latent_size)\n', (996, 1035), False, 'import flow\n'), ((1129, 1154), 'flow.Reverse', 'flow.Reverse', (['latent_size'], {}), '(latent_size)\n', (1141, 1154), False, 'import flow\n'), ((11002, 11021), 'numpy.iinfo', 'np.iinfo', (['np.uint32'], {}), '(np.uint32)\n', (11010, 11021), True, 'import numpy as np\n'), ((13688, 13702), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13700, 13702), False, 'from datetime import datetime\n'), ((14529, 14553), 'utils.count_parameters', 'count_parameters', (['module'], {}), '(module)\n', (14545, 14553), False, 'from utils import count_parameters, mkdir, make_scheduler\n'), ((14431, 14450), 'utils.count_parameters', 'count_parameters', (['m'], {}), '(m)\n', (14447, 14450), False, 'from utils import count_parameters, mkdir, make_scheduler\n')]
|
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, sampler
from tqdm import tqdm
from argument import get_args
from backbone import vovnet39, vovnet57, resnet50, resnet101
from utils.dataset import COCODataset, collate_fn
from model import ATSS,Efficientnet_Bifpn_ATSS
from utils import transform
from utils.lrscheduler import GluonLRScheduler,iter_per_epoch_cal,set_schduler_with_wormup
from evaluate import evaluate
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
DistributedSampler,
all_gather,
get_world_size,
convert_sync_bn,
simple_group_split
)
from utils.ema import EMA
import os,cv2
from tensorboardX import SummaryWriter
import numpy as np
def accumulate_predictions(predictions):
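    # Added comment: gather the per-rank prediction dicts onto every process,
    # then merge them; image ids are expected to form a contiguous 0..N-1
    # range, which the length check below verifies on rank 0.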
all_predictions = all_gather(predictions)
if get_rank() != 0:
return
predictions = {}
for p in all_predictions:
predictions.update(p)
ids = list(sorted(predictions.keys()))
if len(ids) != ids[-1] + 1:
        print('Evaluation results are not contiguous')
predictions = [predictions[i] for i in ids]
return predictions
@torch.no_grad()
def valid_loss(args, epoch, loader, dataset, model, device, logger=None):
loss_regress_list = []
loss_cls_list = []
loss_centerness_list = []
if args.distributed:
model = model.module
torch.cuda.empty_cache()
model.eval()
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
preds = {}
for idx, (images, targets, ids) in pbar:
model.zero_grad()
images = images.to(device)
targets = [target.to(device) for target in targets]
pred,loss_dict = model(images,targets,args.val_with_loss)
loss_reduced = reduce_loss_dict(loss_dict)
loss_cls = loss_reduced['loss_cls'].mean().item()
loss_box = loss_reduced['loss_reg'].mean().item()
loss_center = loss_reduced['loss_centerness'].mean().item()
loss_regress_list.append(float(loss_box))
loss_cls_list.append(float(loss_cls))
loss_centerness_list.append(float(loss_center))
if logger:
log_group_name = 'validation'
logger.add_scalar(log_group_name+'/class_loss',np.mean(loss_cls_list),epoch)
logger.add_scalar(log_group_name+'/regression_loss',np.mean(loss_regress_list),epoch)
logger.add_scalar(log_group_name+'/centerness_loss',np.mean(loss_centerness_list),epoch)
loss_all = np.mean(loss_cls_list) + np.mean(loss_regress_list) + np.mean(loss_centerness_list)
logger.add_scalar(log_group_name+'/loss_epoch_all',loss_all,epoch)
return loss_all
@torch.no_grad()
def valid(args, epoch, loader, dataset, model, device, logger=None,ema=None):
if args.distributed:
model = model.module
torch.cuda.empty_cache()
model.eval()
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
preds = {}
for idx, (images, targets, ids) in pbar:
model.zero_grad()
images = images.to(device)
if ema: ema.apply_shadow()
pred, _ = model(images)
if ema: ema.restore()
pred = [p.to('cpu') for p in pred]
preds.update({id: p for id, p in zip(ids, pred)})
preds = accumulate_predictions(preds)
if get_rank() != 0:
return
evl_res = evaluate(dataset, preds)
# writing log to tensorboard
if logger:
log_group_name = "validation"
box_result = evl_res['bbox']
logger.add_scalar(log_group_name + '/AP', box_result['AP'], epoch)
logger.add_scalar(log_group_name + '/AP50', box_result['AP50'], epoch)
logger.add_scalar(log_group_name + '/AP75', box_result['AP75'], epoch)
logger.add_scalar(log_group_name + '/APl', box_result['APl'], epoch)
logger.add_scalar(log_group_name + '/APm', box_result['APm'], epoch)
logger.add_scalar(log_group_name + '/APs', box_result['APs'], epoch)
return preds
def train(args, epoch, loader, model, optimizer, device, scheduler=None,logger=None,ema=None):
epoch_loss = []
model.train()
scheduler, warmup_scheduler = scheduler[0], scheduler[1]
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
for idx, (images, targets, _) in pbar:
model.zero_grad()
images = images.to(device)
targets = [target.to(device) for target in targets]
_, loss_dict = model(images, targets=targets)
loss_cls = loss_dict['loss_cls'].mean()
loss_box = loss_dict['loss_reg'].mean()
loss_center = loss_dict['loss_centerness'].mean()
loss = loss_cls + loss_box + loss_center
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 10)
optimizer.step()
# ema update
ema.update()
# for iter scheduler
if idx<warmup_scheduler.niters and epoch<args.warmup_epoch:
warmup_scheduler.step()
else:
scheduler.step()
loss_reduced = reduce_loss_dict(loss_dict)
loss_cls = loss_reduced['loss_cls'].mean().item()
loss_box = loss_reduced['loss_reg'].mean().item()
loss_center = loss_reduced['loss_centerness'].mean().item()
if get_rank() == 0:
pbar.set_description(
(
f'epoch: {epoch + 1}; cls: {loss_cls:.4f}; '
f'box: {loss_box:.4f}; center: {loss_center:.4f}'
)
)
# writing log to tensorboard
if logger and idx % 50 == 0:
lr_rate = optimizer.param_groups[0]['lr']
totalStep = (epoch * len(loader) + idx) * args.batch * args.n_gpu
logger.add_scalar('training/loss_cls', loss_cls, totalStep)
logger.add_scalar('training/loss_box', loss_box, totalStep)
logger.add_scalar('training/loss_center', loss_center, totalStep)
logger.add_scalar('training/loss_all', (loss_cls + loss_box + loss_center), totalStep)
logger.add_scalar('learning_rate',lr_rate,totalStep)
epoch_loss.append(float(loss_cls+loss_box+loss_center))
if logger:
logger.add_scalar('training/loss_epoch_all',np.mean(epoch_loss),epoch)
return epoch_loss
def data_sampler(dataset, shuffle, distributed):
if distributed:
return DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return sampler.RandomSampler(dataset)
else:
return sampler.SequentialSampler(dataset)
def save_checkpoint(model,args,optimizer,epoch):
if get_rank() == 0:
torch.save(
{'model': model.module.state_dict(), 'optim': optimizer.state_dict()},
args.working_dir + f'/epoch-{epoch + 1}.pt',
)
if __name__ == '__main__':
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
args = get_args()
# Create working directory for saving intermediate results
working_dir = args.working_dir
if not os.path.exists(working_dir):
os.makedirs(working_dir)
logger = SummaryWriter(working_dir)
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.n_gpu = n_gpu
args.distributed = n_gpu > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='gloo', init_method='env://')
#torch.distributed.init_process_group(backend='nccl', init_method='env://')
synchronize()
device = 'cuda'
# train_trans = transform.Compose(
# [
# transform.RandomResize(args.train_min_size_range, args.train_max_size),
# transform.RandomHorizontalFlip(0.5),
# transform.ToTensor(),
# transform.Normalize(args.pixel_mean, args.pixel_std)
# ]
# )
# for efficientdet resize the image
train_trans = transform.Compose(
[
transform.RandomHorizontalFlip(0.5),
transform.Resize_For_Efficientnet(compund_coef=args.backbone_coef),
transform.ToTensor(),
transform.Normalize(args.pixel_mean, args.pixel_std),
]
)
valid_trans = transform.Compose(
[
transform.Resize_For_Efficientnet(compund_coef=args.backbone_coef),
transform.ToTensor(),
transform.Normalize(args.pixel_mean, args.pixel_std)
]
)
train_set = COCODataset(args.path, 'train', train_trans)
train_loader = DataLoader(
train_set,
batch_size=args.batch,
sampler=data_sampler(train_set, shuffle=True, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
valid_set = COCODataset(args.path, 'val', valid_trans)
valid_loader = DataLoader(
valid_set,
batch_size=args.batch,
sampler=data_sampler(valid_set, shuffle=False, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
if args.val_with_loss:
valid_loss_set = COCODataset(args.path, 'val_loss', valid_trans)
val_loss_loader = DataLoader(
valid_loss_set,
batch_size=args.batch,
sampler=data_sampler(valid_loss_set, shuffle=False, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
# backbone = vovnet39(pretrained=True)
# backbone = vovnet57(pretrained=True)
# backbone = resnet18(pretrained=True)
# backbone = resnet50(pretrained=True)
# backbone = resnet101(pretrained=True)
# model = ATSS(args, backbone)
if args.backbone_type == 'Efficientdet':
if args.load_pretrained_weight:
model = Efficientnet_Bifpn_ATSS(args,compound_coef=args.backbone_coef,load_backboe_weight=True,weight_path=args.weight_path)
else:
model = Efficientnet_Bifpn_ATSS(args,compound_coef=args.backbone_coef,load_backboe_weight=False)
elif args.backbone_type == 'ResNet':
        if args.backbone_coef == 18:
            # NOTE: resnet18 is not imported from backbone, so this falls back to resnet50
            backbone = resnet50(pretrained=True)
elif args.backbone_coef == 50:
backbone = resnet50(pretrained=True)
elif args.backbone_coef == 101:
backbone = resnet101(pretrained=True)
else:
            raise NotImplementedError(f'Unsupported backbone name: {args.backbone_name}')
model = ATSS(args, backbone)
elif args.backbone_type == 'VovNet':
if args.backbone_coef == 39:
backbone = vovnet39(pretrained=True)
elif args.backbone_coef == 57:
backbone = vovnet57(pretrained=True)
else:
            raise NotImplementedError(f'Unsupported backbone name: {args.backbone_name}')
model = ATSS(args, backbone)
else:
        raise NotImplementedError(f'Unsupported backbone name: {args.backbone_name}')
model = model.to(device)
if args.load_checkpoint:
model.load_state_dict(torch.load(args.weight_path,map_location='cpu')['model'])
print(f'[INFO] load checkpoint weight successfully!')
# freeze backbone and FPN if train head_only
if args.head_only:
def freeze_backbone(m):
classname = m.__class__.__name__
for ntl in ['EfficientNet', 'BiFPN','FPN','FPNTopP6P7','ResNet']:
if ntl == classname:
for param in m.parameters():
param.requires_grad = False
model.apply(freeze_backbone)
print('[Info] freezed backbone')
if not args.head_only and args.finetune:
# if not freeze the backbone, then finetune the backbone,
optimizer = optim.SGD(
model.backbone.backbone_net.parameters(),
lr = 0,
momentum = 0.9,
weight_decay = 0.0001,
nesterov = True,
)
optimizer.add_param_group({'params':list(model.backbone.bifpn.parameters()),'lr':0,
'momentum': 0.9, 'weight_decay': 0.0001, 'nesterov': True})
optimizer.add_param_group({'params':list(model.head.parameters()),'lr':0,'momentum':0.9,'weight_decay':0.0001,
'nesterov':True})
        print(f'[INFO] efficientnet uses lr {args.lr*args.lr_gamma_Efficientnet} to finetune,'
              f' bifpn uses lr {args.lr*args.lr_gamma_BiFPN} to finetune')
else:
optimizer = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=0.0001,
nesterov=True,
)
if args.load_checkpoint:
optimizer.load_state_dict(torch.load(args.weight_path)['optim'])
last_epoch = int(os.path.basename(args.weight_path).split('.')[0][6:])
print(f'[INFO] load optimizer state:{last_epoch}')
last_epoch = last_epoch - 1
else:
last_epoch = -1
# scheduler = optim.lr_scheduler.MultiStepLR(
# optimizer, milestones=args.lr_steps, gamma=args.lr_gamma,last_epoch=last_epoch
# )
#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=args.lr_gamma, patience=3,verbose=True)
iter_per_epoch = iter_per_epoch_cal(args, train_set)
scheduler = GluonLRScheduler(optimizer,mode='step',nepochs=(args.epoch-args.warmup_epoch),
iters_per_epoch=iter_per_epoch,step_epoch=[9,11])
warmup_scheduler, schdeduler = set_schduler_with_wormup(args,iter_per_epoch,optimizer,scheduler)
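    # Added note (hedged): set_schduler_with_wormup presumably builds the
    # per-iteration warmup used for the first warmup_scheduler.niters steps of
    # the early epochs (see train()); its second return value (the misspelled
    # 'schdeduler') is never used below, so train() receives the original
    # GluonLRScheduler.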
ema = EMA(model,decay=0.999,enable=args.EMA)
if args.distributed:
# if args.batch <= 4:
# #if the batchsize for a single GPU <= 4, then use the sync_batchnorm
# world_size = get_world_size()
# rank = get_rank()
# sync_groups = world_size // args.n_gpu
# process_group = simple_group_split(world_size, rank, sync_groups)
# convert_sync_bn(model, process_group)
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
broadcast_buffers=False,
)
train_loader = DataLoader(
train_set,
batch_size=args.batch,
sampler=data_sampler(train_set, shuffle=True, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
valid_loader = DataLoader(
valid_set,
batch_size=args.batch,
sampler=data_sampler(valid_set, shuffle=False, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
print(f'[INFO] Start training: learning rate:{args.lr}, total batchsize:{args.batch*get_world_size()}, '
f'working dir:{args.working_dir}')
logger.add_text('exp_info',f'learning_rate:{args.lr},total_batchsize:{args.batch*get_world_size()},'
f'backbone_name:{args.backbone_name},freeze_backbone:{args.head_only},'
f'finetune_backbone:{args.finetune}')
if args.finetune:
logger.add_text('exp_info',f'efficientnet lr gamma:{args.lr_gamma_Efficientnet},'
f'BiFPN lr gamma:{args.lr_gamma_BiFPN}')
val_best_loss = 1e5
val_best_epoch = 0
for epoch in range(args.epoch-(last_epoch+1)):
epoch += (last_epoch + 1)
epoch_loss = train(args, epoch, train_loader, model, optimizer, device, [scheduler,warmup_scheduler],
logger=logger,ema=ema)
save_checkpoint(model,args,optimizer,epoch)
valid(args, epoch, valid_loader, valid_set, model, device, logger=logger,ema=None)
        if args.val_with_loss and epoch > 1 and epoch % 2 == 0:
            val_epoch_loss = valid_loss(args,epoch,val_loss_loader,valid_loss_set,model,device,logger=logger)
            if args.early_stopping:
                if val_epoch_loss < val_best_loss:
                    val_best_loss = val_epoch_loss
                    val_best_epoch = epoch
                if epoch - val_best_epoch > args.es_patience:
                    print(f'[INFO] Stop training at epoch {epoch}. The lowest validation loss achieved is {val_best_loss}')
                    save_checkpoint(model,args,optimizer,epoch)
                    break  # early stopping: end the training loop once patience is exceeded
# scheduler.step(np.mean(epoch_loss))
|
[
"evaluate.evaluate"
] |
[((1151, 1166), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1164, 1166), False, 'import torch\n'), ((2741, 2756), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2754, 2756), False, 'import torch\n'), ((797, 820), 'distributed.all_gather', 'all_gather', (['predictions'], {}), '(predictions)\n', (807, 820), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((1381, 1405), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1403, 1405), False, 'import torch\n'), ((2895, 2919), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2917, 2919), False, 'import torch\n'), ((3508, 3532), 'evaluate.evaluate', 'evaluate', (['dataset', 'preds'], {}), '(dataset, preds)\n', (3516, 3532), False, 'from evaluate import evaluate\n'), ((7060, 7080), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (7077, 7080), False, 'import os, cv2\n'), ((7085, 7112), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (7105, 7112), False, 'import os, cv2\n'), ((7125, 7135), 'argument.get_args', 'get_args', ([], {}), '()\n', (7133, 7135), False, 'from argument import get_args\n'), ((7321, 7347), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['working_dir'], {}), '(working_dir)\n', (7334, 7347), False, 'from tensorboardX import SummaryWriter\n'), ((8674, 8718), 'utils.dataset.COCODataset', 'COCODataset', (['args.path', '"""train"""', 'train_trans'], {}), "(args.path, 'train', train_trans)\n", (8685, 8718), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((8983, 9025), 'utils.dataset.COCODataset', 'COCODataset', (['args.path', '"""val"""', 'valid_trans'], {}), "(args.path, 'val', valid_trans)\n", (8994, 9025), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((13469, 13504), 'utils.lrscheduler.iter_per_epoch_cal', 'iter_per_epoch_cal', (['args', 'train_set'], {}), '(args, train_set)\n', (13487, 13504), False, 'from utils.lrscheduler import GluonLRScheduler, iter_per_epoch_cal, set_schduler_with_wormup\n'), ((13521, 13658), 'utils.lrscheduler.GluonLRScheduler', 'GluonLRScheduler', (['optimizer'], {'mode': '"""step"""', 'nepochs': '(args.epoch - args.warmup_epoch)', 'iters_per_epoch': 'iter_per_epoch', 'step_epoch': '[9, 11]'}), "(optimizer, mode='step', nepochs=args.epoch - args.\n warmup_epoch, iters_per_epoch=iter_per_epoch, step_epoch=[9, 11])\n", (13537, 13658), False, 'from utils.lrscheduler import GluonLRScheduler, iter_per_epoch_cal, set_schduler_with_wormup\n'), ((13718, 13786), 'utils.lrscheduler.set_schduler_with_wormup', 'set_schduler_with_wormup', (['args', 'iter_per_epoch', 'optimizer', 'scheduler'], {}), '(args, iter_per_epoch, optimizer, scheduler)\n', (13742, 13786), False, 'from utils.lrscheduler import GluonLRScheduler, iter_per_epoch_cal, set_schduler_with_wormup\n'), ((13796, 13836), 'utils.ema.EMA', 'EMA', (['model'], {'decay': '(0.999)', 'enable': 'args.EMA'}), '(model, decay=0.999, enable=args.EMA)\n', (13799, 13836), False, 'from utils.ema import EMA\n'), ((829, 839), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (837, 839), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((1432, 1442), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (1440, 1442), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((1845, 1872), 'distributed.reduce_loss_dict', 'reduce_loss_dict', (['loss_dict'], {}), '(loss_dict)\n', (1861, 1872), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((2946, 2956), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (2954, 2956), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((3461, 3471), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (3469, 3471), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((4341, 4351), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (4349, 4351), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((5256, 5283), 'distributed.reduce_loss_dict', 'reduce_loss_dict', (['loss_dict'], {}), '(loss_dict)\n', (5272, 5283), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((6611, 6655), 'distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {'shuffle': 'shuffle'}), '(dataset, shuffle=shuffle)\n', (6629, 6655), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((6688, 6718), 'torch.utils.data.sampler.RandomSampler', 'sampler.RandomSampler', (['dataset'], {}), '(dataset)\n', (6709, 6718), False, 'from torch.utils.data import DataLoader, sampler\n'), ((6745, 6779), 'torch.utils.data.sampler.SequentialSampler', 'sampler.SequentialSampler', (['dataset'], {}), '(dataset)\n', (6770, 6779), False, 'from torch.utils.data import DataLoader, sampler\n'), ((6839, 6849), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (6847, 6849), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((7246, 7273), 'os.path.exists', 'os.path.exists', (['working_dir'], {}), '(working_dir)\n', (7260, 7273), False, 'import os, cv2\n'), ((7283, 7307), 'os.makedirs', 'os.makedirs', (['working_dir'], {}), '(working_dir)\n', (7294, 7307), False, 'import os, cv2\n'), ((7518, 7556), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (7539, 7556), False, 'import torch\n'), ((7565, 7639), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""gloo"""', 'init_method': '"""env://"""'}), "(backend='gloo', init_method='env://')\n", (7601, 7639), False, 'import torch\n'), ((7732, 7745), 'distributed.synchronize', 'synchronize', ([], {}), '()\n', (7743, 7745), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((9328, 9375), 'utils.dataset.COCODataset', 'COCODataset', (['args.path', '"""val_loss"""', 'valid_trans'], {}), "(args.path, 'val_loss', valid_trans)\n", (9339, 9375), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((14251, 14383), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'broadcast_buffers': '(False)'}), '(model, device_ids=[args.local_rank],\n output_device=args.local_rank, broadcast_buffers=False)\n', (14286, 14383), False, 'from torch import nn, optim\n'), ((2319, 2341), 'numpy.mean', 'np.mean', (['loss_cls_list'], {}), '(loss_cls_list)\n', (2326, 2341), True, 'import numpy as np\n'), ((2409, 2435), 'numpy.mean', 'np.mean', (['loss_regress_list'], {}), '(loss_regress_list)\n', (2416, 2435), True, 'import numpy as np\n'), ((2503, 2532), 'numpy.mean', 'np.mean', (['loss_centerness_list'], {}), '(loss_centerness_list)\n', (2510, 2532), True, 'import numpy as np\n'), ((2613, 2642), 'numpy.mean', 'np.mean', (['loss_centerness_list'], {}), '(loss_centerness_list)\n', (2620, 2642), True, 'import numpy as np\n'), ((5480, 5490), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (5488, 5490), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((6477, 6496), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (6484, 6496), True, 'import numpy as np\n'), ((8180, 8215), 'utils.transform.RandomHorizontalFlip', 'transform.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (8210, 8215), False, 'from utils import transform\n'), ((8229, 8295), 'utils.transform.Resize_For_Efficientnet', 'transform.Resize_For_Efficientnet', ([], {'compund_coef': 'args.backbone_coef'}), '(compund_coef=args.backbone_coef)\n', (8262, 8295), False, 'from utils import transform\n'), ((8309, 8329), 'utils.transform.ToTensor', 'transform.ToTensor', ([], {}), '()\n', (8327, 8329), False, 'from utils import transform\n'), ((8343, 8395), 'utils.transform.Normalize', 'transform.Normalize', (['args.pixel_mean', 'args.pixel_std'], {}), '(args.pixel_mean, args.pixel_std)\n', (8362, 8395), False, 'from utils import transform\n'), ((8473, 8539), 'utils.transform.Resize_For_Efficientnet', 'transform.Resize_For_Efficientnet', ([], {'compund_coef': 'args.backbone_coef'}), '(compund_coef=args.backbone_coef)\n', (8506, 8539), False, 'from utils import transform\n'), ((8553, 8573), 'utils.transform.ToTensor', 'transform.ToTensor', ([], {}), '()\n', (8571, 8573), False, 'from utils import transform\n'), ((8588, 8640), 'utils.transform.Normalize', 'transform.Normalize', (['args.pixel_mean', 'args.pixel_std'], {}), '(args.pixel_mean, args.pixel_std)\n', (8607, 8640), False, 'from utils import transform\n'), ((8942, 8958), 'utils.dataset.collate_fn', 'collate_fn', (['args'], {}), '(args)\n', (8952, 8958), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((9250, 9266), 'utils.dataset.collate_fn', 'collate_fn', (['args'], {}), '(args)\n', (9260, 9266), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((10026, 10149), 'model.Efficientnet_Bifpn_ATSS', 'Efficientnet_Bifpn_ATSS', (['args'], {'compound_coef': 'args.backbone_coef', 'load_backboe_weight': '(True)', 'weight_path': 'args.weight_path'}), '(args, compound_coef=args.backbone_coef,\n load_backboe_weight=True, weight_path=args.weight_path)\n', (10049, 10149), False, 'from model import ATSS, Efficientnet_Bifpn_ATSS\n'), ((10177, 10271), 'model.Efficientnet_Bifpn_ATSS', 'Efficientnet_Bifpn_ATSS', (['args'], {'compound_coef': 'args.backbone_coef', 'load_backboe_weight': '(False)'}), '(args, compound_coef=args.backbone_coef,\n load_backboe_weight=False)\n', (10200, 10271), False, 'from model import ATSS, Efficientnet_Bifpn_ATSS\n'), ((10693, 10713), 'model.ATSS', 'ATSS', (['args', 'backbone'], {}), '(args, backbone)\n', (10697, 10713), False, 'from model import ATSS, Efficientnet_Bifpn_ATSS\n'), ((14665, 14681), 'utils.dataset.collate_fn', 'collate_fn', (['args'], {}), '(args)\n', (14675, 14681), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((14913, 14929), 'utils.dataset.collate_fn', 'collate_fn', (['args'], {}), '(args)\n', (14923, 14929), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((2559, 2581), 'numpy.mean', 'np.mean', (['loss_cls_list'], {}), '(loss_cls_list)\n', (2566, 2581), True, 'import numpy as np\n'), ((2584, 2610), 'numpy.mean', 'np.mean', (['loss_regress_list'], {}), '(loss_regress_list)\n', (2591, 2610), True, 'import numpy as np\n'), ((9637, 9653), 'utils.dataset.collate_fn', 'collate_fn', (['args'], {}), '(args)\n', (9647, 9653), False, 'from utils.dataset import COCODataset, collate_fn\n'), ((10367, 10392), 'backbone.resnet50', 'resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10375, 10392), False, 'from backbone import vovnet39, vovnet57, resnet50, resnet101\n'), ((11051, 11071), 'model.ATSS', 'ATSS', (['args', 'backbone'], {}), '(args, backbone)\n', (11055, 11071), False, 'from model import ATSS, Efficientnet_Bifpn_ATSS\n'), ((11260, 11308), 'torch.load', 'torch.load', (['args.weight_path'], {'map_location': '"""cpu"""'}), "(args.weight_path, map_location='cpu')\n", (11270, 11308), False, 'import torch\n'), ((12941, 12969), 'torch.load', 'torch.load', (['args.weight_path'], {}), '(args.weight_path)\n', (12951, 12969), False, 'import torch\n'), ((10455, 10480), 'backbone.resnet50', 'resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10463, 10480), False, 'from backbone import vovnet39, vovnet57, resnet50, resnet101\n'), ((10815, 10840), 'backbone.vovnet39', 'vovnet39', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10823, 10840), False, 'from backbone import vovnet39, vovnet57, resnet50, resnet101\n'), ((15025, 15041), 'distributed.get_world_size', 'get_world_size', ([], {}), '()\n', (15039, 15041), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((15177, 15193), 'distributed.get_world_size', 'get_world_size', ([], {}), '()\n', (15191, 15193), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather, get_world_size, convert_sync_bn, simple_group_split\n'), ((10544, 10570), 'backbone.resnet101', 'resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10553, 10570), False, 'from backbone import vovnet39, vovnet57, resnet50, resnet101\n'), ((10903, 10928), 'backbone.vovnet57', 'vovnet57', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10911, 10928), False, 'from backbone import vovnet39, vovnet57, resnet50, resnet101\n'), ((13005, 13039), 'os.path.basename', 'os.path.basename', (['args.weight_path'], {}), '(args.weight_path)\n', (13021, 13039), False, 'import os, cv2\n')]
|
import sys
import os
import numpy as np
from shutil import copyfile
from stat import S_IREAD, S_IRGRP, S_IROTH
from network import TasnetWithDprnn
from train import train_network
from separate import Separator
from evaluate import Evaluator
FILE_LIST_DIR = '/data1/ditter/speechSeparation/preprocessedData/create-speaker-mixtures-mod/'
FILE_LIST_PATH_TRAIN = os.path.join(FILE_LIST_DIR, 'mix_2_spk_min_' + 'tr' + '_mix')
FILE_LIST_PATH_VALID = os.path.join(FILE_LIST_DIR, 'mix_2_spk_min_' + 'cv' + '_mix')
FILE_LIST_PATH_TEST = os.path.join(FILE_LIST_DIR, 'mix_2_spk_min_' + 'tt' + '_mix')
WAV_ROOT_DIR = '/data1/ditter/speechSeparation/preprocessedData/wham/wav8k/min/'
WAV_DIR_TRAIN = os.path.join(WAV_ROOT_DIR, 'tr') # must contain subfolders 's1' and 's2'
WAV_DIR_VALID = os.path.join(WAV_ROOT_DIR, 'cv') # must contain subfolders 's1' and 's2'
WAV_DIR_TEST = os.path.join(WAV_ROOT_DIR, 'tt') # must contain subfolders 's1' and 's2'
EXPERIMENT_ROOT_DIR = '../exp/'
EXPERIMENT_TAG = 'example'
RESUME_TRAINING = False
RESUME_FROM_EPOCH = 2
RESUME_FROM_MODEL_DIR = 'tag_test__time_2019_11_04__10_37_08'
NUM_SPEAKERS = 2 # Cannot be changed
SAMPLERATE_HZ = 8000
# TRAINING PARAMETERS
BATCH_SIZE = 2
NUM_BATCHES_TRAIN = 10000
NUM_BATCHES_VALID = 200
NUM_EPOCHS = 200
NUM_EPOCHS_FOR_EARLY_STOPPING = 10
OPTIMIZER_CLIP_L2_NORM_VALUE = 5
TRAIN_UTTERANCE_LENGTH_IN_SECONDS = 4
SEPARATE_MAX_UTTERANCE_LENGTH_IN_SECONDS = 14
# NETWORK PARAMETERS
NETWORK_IS_CAUSAL = False
NETWORK_NUM_FILTERS_IN_ENCODER = 64
NETWORK_ENCODER_FILTER_LENGTH = 2
NETWORK_NUM_UNITS_PER_LSTM = 200
NETWORK_NUM_DPRNN_BLOCKS = 3 # TODO 6 according to paper
NETWORK_CHUNK_SIZE = 256
def run_experiment(stage=0):
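    # Stage semantics, as implied by the checks below: stage 0 runs training,
    # separation and evaluation; stage 1 skips training and reuses the saved
    # checkpoints; stage 2 only evaluates previously separated wav files.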
experiment_dir = os.path.join(EXPERIMENT_ROOT_DIR, EXPERIMENT_TAG)
wav_output_dir = os.path.join(experiment_dir, 'separate')
validation_loss_file = os.path.join(experiment_dir, 'validation_loss.npy.txt')
train_num_full_chunks = SAMPLERATE_HZ*TRAIN_UTTERANCE_LENGTH_IN_SECONDS//NETWORK_CHUNK_SIZE
separate_max_num_full_chunks = SAMPLERATE_HZ*SEPARATE_MAX_UTTERANCE_LENGTH_IN_SECONDS//NETWORK_CHUNK_SIZE
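    # Illustrative arithmetic with the constants above: 8000 Hz * 4 s // 256
    # = 125 full training chunks, and 8000 Hz * 14 s // 256 = 437 chunks at
    # separation time.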
if stage <= 0: # Start with training
if os.path.exists(experiment_dir):
sys.exit('Experiment tag already in use. Change tag and run again')
os.mkdir(experiment_dir)
config_backup_file = os.path.join(experiment_dir, 'config.py')
copyfile(os.path.realpath(__file__), config_backup_file)
os.chmod(config_backup_file, S_IREAD|S_IRGRP|S_IROTH)
if RESUME_TRAINING:
model_weights_file = os.path.join(RESUME_FROM_MODEL_DIR, \
'state_epoch_' + str(RESUME_FROM_EPOCH) + '.h5')
else:
model_weights_file = None
# Generate network
tasnet = TasnetWithDprnn(batch_size=BATCH_SIZE,
is_causal=NETWORK_IS_CAUSAL,
model_weights_file=model_weights_file,
num_filters_in_encoder=NETWORK_NUM_FILTERS_IN_ENCODER,
encoder_filter_length=NETWORK_ENCODER_FILTER_LENGTH,
chunk_size=NETWORK_CHUNK_SIZE,
num_full_chunks=train_num_full_chunks,
units_per_lstm=NETWORK_NUM_UNITS_PER_LSTM,
num_dprnn_blocks=NETWORK_NUM_DPRNN_BLOCKS,
samplerate_hz=SAMPLERATE_HZ)
# Train network
tensorboard_dir = os.path.join(experiment_dir, 'tensorboard_logs')
    print('Run following command to run Tensorboard: \n', 'tensorboard --bind_all --logdir ' + tensorboard_dir)
validation_loss = train_network(experiment_dir=experiment_dir,
tensorboard_dir=tensorboard_dir,
batch_size=BATCH_SIZE,
num_batches_train=NUM_BATCHES_TRAIN,
num_batches_valid=NUM_BATCHES_VALID,
num_epochs=NUM_EPOCHS,
num_epochs_for_early_stopping=NUM_EPOCHS_FOR_EARLY_STOPPING,
optimizer_clip_l2_norm_value=OPTIMIZER_CLIP_L2_NORM_VALUE,
samplerate_hz=SAMPLERATE_HZ,
utterance_length_in_seconds=TRAIN_UTTERANCE_LENGTH_IN_SECONDS,
wav_data_dir_train=WAV_DIR_TRAIN,
wav_data_dir_valid=WAV_DIR_VALID,
file_list_path_train=FILE_LIST_PATH_TRAIN,
file_list_path_valid=FILE_LIST_PATH_VALID,
tasnet=tasnet)
np.savetxt(validation_loss_file, validation_loss, fmt="%.2f")
if stage <= 1: # Start with separation
if os.path.exists(wav_output_dir):
sys.exit('Separation folder already exists')
os.mkdir(wav_output_dir)
validation_loss_per_epoch = np.loadtxt(validation_loss_file)
epoch_with_best_validation_result = np.argmin(validation_loss_per_epoch) + 1
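        # Added comment: +1 because np.argmin is 0-based while the checkpoints
        # written during training are presumably named with 1-based epoch
        # numbers (state_epoch_<n>.h5, as RESUME_FROM_EPOCH above also assumes).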
model_weights_file = os.path.join(experiment_dir, 'state_epoch_' \
+ str(epoch_with_best_validation_result) + '.h5')
# Generate trained network
tasnet = TasnetWithDprnn(batch_size=1,
is_causal=NETWORK_IS_CAUSAL,
model_weights_file=model_weights_file,
num_filters_in_encoder=NETWORK_NUM_FILTERS_IN_ENCODER,
encoder_filter_length=NETWORK_ENCODER_FILTER_LENGTH,
chunk_size=NETWORK_CHUNK_SIZE,
num_full_chunks=separate_max_num_full_chunks,
units_per_lstm=NETWORK_NUM_UNITS_PER_LSTM,
num_dprnn_blocks=NETWORK_NUM_DPRNN_BLOCKS,
samplerate_hz=SAMPLERATE_HZ)
# Use network to separate list of wav files
separator = Separator(tasnet=tasnet,
input_dir=os.path.join(WAV_DIR_TEST, 'mix_clean'),
output_dir=wav_output_dir,
max_num_chunks=separate_max_num_full_chunks)
separator.process_file_list(FILE_LIST_PATH_TEST)
if stage <= 2: # Start with evaluation
# Evaluate list of separated wav files
evaluator = Evaluator(estimate_wav_dir=wav_output_dir,
groundtruth_wav_dir=WAV_DIR_TEST,
sample_list_path=FILE_LIST_PATH_TEST)
print('SI-SNR Performance on Test Set:', evaluator.mean_sisnr)
np.savetxt(os.path.join(experiment_dir, 'results.npy.txt'), evaluator.results, fmt="%.2f")
np.savetxt(os.path.join(experiment_dir, 'mean_result.npy.txt'), np.array([evaluator.mean_sisnr]), fmt="%.2f")
if len(sys.argv) > 1:
STAGE = int(sys.argv[1])
else:
STAGE = 0
run_experiment(stage=STAGE)
|
[
"evaluate.Evaluator"
] |
[((361, 422), 'os.path.join', 'os.path.join', (['FILE_LIST_DIR', "('mix_2_spk_min_' + 'tr' + '_mix')"], {}), "(FILE_LIST_DIR, 'mix_2_spk_min_' + 'tr' + '_mix')\n", (373, 422), False, 'import os\n'), ((446, 507), 'os.path.join', 'os.path.join', (['FILE_LIST_DIR', "('mix_2_spk_min_' + 'cv' + '_mix')"], {}), "(FILE_LIST_DIR, 'mix_2_spk_min_' + 'cv' + '_mix')\n", (458, 507), False, 'import os\n'), ((530, 591), 'os.path.join', 'os.path.join', (['FILE_LIST_DIR', "('mix_2_spk_min_' + 'tt' + '_mix')"], {}), "(FILE_LIST_DIR, 'mix_2_spk_min_' + 'tt' + '_mix')\n", (542, 591), False, 'import os\n'), ((690, 722), 'os.path.join', 'os.path.join', (['WAV_ROOT_DIR', '"""tr"""'], {}), "(WAV_ROOT_DIR, 'tr')\n", (702, 722), False, 'import os\n'), ((779, 811), 'os.path.join', 'os.path.join', (['WAV_ROOT_DIR', '"""cv"""'], {}), "(WAV_ROOT_DIR, 'cv')\n", (791, 811), False, 'import os\n'), ((867, 899), 'os.path.join', 'os.path.join', (['WAV_ROOT_DIR', '"""tt"""'], {}), "(WAV_ROOT_DIR, 'tt')\n", (879, 899), False, 'import os\n'), ((1709, 1758), 'os.path.join', 'os.path.join', (['EXPERIMENT_ROOT_DIR', 'EXPERIMENT_TAG'], {}), '(EXPERIMENT_ROOT_DIR, EXPERIMENT_TAG)\n', (1721, 1758), False, 'import os\n'), ((1780, 1820), 'os.path.join', 'os.path.join', (['experiment_dir', '"""separate"""'], {}), "(experiment_dir, 'separate')\n", (1792, 1820), False, 'import os\n'), ((1848, 1903), 'os.path.join', 'os.path.join', (['experiment_dir', '"""validation_loss.npy.txt"""'], {}), "(experiment_dir, 'validation_loss.npy.txt')\n", (1860, 1903), False, 'import os\n'), ((2163, 2193), 'os.path.exists', 'os.path.exists', (['experiment_dir'], {}), '(experiment_dir)\n', (2177, 2193), False, 'import os\n'), ((2283, 2307), 'os.mkdir', 'os.mkdir', (['experiment_dir'], {}), '(experiment_dir)\n', (2291, 2307), False, 'import os\n'), ((2337, 2378), 'os.path.join', 'os.path.join', (['experiment_dir', '"""config.py"""'], {}), "(experiment_dir, 'config.py')\n", (2349, 2378), False, 'import os\n'), ((2452, 2509), 'os.chmod', 'os.chmod', (['config_backup_file', '(S_IREAD | S_IRGRP | S_IROTH)'], {}), '(config_backup_file, S_IREAD | S_IRGRP | S_IROTH)\n', (2460, 2509), False, 'import os\n'), ((2798, 3224), 'network.TasnetWithDprnn', 'TasnetWithDprnn', ([], {'batch_size': 'BATCH_SIZE', 'is_causal': 'NETWORK_IS_CAUSAL', 'model_weights_file': 'model_weights_file', 'num_filters_in_encoder': 'NETWORK_NUM_FILTERS_IN_ENCODER', 'encoder_filter_length': 'NETWORK_ENCODER_FILTER_LENGTH', 'chunk_size': 'NETWORK_CHUNK_SIZE', 'num_full_chunks': 'train_num_full_chunks', 'units_per_lstm': 'NETWORK_NUM_UNITS_PER_LSTM', 'num_dprnn_blocks': 'NETWORK_NUM_DPRNN_BLOCKS', 'samplerate_hz': 'SAMPLERATE_HZ'}), '(batch_size=BATCH_SIZE, is_causal=NETWORK_IS_CAUSAL,\n model_weights_file=model_weights_file, num_filters_in_encoder=\n NETWORK_NUM_FILTERS_IN_ENCODER, encoder_filter_length=\n NETWORK_ENCODER_FILTER_LENGTH, chunk_size=NETWORK_CHUNK_SIZE,\n num_full_chunks=train_num_full_chunks, units_per_lstm=\n NETWORK_NUM_UNITS_PER_LSTM, num_dprnn_blocks=NETWORK_NUM_DPRNN_BLOCKS,\n samplerate_hz=SAMPLERATE_HZ)\n', (2813, 3224), False, 'from network import TasnetWithDprnn\n'), ((3546, 3594), 'os.path.join', 'os.path.join', (['experiment_dir', '"""tensorboard_logs"""'], {}), "(experiment_dir, 'tensorboard_logs')\n", (3558, 3594), False, 'import os\n'), ((3736, 4356), 'train.train_network', 'train_network', ([], {'experiment_dir': 'experiment_dir', 'tensorboard_dir': 'tensorboard_dir', 'batch_size': 'BATCH_SIZE', 'num_batches_train': 'NUM_BATCHES_TRAIN', 'num_batches_valid': 'NUM_BATCHES_VALID', 'num_epochs': 'NUM_EPOCHS', 'num_epochs_for_early_stopping': 'NUM_EPOCHS_FOR_EARLY_STOPPING', 'optimizer_clip_l2_norm_value': 'OPTIMIZER_CLIP_L2_NORM_VALUE', 'samplerate_hz': 'SAMPLERATE_HZ', 'utterance_length_in_seconds': 'TRAIN_UTTERANCE_LENGTH_IN_SECONDS', 'wav_data_dir_train': 'WAV_DIR_TRAIN', 'wav_data_dir_valid': 'WAV_DIR_VALID', 'file_list_path_train': 'FILE_LIST_PATH_TRAIN', 'file_list_path_valid': 'FILE_LIST_PATH_VALID', 'tasnet': 'tasnet'}), '(experiment_dir=experiment_dir, tensorboard_dir=\n tensorboard_dir, batch_size=BATCH_SIZE, num_batches_train=\n NUM_BATCHES_TRAIN, num_batches_valid=NUM_BATCHES_VALID, num_epochs=\n NUM_EPOCHS, num_epochs_for_early_stopping=NUM_EPOCHS_FOR_EARLY_STOPPING,\n optimizer_clip_l2_norm_value=OPTIMIZER_CLIP_L2_NORM_VALUE,\n samplerate_hz=SAMPLERATE_HZ, utterance_length_in_seconds=\n TRAIN_UTTERANCE_LENGTH_IN_SECONDS, wav_data_dir_train=WAV_DIR_TRAIN,\n wav_data_dir_valid=WAV_DIR_VALID, file_list_path_train=\n FILE_LIST_PATH_TRAIN, file_list_path_valid=FILE_LIST_PATH_VALID, tasnet\n =tasnet)\n', (3749, 4356), False, 'from train import train_network\n'), ((4883, 4944), 'numpy.savetxt', 'np.savetxt', (['validation_loss_file', 'validation_loss'], {'fmt': '"""%.2f"""'}), "(validation_loss_file, validation_loss, fmt='%.2f')\n", (4893, 4944), True, 'import numpy as np\n'), ((5000, 5030), 'os.path.exists', 'os.path.exists', (['wav_output_dir'], {}), '(wav_output_dir)\n', (5014, 5030), False, 'import os\n'), ((5097, 5121), 'os.mkdir', 'os.mkdir', (['wav_output_dir'], {}), '(wav_output_dir)\n', (5105, 5121), False, 'import os\n'), ((5158, 5190), 'numpy.loadtxt', 'np.loadtxt', (['validation_loss_file'], {}), '(validation_loss_file)\n', (5168, 5190), True, 'import numpy as np\n'), ((5496, 5920), 'network.TasnetWithDprnn', 'TasnetWithDprnn', ([], {'batch_size': '(1)', 'is_causal': 'NETWORK_IS_CAUSAL', 'model_weights_file': 'model_weights_file', 'num_filters_in_encoder': 'NETWORK_NUM_FILTERS_IN_ENCODER', 'encoder_filter_length': 'NETWORK_ENCODER_FILTER_LENGTH', 'chunk_size': 'NETWORK_CHUNK_SIZE', 'num_full_chunks': 'separate_max_num_full_chunks', 'units_per_lstm': 'NETWORK_NUM_UNITS_PER_LSTM', 'num_dprnn_blocks': 'NETWORK_NUM_DPRNN_BLOCKS', 'samplerate_hz': 'SAMPLERATE_HZ'}), '(batch_size=1, is_causal=NETWORK_IS_CAUSAL,\n model_weights_file=model_weights_file, num_filters_in_encoder=\n NETWORK_NUM_FILTERS_IN_ENCODER, encoder_filter_length=\n NETWORK_ENCODER_FILTER_LENGTH, chunk_size=NETWORK_CHUNK_SIZE,\n num_full_chunks=separate_max_num_full_chunks, units_per_lstm=\n NETWORK_NUM_UNITS_PER_LSTM, num_dprnn_blocks=NETWORK_NUM_DPRNN_BLOCKS,\n samplerate_hz=SAMPLERATE_HZ)\n', (5511, 5920), False, 'from network import TasnetWithDprnn\n'), ((6672, 6790), 'evaluate.Evaluator', 'Evaluator', ([], {'estimate_wav_dir': 'wav_output_dir', 'groundtruth_wav_dir': 'WAV_DIR_TEST', 'sample_list_path': 'FILE_LIST_PATH_TEST'}), '(estimate_wav_dir=wav_output_dir, groundtruth_wav_dir=WAV_DIR_TEST,\n sample_list_path=FILE_LIST_PATH_TEST)\n', (6681, 6790), False, 'from evaluate import Evaluator\n'), ((2207, 2274), 'sys.exit', 'sys.exit', (['"""Experiment tag already in use. Change tag and run again"""'], {}), "('Experiment tag already in use. Change tag and run again')\n", (2215, 2274), False, 'import sys\n'), ((2396, 2422), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2412, 2422), False, 'import os\n'), ((5044, 5088), 'sys.exit', 'sys.exit', (['"""Separation folder already exists"""'], {}), "('Separation folder already exists')\n", (5052, 5088), False, 'import sys\n'), ((5235, 5271), 'numpy.argmin', 'np.argmin', (['validation_loss_per_epoch'], {}), '(validation_loss_per_epoch)\n', (5244, 5271), True, 'import numpy as np\n'), ((6937, 6984), 'os.path.join', 'os.path.join', (['experiment_dir', '"""results.npy.txt"""'], {}), "(experiment_dir, 'results.npy.txt')\n", (6949, 6984), False, 'import os\n'), ((7036, 7087), 'os.path.join', 'os.path.join', (['experiment_dir', '"""mean_result.npy.txt"""'], {}), "(experiment_dir, 'mean_result.npy.txt')\n", (7048, 7087), False, 'import os\n'), ((7089, 7121), 'numpy.array', 'np.array', (['[evaluator.mean_sisnr]'], {}), '([evaluator.mean_sisnr])\n', (7097, 7121), True, 'import numpy as np\n'), ((6329, 6368), 'os.path.join', 'os.path.join', (['WAV_DIR_TEST', '"""mix_clean"""'], {}), "(WAV_DIR_TEST, 'mix_clean')\n", (6341, 6368), False, 'import os\n')]
|
'''
Reference implementation of Learn2Perturb.
Author: <NAME>
For more details, refer to the paper:
Learn2Perturb: an End-to-end Feature Perturbation Learning to Improve Adversarial Robustness
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Computer Vision and Pattern Recognition (CVPR), 2020
'''
import argparse
import torch
import torchvision
import torchvision.transforms as transforms
import models
from models.normalizer import Normalize_layer
from train import train
from evaluate import evaluate
import os
def parse_args():
'''
parses the learn2perturb arguments
'''
parser = argparse.ArgumentParser(description="Learn2Perturb for adversarial robustness")
# dataset and model config
parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100'])
parser.add_argument('--baseline', type=str, choices=['resnet_v1', 'resnet_v2'], default='resnet_v1')
parser.add_argument('--res_v1_depth', type=int, default=20, help='depth of the res v1')
parser.add_argument('--res_v2_num_blocks', type=int, nargs=4, default=[2,2,2,2], help='num blocks for each of the four layers of Res V2')
# training optimization parameters
parser.add_argument('--epochs', type=int, default=350, help='training epochs number')
parser.add_argument('--batch_size', type=int, default=128, help='Training batch size.')
parser.add_argument('--learning_rate', type=float, default=0.01, help='The Learning Rate.')
parser.add_argument('--lr_schedule', type=int, nargs='+', default=[150, 250], help='epochs in which lr is decreased')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='Weight decay')
parser.add_argument('--model_base', type=str, required=True, help='path to where save the models')
# Learn2Perturb parameters
parser.add_argument('--noise_add_delay', type=int, default=10, help='number of epochs to delay noise injection')
parser.add_argument('--adv_train_delay', type=int, default=20, help='number of epochs to delay adversarial training')
parser.add_argument('--gamma', type=float, default=1e-4, help='parameter gamma in equation (7)')
return parser.parse_args()
def main(args):
'''
pipeline for training and evaluating robust deep convolutional models with Learn2Perturb
'''
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
transform_test = transforms.Compose([
transforms.ToTensor()
])
if args.dataset == 'cifar10':
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
else: # cifar100
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=4)
if args.dataset == 'cifar10':
n_classes = 10
else: # cifar100
n_classes = 100
if args.baseline == 'resnet_v1':
net = models.resnet_v1.l2p_resnet_v1(depth= args.res_v1_depth, num_classes= n_classes)
else:
net = models.resnet_v2.l2p_resnet_v2(num_blocks= args.res_v2_num_blocks, num_classes= n_classes)
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
model = torch.nn.Sequential(
Normalize_layer(mean,std),
net
)
criterion = torch.nn.CrossEntropyLoss()
if torch.cuda.is_available():
model.cuda()
criterion.cuda()
if args.baseline == 'resnet_v1':
layers = [net.stage_1, net.stage_2, net.stage_3]
else:
layers = [net.layer1, net.layer2, net.layer3, net.layer4]
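    # Collect the learnable noise (sigma) maps: one attached to the first conv
    # layer and one inside every residual block; train() updates these alongside
    # the weights to realize Learn2Perturb's perturbation injection.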
model_sigma_maps = []
model_sigma_maps.append(net.cn1_sigma_map)
for layer in layers:
for block in layer:
model_sigma_maps.append(block.sigma_map)
normal_param = [
param for name, param in model.named_parameters()
if not 'sigma' in name
]
sigma_param = [
param for name, param in model.named_parameters()
if 'sigma' in name
]
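    # Two optimizers: regular weights use momentum + weight decay, while the
    # sigma (noise) parameters are trained with weight_decay=0, presumably so
    # the learned perturbations are not regularized toward zero.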
optimizer1 = torch.optim.SGD(normal_param,
lr=args.learning_rate,
momentum=args.momentum, weight_decay=args.weight_decay,
nesterov=True
)
optimizer2 = torch.optim.SGD(sigma_param,
lr=args.learning_rate,
momentum=args.momentum, weight_decay=0,
nesterov=True
)
## create the folder to save models
# if not os.path.exists(args.model_base):
# os.makedirs(args.model_base)
for epoch in range(args.epochs):
print("epoch: {} / {} ...".format(epoch+1, args.epochs))
print(" Training:")
train(model, trainloader, epoch, optimizer1, optimizer2, criterion, layers, model_sigma_maps, args)
print(" Evaluation:")
evaluate(model, testloader, attack=None)
evaluate(model, testloader, attack='pgd')
evaluate(model, testloader, attack='fgsm')
# if (epoch +1) % 25 == 0:
# path = args.model_base + str(epoch + 1) + ".pt"
# torch.save(model, path)
if __name__ == '__main__':
args = parse_args()
main(args)
|
[
"evaluate.evaluate"
] |
[((609, 688), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Learn2Perturb for adversarial robustness"""'}), "(description='Learn2Perturb for adversarial robustness')\n", (632, 688), False, 'import argparse\n'), ((2691, 2792), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (2719, 2792), False, 'import torchvision\n'), ((2807, 2908), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (2835, 2908), False, 'import torchvision\n'), ((2945, 3047), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (2974, 3047), False, 'import torchvision\n'), ((3062, 3164), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (3091, 3164), False, 'import torchvision\n'), ((3546, 3624), 'models.resnet_v1.l2p_resnet_v1', 'models.resnet_v1.l2p_resnet_v1', ([], {'depth': 'args.res_v1_depth', 'num_classes': 'n_classes'}), '(depth=args.res_v1_depth, num_classes=n_classes)\n', (3576, 3624), False, 'import models\n'), ((3651, 3743), 'models.resnet_v2.l2p_resnet_v2', 'models.resnet_v2.l2p_resnet_v2', ([], {'num_blocks': 'args.res_v2_num_blocks', 'num_classes': 'n_classes'}), '(num_blocks=args.res_v2_num_blocks,\n num_classes=n_classes)\n', (3681, 3743), False, 'import models\n'), ((3856, 3882), 'models.normalizer.Normalize_layer', 'Normalize_layer', (['mean', 'std'], {}), '(mean, std)\n', (3871, 3882), False, 'from models.normalizer import Normalize_layer\n'), ((5211, 5314), 'train.train', 'train', (['model', 'trainloader', 'epoch', 'optimizer1', 'optimizer2', 'criterion', 'layers', 'model_sigma_maps', 'args'], {}), '(model, trainloader, epoch, optimizer1, optimizer2, criterion, layers,\n model_sigma_maps, args)\n', (5216, 5314), False, 'from train import train\n'), ((5352, 5392), 'evaluate.evaluate', 'evaluate', (['model', 'testloader'], {'attack': 'None'}), '(model, testloader, attack=None)\n', (5360, 5392), False, 'from evaluate import evaluate\n'), ((5401, 5442), 'evaluate.evaluate', 'evaluate', (['model', 'testloader'], {'attack': '"""pgd"""'}), "(model, testloader, attack='pgd')\n", (5409, 5442), False, 'from evaluate import evaluate\n'), ((5451, 5493), 'evaluate.evaluate', 'evaluate', (['model', 'testloader'], {'attack': '"""fgsm"""'}), "(model, testloader, attack='fgsm')\n", (5459, 5493), False, 'from evaluate import evaluate\n'), ((2439, 2475), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (2460, 2475), True, 'import torchvision.transforms as transforms\n'), ((2485, 2518), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2516, 2518), True, 'import torchvision.transforms as transforms\n'), ((2528, 2549), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2547, 
2549), True, 'import torchvision.transforms as transforms\n'), ((2608, 2629), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2627, 2629), True, 'import torchvision.transforms as transforms\n')]
|
import os
import sys
import math
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.multiprocessing import Queue, Process
sys.path.insert(0, '../lib')
sys.path.insert(0, '../model')
# from data.CrowdHuman import CrowdHuman
from data.CrowdHuman_json import CrowdHuman
from utils import misc_utils, nms_utils
from evaluate import compute_JI, compute_APMR
from evaluate import compute_MMR
from det_oprs.bbox_opr import Pointlist_dis, matcher
from scipy.optimize import linear_sum_assignment
MAX_VAL = 8e6
def eval_all(args, config, network):
# model_path
saveDir = os.path.join('../model', args.model_dir, config.model_dir)
evalDir = os.path.join('../model', args.model_dir, config.eval_dir)
misc_utils.ensure_dir(evalDir)
if 'pth' not in args.resume_weights:
model_file = os.path.join(saveDir,
'dump-{}.pth'.format(args.resume_weights))
else:
model_file = args.resume_weights
assert os.path.exists(model_file)
# get devices
str_devices = args.devices
devices = misc_utils.device_parser(str_devices)
# load data
crowdhuman = CrowdHuman(config, if_train=False)
#crowdhuman.records = crowdhuman.records[:10]
# multiprocessing
num_devs = len(devices)
len_dataset = len(crowdhuman)
num_image = math.ceil(len_dataset / num_devs)
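    # Split the test set evenly across the devices; each worker process pushes
    # per-image detection and matching results into the two queues below.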
result_queue = Queue(500)
result_queue_match = Queue(500)
procs = []
all_results = []
all_results_match = []
for i in range(num_devs):
start = i * num_image
end = min(start + num_image, len_dataset)
if config.network == 'pos':
proc = Process(target=inference_pos, args=(
config, network, model_file, devices[i], crowdhuman, start, end, result_queue, result_queue_match))
else:
proc = Process(target=inference_bfj, args=(
config, network, model_file, devices[i], crowdhuman, start, end, result_queue, result_queue_match))
proc.start()
procs.append(proc)
pbar = tqdm(total=len_dataset, ncols=50)
for i in range(len_dataset):
t = result_queue.get()
all_results.append(t)
t_match = result_queue_match.get()
all_results_match.extend(t_match)
pbar.update(1)
pbar.close()
for p in procs:
p.join()
# fpath = os.path.join(evalDir, 'dump-{}.json'.format(args.resume_weights))
fpath = os.path.join(evalDir, 'dump-{}.json'.format(30))
misc_utils.save_json_lines(all_results, fpath)
fpath_match = os.path.join(evalDir, 'bf_match_bbox.json')
misc_utils.save_json(all_results_match, fpath_match)
# evaluation
# res_line, JI = compute_JI.evaluation_all(fpath, 'box')
print('processing body...')
AP, MR = compute_APMR.compute_APMR(fpath, config.eval_source, 'box')
line = 'BODY-->AP:{:.4f}, MR:{:.4f}.'.format(AP, MR)
print(line)
print('processing face...')
AP, MR = compute_APMR.compute_APMR(fpath, config.eval_source, 'box', if_face=True)
line = 'FACE-->AP:{:.4f}, MR:{:.4f}.'.format(AP, MR)
print(line)
MMR = compute_MMR.compute_MMR(fpath_match, config.eval_source)
def inference_pos(config, network, model_file, device, dataset, start, end, result_queue, result_queue_match):
torch.set_default_tensor_type('torch.FloatTensor')
torch.multiprocessing.set_sharing_strategy('file_system')
# init model
net = network()
net.cuda(device)
net = net.eval()
check_point = torch.load(model_file)
net.load_state_dict(check_point['state_dict'])
# init data
    dataset.records = dataset.records[start:end]
data_iter = torch.utils.data.DataLoader(dataset=dataset, shuffle=False)
# inference
for (image, gt_boxes, im_info, ID, image_id) in data_iter:
pred_boxes, class_num = net(image.cuda(device), im_info.cuda(device))
scale = im_info[0, 2]
if config.test_nms_method == 'set_nms':
assert pred_boxes.shape[-1] > 6, "Not EMD Network! Using normal_nms instead."
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
top_k = pred_boxes.shape[-1] // 6
n = pred_boxes.shape[0]
pred_boxes = pred_boxes.reshape(-1, 6)
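            # Tag every one of the top_k boxes with the index of its source
            # proposal, so set_cpu_nms can skip suppression between boxes that
            # come from the same proposal (EMD-style set NMS).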
idents = np.tile(np.arange(n)[:,None], (1, top_k)).reshape(-1, 1)
pred_boxes = np.hstack((pred_boxes, idents))
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
result = []
for classid in range(class_num):
keep = pred_boxes[:, 5] == (classid + 1)
class_boxes = pred_boxes[keep]
keep = nms_utils.set_cpu_nms(class_boxes, 0.5)
class_boxes = class_boxes[keep]
result.append(class_boxes)
pred_boxes = np.vstack(result)
elif config.test_nms_method == 'normal_nms':
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
pred_boxes = pred_boxes.reshape(-1, 6)
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
result = []
for classid in range(class_num):
keep = pred_boxes[:, 5] == (classid + 1)
class_boxes = pred_boxes[keep]
keep = nms_utils.cpu_nms(class_boxes, config.test_nms)
class_boxes = class_boxes[keep]
result.append(class_boxes)
pred_boxes = np.vstack(result)
elif config.test_nms_method == 'none':
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
pred_boxes = pred_boxes.reshape(-1, 6)
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
else:
raise ValueError('Unknown NMS method.')
#if pred_boxes.shape[0] > config.detection_per_image and \
# config.test_nms_method != 'none':
# order = np.argsort(-pred_boxes[:, 4])
# order = order[:config.detection_per_image]
# pred_boxes = pred_boxes[order]
# recovery the scale
pred_boxes[:, :4] /= scale
pred_boxes[:, 2:4] -= pred_boxes[:, :2]
gt_boxes = gt_boxes[0].numpy()
gt_boxes[:, 2:4] -= gt_boxes[:, :2]
match_result = match_body_face_pos(pred_boxes, image_id)
result_dict = dict(ID=ID[0], height=int(im_info[0, -3]), width=int(im_info[0, -2]),
dtboxes=boxes_dump(pred_boxes), gtboxes=boxes_dump(gt_boxes))
result_queue.put_nowait(result_dict)
result_queue_match.put_nowait(match_result)
def inference_bfj(config, network, model_file, device, dataset, start, end, result_queue, result_queue_match):
torch.set_default_tensor_type('torch.FloatTensor')
torch.multiprocessing.set_sharing_strategy('file_system')
# init model
net = network()
net.cuda(device)
net = net.eval()
check_point = torch.load(model_file)
net.load_state_dict(check_point['state_dict'])
# init data
    dataset.records = dataset.records[start:end]
data_iter = torch.utils.data.DataLoader(dataset=dataset, shuffle=False)
# inference
for (image, gt_boxes, im_info, ID, image_id) in data_iter:
pred_boxes, pred_emb, class_num = net(image.cuda(device), im_info.cuda(device))
scale = im_info[0, 2]
if config.test_nms_method == 'set_nms':
assert pred_boxes.shape[-1] > 6, "Not EMD Network! Using normal_nms instead."
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
top_k = pred_boxes.shape[-1] // 6
n = pred_boxes.shape[0]
pred_boxes = pred_boxes.reshape(-1, 6)
idents = np.tile(np.arange(n)[:,None], (1, top_k)).reshape(-1, 1)
pred_boxes = np.hstack((pred_boxes, idents))
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
keep = nms_utils.set_cpu_nms(pred_boxes, 0.5)
pred_boxes = pred_boxes[keep]
elif config.test_nms_method == 'normal_nms':
assert pred_boxes.shape[-1] % 8 == 0, "Prediction dim Error!"
pred_boxes = pred_boxes.reshape(-1, 8)
pred_emb = pred_emb.reshape(-1, 32)
keep = pred_boxes[:, 6] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
pred_emb = pred_emb[keep]
result = []
result_emb = []
for classid in range(class_num):
keep = pred_boxes[:, 7] == (classid + 1)
class_boxes = pred_boxes[keep]
class_emb = pred_emb[keep]
keep = nms_utils.cpu_nms(class_boxes, config.test_nms)
class_boxes = class_boxes[keep]
class_emb = class_emb[keep]
result.append(class_boxes)
result_emb.append(class_emb)
pred_boxes = np.vstack(result)
pred_emb = np.vstack(result_emb)
elif config.test_nms_method == 'none':
assert pred_boxes.shape[-1] % 6 == 0, "Prediction dim Error!"
pred_boxes = pred_boxes.reshape(-1, 6)
keep = pred_boxes[:, 4] > config.pred_cls_threshold
pred_boxes = pred_boxes[keep]
else:
raise ValueError('Unknown NMS method.')
#if pred_boxes.shape[0] > config.detection_per_image and \
# config.test_nms_method != 'none':
# order = np.argsort(-pred_boxes[:, 4])
# order = order[:config.detection_per_image]
# pred_boxes = pred_boxes[order]
# recovery the scale
pred_boxes[:, :6] /= scale
pred_boxes[:, 2:4] -= pred_boxes[:, :2]
gt_boxes = gt_boxes[0].numpy()
gt_boxes[:, 2:4] -= gt_boxes[:, :2]
match_result = match_body_face_bfj(pred_boxes, pred_emb, image_id)
# match_result = match_body_face_bfj(pred_boxes, image_id)
result_dict = dict(ID=ID[0], height=int(im_info[0, -3]), width=int(im_info[0, -2]),
dtboxes=boxes_dump(pred_boxes, pred_emb), gtboxes=boxes_dump(gt_boxes))
result_queue.put_nowait(result_dict)
result_queue_match.put_nowait(match_result)
def match_body_face_bfj(pred_boxes, pred_emb, image_id):
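    # BFJ matching: for every body detection pick the face with the highest
    # fused (hook-distance + embedding) similarity; bodies whose best match
    # scores below 0.98 are paired with a dummy face box instead.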
keep_body = pred_boxes[:, 7] == 1
keep_face = pred_boxes[:, 7] == 2
body_boxes = pred_boxes[keep_body]
body_embs = pred_emb[keep_body]
face_boxes = pred_boxes[keep_face]
face_embs = pred_emb[keep_face]
    wof_flag = False  # "without face": fall back to dummy (zero) face boxes when no confident face exists
if len(face_boxes) == 0:
wof_flag = True
base_body_boxes = body_boxes[:, :4]
base_body_scores = body_boxes[:, 6]
base_body_hooks = body_boxes[:, 4:6]
base_face_boxes = face_boxes[:, :4]
base_face_scores = face_boxes[:, 6]
base_face_hooks = face_boxes[:, 4:6]
inds_conf_base_body = (base_body_scores > 0.3).nonzero()
if not inds_conf_base_body[0].size:
inds_conf_base_body = np.argmax(base_body_scores)[None]
wof_flag = True
inds_conf_base_face = (base_face_scores > 0.3).nonzero()
if not inds_conf_base_face[0].size and (not wof_flag):
inds_conf_base_face = np.argmax(base_face_scores)[None]
wof_flag = True
base_body_boxes = base_body_boxes[inds_conf_base_body]
base_body_hooks = base_body_hooks[inds_conf_base_body]
base_body_scores = base_body_scores[inds_conf_base_body]
base_body_embeddings = body_embs[inds_conf_base_body]
if not wof_flag:
base_face_boxes = base_face_boxes[inds_conf_base_face]
base_face_scores = base_face_scores[inds_conf_base_face]
base_face_hooks = base_face_hooks[inds_conf_base_face]
base_face_embeddings = face_embs[inds_conf_base_face]
if wof_flag:
face_boxes = np.zeros_like(base_body_boxes)
face_scores = np.zeros_like(base_body_scores)
else:
score_matrix = (base_face_scores[:, None] + base_body_scores) / 2
distance_matrix = Pointlist_dis(base_face_hooks, base_body_hooks, base_body_boxes)
embedding_matrix = np.sqrt(np.square(base_face_embeddings[:, None] - base_body_embeddings).sum(-1))
distance_matrix_max = np.max(distance_matrix, axis=0)
distance_matrix = distance_matrix / distance_matrix_max
embedding_matrix_max = np.max(embedding_matrix, axis=0)
embedding_matrix = embedding_matrix / embedding_matrix_max
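        # Fuse the two cues with a confidence-dependent weight: confident pairs
        # (scores near 1) are matched mainly by hook-point distance, uncertain
        # ones by embedding distance; exp(-cost) maps the cost into (0, 1].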
match_merge_matrix = distance_matrix * score_matrix * score_matrix + embedding_matrix * (1 - score_matrix * score_matrix)
match_merge_matrix = np.exp(-match_merge_matrix)
matched_vals = np.max(match_merge_matrix, axis=0)
matched_indices = np.argmax(match_merge_matrix, axis=0)
ignore_indices = (matched_vals < 0.98).nonzero()
dummy_tensor = np.array([0.0, 0.0, 0.0, 0.0])
face_boxes = base_face_boxes[matched_indices]
face_scores = base_face_scores[matched_indices]
if ignore_indices[0].size:
face_boxes[ignore_indices] = dummy_tensor
face_scores[ignore_indices] = 0
bodylist = np.hstack((base_body_boxes, base_body_scores[:, None]))
facelist = np.hstack((face_boxes, face_scores[:, None]))
result = []
for body, face in zip(bodylist, facelist):
body = body.tolist()
face = face.tolist()
content = {
'image_id': int(image_id),
'category_id': 1,
'bbox':[round(i, 1) for i in body[:4]],
'score':round(float(body[4]), 5),
'f_bbox':[round(i, 1) for i in face[:4]],
'f_score':round(float(face[4]), 5)
}
result.append(content)
return result
def match_body_face_pos(pred_boxes, image_id):
keep_body = pred_boxes[:, 5] == 1
keep_face = pred_boxes[:, 5] == 2
body_boxes = pred_boxes[keep_body]
face_boxes = pred_boxes[keep_face]
    wof_flag = False  # "without face": fall back to dummy (zero) face boxes when no confident face exists
if len(face_boxes) == 0:
wof_flag = True
base_body_boxes = body_boxes[:, :4]
base_body_scores = body_boxes[:, 4]
base_face_boxes = face_boxes[:, :4]
base_face_scores = face_boxes[:, 4]
inds_conf_base_body = (base_body_scores > 0.3).nonzero()
if not inds_conf_base_body[0].size:
inds_conf_base_body = np.argmax(base_body_scores)[None]
wof_flag = True
inds_conf_base_face = (base_face_scores > 0.3).nonzero()
if not inds_conf_base_face[0].size and (not wof_flag):
inds_conf_base_face = np.argmax(base_face_scores)[None]
wof_flag = True
base_body_boxes = base_body_boxes[inds_conf_base_body]
base_body_scores = base_body_scores[inds_conf_base_body]
if not wof_flag:
base_face_boxes = base_face_boxes[inds_conf_base_face]
base_face_scores = base_face_scores[inds_conf_base_face]
if wof_flag:
face_boxes = np.zeros_like(base_body_boxes)
face_scores = np.zeros_like(base_body_scores)
else:
body_face_distance_matrix = cal_body_face_distance_matrix(base_body_boxes, base_face_boxes)
base_body_boxes_filter = []
base_body_scores_filter = []
base_face_boxes_filter = []
base_face_scores_filter = []
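        # Hungarian matching: linear_sum_assignment minimizes the total cost,
        # where the cost is 1 / one-sided-IoU for plausible face-in-body pairs
        # and MAX_VAL otherwise; MAX_VAL matches are discarded below.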
body_row_idxs, face_col_idxs = linear_sum_assignment(body_face_distance_matrix)
for body_idx in body_row_idxs:
f_idx = np.where(body_row_idxs == body_idx)[0][0]
col_face_idx = face_col_idxs[f_idx]
if body_face_distance_matrix[body_idx, col_face_idx] != MAX_VAL:
base_body_boxes_filter.append(base_body_boxes[body_idx])
base_body_scores_filter.append(base_body_scores[body_idx])
base_face_boxes_filter.append(base_face_boxes[col_face_idx])
base_face_scores_filter.append(base_face_scores[col_face_idx])
if base_body_boxes_filter == []:
face_boxes = np.zeros_like(base_body_boxes)
face_scores = np.zeros_like(base_body_scores)
wof_flag = True
else:
base_body_boxes = np.vstack(base_body_boxes_filter)
base_body_scores = np.hstack(base_body_scores_filter)
face_boxes = np.vstack(base_face_boxes_filter)
face_scores = np.hstack(base_face_scores_filter)
bodylist = np.hstack((base_body_boxes, base_body_scores[:, None]))
facelist = np.hstack((face_boxes, face_scores[:, None]))
result = []
for body, face in zip(bodylist, facelist):
body = body.tolist()
face = face.tolist()
content = {
'image_id': int(image_id),
'category_id': 1,
'bbox':[round(i, 1) for i in body[:4]],
'score':round(float(body[4]), 5),
'f_bbox':[round(i, 1) for i in face[:4]],
'f_score':round(float(face[4]), 5)
}
result.append(content)
return result
def cal_body_face_distance_matrix(body_boxes, face_boxes):
body_boxes_nums = len(body_boxes)
face_boxes_nums = len(face_boxes)
body_face_distance_matrix = np.zeros((body_boxes_nums, face_boxes_nums))
for body_idx in range(body_boxes_nums):
body_box = body_boxes[body_idx]
for face_idx in range(face_boxes_nums):
face_box = face_boxes[face_idx]
face_iou_in_body = one_side_iou(face_box, body_box)
if face_iou_in_body > 0.2:
body_face_distance_matrix[body_idx, face_idx] = 1 / face_iou_in_body
else:
body_face_distance_matrix[body_idx, face_idx] = MAX_VAL
return body_face_distance_matrix
def one_side_iou(box1, box2):
    # Boxes are [x, y, w, h]; the w/h components are converted to corner
    # coordinates inline below. Returns intersection(box1, box2) / area(box1).
x1 = max(box1[0], box2[0])
x2 = min(box1[2] + box1[0], box2[2] + box2[0])
y1 = max(box1[1], box2[1])
y2 = min(box1[3] + box1[1], box2[3] + box2[1])
intersection = max(x2 - x1, 0) * max(y2 - y1, 0)
    a1 = box1[2] * box1[3]  # area of box1 (w * h)
iou = intersection / a1 # intersection over box 1
return iou
def boxes_dump(boxes, embs=None):
if boxes.shape[-1] == 8: # v2 or v3
if embs is not None:
result = [{'box':[round(i, 1) for i in box[:6].tolist()],
'score':round(float(box[6]), 5),
'tag':int(box[7]),
'emb':emb.tolist()} for box, emb in zip(boxes, embs)]
else:
result = [{'box':[round(i, 1) for i in box[:4].tolist()],
'score':round(float(box[6]), 5),
'tag':int(box[7])} for box in boxes]
elif boxes.shape[-1] == 7:
result = [{'box':[round(i, 1) for i in box[:4]],
'score':round(float(box[4]), 5),
'tag':int(box[5]),
'proposal_num':int(box[6])} for box in boxes]
elif boxes.shape[-1] == 6: # v1
result = [{'box':[round(i, 1) for i in box[:4].tolist()],
'score':round(float(box[4]), 5),
'tag':int(box[5])} for box in boxes]
elif boxes.shape[-1] == 5:
result = [{'box':[round(i, 1) for i in box[:4]],
'tag':int(box[4])} for box in boxes]
else:
raise ValueError('Unknown box dim.')
return result
def run_test():
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', '-md', default=None, required=True, type=str)
parser.add_argument('--config', '-c', default=None,required=True,type=str)
parser.add_argument('--resume_weights', '-r', default=None, required=True, type=str)
parser.add_argument('--devices', '-d', default='0', type=str)
os.environ['NCCL_IB_DISABLE'] = '1'
args = parser.parse_args()
# import libs
model_root_dir = os.path.join('../model/', args.model_dir)
sys.path.insert(0, model_root_dir)
if args.config == 'pos':
from config_pos import config
elif args.config == 'bfj':
from config_bfj import config
else:
raise Exception('Error - only support for bfj or pos.')
if config.network == 'pos':
from network_pos import Network
elif config.network == 'bfj':
from network_bfj import Network
else:
raise Exception('Error - only support for bfj or pos.')
eval_all(args, config, Network)
if __name__ == '__main__':
run_test()
|
[
"evaluate.compute_MMR.compute_MMR",
"evaluate.compute_APMR.compute_APMR"
] |
[((154, 182), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../lib"""'], {}), "(0, '../lib')\n", (169, 182), False, 'import sys\n'), ((183, 213), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../model"""'], {}), "(0, '../model')\n", (198, 213), False, 'import sys\n'), ((604, 662), 'os.path.join', 'os.path.join', (['"""../model"""', 'args.model_dir', 'config.model_dir'], {}), "('../model', args.model_dir, config.model_dir)\n", (616, 662), False, 'import os\n'), ((677, 734), 'os.path.join', 'os.path.join', (['"""../model"""', 'args.model_dir', 'config.eval_dir'], {}), "('../model', args.model_dir, config.eval_dir)\n", (689, 734), False, 'import os\n'), ((739, 769), 'utils.misc_utils.ensure_dir', 'misc_utils.ensure_dir', (['evalDir'], {}), '(evalDir)\n', (760, 769), False, 'from utils import misc_utils, nms_utils\n'), ((976, 1002), 'os.path.exists', 'os.path.exists', (['model_file'], {}), '(model_file)\n', (990, 1002), False, 'import os\n'), ((1066, 1103), 'utils.misc_utils.device_parser', 'misc_utils.device_parser', (['str_devices'], {}), '(str_devices)\n', (1090, 1103), False, 'from utils import misc_utils, nms_utils\n'), ((1137, 1171), 'data.CrowdHuman_json.CrowdHuman', 'CrowdHuman', (['config'], {'if_train': '(False)'}), '(config, if_train=False)\n', (1147, 1171), False, 'from data.CrowdHuman_json import CrowdHuman\n'), ((1322, 1355), 'math.ceil', 'math.ceil', (['(len_dataset / num_devs)'], {}), '(len_dataset / num_devs)\n', (1331, 1355), False, 'import math\n'), ((1375, 1385), 'torch.multiprocessing.Queue', 'Queue', (['(500)'], {}), '(500)\n', (1380, 1385), False, 'from torch.multiprocessing import Queue, Process\n'), ((1411, 1421), 'torch.multiprocessing.Queue', 'Queue', (['(500)'], {}), '(500)\n', (1416, 1421), False, 'from torch.multiprocessing import Queue, Process\n'), ((2056, 2089), 'tqdm.tqdm', 'tqdm', ([], {'total': 'len_dataset', 'ncols': '(50)'}), '(total=len_dataset, ncols=50)\n', (2060, 2089), False, 'from tqdm import tqdm\n'), ((2491, 2537), 'utils.misc_utils.save_json_lines', 'misc_utils.save_json_lines', (['all_results', 'fpath'], {}), '(all_results, fpath)\n', (2517, 2537), False, 'from utils import misc_utils, nms_utils\n'), ((2556, 2599), 'os.path.join', 'os.path.join', (['evalDir', '"""bf_match_bbox.json"""'], {}), "(evalDir, 'bf_match_bbox.json')\n", (2568, 2599), False, 'import os\n'), ((2604, 2656), 'utils.misc_utils.save_json', 'misc_utils.save_json', (['all_results_match', 'fpath_match'], {}), '(all_results_match, fpath_match)\n', (2624, 2656), False, 'from utils import misc_utils, nms_utils\n'), ((2780, 2839), 'evaluate.compute_APMR.compute_APMR', 'compute_APMR.compute_APMR', (['fpath', 'config.eval_source', '"""box"""'], {}), "(fpath, config.eval_source, 'box')\n", (2805, 2839), False, 'from evaluate import compute_JI, compute_APMR\n'), ((2958, 3031), 'evaluate.compute_APMR.compute_APMR', 'compute_APMR.compute_APMR', (['fpath', 'config.eval_source', '"""box"""'], {'if_face': '(True)'}), "(fpath, config.eval_source, 'box', if_face=True)\n", (2983, 3031), False, 'from evaluate import compute_JI, compute_APMR\n'), ((3115, 3171), 'evaluate.compute_MMR.compute_MMR', 'compute_MMR.compute_MMR', (['fpath_match', 'config.eval_source'], {}), '(fpath_match, config.eval_source)\n', (3138, 3171), False, 'from evaluate import compute_MMR\n'), ((3289, 3339), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (3318, 3339), False, 'import torch\n'), ((3344, 3401), 
'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (3386, 3401), False, 'import torch\n'), ((3499, 3521), 'torch.load', 'torch.load', (['model_file'], {}), '(model_file)\n', (3509, 3521), False, 'import torch\n'), ((3655, 3714), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'shuffle': '(False)'}), '(dataset=dataset, shuffle=False)\n', (3682, 3714), False, 'import torch\n'), ((6772, 6822), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (6801, 6822), False, 'import torch\n'), ((6827, 6884), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (6869, 6884), False, 'import torch\n'), ((6982, 7004), 'torch.load', 'torch.load', (['model_file'], {}), '(model_file)\n', (6992, 7004), False, 'import torch\n'), ((7138, 7197), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'shuffle': '(False)'}), '(dataset=dataset, shuffle=False)\n', (7165, 7197), False, 'import torch\n'), ((13123, 13178), 'numpy.hstack', 'np.hstack', (['(base_body_boxes, base_body_scores[:, None])'], {}), '((base_body_boxes, base_body_scores[:, None]))\n', (13132, 13178), True, 'import numpy as np\n'), ((13194, 13239), 'numpy.hstack', 'np.hstack', (['(face_boxes, face_scores[:, None])'], {}), '((face_boxes, face_scores[:, None]))\n', (13203, 13239), True, 'import numpy as np\n'), ((16509, 16564), 'numpy.hstack', 'np.hstack', (['(base_body_boxes, base_body_scores[:, None])'], {}), '((base_body_boxes, base_body_scores[:, None]))\n', (16518, 16564), True, 'import numpy as np\n'), ((16580, 16625), 'numpy.hstack', 'np.hstack', (['(face_boxes, face_scores[:, None])'], {}), '((face_boxes, face_scores[:, None]))\n', (16589, 16625), True, 'import numpy as np\n'), ((17262, 17306), 'numpy.zeros', 'np.zeros', (['(body_boxes_nums, face_boxes_nums)'], {}), '((body_boxes_nums, face_boxes_nums))\n', (17270, 17306), True, 'import numpy as np\n'), ((19533, 19558), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19556, 19558), False, 'import argparse\n'), ((19988, 20029), 'os.path.join', 'os.path.join', (['"""../model/"""', 'args.model_dir'], {}), "('../model/', args.model_dir)\n", (20000, 20029), False, 'import os\n'), ((20034, 20068), 'sys.path.insert', 'sys.path.insert', (['(0)', 'model_root_dir'], {}), '(0, model_root_dir)\n', (20049, 20068), False, 'import sys\n'), ((11807, 11837), 'numpy.zeros_like', 'np.zeros_like', (['base_body_boxes'], {}), '(base_body_boxes)\n', (11820, 11837), True, 'import numpy as np\n'), ((11860, 11891), 'numpy.zeros_like', 'np.zeros_like', (['base_body_scores'], {}), '(base_body_scores)\n', (11873, 11891), True, 'import numpy as np\n'), ((12012, 12076), 'det_oprs.bbox_opr.Pointlist_dis', 'Pointlist_dis', (['base_face_hooks', 'base_body_hooks', 'base_body_boxes'], {}), '(base_face_hooks, base_body_hooks, base_body_boxes)\n', (12025, 12076), False, 'from det_oprs.bbox_opr import Pointlist_dis, matcher\n'), ((12215, 12246), 'numpy.max', 'np.max', (['distance_matrix'], {'axis': '(0)'}), '(distance_matrix, axis=0)\n', (12221, 12246), True, 'import numpy as np\n'), ((12342, 12374), 'numpy.max', 'np.max', (['embedding_matrix'], {'axis': '(0)'}), '(embedding_matrix, axis=0)\n', (12348, 12374), True, 'import numpy as np\n'), ((12601, 12628), 'numpy.exp', 'np.exp', 
(['(-match_merge_matrix)'], {}), '(-match_merge_matrix)\n', (12607, 12628), True, 'import numpy as np\n'), ((12652, 12686), 'numpy.max', 'np.max', (['match_merge_matrix'], {'axis': '(0)'}), '(match_merge_matrix, axis=0)\n', (12658, 12686), True, 'import numpy as np\n'), ((12713, 12750), 'numpy.argmax', 'np.argmax', (['match_merge_matrix'], {'axis': '(0)'}), '(match_merge_matrix, axis=0)\n', (12722, 12750), True, 'import numpy as np\n'), ((12832, 12862), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (12840, 12862), True, 'import numpy as np\n'), ((14852, 14882), 'numpy.zeros_like', 'np.zeros_like', (['base_body_boxes'], {}), '(base_body_boxes)\n', (14865, 14882), True, 'import numpy as np\n'), ((14905, 14936), 'numpy.zeros_like', 'np.zeros_like', (['base_body_scores'], {}), '(base_body_scores)\n', (14918, 14936), True, 'import numpy as np\n'), ((15232, 15280), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['body_face_distance_matrix'], {}), '(body_face_distance_matrix)\n', (15253, 15280), False, 'from scipy.optimize import linear_sum_assignment\n'), ((1650, 1789), 'torch.multiprocessing.Process', 'Process', ([], {'target': 'inference_pos', 'args': '(config, network, model_file, devices[i], crowdhuman, start, end,\n result_queue, result_queue_match)'}), '(target=inference_pos, args=(config, network, model_file, devices[i],\n crowdhuman, start, end, result_queue, result_queue_match))\n', (1657, 1789), False, 'from torch.multiprocessing import Queue, Process\n'), ((1840, 1979), 'torch.multiprocessing.Process', 'Process', ([], {'target': 'inference_bfj', 'args': '(config, network, model_file, devices[i], crowdhuman, start, end,\n result_queue, result_queue_match)'}), '(target=inference_bfj, args=(config, network, model_file, devices[i],\n crowdhuman, start, end, result_queue, result_queue_match))\n', (1847, 1979), False, 'from torch.multiprocessing import Queue, Process\n'), ((4350, 4381), 'numpy.hstack', 'np.hstack', (['(pred_boxes, idents)'], {}), '((pred_boxes, idents))\n', (4359, 4381), True, 'import numpy as np\n'), ((4840, 4857), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (4849, 4857), True, 'import numpy as np\n'), ((7843, 7874), 'numpy.hstack', 'np.hstack', (['(pred_boxes, idents)'], {}), '((pred_boxes, idents))\n', (7852, 7874), True, 'import numpy as np\n'), ((8000, 8038), 'utils.nms_utils.set_cpu_nms', 'nms_utils.set_cpu_nms', (['pred_boxes', '(0.5)'], {}), '(pred_boxes, 0.5)\n', (8021, 8038), False, 'from utils import misc_utils, nms_utils\n'), ((10989, 11016), 'numpy.argmax', 'np.argmax', (['base_body_scores'], {}), '(base_body_scores)\n', (10998, 11016), True, 'import numpy as np\n'), ((11197, 11224), 'numpy.argmax', 'np.argmax', (['base_face_scores'], {}), '(base_face_scores)\n', (11206, 11224), True, 'import numpy as np\n'), ((14276, 14303), 'numpy.argmax', 'np.argmax', (['base_body_scores'], {}), '(base_body_scores)\n', (14285, 14303), True, 'import numpy as np\n'), ((14484, 14511), 'numpy.argmax', 'np.argmax', (['base_face_scores'], {}), '(base_face_scores)\n', (14493, 14511), True, 'import numpy as np\n'), ((16112, 16142), 'numpy.zeros_like', 'np.zeros_like', (['base_body_boxes'], {}), '(base_body_boxes)\n', (16125, 16142), True, 'import numpy as np\n'), ((16169, 16200), 'numpy.zeros_like', 'np.zeros_like', (['base_body_scores'], {}), '(base_body_scores)\n', (16182, 16200), True, 'import numpy as np\n'), ((16273, 16306), 'numpy.vstack', 'np.vstack', (['base_body_boxes_filter'], {}), 
'(base_body_boxes_filter)\n', (16282, 16306), True, 'import numpy as np\n'), ((16338, 16372), 'numpy.hstack', 'np.hstack', (['base_body_scores_filter'], {}), '(base_body_scores_filter)\n', (16347, 16372), True, 'import numpy as np\n'), ((16398, 16431), 'numpy.vstack', 'np.vstack', (['base_face_boxes_filter'], {}), '(base_face_boxes_filter)\n', (16407, 16431), True, 'import numpy as np\n'), ((16458, 16492), 'numpy.hstack', 'np.hstack', (['base_face_scores_filter'], {}), '(base_face_scores_filter)\n', (16467, 16492), True, 'import numpy as np\n'), ((4684, 4723), 'utils.nms_utils.set_cpu_nms', 'nms_utils.set_cpu_nms', (['class_boxes', '(0.5)'], {}), '(class_boxes, 0.5)\n', (4705, 4723), False, 'from utils import misc_utils, nms_utils\n'), ((5502, 5519), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (5511, 5519), True, 'import numpy as np\n'), ((8971, 8988), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (8980, 8988), True, 'import numpy as np\n'), ((9012, 9033), 'numpy.vstack', 'np.vstack', (['result_emb'], {}), '(result_emb)\n', (9021, 9033), True, 'import numpy as np\n'), ((5338, 5385), 'utils.nms_utils.cpu_nms', 'nms_utils.cpu_nms', (['class_boxes', 'config.test_nms'], {}), '(class_boxes, config.test_nms)\n', (5355, 5385), False, 'from utils import misc_utils, nms_utils\n'), ((8718, 8765), 'utils.nms_utils.cpu_nms', 'nms_utils.cpu_nms', (['class_boxes', 'config.test_nms'], {}), '(class_boxes, config.test_nms)\n', (8735, 8765), False, 'from utils import misc_utils, nms_utils\n'), ((12112, 12175), 'numpy.square', 'np.square', (['(base_face_embeddings[:, None] - base_body_embeddings)'], {}), '(base_face_embeddings[:, None] - base_body_embeddings)\n', (12121, 12175), True, 'import numpy as np\n'), ((15340, 15375), 'numpy.where', 'np.where', (['(body_row_idxs == body_idx)'], {}), '(body_row_idxs == body_idx)\n', (15348, 15375), True, 'import numpy as np\n'), ((4276, 4288), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4285, 4288), True, 'import numpy as np\n'), ((7769, 7781), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (7778, 7781), True, 'import numpy as np\n')]
|
"""Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy <EMAIL>
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import json
import os
from types import SimpleNamespace
import fire
import pandas
import pandas as pd
import torch
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate
from model import BertForClassification, RnnForSentencePairClassification, LogisticRegression
from utils import load_torch_model
LABELS = ['0', '1']
MODEL_MAP = {
'bert': BertForClassification,
'rnn': RnnForSentencePairClassification,
'lr': LogisticRegression
}
def main(in_file='/data/SMP-CAIL2020-test1.csv',
out_file='/output/result1.csv',
model_config='config/bert_config.json'):
"""Test model for given test set on 1 GPU or CPU.
Args:
in_file: file to be tested
out_file: output file
model_config: config file
"""
# 0. Load config
with open(model_config) as fin:
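        # object_hook turns every decoded JSON object into a SimpleNamespace,
        # so nested config values are usable as attributes (e.g. config.model_path).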
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
device = torch.device('cuda')
# device = torch.device('cpu')
else:
device = torch.device('cpu')
    # 0. Preprocess: flatten each JSON-line paragraph into (para_id, sentence) rows
tag_sents = []
para_id = 0
with open(in_file, 'r', encoding='utf-8') as fin:
for line in fin:
sents = json.loads(line.strip())
text = sents['text']
sentences = [item['sentence'] for item in text]
for sent in sentences:
tag_sents.append((para_id, sent))
para_id += 1
df = pandas.DataFrame(tag_sents, columns=['para', 'content'])
df.to_csv("data/para_content_test.csv", columns=['para', 'content'], index=False)
# 1. Load data
data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
test_set = data.load_file("data/para_content_test.csv", train=False)
data_loader_test = DataLoader(
test_set, batch_size=config.batch_size, shuffle=False)
# 2. Load model
model = MODEL_MAP[config.model_type](config)
model = load_torch_model(
model, model_path=os.path.join(config.model_path, 'model.bin'))
model.to(device)
# 3. Evaluate
answer_list = evaluate(model, data_loader_test, device)
# 4. Write answers to file
df = pd.read_csv("data/para_content_test.csv")
idcontent_list = list(df.itertuples(index=False))
filter_list = [k for k,v in zip(idcontent_list, answer_list) if v]
df = pd.DataFrame(filter_list, columns=['para', 'content'])
df.to_csv(out_file, columns=['para', 'content'], index=False)
if __name__ == '__main__':
fire.Fire(main)
|
[
"evaluate.evaluate"
] |
[((1399, 1424), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1422, 1424), False, 'import torch\n'), ((2394, 2459), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'config.batch_size', 'shuffle': '(False)'}), '(test_set, batch_size=config.batch_size, shuffle=False)\n', (2404, 2459), False, 'from torch.utils.data import DataLoader\n'), ((2697, 2738), 'evaluate.evaluate', 'evaluate', (['model', 'data_loader_test', 'device'], {}), '(model, data_loader_test, device)\n', (2705, 2738), False, 'from evaluate import evaluate\n'), ((2779, 2820), 'pandas.read_csv', 'pd.read_csv', (['"""data/para_content_test.csv"""'], {}), "('data/para_content_test.csv')\n", (2790, 2820), True, 'import pandas as pd\n'), ((2955, 3009), 'pandas.DataFrame', 'pd.DataFrame', (['filter_list'], {'columns': "['para', 'content']"}), "(filter_list, columns=['para', 'content'])\n", (2967, 3009), True, 'import pandas as pd\n'), ((3109, 3124), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (3118, 3124), False, 'import fire\n'), ((1443, 1463), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1455, 1463), False, 'import torch\n'), ((1530, 1549), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1542, 1549), False, 'import torch\n'), ((1949, 2005), 'pandas.DataFrame', 'pandas.DataFrame', (['tag_sents'], {'columns': "['para', 'content']"}), "(tag_sents, columns=['para', 'content'])\n", (1965, 2005), False, 'import pandas\n'), ((2143, 2187), 'os.path.join', 'os.path.join', (['config.model_path', '"""vocab.txt"""'], {}), "(config.model_path, 'vocab.txt')\n", (2155, 2187), False, 'import os\n'), ((2594, 2638), 'os.path.join', 'os.path.join', (['config.model_path', '"""model.bin"""'], {}), "(config.model_path, 'model.bin')\n", (2606, 2638), False, 'import os\n'), ((1370, 1390), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**d)\n', (1385, 1390), False, 'from types import SimpleNamespace\n')]
|
import argparse
import itertools
import os.path
import time
import torch
import torch.optim.lr_scheduler
import numpy as np
import evaluate
import trees
import vocabulary
import nkutil
import parse_nk
tokens = parse_nk  # token constants (START, STOP, UNK, TAG_UNK, CHAR_*) are defined in parse_nk
def torch_load(load_path):
if parse_nk.use_cuda:
return torch.load(load_path)
else:
return torch.load(load_path, map_location=lambda storage, location: storage)
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = "{}h{:02}m{:02}s".format(hours, minutes, seconds)
if days > 0:
elapsed_string = "{}d{}".format(days, elapsed_string)
return elapsed_string
def make_hparams():
return nkutil.HParams(
max_len_train=0, # no length limit
max_len_dev=0, # no length limit
sentence_max_len=300,
learning_rate=0.0008,
learning_rate_warmup_steps=160,
clip_grad_norm=0., # no clipping
step_decay=True, # note that disabling step decay is not implemented
step_decay_factor=0.5,
step_decay_patience=5,
max_consecutive_decays=3, # establishes a termination criterion
partitioned=True,
num_layers_position_only=0,
num_layers=8,
d_model=1024,
num_heads=8,
d_kv=64,
d_ff=2048,
d_label_hidden=250,
d_tag_hidden=250,
tag_loss_scale=5.0,
attention_dropout=0.2,
embedding_dropout=0.0,
relu_dropout=0.1,
residual_dropout=0.2,
use_tags=False,
use_words=False,
use_chars_lstm=False,
use_elmo=False,
use_bert=False,
use_bert_only=False,
predict_tags=False,
d_char_emb=32, # A larger value may be better for use_chars_lstm
tag_emb_dropout=0.2,
word_emb_dropout=0.4,
morpho_emb_dropout=0.2,
timing_dropout=0.0,
char_lstm_input_dropout=0.2,
elmo_dropout=0.5, # Note that this semi-stacks with morpho_emb_dropout!
bert_model="bert-base-uncased",
bert_do_lower_case=True,
bert_transliterate="",
)
def run_train(args, hparams):
if args.numpy_seed is not None:
print("Setting numpy random seed to {}...".format(args.numpy_seed))
np.random.seed(args.numpy_seed)
# Make sure that pytorch is actually being initialized randomly.
# On my cluster I was getting highly correlated results from multiple
# runs, but calling reset_parameters() changed that. A brief look at the
# pytorch source code revealed that pytorch initializes its RNG by
# calling std::random_device, which according to the C++ spec is allowed
# to be deterministic.
seed_from_numpy = np.random.randint(2147483648)
print("Manual seed for pytorch:", seed_from_numpy)
torch.manual_seed(seed_from_numpy)
hparams.set_from_args(args)
print("Hyperparameters:")
hparams.print()
print("Loading training trees from {}...".format(args.train_path))
if hparams.predict_tags and args.train_path.endswith('10way.clean'):
print("WARNING: The data distributed with this repository contains "
"predicted part-of-speech tags only (not gold tags!) We do not "
"recommend enabling predict_tags in this configuration.")
train_treebank = trees.load_trees(args.train_path)
if hparams.max_len_train > 0:
train_treebank = [tree for tree in train_treebank if len(list(tree.leaves())) <= hparams.max_len_train]
print("Loaded {:,} training examples.".format(len(train_treebank)))
print("Loading development trees from {}...".format(args.dev_path))
dev_treebank = trees.load_trees(args.dev_path)
if hparams.max_len_dev > 0:
dev_treebank = [tree for tree in dev_treebank if len(list(tree.leaves())) <= hparams.max_len_dev]
print("Loaded {:,} development examples.".format(len(dev_treebank)))
print("Processing trees for training...")
train_parse = [tree.convert() for tree in train_treebank]
print("Constructing vocabularies...")
tag_vocab = vocabulary.Vocabulary()
tag_vocab.index(tokens.START)
tag_vocab.index(tokens.STOP)
tag_vocab.index(tokens.TAG_UNK)
word_vocab = vocabulary.Vocabulary()
word_vocab.index(tokens.START)
word_vocab.index(tokens.STOP)
word_vocab.index(tokens.UNK)
label_vocab = vocabulary.Vocabulary()
label_vocab.index(())
char_set = set()
for tree in train_parse:
nodes = [tree]
while nodes:
node = nodes.pop()
if isinstance(node, trees.InternalParseNode):
label_vocab.index(node.label)
nodes.extend(reversed(node.children))
else:
tag_vocab.index(node.tag)
word_vocab.index(node.word)
char_set |= set(node.word)
char_vocab = vocabulary.Vocabulary()
# If codepoints are small (e.g. Latin alphabet), index by codepoint directly
highest_codepoint = max(ord(char) for char in char_set)
if highest_codepoint < 512:
if highest_codepoint < 256:
highest_codepoint = 256
else:
highest_codepoint = 512
# This also takes care of constants like tokens.CHAR_PAD
for codepoint in range(highest_codepoint):
char_index = char_vocab.index(chr(codepoint))
assert char_index == codepoint
else:
char_vocab.index(tokens.CHAR_UNK)
char_vocab.index(tokens.CHAR_START_SENTENCE)
char_vocab.index(tokens.CHAR_START_WORD)
char_vocab.index(tokens.CHAR_STOP_WORD)
char_vocab.index(tokens.CHAR_STOP_SENTENCE)
for char in sorted(char_set):
char_vocab.index(char)
tag_vocab.freeze()
word_vocab.freeze()
label_vocab.freeze()
char_vocab.freeze()
def print_vocabulary(name, vocab):
special = {tokens.START, tokens.STOP, tokens.UNK}
print("{} ({:,}): {}".format(
name, vocab.size,
sorted(value for value in vocab.values if value in special) +
sorted(value for value in vocab.values if value not in special)))
if args.print_vocabs:
print_vocabulary("Tag", tag_vocab)
print_vocabulary("Word", word_vocab)
print_vocabulary("Label", label_vocab)
print("Initializing model...")
load_path = None
if load_path is not None:
print(f"Loading parameters from {load_path}")
info = torch_load(load_path)
parser = parse_nk.NKChartParser.from_spec(info['spec'], info['state_dict'])
else:
parser = parse_nk.NKChartParser(
tag_vocab,
word_vocab,
label_vocab,
char_vocab,
hparams,
)
print("Initializing optimizer...")
trainable_parameters = [param for param in parser.parameters() if param.requires_grad]
    trainer = torch.optim.Adam(trainable_parameters, lr=1., betas=(0.9, 0.98), eps=1e-9)  # lr=1. is a placeholder; set_lr applies the warmup/decay schedule below
if load_path is not None:
trainer.load_state_dict(info['trainer'])
def set_lr(new_lr):
for param_group in trainer.param_groups:
param_group['lr'] = new_lr
assert hparams.step_decay, "Only step_decay schedule is supported"
warmup_coeff = hparams.learning_rate / hparams.learning_rate_warmup_steps
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
trainer, 'max',
factor=hparams.step_decay_factor,
patience=hparams.step_decay_patience,
verbose=True,
)
def schedule_lr(iteration):
iteration = iteration + 1
if iteration <= hparams.learning_rate_warmup_steps:
set_lr(iteration * warmup_coeff)
clippable_parameters = trainable_parameters
grad_clip_threshold = np.inf if hparams.clip_grad_norm == 0 else hparams.clip_grad_norm
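    # A threshold of np.inf disables clipping: clip_grad_norm_ then leaves the
    # gradients untouched but still returns the total norm for logging.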
print("Training...")
total_processed = 0
current_processed = 0
check_every = len(train_parse) / args.checks_per_epoch
best_dev_fscore = -np.inf
best_dev_model_path = None
best_dev_processed = 0
start_time = time.time()
def check_dev():
nonlocal best_dev_fscore
nonlocal best_dev_model_path
nonlocal best_dev_processed
dev_start_time = time.time()
dev_predicted = []
for dev_start_index in range(0, len(dev_treebank), args.eval_batch_size):
subbatch_trees = dev_treebank[dev_start_index:dev_start_index + args.eval_batch_size]
subbatch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in subbatch_trees]
predicted, _ = parser.parse_batch(subbatch_sentences)
del _
dev_predicted.extend([p.convert() for p in predicted])
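        # Score against the gold dev trees with the external EVALB scorer
        # (PARSEVAL bracketing F1).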
dev_fscore = evaluate.evalb(args.evalb_dir, dev_treebank, dev_predicted)
print(
"dev-fscore {} "
"dev-elapsed {} "
"total-elapsed {}".format(
dev_fscore,
format_elapsed(dev_start_time),
format_elapsed(start_time),
)
)
if dev_fscore.fscore > best_dev_fscore:
if best_dev_model_path is not None:
extensions = [".pt"]
for ext in extensions:
path = best_dev_model_path + ext
if os.path.exists(path):
print("Removing previous model file {}...".format(path))
os.remove(path)
best_dev_fscore = dev_fscore.fscore
best_dev_model_path = "{}_dev={:.2f}".format(
args.model_path_base, dev_fscore.fscore)
best_dev_processed = total_processed
print("Saving new best model to {}...".format(best_dev_model_path))
torch.save({
'spec': parser.spec,
'state_dict': parser.state_dict(),
'trainer': trainer.state_dict(),
}, best_dev_model_path + ".pt")
for epoch in itertools.count(start=1):
if args.epochs is not None and epoch > args.epochs:
break
np.random.shuffle(train_parse)
epoch_start_time = time.time()
for start_index in range(0, len(train_parse), args.batch_size):
trainer.zero_grad()
schedule_lr(total_processed // args.batch_size)
batch_loss_value = 0.0
batch_trees = train_parse[start_index: start_index + args.batch_size]
batch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in batch_trees]
batch_num_tokens = sum(len(sentence) for sentence in batch_sentences)
for subbatch_sentences, subbatch_trees in parser.split_batch(batch_sentences, batch_trees,
args.subbatch_max_tokens):
                _, loss = parser.parse_batch(subbatch_sentences, subbatch_trees)  # _ is the predicted trees (unused during training)
if hparams.predict_tags:
loss = loss[0] / len(batch_trees) + loss[1] / batch_num_tokens
else:
loss = loss / len(batch_trees)
loss_value = float(loss.data.cpu().numpy())
batch_loss_value += loss_value
if loss_value > 0:
loss.backward()
del loss
total_processed += len(subbatch_trees)
current_processed += len(subbatch_trees)
grad_norm = torch.nn.utils.clip_grad_norm_(clippable_parameters, grad_clip_threshold)
trainer.step()
print(
"epoch {:,} "
"batch {:,}/{:,} "
"processed {:,} "
"batch-loss {:.4f} "
"grad-norm {:.4f} "
"epoch-elapsed {} "
"total-elapsed {}".format(
epoch,
start_index // args.batch_size + 1,
int(np.ceil(len(train_parse) / args.batch_size)),
total_processed,
batch_loss_value,
grad_norm,
format_elapsed(epoch_start_time),
format_elapsed(start_time),
)
)
if current_processed >= check_every:
current_processed -= check_every
check_dev()
# adjust learning rate at the end of an epoch
if (total_processed // args.batch_size + 1) > hparams.learning_rate_warmup_steps:
scheduler.step(best_dev_fscore)
if (total_processed - best_dev_processed) > (
(hparams.step_decay_patience + 1) * hparams.max_consecutive_decays * len(train_parse)):
print("Terminating due to lack of improvement in dev fscore.")
break
def run_test(args):
print("Loading test trees from {}...".format(args.test_path))
test_treebank = trees.load_trees(args.test_path)
print("Loaded {:,} test examples.".format(len(test_treebank)))
print("Loading model from {}...".format(args.model_path_base))
assert args.model_path_base.endswith(".pt"), "Only pytorch savefiles supported"
info = torch_load(args.model_path_base)
assert 'hparams' in info['spec'], "Older savefiles not supported"
parser = parse_nk.NKChartParser.from_spec(info['spec'], info['state_dict'])
print("Parsing test sentences...")
start_time = time.time()
test_predicted = []
for start_index in range(0, len(test_treebank), args.eval_batch_size):
subbatch_trees = test_treebank[start_index:start_index + args.eval_batch_size]
subbatch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in subbatch_trees]
        predicted, _ = parser.parse_batch(subbatch_sentences)  # _ is the loss
del _
test_predicted.extend([p.convert() for p in predicted])
# The tree loader does some preprocessing to the trees (e.g. stripping TOP
# symbols or SPMRL morphological features). We compare with the input file
# directly to be extra careful about not corrupting the evaluation. We also
# allow specifying a separate "raw" file for the gold trees: the inputs to
# our parser have traces removed and may have predicted tags substituted,
# and we may wish to compare against the raw gold trees to make sure we
# haven't made a mistake. As far as we can tell all of these variations give
# equivalent results.
ref_gold_path = args.test_path
if args.test_path_raw is not None:
print("Comparing with raw trees from", args.test_path_raw)
ref_gold_path = args.test_path_raw
test_fscore = evaluate.evalb(args.evalb_dir, test_treebank, test_predicted, ref_gold_path=ref_gold_path)
print(
"test-fscore {} "
"test-elapsed {}".format(
test_fscore,
format_elapsed(start_time),
)
)
# %%
def run_ensemble(args):
print("Loading test trees from {}...".format(args.test_path))
test_treebank = trees.load_trees(args.test_path)
print("Loaded {:,} test examples.".format(len(test_treebank)))
parsers = []
for model_path_base in args.model_path_base:
print("Loading model from {}...".format(model_path_base))
assert model_path_base.endswith(".pt"), "Only pytorch savefiles supported"
info = torch_load(model_path_base)
assert 'hparams' in info['spec'], "Older savefiles not supported"
parser = parse_nk.NKChartParser.from_spec(info['spec'], info['state_dict'])
parsers.append(parser)
# Ensure that label scores charts produced by the models can be combined
# using simple averaging
ref_label_vocab = parsers[0].label_vocab
for parser in parsers:
assert parser.label_vocab.indices == ref_label_vocab.indices
print("Parsing test sentences...")
start_time = time.time()
test_predicted = []
# Ensemble by averaging label score charts from different models
# We did not observe any benefits to doing weighted averaging, probably
# because all our parsers output label scores of around the same magnitude
for start_index in range(0, len(test_treebank), args.eval_batch_size):
subbatch_trees = test_treebank[start_index:start_index + args.eval_batch_size]
subbatch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in subbatch_trees]
chart_lists = []
for parser in parsers:
charts = parser.parse_batch(subbatch_sentences, return_label_scores_charts=True)
chart_lists.append(charts)
        # TODO: how exactly are the per-parser results ensembled? (averaged below)
subbatch_charts = [np.mean(list(sentence_charts), 0) for sentence_charts in zip(*chart_lists)]
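        # zip(*chart_lists) regroups the per-parser lists so that each
        # sentence_charts tuple holds one sentence's chart from every parser;
        # np.mean(..., 0) then averages those charts elementwise.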
predicted, _ = parsers[0].decode_from_chart_batch(subbatch_sentences, subbatch_charts)
del _
test_predicted.extend([p.convert() for p in predicted])
test_fscore = evaluate.evalb(args.evalb_dir, test_treebank, test_predicted, ref_gold_path=args.test_path)
print(
"test-fscore {} "
"test-elapsed {}".format(
test_fscore,
format_elapsed(start_time),
)
)
# %%
def run_parse(args):
if args.output_path != '-' and os.path.exists(args.output_path):
print("Error: output file already exists:", args.output_path)
return
print("Loading model from {}...".format(args.model_path_base))
assert args.model_path_base.endswith(".pt"), "Only pytorch savefiles supported"
info = torch_load(args.model_path_base)
assert 'hparams' in info['spec'], "Older savefiles not supported"
parser = parse_nk.NKChartParser.from_spec(info['spec'], info['state_dict'])
print("Parsing sentences...")
with open(args.input_path) as input_file:
sentences = input_file.readlines()
sentences = [sentence.split() for sentence in sentences]
# Tags are not available when parsing from raw text, so use a dummy tag
if 'UNK' in parser.tag_vocab.indices:
dummy_tag = 'UNK'
else:
dummy_tag = parser.tag_vocab.value(0)
start_time = time.time()
all_predicted = []
for start_index in range(0, len(sentences), args.eval_batch_size):
subbatch_sentences = sentences[start_index:start_index + args.eval_batch_size]
subbatch_sentences = [[(dummy_tag, word) for word in sentence] for sentence in subbatch_sentences]
predicted, _ = parser.parse_batch(subbatch_sentences)
del _
if args.output_path == '-':
for p in predicted:
print(p.convert().linearize())
else:
all_predicted.extend([p.convert() for p in predicted])
if args.output_path != '-':
with open(args.output_path, 'w') as output_file:
for tree in all_predicted:
output_file.write("{}\n".format(tree.linearize()))
print("Output written to:", args.output_path)
# %%
def run_viz(args):
assert args.model_path_base.endswith(".pt"), "Only pytorch savefiles supported"
print("Loading test trees from {}...".format(args.viz_path))
viz_treebank = trees.load_trees(args.viz_path)
print("Loaded {:,} test examples.".format(len(viz_treebank)))
print("Loading model from {}...".format(args.model_path_base))
info = torch_load(args.model_path_base)
assert 'hparams' in info['spec'], "Only self-attentive models are supported"
parser = parse_nk.NKChartParser.from_spec(info['spec'], info['state_dict'])
from viz import viz_attention
stowed_values = {}
orig_multihead_forward = parse_nk.MultiHeadAttention.forward
def wrapped_multihead_forward(self, inp, batch_idxs, **kwargs):
res, attns = orig_multihead_forward(self, inp, batch_idxs, **kwargs)
stowed_values[f'attns{stowed_values["stack"]}'] = attns.cpu().data.numpy()
stowed_values['stack'] += 1
return res, attns
parse_nk.MultiHeadAttention.forward = wrapped_multihead_forward
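    # Monkey-patching MultiHeadAttention.forward captures each layer's attention
    # weights without modifying parse_nk: every call stores its attentions under
    # the key f'attns{stack}' and increments stowed_values['stack'].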
# Select the sentences we will actually be visualizing
max_len_viz = 15
if max_len_viz > 0:
viz_treebank = [tree for tree in viz_treebank if len(list(tree.leaves())) <= max_len_viz]
viz_treebank = viz_treebank[:1]
print("Parsing viz sentences...")
for start_index in range(0, len(viz_treebank), args.eval_batch_size):
subbatch_trees = viz_treebank[start_index:start_index + args.eval_batch_size]
subbatch_sentences = [[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in subbatch_trees]
stowed_values = dict(stack=0)
predicted, _ = parser.parse_batch(subbatch_sentences)
del _
predicted = [p.convert() for p in predicted]
stowed_values['predicted'] = predicted
for snum, sentence in enumerate(subbatch_sentences):
sentence_words = [tokens.START] + [x[1] for x in sentence] + [tokens.STOP]
for stacknum in range(stowed_values['stack']):
attns_padded = stowed_values[f'attns{stacknum}']
attns = attns_padded[snum::len(subbatch_sentences), :len(sentence_words), :len(sentence_words)]
viz_attention(sentence_words, attns)
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
hparams = make_hparams()
subparser = subparsers.add_parser("train")
subparser.set_defaults(callback=lambda args: run_train(args, hparams))
hparams.populate_arguments(subparser)
subparser.add_argument("--numpy-seed", type=int)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--train-path", default="data/02-21.10way.clean")
subparser.add_argument("--dev-path", default="data/22.auto.clean")
subparser.add_argument("--batch-size", type=int, default=250)
subparser.add_argument("--subbatch-max-tokens", type=int, default=2000)
subparser.add_argument("--eval-batch-size", type=int, default=100)
subparser.add_argument("--epochs", type=int)
subparser.add_argument("--checks-per-epoch", type=int, default=4)
subparser.add_argument("--print-vocabs", action="store_true")
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-path", default="data/23.auto.clean")
subparser.add_argument("--test-path-raw", type=str)
subparser.add_argument("--eval-batch-size", type=int, default=100)
subparser = subparsers.add_parser("ensemble")
subparser.set_defaults(callback=run_ensemble)
subparser.add_argument("--model-path-base", nargs='+', required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-path", default="data/22.auto.clean")
subparser.add_argument("--eval-batch-size", type=int, default=100)
subparser = subparsers.add_parser("parse")
subparser.set_defaults(callback=run_parse)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--input-path", type=str, required=True)
subparser.add_argument("--output-path", type=str, default="-")
subparser.add_argument("--eval-batch-size", type=int, default=100)
subparser = subparsers.add_parser("viz")
subparser.set_defaults(callback=run_viz)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--viz-path", default="data/22.auto.clean")
subparser.add_argument("--eval-batch-size", type=int, default=100)
args = parser.parse_args()
args.callback(args)
# %%
if __name__ == "__main__":
main()
|
[
"evaluate.evalb"
] |
[((827, 1717), 'nkutil.HParams', 'nkutil.HParams', ([], {'max_len_train': '(0)', 'max_len_dev': '(0)', 'sentence_max_len': '(300)', 'learning_rate': '(0.0008)', 'learning_rate_warmup_steps': '(160)', 'clip_grad_norm': '(0.0)', 'step_decay': '(True)', 'step_decay_factor': '(0.5)', 'step_decay_patience': '(5)', 'max_consecutive_decays': '(3)', 'partitioned': '(True)', 'num_layers_position_only': '(0)', 'num_layers': '(8)', 'd_model': '(1024)', 'num_heads': '(8)', 'd_kv': '(64)', 'd_ff': '(2048)', 'd_label_hidden': '(250)', 'd_tag_hidden': '(250)', 'tag_loss_scale': '(5.0)', 'attention_dropout': '(0.2)', 'embedding_dropout': '(0.0)', 'relu_dropout': '(0.1)', 'residual_dropout': '(0.2)', 'use_tags': '(False)', 'use_words': '(False)', 'use_chars_lstm': '(False)', 'use_elmo': '(False)', 'use_bert': '(False)', 'use_bert_only': '(False)', 'predict_tags': '(False)', 'd_char_emb': '(32)', 'tag_emb_dropout': '(0.2)', 'word_emb_dropout': '(0.4)', 'morpho_emb_dropout': '(0.2)', 'timing_dropout': '(0.0)', 'char_lstm_input_dropout': '(0.2)', 'elmo_dropout': '(0.5)', 'bert_model': '"""bert-base-uncased"""', 'bert_do_lower_case': '(True)', 'bert_transliterate': '""""""'}), "(max_len_train=0, max_len_dev=0, sentence_max_len=300,\n learning_rate=0.0008, learning_rate_warmup_steps=160, clip_grad_norm=\n 0.0, step_decay=True, step_decay_factor=0.5, step_decay_patience=5,\n max_consecutive_decays=3, partitioned=True, num_layers_position_only=0,\n num_layers=8, d_model=1024, num_heads=8, d_kv=64, d_ff=2048,\n d_label_hidden=250, d_tag_hidden=250, tag_loss_scale=5.0,\n attention_dropout=0.2, embedding_dropout=0.0, relu_dropout=0.1,\n residual_dropout=0.2, use_tags=False, use_words=False, use_chars_lstm=\n False, use_elmo=False, use_bert=False, use_bert_only=False,\n predict_tags=False, d_char_emb=32, tag_emb_dropout=0.2,\n word_emb_dropout=0.4, morpho_emb_dropout=0.2, timing_dropout=0.0,\n char_lstm_input_dropout=0.2, elmo_dropout=0.5, bert_model=\n 'bert-base-uncased', bert_do_lower_case=True, bert_transliterate='')\n", (841, 1717), False, 'import nkutil\n'), ((2863, 2892), 'numpy.random.randint', 'np.random.randint', (['(2147483648)'], {}), '(2147483648)\n', (2880, 2892), True, 'import numpy as np\n'), ((2952, 2986), 'torch.manual_seed', 'torch.manual_seed', (['seed_from_numpy'], {}), '(seed_from_numpy)\n', (2969, 2986), False, 'import torch\n'), ((3464, 3497), 'trees.load_trees', 'trees.load_trees', (['args.train_path'], {}), '(args.train_path)\n', (3480, 3497), False, 'import trees\n'), ((3808, 3839), 'trees.load_trees', 'trees.load_trees', (['args.dev_path'], {}), '(args.dev_path)\n', (3824, 3839), False, 'import trees\n'), ((4220, 4243), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (4241, 4243), False, 'import vocabulary\n'), ((4365, 4388), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (4386, 4388), False, 'import vocabulary\n'), ((4510, 4533), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (4531, 4533), False, 'import vocabulary\n'), ((5010, 5033), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', ([], {}), '()\n', (5031, 5033), False, 'import vocabulary\n'), ((7038, 7114), 'torch.optim.Adam', 'torch.optim.Adam', (['trainable_parameters'], {'lr': '(1.0)', 'betas': '(0.9, 0.98)', 'eps': '(1e-09)'}), '(trainable_parameters, lr=1.0, betas=(0.9, 0.98), eps=1e-09)\n', (7054, 7114), False, 'import torch\n'), ((7472, 7621), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['trainer', '"""max"""'], 
{'factor': 'hparams.step_decay_factor', 'patience': 'hparams.step_decay_patience', 'verbose': '(True)'}), "(trainer, 'max', factor=hparams.\n step_decay_factor, patience=hparams.step_decay_patience, verbose=True)\n", (7514, 7621), False, 'import torch\n'), ((8210, 8221), 'time.time', 'time.time', ([], {}), '()\n', (8219, 8221), False, 'import time\n'), ((10108, 10132), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (10123, 10132), False, 'import itertools\n'), ((13057, 13089), 'trees.load_trees', 'trees.load_trees', (['args.test_path'], {}), '(args.test_path)\n', (13073, 13089), False, 'import trees\n'), ((13437, 13503), 'parse_nk.NKChartParser.from_spec', 'parse_nk.NKChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (13469, 13503), False, 'import parse_nk\n'), ((13561, 13572), 'time.time', 'time.time', ([], {}), '()\n', (13570, 13572), False, 'import time\n'), ((14800, 14895), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank', 'test_predicted'], {'ref_gold_path': 'ref_gold_path'}), '(args.evalb_dir, test_treebank, test_predicted, ref_gold_path\n =ref_gold_path)\n', (14814, 14895), False, 'import evaluate\n'), ((15161, 15193), 'trees.load_trees', 'trees.load_trees', (['args.test_path'], {}), '(args.test_path)\n', (15177, 15193), False, 'import trees\n'), ((16015, 16026), 'time.time', 'time.time', ([], {}), '()\n', (16024, 16026), False, 'import time\n'), ((17072, 17168), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank', 'test_predicted'], {'ref_gold_path': 'args.test_path'}), '(args.evalb_dir, test_treebank, test_predicted, ref_gold_path\n =args.test_path)\n', (17086, 17168), False, 'import evaluate\n'), ((17780, 17846), 'parse_nk.NKChartParser.from_spec', 'parse_nk.NKChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (17812, 17846), False, 'import parse_nk\n'), ((18251, 18262), 'time.time', 'time.time', ([], {}), '()\n', (18260, 18262), False, 'import time\n'), ((19270, 19301), 'trees.load_trees', 'trees.load_trees', (['args.viz_path'], {}), '(args.viz_path)\n', (19286, 19301), False, 'import trees\n'), ((19576, 19642), 'parse_nk.NKChartParser.from_spec', 'parse_nk.NKChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (19608, 19642), False, 'import parse_nk\n'), ((21354, 21379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21377, 21379), False, 'import argparse\n'), ((293, 314), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (303, 314), False, 'import torch\n'), ((340, 409), 'torch.load', 'torch.load', (['load_path'], {'map_location': '(lambda storage, location: storage)'}), '(load_path, map_location=lambda storage, location: storage)\n', (350, 409), False, 'import torch\n'), ((2413, 2444), 'numpy.random.seed', 'np.random.seed', (['args.numpy_seed'], {}), '(args.numpy_seed)\n', (2427, 2444), True, 'import numpy as np\n'), ((6648, 6714), 'parse_nk.NKChartParser.from_spec', 'parse_nk.NKChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (6680, 6714), False, 'import parse_nk\n'), ((6742, 6821), 'parse_nk.NKChartParser', 'parse_nk.NKChartParser', (['tag_vocab', 'word_vocab', 'label_vocab', 'char_vocab', 'hparams'], {}), '(tag_vocab, word_vocab, label_vocab, char_vocab, hparams)\n', (6764, 6821), False, 'import parse_nk\n'), ((8376, 8387), 
'time.time', 'time.time', ([], {}), '()\n', (8385, 8387), False, 'import time\n'), ((8881, 8940), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'dev_treebank', 'dev_predicted'], {}), '(args.evalb_dir, dev_treebank, dev_predicted)\n', (8895, 8940), False, 'import evaluate\n'), ((10221, 10251), 'numpy.random.shuffle', 'np.random.shuffle', (['train_parse'], {}), '(train_parse)\n', (10238, 10251), True, 'import numpy as np\n'), ((10279, 10290), 'time.time', 'time.time', ([], {}), '()\n', (10288, 10290), False, 'import time\n'), ((15612, 15678), 'parse_nk.NKChartParser.from_spec', 'parse_nk.NKChartParser.from_spec', (["info['spec']", "info['state_dict']"], {}), "(info['spec'], info['state_dict'])\n", (15644, 15678), False, 'import parse_nk\n'), ((467, 478), 'time.time', 'time.time', ([], {}), '()\n', (476, 478), False, 'import time\n'), ((11600, 11673), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['clippable_parameters', 'grad_clip_threshold'], {}), '(clippable_parameters, grad_clip_threshold)\n', (11630, 11673), False, 'import torch\n'), ((21290, 21326), 'viz.viz_attention', 'viz_attention', (['sentence_words', 'attns'], {}), '(sentence_words, attns)\n', (21303, 21326), False, 'from viz import viz_attention\n')]
|
"""
Test the accuracy of the detector.
"""
import torch
import json
import time
import os, cv2
import tqdm
import numpy as np
from torchvision.transforms import transforms as cvtransforms
from torch.utils.data.dataloader import DataLoader
from lib.models.model_factory import create_model, load_model
from lib.datasets.jde import OriginDetDataset
from lib.transform.train_transform import collate_fn
from lib.models.utils.decode import mot_decode
from lib.utils.post_process import ctdet_post_process, simple_ctdet_post_process
from evaluate.utils import get_annotations_cache, cache_annotations
from config.exp import config_factory
def post_process(opt, dets_in_one_image, meta, simple_post_map = False):
"""
    Post-process all detection boxes of a single image.
    Args:
        opt: configuration object
        dets_in_one_image: all raw detection boxes of one image after decoding
            and non-maximum suppression, 1 * k * (bboxes + scores + clses = 6);
            the scores are already sorted in descending order
        meta: image metadata; meta['c'] is the original size divided by 2,
            meta['s'] is the longest side of the input size
        simple_post_map: whether to use the simple post-processing path
    Returns:
        dets_mapped_results: a dict of per-class detections for this image
"""
dets_in_one_image = dets_in_one_image.detach().cpu().numpy()
dets_in_one_image = dets_in_one_image.reshape(1, -1, dets_in_one_image.shape[2]) # 1 * k * 6
    # Box-processing pipeline: map the predicted bboxes back to the original image size
if not simple_post_map:
dets_mapped_results = ctdet_post_process(
dets_in_one_image.copy(),
[meta['c']],
[meta['s']],
meta['out_height'],
meta['out_width'],
opt.num_classes
)
else:
dets_mapped_results = simple_ctdet_post_process(
dets_in_one_image.copy(),
meta['output_shape'],
meta['origin_shape'],
opt.num_classes
)
    # Return each class of this image as a numpy array
for j in range(opt.num_classes):
dets_mapped_results[0][j] = np.array(dets_mapped_results[0][j], dtype=np.float32).reshape(-1, 5)
return dets_mapped_results[0]
def merge_outputs_into_dict(opt, detections):
"""
    Merge the outputs of multiple images.
    Args:
        detections: a list of per-image detection dicts [det_dict]
    Returns:
        results: all detections grouped by class
"""
results = {}
for j in range(opt.num_classes):
        # Gather the outputs of all images into a single dict
results[j] = np.concatenate([detection[j] for detection in detections], axis=0).astype(np.float32)
    # Stack the scores of every class into a single 1-D array, ordered by class,
    # covering the matching objects from all images:
    # scores = np.hstack([results[j][:, 4] for j in range(opt.num_classes)])
    # If the total number of objects exceeds 128, filter them down.
    # not necessary
    # if len(scores) > 128:
    #     # pick the top-k largest scores among all objects; what if >= 2*128 ???
    #     kth = len(scores) - 128
    #     thresh = np.partition(scores, kth)[kth]
    #     # within each class, only detections above the score threshold are kept
    #     for j in range(opt.num_classes):
    #         keep_inds = (results[j][:, 4] >= thresh)
    #         results[j] = results[j][keep_inds]
    # Return the result dict keyed by class id
return results
def test_det(
opt,
batch_size,
img_size,
iou_thres,
print_interval=40,
):
# ===================================
    # data loading
# ===================================
data_cfg = opt.data_cfg
    with open(data_cfg) as f:
        data_cfg_dict = json.load(f)
nC = opt.num_classes
test_path = data_cfg_dict['test']
dataset_root = data_cfg_dict['root']
# Get dataloader
transforms = cvtransforms.Compose([
cvtransforms.ToTensor(),
cvtransforms.Normalize(opt.mean, opt.std)
])
dataset = OriginDetDataset(dataset_root, test_path, augment=False, transforms=transforms)
# Anno Cache
anno_cache_file = "det_annos_cache.pkl"
cls_ref_annos = get_annotations_cache(anno_cache_file)
if cls_ref_annos is None:
annosloader = DataLoader(dataset, 1, shuffle=False, num_workers=1, drop_last=False)
cache_annotations(annosloader, 2, anno_cache_file)
cls_ref_annos = get_annotations_cache(anno_cache_file)
print("=" * 60)
print("Annos Summary:")
# =================================================================
    # TODO: only used when evaluating ball or players; ignore otherwise.
# =================================================================
cls_ref_annos[0] = cls_ref_annos[1]
del cls_ref_annos[1]
for cls_id in range(nC):
print("Class Samples:", cls_id, len(cls_ref_annos[cls_id]), "Total Objs:", cls_ref_annos[cls_id]["gt_total"])
print("=" * 60)
# ===================================
    # run the validation dataset to obtain results
    # ===================================
    # ===================================
    # environment setup
# ===================================
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
# opt.device = torch.device('cpu')
# ===================================
    # create and load the model
# ===================================
print('Creating model...')
model = create_model("fairmot", opt.arch, opt.heads, opt.head_conv)
model = load_model(model, opt.load_model)
model = model.to(opt.device)
model.eval()
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=False, collate_fn=collate_fn)
# ==================
# tiling mechanism
# ==================
tiling_size = 256
step = 128
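    # Hedged sketch (this helper is illustrative only, not used by the loop
    # below): the number of sliding crops per axis, matching the width_corps /
    # height_corps arithmetic, e.g. 14 x 7 = 98 patches for a 1920x1080 frame.
    def _num_crops(extent, tile=tiling_size, stride=step):
        return (extent - tile) // stride + 1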
for i, batch in enumerate(tqdm.tqdm(dataloader, desc="Detect")):
(imgs, _, paths, shapes, _) = batch
# 1 * 3 * origin_h * origin_w
imgs = imgs.to(opt.device)
origin_shape = shapes[0]
width = origin_shape[1]
height = origin_shape[0]
        # TODO: special adjustment for images smaller than 256 * 256
width_corps = (width - tiling_size) // step + 1
height_corps = (height - tiling_size) // step + 1
# record
best_result = {
"crop_y": 0,
"crop_x": 0,
"hm_score": 0,
"detections": None,
}
# sliding detection
for col in range(height_corps):
for row in range(width_corps):
crop_y = col * step
crop_x = row * step
if crop_y + tiling_size > height:
crop_y = height - tiling_size
if crop_x + tiling_size > width:
crop_x = width - tiling_size
patch = imgs[:, :, crop_y:crop_y + tiling_size, crop_x:crop_x + tiling_size]
# output
output = model({'input':patch})[-1]
# select
hm = output['hm'][:, 0:1].sigmoid_()
if hm.max() > best_result["hm_score"]:
best_result['hm_score'] = hm.max()
else:
continue
# detection
wh = output['wh']
reg = output['reg'] if opt.reg_offset else None
opt.K = 1
                # !!! decode the network outputs into detections
detections, inds = mot_decode(hm, wh, center_offset_reg=reg, ltrb=opt.ltrb, K=opt.K)
# record
best_result["crop_y"] = crop_y
best_result["crop_x"] = crop_x
best_result["detections"] = detections
# calc
detection = best_result["detections"].squeeze(0).squeeze(0).cpu().numpy()
detection *= opt.down_ratio
dets_in_one_image_dict = {}
dets_in_one_image_dict[0] = np.asarray([detection[:-1]])
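        # detection is [x1, y1, x2, y2, score, cls] (the 6-value layout noted in
        # post_process); [:-1] keeps bbox + score, and the crop offsets added
        # below map the patch coordinates back into the full image.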
image_path = paths[0]
# patch = origin_img[best_result["crop_y"]:best_result["crop_y"] + tiling_size, best_result["crop_x"]:best_result["crop_x"] + tiling_size]
dets_in_one_image_dict[0][:, 0] += best_result["crop_x"]
dets_in_one_image_dict[0][:, 1] += best_result["crop_y"]
dets_in_one_image_dict[0][:, 2] += best_result["crop_x"]
dets_in_one_image_dict[0][:, 3] += best_result["crop_y"]
# display
# pred and gt
origin_img = cv2.imread(image_path)
labels = np.loadtxt(image_path.replace("images", "labels_with_ids").replace(".jpg", ".txt"))
gt_labels = labels[labels[:, 0] == 1]
if gt_labels.shape[0] != 0:
gt_label = gt_labels[0][2:6]
bbox = gt_label.copy()
bbox[0] = int((gt_label[0] - gt_label[2] / 2) * origin_shape[1])
bbox[1] = int((gt_label[1] - gt_label[3] / 2) * origin_shape[0])
bbox[2] = int((gt_label[0] + gt_label[2] / 2) * origin_shape[1])
bbox[3] = int((gt_label[1] + gt_label[3] / 2) * origin_shape[0])
bbox = bbox.astype(np.int32)
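            # e.g. a normalized (cx, cy, w, h) label of (0.5, 0.5, 0.1, 0.2) on
            # a hypothetical 1280x720 frame becomes the corner box
            # (576, 288, 704, 432) after this conversion.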
cv2.rectangle(origin_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=3)
bbox = dets_in_one_image_dict[0][0, :4].astype(np.int32)
cv2.rectangle(origin_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(255,255,0), thickness=3)
cv2.imwrite("../demos/det_%d_%.06f_result.jpg" % (i, best_result["hm_score"]), origin_img)
if __name__ == '__main__':
# opt = config_factory.get_config("det_val_resdcn18_4x")()
# opt = config_factory.get_config("det_val_resdcn18_2x")()
# opt = config_factory.get_config("det_val_flynet_tiny")()
# opt = config_factory.get_config("det_val_gridnet_tiny")()
opt = config_factory.get_config("det_val_gridnet_ball")()
opt.recipe = "fairmot"
opt.task = "ball"
opt.load_model = "../models/mnet_det_ball.pth"
with torch.no_grad():
        test_det(opt, batch_size=1, img_size=opt.img_size, iou_thres=0.2)  # writes demo images; returns nothing
|
[
"evaluate.utils.cache_annotations",
"evaluate.utils.get_annotations_cache"
] |
[((3157, 3169), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3166, 3169), False, 'import json\n'), ((3458, 3537), 'lib.datasets.jde.OriginDetDataset', 'OriginDetDataset', (['dataset_root', 'test_path'], {'augment': '(False)', 'transforms': 'transforms'}), '(dataset_root, test_path, augment=False, transforms=transforms)\n', (3474, 3537), False, 'from lib.datasets.jde import OriginDetDataset\n'), ((3620, 3658), 'evaluate.utils.get_annotations_cache', 'get_annotations_cache', (['anno_cache_file'], {}), '(anno_cache_file)\n', (3641, 3658), False, 'from evaluate.utils import get_annotations_cache, cache_annotations\n'), ((4927, 4986), 'lib.models.model_factory.create_model', 'create_model', (['"""fairmot"""', 'opt.arch', 'opt.heads', 'opt.head_conv'], {}), "('fairmot', opt.arch, opt.heads, opt.head_conv)\n", (4939, 4986), False, 'from lib.models.model_factory import create_model, load_model\n'), ((4999, 5032), 'lib.models.model_factory.load_model', 'load_model', (['model', 'opt.load_model'], {}), '(model, opt.load_model)\n', (5009, 5032), False, 'from lib.models.model_factory import create_model, load_model\n'), ((5101, 5209), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=\n False, collate_fn=collate_fn)\n', (5111, 5209), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3711, 3780), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset', '(1)'], {'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)'}), '(dataset, 1, shuffle=False, num_workers=1, drop_last=False)\n', (3721, 3780), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3789, 3839), 'evaluate.utils.cache_annotations', 'cache_annotations', (['annosloader', '(2)', 'anno_cache_file'], {}), '(annosloader, 2, anno_cache_file)\n', (3806, 3839), False, 'from evaluate.utils import get_annotations_cache, cache_annotations\n'), ((3864, 3902), 'evaluate.utils.get_annotations_cache', 'get_annotations_cache', (['anno_cache_file'], {}), '(anno_cache_file)\n', (3885, 3902), False, 'from evaluate.utils import get_annotations_cache, cache_annotations\n'), ((4675, 4695), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4687, 4695), False, 'import torch\n'), ((4727, 4746), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4739, 4746), False, 'import torch\n'), ((5347, 5383), 'tqdm.tqdm', 'tqdm.tqdm', (['dataloader'], {'desc': '"""Detect"""'}), "(dataloader, desc='Detect')\n", (5356, 5383), False, 'import tqdm\n'), ((7422, 7450), 'numpy.asarray', 'np.asarray', (['[detection[:-1]]'], {}), '([detection[:-1]])\n', (7432, 7450), True, 'import numpy as np\n'), ((7959, 7981), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (7969, 7981), False, 'import os, cv2\n'), ((8772, 8876), 'cv2.rectangle', 'cv2.rectangle', (['origin_img', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])'], {'color': '(255, 255, 0)', 'thickness': '(3)'}), '(origin_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(\n 255, 255, 0), thickness=3)\n', (8785, 8876), False, 'import os, cv2\n'), ((8878, 8973), 'cv2.imwrite', 'cv2.imwrite', (["('../demos/det_%d_%.06f_result.jpg' % (i, best_result['hm_score']))", 'origin_img'], {}), "('../demos/det_%d_%.06f_result.jpg' % (i, best_result['hm_score'\n ]), origin_img)\n", (8889, 8973), False, 'import os, cv2\n'), ((9260, 9309), 
'config.exp.config_factory.get_config', 'config_factory.get_config', (['"""det_val_gridnet_ball"""'], {}), "('det_val_gridnet_ball')\n", (9285, 9309), False, 'from config.exp import config_factory\n'), ((9423, 9438), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9436, 9438), False, 'import torch\n'), ((3358, 3381), 'torchvision.transforms.transforms.ToTensor', 'cvtransforms.ToTensor', ([], {}), '()\n', (3379, 3381), True, 'from torchvision.transforms import transforms as cvtransforms\n'), ((3391, 3432), 'torchvision.transforms.transforms.Normalize', 'cvtransforms.Normalize', (['opt.mean', 'opt.std'], {}), '(opt.mean, opt.std)\n', (3413, 3432), True, 'from torchvision.transforms import transforms as cvtransforms\n'), ((8602, 8703), 'cv2.rectangle', 'cv2.rectangle', (['origin_img', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])'], {'color': '(0, 0, 255)', 'thickness': '(3)'}), '(origin_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,\n 0, 255), thickness=3)\n', (8615, 8703), False, 'import os, cv2\n'), ((1823, 1876), 'numpy.array', 'np.array', (['dets_mapped_results[0][j]'], {'dtype': 'np.float32'}), '(dets_mapped_results[0][j], dtype=np.float32)\n', (1831, 1876), True, 'import numpy as np\n'), ((2211, 2277), 'numpy.concatenate', 'np.concatenate', (['[detection[j] for detection in detections]'], {'axis': '(0)'}), '([detection[j] for detection in detections], axis=0)\n', (2225, 2277), True, 'import numpy as np\n'), ((6974, 7039), 'lib.models.utils.decode.mot_decode', 'mot_decode', (['hm', 'wh'], {'center_offset_reg': 'reg', 'ltrb': 'opt.ltrb', 'K': 'opt.K'}), '(hm, wh, center_offset_reg=reg, ltrb=opt.ltrb, K=opt.K)\n', (6984, 7039), False, 'from lib.models.utils.decode import mot_decode\n')]
|
import torch.nn as nn
from torch.nn import functional as F
from pykp.masked_loss import masked_cross_entropy
from utils.statistics import LossStatistics
from utils.time_log import time_since, convert_time2str
from evaluate import evaluate_loss
import time
import math
import logging
import torch
import sys
import os
EPS = 1e-6
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    BCE = F.binary_cross_entropy(recon_x, x, size_average=False)  # size_average=False sums over all elements (reduction='sum')
# BCE = -(recon_x * x).sum(1).mean()
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
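# The KLD term is the closed form of KL(N(mu, sigma^2) || N(0, 1)) summed over
# dimensions, -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) with
# logvar = log(sigma^2), so it is evaluated without any sampling.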
def l1_penalty(para):
return nn.L1Loss()(para, torch.zeros_like(para))
def check_sparsity(para, sparsity_threshold=1e-3):
num_weights = para.shape[0] * para.shape[1]
num_zero = (para.abs() < sparsity_threshold).sum().float()
return num_zero / float(num_weights)
def update_l1(cur_l1, cur_sparsity, sparsity_target):
diff = sparsity_target - cur_sparsity
cur_l1.mul_(2.0 ** diff)
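# Worked example: with sparsity_target = 0.8 and a current sparsity of 0.6,
# diff = 0.2 and the L1 strength grows by a factor of 2**0.2 ~= 1.15; once
# sparsity overshoots the target, diff < 0 and the strength decays again.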
def train_ntm_one_epoch(model, dataloader, optimizer, opt, epoch):
model.train()
train_loss = 0
for batch_idx, data_bow in enumerate(dataloader):
data_bow = data_bow.to(opt.device)
# normalize data
data_bow_norm = F.normalize(data_bow)
optimizer.zero_grad()
_, _, recon_batch, mu, logvar = model(data_bow_norm)
loss = loss_function(recon_batch, data_bow, mu, logvar)
# fcd1 weight's shape is [vocab_size, topic_num]
loss = loss + model.l1_strength * l1_penalty(model.fcd1.weight)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data_bow), len(dataloader.dataset),
100. * batch_idx / len(dataloader),
loss.item() / len(data_bow)))
logging.info('====>Train epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(dataloader.dataset)))
sparsity = check_sparsity(model.fcd1.weight.data)
logging.info("Overall sparsity = %.3f, l1 strength = %.5f" % (sparsity, model.l1_strength))
logging.info("Target sparsity = %.3f" % opt.target_sparsity)
update_l1(model.l1_strength, sparsity, opt.target_sparsity)
return sparsity
def test_ntm_one_epoch(model, dataloader, opt, epoch):
model.eval()
test_loss = 0
with torch.no_grad():
for i, data_bow in enumerate(dataloader):
data_bow = data_bow.to(opt.device)
data_bow_norm = F.normalize(data_bow)
_, _, recon_batch, mu, logvar = model(data_bow_norm)
test_loss += loss_function(recon_batch, data_bow, mu, logvar).item()
avg_loss = test_loss / len(dataloader.dataset)
logging.info('====> Test epoch: {} Average loss: {:.4f}'.format(epoch, avg_loss))
return avg_loss
def fix_model(model):
for param in model.parameters():
param.requires_grad = False
def fix_model_seq2seq_decoder(model):
for name, param in model.named_parameters():
if 'decoder' in name:
param.requires_grad = False
def unfix_model_seq2seq_decoder(model):
for name, param in model.named_parameters():
if 'decoder' in name:
param.requires_grad = True
def unfix_model(model):
for param in model.parameters():
param.requires_grad = True
def train_model(model, ntm_model, optimizer_ml, optimizer_ntm, optimizer_whole, train_data_loader, valid_data_loader,
bow_dictionary, train_bow_loader, valid_bow_loader, opt):
logging.info('====================== Start Training =========================')
if opt.only_train_ntm or (opt.use_topic_represent and not opt.load_pretrain_ntm):
print("\nWarming up ntm for %d epochs" % opt.ntm_warm_up_epochs)
for epoch in range(1, opt.ntm_warm_up_epochs + 1):
sparsity = train_ntm_one_epoch(ntm_model, train_bow_loader, optimizer_ntm, opt, epoch)
val_loss = test_ntm_one_epoch(ntm_model, valid_bow_loader, opt, epoch)
if epoch % 10 == 0:
ntm_model.print_topic_words(bow_dictionary, os.path.join(opt.model_path, 'topwords_e%d.txt' % epoch))
best_ntm_model_path = os.path.join(opt.model_path, 'e%d.val_loss=%.3f.sparsity=%.3f.ntm_model' %
(epoch, val_loss, sparsity))
logging.info("\nSaving warm up ntm model into %s" % best_ntm_model_path)
torch.save(ntm_model.state_dict(), open(best_ntm_model_path, 'wb'))
elif opt.use_topic_represent:
print("Loading ntm model from %s" % opt.check_pt_ntm_model_path)
ntm_model.load_state_dict(torch.load(opt.check_pt_ntm_model_path))
if opt.only_train_ntm:
return
total_batch = 0
total_train_loss_statistics = LossStatistics()
report_train_loss_statistics = LossStatistics()
report_train_ppl = []
report_valid_ppl = []
report_train_loss = []
report_valid_loss = []
best_valid_ppl = float('inf')
best_valid_loss = float('inf')
best_ntm_valid_loss = float('inf')
joint_train_patience = 1
ntm_train_patience = 1
global_patience = 5
num_stop_dropping = 0
num_stop_dropping_ntm = 0
num_stop_dropping_global = 0
t0 = time.time()
Train_Seq2seq = True
begin_iterate_train_ntm = opt.iterate_train_ntm
check_pt_model_path = ""
print("\nEntering main training for %d epochs" % opt.epochs)
for epoch in range(opt.start_epoch, opt.epochs + 1):
if Train_Seq2seq:
if epoch <= opt.p_seq2seq_e or not opt.joint_train:
optimizer = optimizer_ml
model.train()
ntm_model.eval()
logging.info("\nTraining seq2seq epoch: {}/{}".format(epoch, opt.epochs))
elif begin_iterate_train_ntm:
optimizer = optimizer_ntm
model.train()
ntm_model.train()
fix_model(model)
logging.info("\nTraining ntm epoch: {}/{}".format(epoch, opt.epochs))
begin_iterate_train_ntm = False
else:
optimizer = optimizer_whole
unfix_model(model)
model.train()
ntm_model.train()
logging.info("\nTraining seq2seq+ntm epoch: {}/{}".format(epoch, opt.epochs))
if opt.iterate_train_ntm:
begin_iterate_train_ntm = True
logging.info("The total num of batches: %d, current learning rate:%.6f" %
(len(train_data_loader), optimizer.param_groups[0]['lr']))
for batch_i, batch in enumerate(train_data_loader):
total_batch += 1
batch_loss_stat, _ = train_one_batch(batch, model, ntm_model, optimizer, opt, batch_i)
report_train_loss_statistics.update(batch_loss_stat)
total_train_loss_statistics.update(batch_loss_stat)
if (batch_i + 1) % (len(train_data_loader) // 10) == 0:
print("Train: %d/%d batches, current avg loss: %.3f" %
((batch_i + 1), len(train_data_loader), batch_loss_stat.xent()))
current_train_ppl = report_train_loss_statistics.ppl()
current_train_loss = report_train_loss_statistics.xent()
# test the model on the validation dataset for one epoch
model.eval()
valid_loss_stat = evaluate_loss(valid_data_loader, model, ntm_model, opt)
current_valid_loss = valid_loss_stat.xent()
current_valid_ppl = valid_loss_stat.ppl()
# debug
if math.isnan(current_valid_loss) or math.isnan(current_train_loss):
logging.info(
"NaN valid loss. Epoch: %d; batch_i: %d, total_batch: %d" % (epoch, batch_i, total_batch))
exit()
if current_valid_loss < best_valid_loss: # update the best valid loss and save the model parameters
print("Valid loss drops")
sys.stdout.flush()
best_valid_loss = current_valid_loss
best_valid_ppl = current_valid_ppl
num_stop_dropping = 0
num_stop_dropping_global = 0
if epoch >= opt.start_checkpoint_at and epoch > opt.p_seq2seq_e and not opt.save_each_epoch:
check_pt_model_path = os.path.join(opt.model_path, 'e%d.val_loss=%.3f.model-%s' %
(epoch, current_valid_loss, convert_time2str(time.time() - t0)))
# save model parameters
torch.save(
model.state_dict(),
open(check_pt_model_path, 'wb')
)
logging.info('Saving seq2seq checkpoints to %s' % check_pt_model_path)
if opt.joint_train:
check_pt_ntm_model_path = check_pt_model_path.replace('.model-', '.model_ntm-')
# save model parameters
torch.save(
ntm_model.state_dict(),
open(check_pt_ntm_model_path, 'wb')
)
logging.info('Saving ntm checkpoints to %s' % check_pt_ntm_model_path)
ntm_model.print_topic_words(bow_dictionary,
os.path.join(opt.model_path, 'topwords_e%d.txt' % epoch))
else:
print("Valid loss does not drop")
sys.stdout.flush()
num_stop_dropping += 1
num_stop_dropping_global += 1
# decay the learning rate by a factor
for i, param_group in enumerate(optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = old_lr * opt.learning_rate_decay
if old_lr - new_lr > EPS:
param_group['lr'] = new_lr
print("The new learning rate for seq2seq is decayed to %.6f" % new_lr)
if opt.save_each_epoch:
check_pt_model_path = os.path.join(opt.model_path, 'e%d.train_loss=%.3f.val_loss=%.3f.model-%s' %
(epoch, current_train_loss, current_valid_loss,
convert_time2str(time.time() - t0)))
torch.save( # save model parameters
model.state_dict(),
open(check_pt_model_path, 'wb')
)
logging.info('Saving seq2seq checkpoints to %s' % check_pt_model_path)
if opt.joint_train:
check_pt_ntm_model_path = check_pt_model_path.replace('.model-', '.model_ntm-')
torch.save( # save model parameters
ntm_model.state_dict(),
open(check_pt_ntm_model_path, 'wb')
)
logging.info('Saving ntm checkpoints to %s' % check_pt_ntm_model_path)
# log loss, ppl, and time
logging.info('Epoch: %d; Time spent: %.2f' % (epoch, time.time() - t0))
logging.info(
'avg training ppl: %.3f; avg validation ppl: %.3f; best validation ppl: %.3f' % (
current_train_ppl, current_valid_ppl, best_valid_ppl))
logging.info(
'avg training loss: %.3f; avg validation loss: %.3f; best validation loss: %.3f' % (
current_train_loss, current_valid_loss, best_valid_loss))
report_train_ppl.append(current_train_ppl)
report_valid_ppl.append(current_valid_ppl)
report_train_loss.append(current_train_loss)
report_valid_loss.append(current_valid_loss)
report_train_loss_statistics.clear()
if not opt.save_each_epoch and num_stop_dropping >= opt.early_stop_tolerance: # not opt.joint_train or
logging.info('Have not increased for %d check points, early stop training' % num_stop_dropping)
break
# if num_stop_dropping_global >= global_patience and opt.joint_train:
# logging.info('Reach global stoping dropping patience: %d' % global_patience)
# break
# if num_stop_dropping >= joint_train_patience and opt.joint_train:
# Train_Seq2seq = False
# num_stop_dropping_ntm = 0
# break
# else:
# logging.info("\nTraining ntm epoch: {}/{}".format(epoch, opt.epochs))
# logging.info("The total num of batches: {}".format(len(train_bow_loader)))
# sparsity = train_ntm_one_epoch(ntm_model, train_bow_loader, optimizer_ntm, opt, epoch)
# val_loss = test_ntm_one_epoch(ntm_model, valid_bow_loader, opt, epoch)
# if val_loss < best_ntm_valid_loss:
# print('Ntm loss drops...')
# best_ntm_valid_loss = val_loss
# num_stop_dropping_ntm = 0
# num_stop_dropping_global = 0
# else:
# print('Ntm loss does not drop...')
# num_stop_dropping_ntm += 1
# num_stop_dropping_global += 1
#
# if num_stop_dropping_global > global_patience:
# logging.info('Reach global stoping dropping patience: %d' % global_patience)
# break
#
# if num_stop_dropping_ntm >= ntm_train_patience:
# Train_Seq2seq = True
# num_stop_dropping = 0
# # continue
#
# if opt.joint_train:
# ntm_model.print_topic_words(bow_dictionary, os.path.join(opt.model_path, 'topwords_e%d.txt' % epoch))
return check_pt_model_path
def train_one_batch(batch, model, ntm_model, optimizer, opt, batch_i):
# train for one batch
src, src_lens, src_mask, trg, trg_lens, trg_mask, src_oov, trg_oov, oov_lists, src_bow = batch
max_num_oov = max([len(oov) for oov in oov_lists]) # max number of oov for each batch
# move data to GPU if available
src = src.to(opt.device)
src_mask = src_mask.to(opt.device)
trg = trg.to(opt.device)
trg_mask = trg_mask.to(opt.device)
src_oov = src_oov.to(opt.device)
trg_oov = trg_oov.to(opt.device)
# model.train()
optimizer.zero_grad()
if opt.use_topic_represent:
src_bow = src_bow.to(opt.device)
src_bow_norm = F.normalize(src_bow)
if opt.topic_type == 'z':
topic_represent, _, recon_batch, mu, logvar = ntm_model(src_bow_norm)
else:
_, topic_represent, recon_batch, mu, logvar = ntm_model(src_bow_norm)
if opt.add_two_loss:
ntm_loss = loss_function(recon_batch, src_bow, mu, logvar)
else:
topic_represent = None
start_time = time.time()
# for one2one setting
decoder_dist, h_t, attention_dist, encoder_final_state, coverage, _, _, _ \
= model(src, src_lens, trg, src_oov, max_num_oov, src_mask, topic_represent)
forward_time = time_since(start_time)
start_time = time.time()
if opt.copy_attention: # Compute the loss using target with oov words
loss = masked_cross_entropy(decoder_dist, trg_oov, trg_mask, trg_lens,
opt.coverage_attn, coverage, attention_dist, opt.lambda_coverage, opt.coverage_loss)
else: # Compute the loss using target without oov words
loss = masked_cross_entropy(decoder_dist, trg, trg_mask, trg_lens,
opt.coverage_attn, coverage, attention_dist, opt.lambda_coverage, opt.coverage_loss)
loss_compute_time = time_since(start_time)
total_trg_tokens = sum(trg_lens)
if math.isnan(loss.item()):
print("Batch i: %d" % batch_i)
print("src")
print(src)
print(src_oov)
print(src_lens)
print(src_mask)
print("trg")
print(trg)
print(trg_oov)
print(trg_lens)
print(trg_mask)
print("oov list")
print(oov_lists)
print("Decoder")
print(decoder_dist)
print(h_t)
print(attention_dist)
raise ValueError("Loss is NaN")
if opt.loss_normalization == "tokens": # use number of target tokens to normalize the loss
normalization = total_trg_tokens
elif opt.loss_normalization == 'batches': # use batch_size to normalize the loss
normalization = src.size(0)
else:
raise ValueError('The type of loss normalization is invalid.')
assert normalization > 0, 'normalization should be a positive number'
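    # e.g. with loss_normalization == 'tokens' and a hypothetical batch of 500
    # target tokens, the backward pass below propagates loss / 500.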
start_time = time.time()
if opt.add_two_loss:
loss += ntm_loss
# back propagation on the normalized loss
loss.div(normalization).backward()
backward_time = time_since(start_time)
if opt.max_grad_norm > 0:
grad_norm_before_clipping = nn.utils.clip_grad_norm_(model.parameters(), opt.max_grad_norm)
optimizer.step()
# construct a statistic object for the loss
stat = LossStatistics(loss.item(), total_trg_tokens, n_batch=1, forward_time=forward_time,
loss_compute_time=loss_compute_time, backward_time=backward_time)
return stat, decoder_dist.detach()
|
[
"evaluate.evaluate_loss"
] |
[((459, 513), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['recon_x', 'x'], {'size_average': '(False)'}), '(recon_x, x, size_average=False)\n', (481, 513), True, 'from torch.nn import functional as F\n'), ((2170, 2265), 'logging.info', 'logging.info', (["('Overall sparsity = %.3f, l1 strength = %.5f' % (sparsity, model.l1_strength))"], {}), "('Overall sparsity = %.3f, l1 strength = %.5f' % (sparsity,\n model.l1_strength))\n", (2182, 2265), False, 'import logging\n'), ((2266, 2326), 'logging.info', 'logging.info', (["('Target sparsity = %.3f' % opt.target_sparsity)"], {}), "('Target sparsity = %.3f' % opt.target_sparsity)\n", (2278, 2326), False, 'import logging\n'), ((3694, 3780), 'logging.info', 'logging.info', (['"""====================== Start Training ========================="""'], {}), "(\n '====================== Start Training =========================')\n", (3706, 3780), False, 'import logging\n'), ((4973, 4989), 'utils.statistics.LossStatistics', 'LossStatistics', ([], {}), '()\n', (4987, 4989), False, 'from utils.statistics import LossStatistics\n'), ((5025, 5041), 'utils.statistics.LossStatistics', 'LossStatistics', ([], {}), '()\n', (5039, 5041), False, 'from utils.statistics import LossStatistics\n'), ((5435, 5446), 'time.time', 'time.time', ([], {}), '()\n', (5444, 5446), False, 'import time\n'), ((15214, 15225), 'time.time', 'time.time', ([], {}), '()\n', (15223, 15225), False, 'import time\n'), ((15438, 15460), 'utils.time_log.time_since', 'time_since', (['start_time'], {}), '(start_time)\n', (15448, 15460), False, 'from utils.time_log import time_since, convert_time2str\n'), ((15479, 15490), 'time.time', 'time.time', ([], {}), '()\n', (15488, 15490), False, 'import time\n'), ((16048, 16070), 'utils.time_log.time_since', 'time_since', (['start_time'], {}), '(start_time)\n', (16058, 16070), False, 'from utils.time_log import time_since, convert_time2str\n'), ((17030, 17041), 'time.time', 'time.time', ([], {}), '()\n', (17039, 17041), False, 'import time\n'), ((17197, 17219), 'utils.time_log.time_since', 'time_since', (['start_time'], {}), '(start_time)\n', (17207, 17219), False, 'from utils.time_log import time_since, convert_time2str\n'), ((677, 688), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (686, 688), True, 'import torch.nn as nn\n'), ((695, 717), 'torch.zeros_like', 'torch.zeros_like', (['para'], {}), '(para)\n', (711, 717), False, 'import torch\n'), ((1303, 1324), 'torch.nn.functional.normalize', 'F.normalize', (['data_bow'], {}), '(data_bow)\n', (1314, 1324), True, 'from torch.nn import functional as F\n'), ((2512, 2527), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2525, 2527), False, 'import torch\n'), ((14821, 14841), 'torch.nn.functional.normalize', 'F.normalize', (['src_bow'], {}), '(src_bow)\n', (14832, 14841), True, 'from torch.nn import functional as F\n'), ((15581, 15739), 'pykp.masked_loss.masked_cross_entropy', 'masked_cross_entropy', (['decoder_dist', 'trg_oov', 'trg_mask', 'trg_lens', 'opt.coverage_attn', 'coverage', 'attention_dist', 'opt.lambda_coverage', 'opt.coverage_loss'], {}), '(decoder_dist, trg_oov, trg_mask, trg_lens, opt.\n coverage_attn, coverage, attention_dist, opt.lambda_coverage, opt.\n coverage_loss)\n', (15601, 15739), False, 'from pykp.masked_loss import masked_cross_entropy\n'), ((15842, 15996), 'pykp.masked_loss.masked_cross_entropy', 'masked_cross_entropy', (['decoder_dist', 'trg', 'trg_mask', 'trg_lens', 'opt.coverage_attn', 'coverage', 'attention_dist', 'opt.lambda_coverage', 
'opt.coverage_loss'], {}), '(decoder_dist, trg, trg_mask, trg_lens, opt.\n coverage_attn, coverage, attention_dist, opt.lambda_coverage, opt.\n coverage_loss)\n', (15862, 15996), False, 'from pykp.masked_loss import masked_cross_entropy\n'), ((2654, 2675), 'torch.nn.functional.normalize', 'F.normalize', (['data_bow'], {}), '(data_bow)\n', (2665, 2675), True, 'from torch.nn import functional as F\n'), ((7632, 7687), 'evaluate.evaluate_loss', 'evaluate_loss', (['valid_data_loader', 'model', 'ntm_model', 'opt'], {}), '(valid_data_loader, model, ntm_model, opt)\n', (7645, 7687), False, 'from evaluate import evaluate_loss\n'), ((11469, 11627), 'logging.info', 'logging.info', (["('avg training ppl: %.3f; avg validation ppl: %.3f; best validation ppl: %.3f'\n % (current_train_ppl, current_valid_ppl, best_valid_ppl))"], {}), "(\n 'avg training ppl: %.3f; avg validation ppl: %.3f; best validation ppl: %.3f'\n % (current_train_ppl, current_valid_ppl, best_valid_ppl))\n", (11481, 11627), False, 'import logging\n'), ((11668, 11832), 'logging.info', 'logging.info', (["('avg training loss: %.3f; avg validation loss: %.3f; best validation loss: %.3f'\n % (current_train_loss, current_valid_loss, best_valid_loss))"], {}), "(\n 'avg training loss: %.3f; avg validation loss: %.3f; best validation loss: %.3f'\n % (current_train_loss, current_valid_loss, best_valid_loss))\n", (11680, 11832), False, 'import logging\n'), ((4365, 4472), 'os.path.join', 'os.path.join', (['opt.model_path', "('e%d.val_loss=%.3f.sparsity=%.3f.ntm_model' % (epoch, val_loss, sparsity))"], {}), "(opt.model_path, 'e%d.val_loss=%.3f.sparsity=%.3f.ntm_model' %\n (epoch, val_loss, sparsity))\n", (4377, 4472), False, 'import os\n'), ((4536, 4611), 'logging.info', 'logging.info', (['("""\nSaving warm up ntm model into %s""" % best_ntm_model_path)'], {}), '("""\nSaving warm up ntm model into %s""" % best_ntm_model_path)\n', (4548, 4611), False, 'import logging\n'), ((4834, 4873), 'torch.load', 'torch.load', (['opt.check_pt_ntm_model_path'], {}), '(opt.check_pt_ntm_model_path)\n', (4844, 4873), False, 'import torch\n'), ((7834, 7864), 'math.isnan', 'math.isnan', (['current_valid_loss'], {}), '(current_valid_loss)\n', (7844, 7864), False, 'import math\n'), ((7868, 7898), 'math.isnan', 'math.isnan', (['current_train_loss'], {}), '(current_train_loss)\n', (7878, 7898), False, 'import math\n'), ((7916, 8024), 'logging.info', 'logging.info', (["('NaN valid loss. Epoch: %d; batch_i: %d, total_batch: %d' % (epoch,\n batch_i, total_batch))"], {}), "('NaN valid loss. 
Epoch: %d; batch_i: %d, total_batch: %d' % (\n epoch, batch_i, total_batch))\n", (7928, 8024), False, 'import logging\n'), ((8236, 8254), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8252, 8254), False, 'import sys\n'), ((9790, 9808), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9806, 9808), False, 'import sys\n'), ((10847, 10917), 'logging.info', 'logging.info', (["('Saving seq2seq checkpoints to %s' % check_pt_model_path)"], {}), "('Saving seq2seq checkpoints to %s' % check_pt_model_path)\n", (10859, 10917), False, 'import logging\n'), ((12269, 12368), 'logging.info', 'logging.info', (["('Have not increased for %d check points, early stop training' %\n num_stop_dropping)"], {}), "('Have not increased for %d check points, early stop training' %\n num_stop_dropping)\n", (12281, 12368), False, 'import logging\n'), ((4269, 4325), 'os.path.join', 'os.path.join', (['opt.model_path', "('topwords_e%d.txt' % epoch)"], {}), "(opt.model_path, 'topwords_e%d.txt' % epoch)\n", (4281, 4325), False, 'import os\n'), ((8991, 9061), 'logging.info', 'logging.info', (["('Saving seq2seq checkpoints to %s' % check_pt_model_path)"], {}), "('Saving seq2seq checkpoints to %s' % check_pt_model_path)\n", (9003, 9061), False, 'import logging\n'), ((11262, 11332), 'logging.info', 'logging.info', (["('Saving ntm checkpoints to %s' % check_pt_ntm_model_path)"], {}), "('Saving ntm checkpoints to %s' % check_pt_ntm_model_path)\n", (11274, 11332), False, 'import logging\n'), ((9457, 9527), 'logging.info', 'logging.info', (["('Saving ntm checkpoints to %s' % check_pt_ntm_model_path)"], {}), "('Saving ntm checkpoints to %s' % check_pt_ntm_model_path)\n", (9469, 9527), False, 'import logging\n'), ((9648, 9704), 'os.path.join', 'os.path.join', (['opt.model_path', "('topwords_e%d.txt' % epoch)"], {}), "(opt.model_path, 'topwords_e%d.txt' % epoch)\n", (9660, 9704), False, 'import os\n'), ((11438, 11449), 'time.time', 'time.time', ([], {}), '()\n', (11447, 11449), False, 'import time\n'), ((10648, 10659), 'time.time', 'time.time', ([], {}), '()\n', (10657, 10659), False, 'import time\n'), ((8753, 8764), 'time.time', 'time.time', ([], {}), '()\n', (8762, 8764), False, 'import time\n')]
|
"""
SouthPark Chatbot
"""
import os
import argparse
import torch
import config
from models import MobileHairNet
from trainer import Trainer
from evaluate import evalTest, evaluate, evaluateOne
from dataset import HairDataset, ImgTransformer
from utils import CheckpointManager
DIR_PATH = os.path.dirname(__file__)
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda' if USE_CUDA else 'cpu')
SAVE_PATH = os.path.join(DIR_PATH, config.SAVE_DIR, config.MODEL_NAME)
def build_model(checkpoint):
model = MobileHairNet()
if checkpoint:
model.load_state_dict(checkpoint['model'])
# Use appropriate device
model = model.to(device)
return model
def train(mode, model, checkpoint, checkpoint_mng):
trainer = Trainer(model, checkpoint_mng)
if checkpoint:
trainer.resume(checkpoint)
    trainfile = os.path.join(DIR_PATH, config.TRAIN_CORPUS)
    devfile = os.path.join(DIR_PATH, config.TEST_CORPUS)
    print("Reading training data from %s..." % trainfile)
    train_datasets = HairDataset(trainfile, config.IMG_SIZE, color_aug=True)
print(f'Read {len(train_datasets)} training images')
print("Reading development data from %s..." % devfile)
dev_datasets = HairDataset(devfile, config.IMG_SIZE)
print(f'Read {len(dev_datasets)} development images')
# Ensure dropout layers are in train mode
model.train()
trainer.train(train_datasets, config.EPOCHS, config.BATCH_SIZE, stage=mode, dev_data=dev_datasets)
def test(model, checkpoint):
# Set dropout layers to eval mode
model.eval()
testfile = os.path.join(DIR_PATH, config.TEST_CORPUS)
print("Reading Testing data from %s..." % testfile)
test_datasets = HairDataset(testfile, config.IMG_SIZE)
print(f'Read {len(test_datasets)} testing images')
evalTest(test_datasets, model)
def run(model, checkpoint, dset='test', num=4, img_path=None):
# Set dropout layers to eval mode
model.eval()
if not img_path:
if dset == 'train':
path = config.TRAIN_CORPUS
else:
path = config.TEST_CORPUS
testfile = os.path.join(DIR_PATH, path)
print("Reading Testing data from %s..." % testfile)
test_datasets = HairDataset(testfile, config.IMG_SIZE)
print(f'Read {len(test_datasets)} testing images')
evaluate(test_datasets, model, num, absolute=False)
else:
transformer = ImgTransformer(config.IMG_SIZE, color_aug=False)
img = transformer.load(img_path)
evaluateOne(img, model, absolute=False)
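# Example invocations (hypothetical script/checkpoint/image names; flags are
# defined in main() below):
#   python main.py -m train
#   python main.py -m test -cp checkpoint_name
#   python main.py -m run -st test -n 4
#   python main.py -m run -im photo.jpg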
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', choices={'train', 'test', 'run'}, help="mode to run the network")
parser.add_argument('-cp', '--checkpoint')
parser.add_argument('-st', '--set', choices={'train', 'test'}, default='test')
parser.add_argument('-im', '--image')
parser.add_argument('-n', '--num', type=int, default=4)
args = parser.parse_args()
print('Saving path:', SAVE_PATH)
checkpoint_mng = CheckpointManager(SAVE_PATH)
checkpoint = None
if args.checkpoint:
print('Load checkpoint:', args.checkpoint)
checkpoint = checkpoint_mng.load(args.checkpoint, device)
model = build_model(checkpoint)
if args.mode == 'train':
train(args.mode, model, checkpoint, checkpoint_mng)
elif args.mode == 'test':
test(model, checkpoint)
elif args.mode == 'run':
run(model, checkpoint, dset=args.set, num=args.num, img_path=args.image)
def init():
parser = argparse.ArgumentParser()
parser.add_argument('-cp', '--checkpoint')
args = parser.parse_args()
checkpoint_mng = CheckpointManager(SAVE_PATH)
checkpoint = None if not args.checkpoint else checkpoint_mng.load(args.checkpoint, device)
model = build_model(checkpoint)
# Set dropout layers to eval mode
model.eval()
return model
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate",
"evaluate.evalTest",
"evaluate.evaluateOne"
] |
[((292, 317), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'import os\n'), ((329, 354), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (352, 354), False, 'import torch\n'), ((364, 407), 'torch.device', 'torch.device', (["('cuda' if USE_CUDA else 'cpu')"], {}), "('cuda' if USE_CUDA else 'cpu')\n", (376, 407), False, 'import torch\n'), ((421, 479), 'os.path.join', 'os.path.join', (['DIR_PATH', 'config.SAVE_DIR', 'config.MODEL_NAME'], {}), '(DIR_PATH, config.SAVE_DIR, config.MODEL_NAME)\n', (433, 479), False, 'import os\n'), ((523, 538), 'models.MobileHairNet', 'MobileHairNet', ([], {}), '()\n', (536, 538), False, 'from models import MobileHairNet\n'), ((755, 785), 'trainer.Trainer', 'Trainer', (['model', 'checkpoint_mng'], {}), '(model, checkpoint_mng)\n', (762, 785), False, 'from trainer import Trainer\n'), ((858, 901), 'os.path.join', 'os.path.join', (['DIR_PATH', 'config.TRAIN_CORPUS'], {}), '(DIR_PATH, config.TRAIN_CORPUS)\n', (870, 901), False, 'import os\n'), ((916, 958), 'os.path.join', 'os.path.join', (['DIR_PATH', 'config.TEST_CORPUS'], {}), '(DIR_PATH, config.TEST_CORPUS)\n', (928, 958), False, 'import os\n'), ((1040, 1095), 'dataset.HairDataset', 'HairDataset', (['trianfile', 'config.IMG_SIZE'], {'color_aug': '(True)'}), '(trianfile, config.IMG_SIZE, color_aug=True)\n', (1051, 1095), False, 'from dataset import HairDataset, ImgTransformer\n'), ((1234, 1271), 'dataset.HairDataset', 'HairDataset', (['devfile', 'config.IMG_SIZE'], {}), '(devfile, config.IMG_SIZE)\n', (1245, 1271), False, 'from dataset import HairDataset, ImgTransformer\n'), ((1601, 1643), 'os.path.join', 'os.path.join', (['DIR_PATH', 'config.TEST_CORPUS'], {}), '(DIR_PATH, config.TEST_CORPUS)\n', (1613, 1643), False, 'import os\n'), ((1721, 1759), 'dataset.HairDataset', 'HairDataset', (['testfile', 'config.IMG_SIZE'], {}), '(testfile, config.IMG_SIZE)\n', (1732, 1759), False, 'from dataset import HairDataset, ImgTransformer\n'), ((1821, 1851), 'evaluate.evalTest', 'evalTest', (['test_datasets', 'model'], {}), '(test_datasets, model)\n', (1829, 1851), False, 'from evaluate import evalTest, evaluate, evaluateOne\n'), ((2604, 2629), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2627, 2629), False, 'import argparse\n'), ((3058, 3086), 'utils.CheckpointManager', 'CheckpointManager', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (3075, 3086), False, 'from utils import CheckpointManager\n'), ((3578, 3603), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3601, 3603), False, 'import argparse\n'), ((3704, 3732), 'utils.CheckpointManager', 'CheckpointManager', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (3721, 3732), False, 'from utils import CheckpointManager\n'), ((2132, 2160), 'os.path.join', 'os.path.join', (['DIR_PATH', 'path'], {}), '(DIR_PATH, path)\n', (2144, 2160), False, 'import os\n'), ((2246, 2284), 'dataset.HairDataset', 'HairDataset', (['testfile', 'config.IMG_SIZE'], {}), '(testfile, config.IMG_SIZE)\n', (2257, 2284), False, 'from dataset import HairDataset, ImgTransformer\n'), ((2354, 2405), 'evaluate.evaluate', 'evaluate', (['test_datasets', 'model', 'num'], {'absolute': '(False)'}), '(test_datasets, model, num, absolute=False)\n', (2362, 2405), False, 'from evaluate import evalTest, evaluate, evaluateOne\n'), ((2438, 2486), 'dataset.ImgTransformer', 'ImgTransformer', (['config.IMG_SIZE'], {'color_aug': '(False)'}), '(config.IMG_SIZE, color_aug=False)\n', (2452, 2486), False, 'from dataset import 
HairDataset, ImgTransformer\n'), ((2537, 2576), 'evaluate.evaluateOne', 'evaluateOne', (['img', 'model'], {'absolute': '(False)'}), '(img, model, absolute=False)\n', (2548, 2576), False, 'from evaluate import evalTest, evaluate, evaluateOne\n')]
|
from pathlib import Path
import sys
sys.path.append(str(Path().absolute()))
import logging
log_level = "INFO"
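# `snakemake` below is never imported: Snakemake injects the object into the
# global namespace when this file is executed via a rule's `script:` directive.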
logging.basicConfig(
filename=str(snakemake.log),
filemode="w",
level=log_level,
format="[%(asctime)s]:%(levelname)s: %(message)s",
datefmt="%d/%m/%Y %I:%M:%S %p",
)
from evaluate.report import RecallReport
# setup
recall_report_files_for_one_sample_and_all_gt_conf_percentiles = (
snakemake.input.recall_report_files_for_one_sample_and_all_gt_conf_percentiles
)
recall_report_per_sample_for_calculator = snakemake.output.recall_report_per_sample_for_calculator
# API usage
logging.info(f"Loading report")
recall_report = RecallReport.from_files(recall_report_files_for_one_sample_and_all_gt_conf_percentiles,
concatenate_dfs_one_by_one_keeping_only_best_mappings=True)
with open(recall_report_per_sample_for_calculator, "w") as recall_report_per_sample_for_calculator_filehandler:
recall_report.save_report(recall_report_per_sample_for_calculator_filehandler)
logging.info(f"Done")
|
[
"evaluate.report.RecallReport.from_files"
] |
[((612, 643), 'logging.info', 'logging.info', (['f"""Loading report"""'], {}), "(f'Loading report')\n", (624, 643), False, 'import logging\n'), ((660, 816), 'evaluate.report.RecallReport.from_files', 'RecallReport.from_files', (['recall_report_files_for_one_sample_and_all_gt_conf_percentiles'], {'concatenate_dfs_one_by_one_keeping_only_best_mappings': '(True)'}), '(\n recall_report_files_for_one_sample_and_all_gt_conf_percentiles,\n concatenate_dfs_one_by_one_keeping_only_best_mappings=True)\n', (683, 816), False, 'from evaluate.report import RecallReport\n'), ((1045, 1066), 'logging.info', 'logging.info', (['f"""Done"""'], {}), "(f'Done')\n", (1057, 1066), False, 'import logging\n'), ((56, 62), 'pathlib.Path', 'Path', ([], {}), '()\n', (60, 62), False, 'from pathlib import Path\n')]
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import pickle
import logging
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from evaluate import Evaluator
from config import (sample_rate, classes_num, mel_bins, fmin, fmax,
window_size, hop_size, window, pad_mode, center, device, ref, amin, top_db)
from losses import get_loss_func
from pytorch_utils import move_data_to_device, do_mixup
from utilities import (create_folder, get_filename, create_logging,
StatisticsContainer, Mixup)
from data_generator import (DCASE2017Task4Dataset, TrainSampler, TestSampler,
collate_fn)
from models import *
def train(args):
"""Train and evaluate.
Args:
dataset_dir: str
workspace: str
holdout_fold: '1'
model_type: str, e.g., 'Cnn_9layers_Gru_FrameAtt'
loss_type: str, e.g., 'clip_bce'
augmentation: str, e.g., 'mixup'
learning_rate, float
batch_size: int
resume_iteration: int
stop_iteration: int
device: 'cuda' | 'cpu'
mini_data: bool
"""
    # Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
holdout_fold = args.holdout_fold
model_type = args.model_type
loss_type = args.loss_type
augmentation = args.augmentation
learning_rate = args.learning_rate
batch_size = args.batch_size
resume_iteration = args.resume_iteration
stop_iteration = args.stop_iteration
device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
mini_data = args.mini_data
filename = args.filename
loss_func = get_loss_func(loss_type)
num_workers = 8
# Paths
if mini_data:
prefix = 'minidata_'
else:
prefix = ''
train_hdf5_path = os.path.join(workspace, 'hdf5s',
'{}training.h5'.format(prefix))
test_hdf5_path = os.path.join(workspace, 'hdf5s',
'{}testing.h5'.format(prefix))
evaluate_hdf5_path = os.path.join(workspace, 'hdf5s',
        'evaluation.h5')
test_reference_csv_path = os.path.join(dataset_dir, 'metadata',
'groundtruth_strong_label_testing_set.csv')
evaluate_reference_csv_path = os.path.join(dataset_dir, 'metadata',
'groundtruth_strong_label_evaluation_set.csv')
checkpoints_dir = os.path.join(workspace, 'checkpoints',
'{}{}'.format(prefix, filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
create_folder(checkpoints_dir)
tmp_submission_path = os.path.join(workspace, '_tmp_submission',
'{}{}'.format(prefix, filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'_submission.csv')
create_folder(os.path.dirname(tmp_submission_path))
statistics_path = os.path.join(workspace, 'statistics',
'{}{}'.format(prefix, filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pickle')
create_folder(os.path.dirname(statistics_path))
logs_dir = os.path.join(workspace, 'logs', '{}{}'.format(prefix, filename),
'holdout_fold={}'.format(holdout_fold), 'model_type={}'.format(model_type),
'loss_type={}'.format(loss_type), 'augmentation={}'.format(augmentation),
'batch_size={}'.format(batch_size))
create_logging(logs_dir, 'w')
logging.info(args)
if 'cuda' in device:
logging.info('Using GPU.')
else:
logging.info('Using CPU. Set --cuda flag to use GPU.')
# Model
assert model_type, 'Please specify model_type!'
Model = eval(model_type)
model = Model(sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
classes_num)
    # Statistics container (created before the resume branch, which restores its state)
    statistics_container = StatisticsContainer(statistics_path)
    if resume_iteration:
resume_checkpoint_path = os.path.join(checkpoints_dir,
'{}_iterations.pth'.format(resume_iteration))
logging.info('Load resume model from {}'.format(resume_checkpoint_path))
resume_checkpoint = torch.load(resume_checkpoint_path)
model.load_state_dict(resume_checkpoint['model'])
statistics_container.load_state_dict(resume_iteration)
iteration = resume_checkpoint['iteration']
else:
iteration = 0
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in device:
model.to(device)
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0., amsgrad=True)
# Dataset
dataset = DCASE2017Task4Dataset()
# Sampler
train_sampler = TrainSampler(
hdf5_path=train_hdf5_path,
batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size)
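    # Under mixup each example is blended with a partner, so the sampler draws
    # twice the nominal batch to yield batch_size mixed examples.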
test_sampler = TestSampler(hdf5_path=test_hdf5_path, batch_size=batch_size)
evaluate_sampler = TestSampler(hdf5_path=evaluate_hdf5_path, batch_size=batch_size)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=train_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
test_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=test_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
evaluate_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=evaluate_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
if 'mixup' in augmentation:
mixup_augmenter = Mixup(mixup_alpha=1.)
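        # mixup_alpha=1.0 means the mixing weight lambda is drawn from
        # Beta(1, 1), i.e. uniformly on [0, 1]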
# Evaluator
evaluator = Evaluator(model=model)
train_bgn_time = time.time()
# Train on mini batches
for batch_data_dict in train_loader:
# Evaluate
if (iteration % 1000 == 0 and iteration > resume_iteration):# or (iteration == 0):
logging.info('------------------------------------')
logging.info('Iteration: {}'.format(iteration))
train_fin_time = time.time()
for (data_type, data_loader, reference_csv_path) in [
('test', test_loader, test_reference_csv_path),
('evaluate', evaluate_loader, evaluate_reference_csv_path)]:
                # Calculate statistics
(statistics, _) = evaluator.evaluate(
data_loader, reference_csv_path, tmp_submission_path)
logging.info('{} statistics:'.format(data_type))
logging.info(' Clipwise mAP: {:.3f}'.format(np.mean(statistics['clipwise_ap'])))
logging.info(' Framewise mAP: {:.3f}'.format(np.mean(statistics['framewise_ap'])))
logging.info(' {}'.format(statistics['sed_metrics']['overall']['error_rate']))
statistics_container.append(data_type, iteration, statistics)
statistics_container.dump()
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'Train time: {:.3f} s, validate time: {:.3f} s'
''.format(train_time, validate_time))
train_bgn_time = time.time()
# Save model
if iteration % 10000 == 0:
checkpoint = {
'iteration': iteration,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
if 'mixup' in augmentation:
batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda(
batch_size=len(batch_data_dict['waveform']))
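            # one mixing weight per example in the (doubled) batch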
# Move data to GPU
for key in batch_data_dict.keys():
batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)
# Train
model.train()
if 'mixup' in augmentation:
batch_output_dict = model(batch_data_dict['waveform'], batch_data_dict['mixup_lambda'])
batch_target_dict = {'target': do_mixup(batch_data_dict['target'], batch_data_dict['mixup_lambda'])}
else:
batch_output_dict = model(batch_data_dict['waveform'], None)
batch_target_dict = {'target': batch_data_dict['target']}
# loss
loss = loss_func(batch_output_dict, batch_target_dict)
print(iteration, loss)
# Backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Stop learning
if iteration == stop_iteration:
break
iteration += 1
def inference_prob(args):
    """Inference test and evaluate data and dump predicted probabilities to
    pickle files.
Args:
dataset_dir: str
workspace: str
holdout_fold: '1'
model_type: str, e.g., 'Cnn_9layers_Gru_FrameAtt'
loss_type: str, e.g., 'clip_bce'
augmentation: str, e.g., 'mixup'
batch_size: int
device: 'cuda' | 'cpu'
"""
# Arugments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
holdout_fold = args.holdout_fold
model_type = args.model_type
loss_type = args.loss_type
augmentation = args.augmentation
batch_size = args.batch_size
iteration = args.iteration
device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
filename = args.filename
num_workers = 8
# Paths
test_hdf5_path = os.path.join(workspace, 'hdf5s', 'testing.h5')
evaluate_hdf5_path = os.path.join(workspace, 'hdf5s', 'evaluation.h5')
test_reference_csv_path = os.path.join(dataset_dir, 'metadata',
'groundtruth_strong_label_testing_set.csv')
evaluate_reference_csv_path = os.path.join(dataset_dir, 'metadata',
'groundtruth_strong_label_evaluation_set.csv')
checkpoint_path = os.path.join(workspace, 'checkpoints',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.pth'.format(iteration))
predictions_dir = os.path.join(workspace, 'predictions',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
create_folder(predictions_dir)
tmp_submission_path = os.path.join(workspace, '_tmp_submission',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'_submission.csv')
create_folder(os.path.dirname(tmp_submission_path))
# Load model
assert model_type, 'Please specify model_type!'
Model = eval(model_type)
model = Model(sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
classes_num)
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in device:
model.to(device)
# Dataset
dataset = DCASE2017Task4Dataset()
# Sampler
test_sampler = TestSampler(hdf5_path=test_hdf5_path, batch_size=batch_size)
evaluate_sampler = TestSampler(hdf5_path=evaluate_hdf5_path, batch_size=batch_size)
# Data loader
test_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=test_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
evaluate_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=evaluate_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
# Evaluator
evaluator = Evaluator(model=model)
for (data_type, data_loader, reference_csv_path) in [
('test', test_loader, test_reference_csv_path),
('evaluate', evaluate_loader, evaluate_reference_csv_path)]:
print('Inferencing {} data in about 1 min ...'.format(data_type))
(statistics, output_dict) = evaluator.evaluate(
data_loader, reference_csv_path, tmp_submission_path)
prediction_path = os.path.join(predictions_dir,
'{}_iterations.prediction.{}.pkl'.format(iteration, data_type))
# write_out_prediction(output_dict, prediction_path)
pickle.dump(output_dict, open(prediction_path, 'wb'))
print('Write out to {}'.format(prediction_path))
if __name__ == '__main__':
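    # Example training invocation (hypothetical file name, paths and values;
    # the flags are the ones defined on the subparsers below):
    #   python main.py train --dataset_dir=DATASET_DIR --workspace=WORKSPACE \
    #       --holdout_fold=1 --model_type=Cnn_9layers_Gru_FrameAtt \
    #       --loss_type=clip_bce --augmentation=mixup --learning_rate=1e-3 \
    #       --batch_size=32 --stop_iteration=50000 --cuda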
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
# Train
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser_train.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
parser_train.add_argument('--holdout_fold', type=str, choices=['1'], required=True)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--loss_type', type=str, required=True)
parser_train.add_argument('--augmentation', type=str, choices=['none', 'mixup'], required=True)
parser_train.add_argument('--learning_rate', type=float, required=True)
parser_train.add_argument('--batch_size', type=int, required=True)
parser_train.add_argument('--resume_iteration', type=int)
parser_train.add_argument('--stop_iteration', type=int, required=True)
parser_train.add_argument('--cuda', action='store_true', default=False)
parser_train.add_argument('--mini_data', action='store_true', default=False)
# Inference
parser_inference_prob = subparsers.add_parser('inference_prob')
parser_inference_prob.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser_inference_prob.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
parser_inference_prob.add_argument('--holdout_fold', type=str, choices=['1'], required=True)
parser_inference_prob.add_argument('--model_type', type=str, required=True)
parser_inference_prob.add_argument('--loss_type', type=str, required=True)
parser_inference_prob.add_argument('--augmentation', type=str, choices=['none', 'mixup'], required=True)
parser_inference_prob.add_argument('--batch_size', type=int, required=True)
parser_inference_prob.add_argument('--iteration', type=int, required=True)
parser_inference_prob.add_argument('--cuda', action='store_true', default=False)
# Parse arguments
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
elif args.mode == 'inference_prob':
inference_prob(args)
else:
raise Exception('Error argument!')
|
[
"evaluate.Evaluator"
] |
[((40, 77), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../utils"""'], {}), "(sys.path[0], '../utils')\n", (52, 77), False, 'import os\n'), ((1785, 1809), 'losses.get_loss_func', 'get_loss_func', (['loss_type'], {}), '(loss_type)\n', (1798, 1809), False, 'from losses import get_loss_func\n'), ((2247, 2332), 'os.path.join', 'os.path.join', (['dataset_dir', '"""metadata"""', '"""groundtruth_strong_label_testing_set.csv"""'], {}), "(dataset_dir, 'metadata',\n 'groundtruth_strong_label_testing_set.csv')\n", (2259, 2332), False, 'import os\n'), ((2381, 2469), 'os.path.join', 'os.path.join', (['dataset_dir', '"""metadata"""', '"""groundtruth_strong_label_evaluation_set.csv"""'], {}), "(dataset_dir, 'metadata',\n 'groundtruth_strong_label_evaluation_set.csv')\n", (2393, 2469), False, 'import os\n'), ((2787, 2817), 'utilities.create_folder', 'create_folder', (['checkpoints_dir'], {}), '(checkpoints_dir)\n', (2800, 2817), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer, Mixup\n'), ((3904, 3933), 'utilities.create_logging', 'create_logging', (['logs_dir', '"""w"""'], {}), "(logs_dir, 'w')\n", (3918, 3933), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer, Mixup\n'), ((3938, 3956), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (3950, 3956), False, 'import logging\n'), ((4873, 4901), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4894, 4901), False, 'import torch\n'), ((5135, 5158), 'data_generator.DCASE2017Task4Dataset', 'DCASE2017Task4Dataset', ([], {}), '()\n', (5156, 5158), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((5194, 5308), 'data_generator.TrainSampler', 'TrainSampler', ([], {'hdf5_path': 'train_hdf5_path', 'batch_size': "(batch_size * 2 if 'mixup' in augmentation else batch_size)"}), "(hdf5_path=train_hdf5_path, batch_size=batch_size * 2 if \n 'mixup' in augmentation else batch_size)\n", (5206, 5308), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((5346, 5406), 'data_generator.TestSampler', 'TestSampler', ([], {'hdf5_path': 'test_hdf5_path', 'batch_size': 'batch_size'}), '(hdf5_path=test_hdf5_path, batch_size=batch_size)\n', (5357, 5406), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((5431, 5495), 'data_generator.TestSampler', 'TestSampler', ([], {'hdf5_path': 'evaluate_hdf5_path', 'batch_size': 'batch_size'}), '(hdf5_path=evaluate_hdf5_path, batch_size=batch_size)\n', (5442, 5495), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((5534, 5676), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_sampler': 'train_sampler', 'collate_fn': 'collate_fn', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=dataset, batch_sampler=train_sampler,\n collate_fn=collate_fn, num_workers=num_workers, pin_memory=True)\n', (5561, 5676), False, 'import torch\n'), ((5714, 5855), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_sampler': 'test_sampler', 'collate_fn': 'collate_fn', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=dataset, batch_sampler=test_sampler,\n collate_fn=collate_fn, num_workers=num_workers, pin_memory=True)\n', (5741, 5855), False, 'import torch\n'), ((5893, 6038), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_sampler': 'evaluate_sampler', 'collate_fn': 'collate_fn', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=dataset, batch_sampler=evaluate_sampler,\n collate_fn=collate_fn, num_workers=num_workers, pin_memory=True)\n', (5920, 6038), False, 'import torch\n'), ((6175, 6197), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model'}), '(model=model)\n', (6184, 6197), False, 'from evaluate import Evaluator\n'), ((6243, 6279), 'utilities.StatisticsContainer', 'StatisticsContainer', (['statistics_path'], {}), '(statistics_path)\n', (6262, 6279), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer, Mixup\n'), ((6302, 6313), 'time.time', 'time.time', ([], {}), '()\n', (6311, 6313), False, 'import time\n'), ((10310, 10356), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""testing.h5"""'], {}), "(workspace, 'hdf5s', 'testing.h5')\n", (10322, 10356), False, 'import os\n'), ((10383, 10432), 'os.path.join', 'os.path.join', (['workspace', '"""hdf5s"""', '"""evaluation.h5"""'], {}), "(workspace, 'hdf5s', 'evaluation.h5')\n", (10395, 10432), False, 'import os\n'), ((10464, 10549), 'os.path.join', 'os.path.join', (['dataset_dir', '"""metadata"""', '"""groundtruth_strong_label_testing_set.csv"""'], {}), "(dataset_dir, 'metadata',\n 'groundtruth_strong_label_testing_set.csv')\n", (10476, 10549), False, 'import os\n'), ((10598, 10686), 'os.path.join', 'os.path.join', (['dataset_dir', '"""metadata"""', '"""groundtruth_strong_label_evaluation_set.csv"""'], {}), "(dataset_dir, 'metadata',\n 'groundtruth_strong_label_evaluation_set.csv')\n", (10610, 10686), False, 'import os\n'), ((11339, 11369), 'utilities.create_folder', 'create_folder', (['predictions_dir'], {}), '(predictions_dir)\n', (11352, 11369), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer, Mixup\n'), ((11974, 12001), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (11984, 12001), False, 'import torch\n'), ((12139, 12167), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (12160, 12167), False, 'import torch\n'), ((12248, 12271), 'data_generator.DCASE2017Task4Dataset', 'DCASE2017Task4Dataset', ([], {}), '()\n', (12269, 12271), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((12306, 12366), 'data_generator.TestSampler', 'TestSampler', ([], {'hdf5_path': 'test_hdf5_path', 'batch_size': 'batch_size'}), '(hdf5_path=test_hdf5_path, batch_size=batch_size)\n', (12317, 12366), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((12391, 12455), 'data_generator.TestSampler', 'TestSampler', ([], {'hdf5_path': 'evaluate_hdf5_path', 'batch_size': 'batch_size'}), '(hdf5_path=evaluate_hdf5_path, batch_size=batch_size)\n', (12402, 12455), False, 'from data_generator import DCASE2017Task4Dataset, TrainSampler, TestSampler, collate_fn\n'), ((12493, 12634), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_sampler': 'test_sampler', 'collate_fn': 'collate_fn', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=dataset, batch_sampler=test_sampler,\n collate_fn=collate_fn, num_workers=num_workers, pin_memory=True)\n', (12520, 12634), False, 'import torch\n'), ((12672, 12817), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 
'batch_sampler': 'evaluate_sampler', 'collate_fn': 'collate_fn', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(dataset=dataset, batch_sampler=evaluate_sampler,\n collate_fn=collate_fn, num_workers=num_workers, pin_memory=True)\n', (12699, 12817), False, 'import torch\n'), ((12865, 12887), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model'}), '(model=model)\n', (12874, 12887), False, 'from evaluate import Evaluator\n'), ((13628, 13686), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Example of parser. """'}), "(description='Example of parser. ')\n", (13651, 13686), False, 'import argparse\n'), ((15780, 15802), 'utilities.get_filename', 'get_filename', (['__file__'], {}), '(__file__)\n', (15792, 15802), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer, Mixup\n'), ((3179, 3215), 'os.path.dirname', 'os.path.dirname', (['tmp_submission_path'], {}), '(tmp_submission_path)\n', (3194, 3215), False, 'import os\n'), ((3572, 3604), 'os.path.dirname', 'os.path.dirname', (['statistics_path'], {}), '(statistics_path)\n', (3587, 3604), False, 'import os\n'), ((3991, 4017), 'logging.info', 'logging.info', (['"""Using GPU."""'], {}), "('Using GPU.')\n", (4003, 4017), False, 'import logging\n'), ((4036, 4090), 'logging.info', 'logging.info', (['"""Using CPU. Set --cuda flag to use GPU."""'], {}), "('Using CPU. Set --cuda flag to use GPU.')\n", (4048, 4090), False, 'import logging\n'), ((4544, 4578), 'torch.load', 'torch.load', (['resume_checkpoint_path'], {}), '(resume_checkpoint_path)\n', (4554, 4578), False, 'import torch\n'), ((6116, 6138), 'utilities.Mixup', 'Mixup', ([], {'mixup_alpha': '(1.0)'}), '(mixup_alpha=1.0)\n', (6121, 6138), False, 'from utilities import create_folder, get_filename, create_logging, StatisticsContainer, Mixup\n'), ((11721, 11757), 'os.path.dirname', 'os.path.dirname', (['tmp_submission_path'], {}), '(tmp_submission_path)\n', (11736, 11757), False, 'import os\n'), ((1670, 1695), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1693, 1695), False, 'import torch\n'), ((4833, 4858), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4856, 4858), False, 'import torch\n'), ((6520, 6572), 'logging.info', 'logging.info', (['"""------------------------------------"""'], {}), "('------------------------------------')\n", (6532, 6572), False, 'import logging\n'), ((6663, 6674), 'time.time', 'time.time', ([], {}), '()\n', (6672, 6674), False, 'import time\n'), ((7827, 7838), 'time.time', 'time.time', ([], {}), '()\n', (7836, 7838), False, 'import time\n'), ((8217, 8256), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (8227, 8256), False, 'import torch\n'), ((8613, 8662), 'pytorch_utils.move_data_to_device', 'move_data_to_device', (['batch_data_dict[key]', 'device'], {}), '(batch_data_dict[key], device)\n', (8632, 8662), False, 'from pytorch_utils import move_data_to_device, do_mixup\n'), ((10188, 10213), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10211, 10213), False, 'import torch\n'), ((12099, 12124), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (12122, 12124), False, 'import torch\n'), ((7623, 7634), 'time.time', 'time.time', ([], {}), '()\n', (7632, 7634), False, 'import time\n'), ((8890, 8958), 'pytorch_utils.do_mixup', 'do_mixup', (["batch_data_dict['target']", "batch_data_dict['mixup_lambda']"], {}), "(batch_data_dict['target'], 
batch_data_dict['mixup_lambda'])\n", (8898, 8958), False, 'from pytorch_utils import move_data_to_device, do_mixup\n'), ((7180, 7214), 'numpy.mean', 'np.mean', (["statistics['clipwise_ap']"], {}), "(statistics['clipwise_ap'])\n", (7187, 7214), True, 'import numpy as np\n'), ((7281, 7316), 'numpy.mean', 'np.mean', (["statistics['framewise_ap']"], {}), "(statistics['framewise_ap'])\n", (7288, 7316), True, 'import numpy as np\n')]
|
from keras import backend as K
from keras.models import load_model
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import FileStorageObserver
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback, TensorBoard
from utils.util import prepare_dataset, split_data
from utils.logging import *
from train import train
from evaluate import evaluate
import tensorflow as tf
import os
import json
import shutil
import importlib
# initialize globals
config = None
config_path = None
experiment_name = None
experiment_path = None
# file paths
full_kfold_summary_file_path = 'kfold_summary.txt'
all_results_file_path = 'all_results.txt'
# reset tensorflow graph
tf.reset_default_graph()
# remove unnecessary tensorflow output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# set dimension ordering to tensorflow
K.set_image_dim_ordering('tf')
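# ('tf' ordering = channels-last: image tensors shaped (height, width, channels))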
# main run function
def run():
# declare global variables
global config
global config_path
# add config file to experiment
experiment.add_artifact(config_path)
    if config['dataset'].get('link'):
dataset_config_path = f'../configs/datasets/{ config["dataset"]["link"] }'
experiment.add_artifact(dataset_config_path)
config['dataset'].update( json.load( open( dataset_config_path ) ) )
# dataset specific variables
folds = config['dataset']['split']
data_directory = config['dataset']['path']
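    # `data_name` is not defined anywhere in this file; assume it comes from the
    # dataset config (hypothetical key 'name', falling back to the experiment name)
    data_name = config['dataset'].get('name', experiment_name)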
# split dataset into k folds
split_dirs = split_data(folds, data_directory, data_name)
# total results dictionary
results = {
'f1': [],
'rec': [],
'acc': [],
'mcc': [],
'prec': [],
'spec': []
}
# iterate over each dataset split
for split_index in range(len(split_dirs)):
# print current validation split index
print(f'start validating on split {split_index}')
# restart keras session
K.clear_session()
# prepare dataset by distributing the k splits
# into training and validation sets
training_directory, validation_directory = prepare_dataset(split_dirs, split_index, len(split_dirs))
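        # prepare_dataset presumably merges the k-1 non-held-out splits into the
        # training set and keeps the split at split_index as validation (k-fold rotation)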
if config['dataset'].get('validation_extension', False):
extension_path = config['dataset']['validation_extension_path']
for class_extension in os.listdir(extension_path):
class_path = os.path.join(extension_path, class_extension)
target_path = os.path.join(validation_directory, class_extension)
for filename in os.listdir(class_path):
shutil.copy(os.path.join(class_path, filename), os.path.join(target_path, filename))
# print training directories for sanity
print(f'training on {training_directory}')
print(f'validation on {validation_directory}')
# load model from model file or build it using a build file.
if config['model'].get('load_model', False):
model = load_model(config['model']['model_splits'][split_index])
else:
model_builder_path = config['model']['build_file']
model_builder = importlib.import_module(f'models.{model_builder_path}')
model = model_builder.build(config)
# train model and get last weigths
if config['model'].get('train', True):
print("Start training...")
model = train(model, config, experiment, training_directory, validation_directory, f'split_{split_index}')
evaluate(model, config, experiment, validation_directory, f'split_{split_index}')
# if fine tune, train model again on config link found in config
if config.get('fine_tuning', { }).get('link', False) and config['model'].get('train', True):
print("Start fine tuning...")
# load config link from config
fine_tuning_config_name = config['fine_tuning']['link']
fine_tuning_config_path = f'../configs/links/{fine_tuning_config_name}'
fine_tuning_config = json.load(open(fine_tuning_config_path))
        if fine_tuning_config['dataset'].get('link'):
dataset_config_path = f'../configs/datasets/{fine_tuning_config["dataset"]["link"]}'
experiment.add_artifact( dataset_config_path )
fine_tuning_config['dataset'].update( json.load(open( dataset_config_path ) ) )
# add link config to experiment
experiment.add_artifact(fine_tuning_config_path)
# train using new config
model = train(model, fine_tuning_config, experiment, training_directory, validation_directory, f'fine_split_{split_index}')
# evaluate train model and get metrics
print("Start evaluation...")
split_results = evaluate(model, config, experiment, validation_directory, f'split_{split_index}')
# merge split results with total results
for key in split_results:
results[key].append(split_results[key])
print(key, results[key])
# log results
log_cross_validation_results(full_kfold_summary_file_path, results, experiment_name, folds)
log_to_results_comparison( results, experiment_name, folds)
experiment.add_artifact(full_kfold_summary_file_path)
experiment.add_artifact(all_results_file_path)
if __name__ == '__main__':
# list configs in active directory
configs = os.listdir( '../configs/active' )
# iterate over each config and perform experiment
for config_file in configs:
# set config path
config_path = f'../configs/active/{config_file}'
# load config file
config = json.load(open(config_path))
# get experiment path
experiment_name = config['experiment']['name']
experiment_path = f'../experiments/{experiment_name}'
# initialize experiment
experiment = Experiment(experiment_name)
experiment.captured_out_filter = apply_backspaces_and_linefeeds
experiment.observers.append(FileStorageObserver.create(experiment_path))
# run experiment
experiment.automain(run)
|
[
"evaluate.evaluate"
] |
[((734, 758), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (756, 758), True, 'import tensorflow as tf\n'), ((880, 910), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (904, 910), True, 'from keras import backend as K\n'), ((1535, 1579), 'utils.util.split_data', 'split_data', (['folds', 'data_directory', 'data_name'], {}), '(folds, data_directory, data_name)\n', (1545, 1579), False, 'from utils.util import prepare_dataset, split_data\n'), ((5516, 5547), 'os.listdir', 'os.listdir', (['"""../configs/active"""'], {}), "('../configs/active')\n", (5526, 5547), False, 'import os\n'), ((1997, 2014), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2012, 2014), True, 'from keras import backend as K\n'), ((4885, 4970), 'evaluate.evaluate', 'evaluate', (['model', 'config', 'experiment', 'validation_directory', 'f"""split_{split_index}"""'], {}), "(model, config, experiment, validation_directory,\n f'split_{split_index}')\n", (4893, 4970), False, 'from evaluate import evaluate\n'), ((5997, 6024), 'sacred.Experiment', 'Experiment', (['experiment_name'], {}), '(experiment_name)\n', (6007, 6024), False, 'from sacred import Experiment\n'), ((2404, 2430), 'os.listdir', 'os.listdir', (['extension_path'], {}), '(extension_path)\n', (2414, 2430), False, 'import os\n'), ((3058, 3114), 'keras.models.load_model', 'load_model', (["config['model']['model_splits'][split_index]"], {}), "(config['model']['model_splits'][split_index])\n", (3068, 3114), False, 'from keras.models import load_model\n'), ((3221, 3276), 'importlib.import_module', 'importlib.import_module', (['f"""models.{model_builder_path}"""'], {}), "(f'models.{model_builder_path}')\n", (3244, 3276), False, 'import importlib\n'), ((3475, 3577), 'train.train', 'train', (['model', 'config', 'experiment', 'training_directory', 'validation_directory', 'f"""split_{split_index}"""'], {}), "(model, config, experiment, training_directory, validation_directory,\n f'split_{split_index}')\n", (3480, 3577), False, 'from train import train\n'), ((3586, 3671), 'evaluate.evaluate', 'evaluate', (['model', 'config', 'experiment', 'validation_directory', 'f"""split_{split_index}"""'], {}), "(model, config, experiment, validation_directory,\n f'split_{split_index}')\n", (3594, 3671), False, 'from evaluate import evaluate\n'), ((4659, 4778), 'train.train', 'train', (['model', 'fine_tuning_config', 'experiment', 'training_directory', 'validation_directory', 'f"""fine_split_{split_index}"""'], {}), "(model, fine_tuning_config, experiment, training_directory,\n validation_directory, f'fine_split_{split_index}')\n", (4664, 4778), False, 'from train import train\n'), ((6133, 6176), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['experiment_path'], {}), '(experiment_path)\n', (6159, 6176), False, 'from sacred.observers import FileStorageObserver\n'), ((2462, 2507), 'os.path.join', 'os.path.join', (['extension_path', 'class_extension'], {}), '(extension_path, class_extension)\n', (2474, 2507), False, 'import os\n'), ((2538, 2589), 'os.path.join', 'os.path.join', (['validation_directory', 'class_extension'], {}), '(validation_directory, class_extension)\n', (2550, 2589), False, 'import os\n'), ((2623, 2645), 'os.listdir', 'os.listdir', (['class_path'], {}), '(class_path)\n', (2633, 2645), False, 'import os\n'), ((2679, 2713), 'os.path.join', 'os.path.join', (['class_path', 'filename'], {}), '(class_path, filename)\n', (2691, 2713), False, 'import 
os\n'), ((2715, 2750), 'os.path.join', 'os.path.join', (['target_path', 'filename'], {}), '(target_path, filename)\n', (2727, 2750), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Training script for seq2seq keyphrase generation (attention + optional copy mechanism, with an unfinished RL objective)
"""
import os
import sys
import argparse
import logging
import numpy as np
import time
import torchtext
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
import config
import utils
import copy
import torch
import torch.nn as nn
from torch import cuda
from beam_search import SequenceGenerator
from evaluate import evaluate_beam_search
from pykp.dataloader import KeyphraseDataLoader
from utils import Progbar, plot_learning_curve_and_write_csv
import pykp
from pykp.io import KeyphraseDataset
from pykp.model import Seq2SeqLSTMAttention, Seq2SeqLSTMAttentionCopy
import time
def time_usage(func):
# argnames = func.func_code.co_varnames[:func.func_code.co_argcount]
fname = func.__name__
def wrapper(*args, **kwargs):
beg_ts = time.time()
retval = func(*args, **kwargs)
end_ts = time.time()
print(fname, "elapsed time: %f" % (end_ts - beg_ts))
return retval
return wrapper
__author__ = "<NAME>"
__email__ = "<EMAIL>"
@time_usage
def _valid_error(data_loader, model, criterion, epoch, opt):
progbar = Progbar(title='Validating', target=len(data_loader), batch_size=data_loader.batch_size,
total_examples=len(data_loader.dataset))
model.eval()
losses = []
# Note that the data should be shuffled every time
for i, batch in enumerate(data_loader):
# if i >= 100:
# break
one2many_batch, one2one_batch = batch
src, trg, trg_target, trg_copy_target, src_ext, oov_lists = one2one_batch
if torch.cuda.is_available():
src = src.cuda()
trg = trg.cuda()
trg_target = trg_target.cuda()
trg_copy_target = trg_copy_target.cuda()
src_ext = src_ext.cuda()
decoder_log_probs, _, _ = model.forward(src, trg, src_ext)
if not opt.copy_model:
loss = criterion(
decoder_log_probs.contiguous().view(-1, opt.vocab_size),
trg_target.contiguous().view(-1)
)
else:
loss = criterion(
decoder_log_probs.contiguous().view(-1, opt.vocab_size + opt.max_unk_words),
trg_copy_target.contiguous().view(-1)
)
losses.append(loss.data[0])
progbar.update(epoch, i, [('valid_loss', loss.data[0]), ('PPL', loss.data[0])])
return losses
def get_loss_rl():
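    # NOTE: this is an unfinished reinforcement-learning reward sketch; it
    # references names (generator, src_list, src_oov_map_list, oov_list,
    # src_str_list, trg_str_list, trg_copy_target_list, process_predseqs,
    # get_match_result) that are not defined in this scope, so calling it
    # as-is raises NameError.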
pred_seq_list = generator.beam_search(src_list, src_oov_map_list, oov_list, opt.word2id)
for src, src_str, trg, trg_str, trg_copy, pred_seq, oov in zip(src_list, src_str_list, trg_list,
trg_str_list, trg_copy_target_list,
pred_seq_list, oov_list):
# 1st round filtering
processed_pred_seq, processed_pred_str_seqs, processed_pred_score = process_predseqs(pred_seq, src_str,
oov, opt.id2word,
opt,
must_appear_in_src=opt.must_appear_in_src)
match_list = get_match_result(true_seqs=trg_str, pred_seqs=processed_pred_str_seqs)
def train_model(model, optimizer, criterion, train_data_loader, valid_data_loader, test_data_loader, opt):
generator = SequenceGenerator(model,
eos_id=opt.word2id[pykp.io.EOS_WORD],
beam_size=opt.beam_size,
max_sequence_length=opt.max_sent_length
)
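    # beam-search decoder used for the periodic validation/test evaluation below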
logging.info('====================== Checking GPU Availability =========================')
if torch.cuda.is_available():
if isinstance(opt.gpuid, int):
opt.gpuid = [opt.gpuid]
logging.info('Running on GPU! devices=%s' % str(opt.gpuid))
# model = nn.DataParallel(model, device_ids=opt.gpuid)
else:
logging.info('Running on CPU!')
logging.info('====================== Start Training =========================')
checkpoint_names = []
train_history_losses = []
valid_history_losses = []
test_history_losses = []
# best_loss = sys.float_info.max # for normal training/testing loss (likelihood)
best_loss = 0.0 # for f-score
stop_increasing = 0
train_losses = []
total_batch = 0
early_stop_flag = False
if opt.train_from:
state_path = opt.train_from.replace('.model', '.state')
logging.info('Loading training state from: %s' % state_path)
if os.path.exists(state_path):
(epoch, total_batch, best_loss, stop_increasing, checkpoint_names, train_history_losses, valid_history_losses,
test_history_losses) = torch.load(open(state_path, 'rb'))
opt.start_epoch = epoch
    for epoch in range(opt.start_epoch, opt.epochs):
if early_stop_flag:
break
progbar = Progbar(title='Training', target=len(train_data_loader), batch_size=train_data_loader.batch_size,
total_examples=len(train_data_loader.dataset))
for batch_i, batch in enumerate(train_data_loader):
model.train()
batch_i += 1 # for the aesthetics of printing
total_batch += 1
one2many_batch, one2one_batch = batch
src, trg, trg_target, trg_copy_target, src_ext, oov_lists = one2one_batch
max_oov_number = max([len(oov) for oov in oov_lists])
print("src size - ",src.size())
print("target size - ",trg.size())
if torch.cuda.is_available():
src = src.cuda()
trg = trg.cuda()
trg_target = trg_target.cuda()
trg_copy_target = trg_copy_target.cuda()
src_ext = src_ext.cuda()
optimizer.zero_grad()
'''
Training with Maximum Likelihood (word-level error)
'''
decoder_log_probs, _, _ = model.forward(src, trg, src_ext, oov_lists)
            # simply average losses of all the predictions
# IMPORTANT, must use logits instead of probs to compute the loss, otherwise it's super super slow at the beginning (grads of probs are small)!
start_time = time.time()
if not opt.copy_model:
ml_loss = criterion(
decoder_log_probs.contiguous().view(-1, opt.vocab_size),
trg_target.contiguous().view(-1)
)
else:
ml_loss = criterion(
decoder_log_probs.contiguous().view(-1, opt.vocab_size + max_oov_number),
trg_copy_target.contiguous().view(-1)
)
'''
Training with Reinforcement Learning (instance-level reward f-score)
'''
src_list, trg_list, _, trg_copy_target_list, src_oov_map_list, oov_list, src_str_list, trg_str_list = one2many_batch
if torch.cuda.is_available():
src_list = src_list.cuda()
src_oov_map_list = src_oov_map_list.cuda()
rl_loss = get_loss_rl()
start_time = time.time()
ml_loss.backward()
print("--backward- %s seconds ---" % (time.time() - start_time))
if opt.max_grad_norm > 0:
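                # clip_grad_norm returns the total gradient norm *before* clipping;
                # after_norm recomputes the norm by hand to confirm the clip took effect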
pre_norm = torch.nn.utils.clip_grad_norm(model.parameters(), opt.max_grad_norm)
after_norm = (sum([p.grad.data.norm(2) ** 2 for p in model.parameters() if p.grad is not None])) ** (1.0 / 2)
logging.info('clip grad (%f -> %f)' % (pre_norm, after_norm))
optimizer.step()
train_losses.append(ml_loss.data[0])
progbar.update(epoch, batch_i, [('train_loss', ml_loss.data[0]), ('PPL', ml_loss.data[0])])
if batch_i > 1 and batch_i % opt.report_every == 0:
logging.info('====================== %d =========================' % (batch_i))
logging.info('Epoch : %d Minibatch : %d, Loss=%.5f' % (epoch, batch_i, np.mean(ml_loss.data[0])))
sampled_size = 2
logging.info('Printing predictions on %d sampled examples by greedy search' % sampled_size)
if torch.cuda.is_available():
src = src.data.cpu().numpy()
decoder_log_probs = decoder_log_probs.data.cpu().numpy()
max_words_pred = decoder_log_probs.argmax(axis=-1)
trg_target = trg_target.data.cpu().numpy()
trg_copy_target = trg_copy_target.data.cpu().numpy()
else:
src = src.data.numpy()
decoder_log_probs = decoder_log_probs.data.numpy()
max_words_pred = decoder_log_probs.argmax(axis=-1)
trg_target = trg_target.data.numpy()
trg_copy_target = trg_copy_target.data.numpy()
sampled_trg_idx = np.random.random_integers(low=0, high=len(trg) - 1, size=sampled_size)
src = src[sampled_trg_idx]
oov_lists = [oov_lists[i] for i in sampled_trg_idx]
max_words_pred = [max_words_pred[i] for i in sampled_trg_idx]
decoder_log_probs = decoder_log_probs[sampled_trg_idx]
if not opt.copy_model:
trg_target = [trg_target[i] for i in sampled_trg_idx] # use the real target trg_loss (the starting <BOS> has been removed and contains oov ground-truth)
else:
trg_target = [trg_copy_target[i] for i in sampled_trg_idx]
for i, (src_wi, pred_wi, trg_i, oov_i) in enumerate(zip(src, max_words_pred, trg_target, oov_lists)):
nll_prob = -np.sum([decoder_log_probs[i][l][pred_wi[l]] for l in range(len(trg_i))])
find_copy = np.any([x >= opt.vocab_size for x in src_wi])
has_copy = np.any([x >= opt.vocab_size for x in trg_i])
sentence_source = [opt.id2word[x] if x < opt.vocab_size else oov_i[x-opt.vocab_size] for x in src_wi]
sentence_pred = [opt.id2word[x] if x < opt.vocab_size else oov_i[x-opt.vocab_size] for x in pred_wi]
sentence_real = [opt.id2word[x] if x < opt.vocab_size else oov_i[x-opt.vocab_size] for x in trg_i]
sentence_source = sentence_source[:sentence_source.index('<pad>')] if '<pad>' in sentence_source else sentence_source
sentence_pred = sentence_pred[:sentence_pred.index('<pad>')] if '<pad>' in sentence_pred else sentence_pred
sentence_real = sentence_real[:sentence_real.index('<pad>')] if '<pad>' in sentence_real else sentence_real
logging.info('==================================================')
logging.info('Source: %s ' % (' '.join(sentence_source)))
logging.info('\t\tPred : %s (%.4f)' % (' '.join(sentence_pred), nll_prob) + (' [FIND COPY]' if find_copy else ''))
logging.info('\t\tReal : %s ' % (' '.join(sentence_real)) + (' [HAS COPY]' + str(trg_i) if has_copy else ''))
if total_batch > 1 and total_batch % opt.run_valid_every == 0:
logging.info('*' * 50)
logging.info('Run validing and testing @Epoch=%d,#(Total batch)=%d' % (epoch, total_batch))
# valid_losses = _valid_error(valid_data_loader, model, criterion, epoch, opt)
# valid_history_losses.append(valid_losses)
valid_score_dict = evaluate_beam_search(generator, valid_data_loader, opt, title='valid', epoch=epoch, predict_save_path=opt.exp_path + '/epoch%d_batch%d_total_batch%d' % (epoch, batch_i, total_batch))
test_score_dict = evaluate_beam_search(generator, test_data_loader, opt, title='test', epoch=epoch, predict_save_path=opt.exp_path + '/epoch%d_batch%d_total_batch%d' % (epoch, batch_i, total_batch))
checkpoint_names.append('epoch=%d-batch=%d-total_batch=%d' % (epoch, batch_i, total_batch))
train_history_losses.append(copy.copy(train_losses))
valid_history_losses.append(valid_score_dict)
test_history_losses.append(test_score_dict)
train_losses = []
scores = [train_history_losses]
curve_names = ['Training Error']
scores += [[result_dict[name] for result_dict in valid_history_losses] for name in opt.report_score_names]
curve_names += ['Valid-'+name for name in opt.report_score_names]
scores += [[result_dict[name] for result_dict in test_history_losses] for name in opt.report_score_names]
curve_names += ['Test-'+name for name in opt.report_score_names]
scores = [np.asarray(s) for s in scores]
# Plot the learning curve
plot_learning_curve_and_write_csv(scores=scores,
curve_names=curve_names,
checkpoint_names=checkpoint_names,
title='Training Validation & Test',
save_path=opt.exp_path + '/[epoch=%d,batch=%d,total_batch=%d]train_valid_test_curve.png' % (epoch, batch_i, total_batch))
                '''
                determine whether to stop training early (now: has the f-score increased; previously: has the valid error decreased)
                '''
valid_loss = np.average(valid_history_losses[-1][opt.report_score_names[0]])
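                # the tracked "loss" is actually an f-score (see the best_loss
                # initialisation above), so higher is better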
is_best_loss = valid_loss > best_loss
rate_of_change = float(valid_loss - best_loss) / float(best_loss) if float(best_loss) > 0 else 0.0
# valid error doesn't increase
if rate_of_change <= 0:
stop_increasing += 1
else:
stop_increasing = 0
if is_best_loss:
logging.info('Validation: update best loss (%.4f --> %.4f), rate of change (ROC)=%.2f' % (
best_loss, valid_loss, rate_of_change * 100))
else:
logging.info('Validation: best loss is not updated for %d times (%.4f --> %.4f), rate of change (ROC)=%.2f' % (
stop_increasing, best_loss, valid_loss, rate_of_change * 100))
best_loss = max(valid_loss, best_loss)
# only store the checkpoints that make better validation performances
if total_batch > 1 and (total_batch % opt.save_model_every == 0 or is_best_loss): #epoch >= opt.start_checkpoint_at and
# Save the checkpoint
logging.info('Saving checkpoint to: %s' % os.path.join(opt.save_path, '%s.epoch=%d.batch=%d.total_batch=%d.error=%f' % (opt.exp, epoch, batch_i, total_batch, valid_loss) + '.model'))
torch.save(
model.state_dict(),
open(os.path.join(opt.save_path, '%s.epoch=%d.batch=%d.total_batch=%d' % (opt.exp, epoch, batch_i, total_batch) + '.model'), 'wb')
)
torch.save(
(epoch, total_batch, best_loss, stop_increasing, checkpoint_names, train_history_losses, valid_history_losses, test_history_losses),
open(os.path.join(opt.save_path, '%s.epoch=%d.batch=%d.total_batch=%d' % (opt.exp, epoch, batch_i, total_batch) + '.state'), 'wb')
)
if stop_increasing >= opt.early_stop_tolerance:
                    logging.info('Have not increased for %d epochs, early stop training' % stop_increasing)
early_stop_flag = True
break
logging.info('*' * 50)
def load_data_vocab(opt, load_train=True):
logging.info("Loading vocab from disk: %s" % (opt.vocab))
    word2id, id2word, vocab = torch.load(opt.vocab)  # note: torch.load's 2nd positional arg is map_location, not a file mode
# one2one data loader
    logging.info("Loading train and validation data from '%s'" % opt.data)
'''
train_one2one = torch.load(opt.data + '.train.one2one.pt', 'wb')
valid_one2one = torch.load(opt.data + '.valid.one2one.pt', 'wb')
train_one2one_dataset = KeyphraseDataset(train_one2one, word2id=word2id)
valid_one2one_dataset = KeyphraseDataset(valid_one2one, word2id=word2id)
train_one2one_loader = DataLoader(dataset=train_one2one_dataset, collate_fn=train_one2one_dataset.collate_fn_one2one, num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True, shuffle=True)
valid_one2one_loader = DataLoader(dataset=valid_one2one_dataset, collate_fn=valid_one2one_dataset.collate_fn_one2one, num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True, shuffle=False)
'''
logging.info('====================== Dataset =========================')
# one2many data loader
if load_train:
        train_one2many = torch.load(opt.data + '.train.one2many.pt')
train_one2many_dataset = KeyphraseDataset(train_one2many, word2id=word2id, id2word=id2word, type='one2many')
train_one2many_loader = KeyphraseDataLoader(dataset=train_one2many_dataset, collate_fn=train_one2many_dataset.collate_fn_one2many, num_workers=opt.batch_workers, max_batch_pair=opt.batch_size, pin_memory=True, shuffle=True)
logging.info('#(train data size: #(one2many pair)=%d, #(one2one pair)=%d, #(batch)=%d' % (len(train_one2many_loader.dataset), train_one2many_loader.one2one_number(), len(train_one2many_loader)))
else:
train_one2many_loader = None
    valid_one2many = torch.load(opt.data + '.valid.one2many.pt')
    test_one2many = torch.load(opt.data + '.test.one2many.pt')
# !important. As it takes too long to do beam search, thus reduce the size of validation and test datasets
valid_one2many = valid_one2many[:2000]
test_one2many = test_one2many[:2000]
valid_one2many_dataset = KeyphraseDataset(valid_one2many, word2id=word2id, id2word=id2word, type='one2many', include_original=True)
test_one2many_dataset = KeyphraseDataset(test_one2many, word2id=word2id, id2word=id2word, type='one2many', include_original=True)
"""
# temporary code, exporting test data for Theano model
for e_id, e in enumerate(test_one2many_dataset.examples):
with open(os.path.join('data', 'new_kp20k_for_theano_model', 'text', '%d.txt' % e_id), 'w') as t_file:
t_file.write(' '.join(e['src_str']))
with open(os.path.join('data', 'new_kp20k_for_theano_model', 'keyphrase', '%d.txt' % e_id), 'w') as t_file:
t_file.writelines([(' '.join(t))+'\n' for t in e['trg_str']])
exit()
"""
valid_one2many_loader = KeyphraseDataLoader(dataset=valid_one2many_dataset, collate_fn=valid_one2many_dataset.collate_fn_one2many, num_workers=opt.batch_workers, max_batch_pair=opt.beam_search_batch_size, pin_memory=True, shuffle=False)
test_one2many_loader = KeyphraseDataLoader(dataset=test_one2many_dataset, collate_fn=test_one2many_dataset.collate_fn_one2many, num_workers=opt.batch_workers, max_batch_pair=opt.beam_search_batch_size, pin_memory=True, shuffle=False)
opt.word2id = word2id
opt.id2word = id2word
opt.vocab = vocab
logging.info('#(valid data size: #(one2many pair)=%d, #(one2one pair)=%d, #(batch)=%d' % (len(valid_one2many_loader.dataset), valid_one2many_loader.one2one_number(), len(valid_one2many_loader)))
logging.info('#(test data size: #(one2many pair)=%d, #(one2one pair)=%d, #(batch)=%d' % (len(test_one2many_loader.dataset), test_one2many_loader.one2one_number(), len(test_one2many_loader)))
logging.info('#(vocab)=%d' % len(vocab))
logging.info('#(vocab used)=%d' % opt.vocab_size)
return train_one2many_loader, valid_one2many_loader, test_one2many_loader, word2id, id2word, vocab
def init_optimizer_criterion(model, opt):
"""
    Mask the <pad> token when computing the loss; a weight matrix was used before, but it is not handy for the copy model, so ignore_index is used instead.
:param model:
:param opt:
:return:
"""
'''
if not opt.copy_model:
weight_mask = torch.ones(opt.vocab_size).cuda() if torch.cuda.is_available() else torch.ones(opt.vocab_size)
else:
weight_mask = torch.ones(opt.vocab_size + opt.max_unk_words).cuda() if torch.cuda.is_available() else torch.ones(opt.vocab_size + opt.max_unk_words)
weight_mask[opt.word2id[pykp.IO.PAD_WORD]] = 0
criterion = torch.nn.NLLLoss(weight=weight_mask)
optimizer = Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=opt.learning_rate)
# optimizer = torch.optim.Adadelta(model.parameters(), lr=0.1)
# optimizer = torch.optim.RMSprop(model.parameters(), lr=0.1)
'''
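    # ignore_index zeroes the loss at <pad> positions; reduce=False keeps a
    # per-token loss tensor so the caller can mask or re-weight it before averaging.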
criterion = torch.nn.NLLLoss(ignore_index=opt.word2id[pykp.io.PAD_WORD], reduce=False)
optimizer = Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=opt.learning_rate)
if torch.cuda.is_available():
criterion = criterion.cuda()
return optimizer, criterion
def init_model(opt):
logging.info('====================== Model Parameters =========================')
if not opt.copy_model:
logging.info('Train a normal seq2seq model')
model = Seq2SeqLSTMAttention(
emb_dim=opt.word_vec_size,
vocab_size=opt.vocab_size,
src_hidden_dim=opt.rnn_size,
trg_hidden_dim=opt.rnn_size,
ctx_hidden_dim=opt.rnn_size,
attention_mode='dot',
batch_size=opt.batch_size,
bidirectional=opt.bidirectional,
            pad_token_src=opt.word2id[pykp.io.PAD_WORD],
            pad_token_trg=opt.word2id[pykp.io.PAD_WORD],
nlayers_src=opt.enc_layers,
nlayers_trg=opt.dec_layers,
dropout=opt.dropout,
must_teacher_forcing=opt.must_teacher_forcing,
teacher_forcing_ratio=opt.teacher_forcing_ratio,
scheduled_sampling=opt.scheduled_sampling,
scheduled_sampling_batches=opt.scheduled_sampling_batches,
)
else:
logging.info('Train a seq2seq model with copy mechanism')
model = Seq2SeqLSTMAttentionCopy(
emb_dim=opt.word_vec_size,
vocab_size=opt.vocab_size,
src_hidden_dim=opt.rnn_size,
trg_hidden_dim=opt.rnn_size,
ctx_hidden_dim=opt.rnn_size,
attention_mode='dot',
batch_size=opt.batch_size,
bidirectional=opt.bidirectional,
            pad_token_src=opt.word2id[pykp.io.PAD_WORD],
            pad_token_trg=opt.word2id[pykp.io.PAD_WORD],
nlayers_src=opt.enc_layers,
nlayers_trg=opt.dec_layers,
dropout=opt.dropout,
must_teacher_forcing=opt.must_teacher_forcing,
teacher_forcing_ratio=opt.teacher_forcing_ratio,
scheduled_sampling=opt.scheduled_sampling,
scheduled_sampling_batches=opt.scheduled_sampling_batches,
unk_word=opt.word2id[pykp.io.UNK_WORD],
)
if torch.cuda.is_available():
model = model.cuda()
if opt.train_from:
logging.info("loading previous checkpoint from %s" % opt.train_from)
if torch.cuda.is_available():
checkpoint = torch.load(open(opt.train_from, 'rb'))
else:
checkpoint = torch.load(
open(opt.train_from, 'rb'), map_location=lambda storage, loc: storage
)
print(checkpoint.keys())
        # compatibility: checkpoints saved with DataParallel prefix keys with 'module.'
        checkpoint = {(k[7:] if k.startswith('module.') else k): v for k, v in checkpoint.items()}
model.load_state_dict(checkpoint)
utils.tally_parameters(model)
return model
def main():
# load settings for training
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
config.preprocess_opts(parser)
config.model_opts(parser)
config.train_opts(parser)
config.predict_opts(parser)
opt = parser.parse_args()
if opt.seed > 0:
torch.manual_seed(opt.seed)
print(opt.gpuid)
if torch.cuda.is_available() and not opt.gpuid:
opt.gpuid = 0
if hasattr(opt, 'copy_model') and opt.copy_model:
opt.exp += '.copy'
if hasattr(opt, 'bidirectional'):
if opt.bidirectional:
opt.exp += '.bi-directional'
else:
opt.exp += '.uni-directional'
# fill time into the name
if opt.exp_path.find('%s') > 0:
opt.exp_path = opt.exp_path % (opt.exp, opt.timemark)
opt.save_path = opt.save_path % (opt.exp, opt.timemark)
if not os.path.exists(opt.exp_path):
os.makedirs(opt.exp_path)
if not os.path.exists(opt.save_path):
os.makedirs(opt.save_path)
config.init_logging(opt.exp_path + '/output.log')
logging.info('Parameters:')
    for k, v in opt.__dict__.items():
        logging.info('%s : %s' % (k, str(v)))
try:
train_data_loader, valid_data_loader, test_data_loader, word2id, id2word, vocab = load_data_vocab(opt)
model = init_model(opt)
optimizer, criterion = init_optimizer_criterion(model, opt)
train_model(model, optimizer, criterion, train_data_loader, valid_data_loader, test_data_loader, opt)
    except Exception:
        logging.exception("training failed")
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate_beam_search"
] |
[((3646, 3779), 'beam_search.SequenceGenerator', 'SequenceGenerator', (['model'], {'eos_id': 'opt.word2id[pykp.io.EOS_WORD]', 'beam_size': 'opt.beam_size', 'max_sequence_length': 'opt.max_sent_length'}), '(model, eos_id=opt.word2id[pykp.io.EOS_WORD], beam_size=\n opt.beam_size, max_sequence_length=opt.max_sent_length)\n', (3663, 3779), False, 'from beam_search import SequenceGenerator\n'), ((3917, 4019), 'logging.info', 'logging.info', (['"""====================== Checking GPU Availability ========================="""'], {}), "(\n '====================== Checking GPU Availability ========================='\n )\n", (3929, 4019), False, 'import logging\n'), ((4017, 4042), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4040, 4042), False, 'import torch\n'), ((4305, 4391), 'logging.info', 'logging.info', (['"""====================== Start Training ========================="""'], {}), "(\n '====================== Start Training =========================')\n", (4317, 4391), False, 'import logging\n'), ((16530, 16585), 'logging.info', 'logging.info', (["('Loading vocab from disk: %s' % opt.vocab)"], {}), "('Loading vocab from disk: %s' % opt.vocab)\n", (16542, 16585), False, 'import logging\n'), ((16618, 16645), 'torch.load', 'torch.load', (['opt.vocab', '"""wb"""'], {}), "(opt.vocab, 'wb')\n", (16628, 16645), False, 'import torch\n'), ((16677, 16745), 'logging.info', 'logging.info', (['("Loading train and validate data from \'%s\'" % opt.data)'], {}), '("Loading train and validate data from \'%s\'" % opt.data)\n', (16689, 16745), False, 'import logging\n'), ((17485, 17559), 'logging.info', 'logging.info', (['"""====================== Dataset ========================="""'], {}), "('====================== Dataset =========================')\n", (17497, 17559), False, 'import logging\n'), ((18303, 18352), 'torch.load', 'torch.load', (["(opt.data + '.valid.one2many.pt')", '"""wb"""'], {}), "(opt.data + '.valid.one2many.pt', 'wb')\n", (18313, 18352), False, 'import torch\n'), ((18374, 18422), 'torch.load', 'torch.load', (["(opt.data + '.test.one2many.pt')", '"""wb"""'], {}), "(opt.data + '.test.one2many.pt', 'wb')\n", (18384, 18422), False, 'import torch\n'), ((18650, 18761), 'pykp.io.KeyphraseDataset', 'KeyphraseDataset', (['valid_one2many'], {'word2id': 'word2id', 'id2word': 'id2word', 'type': '"""one2many"""', 'include_original': '(True)'}), "(valid_one2many, word2id=word2id, id2word=id2word, type=\n 'one2many', include_original=True)\n", (18666, 18761), False, 'from pykp.io import KeyphraseDataset\n'), ((18786, 18896), 'pykp.io.KeyphraseDataset', 'KeyphraseDataset', (['test_one2many'], {'word2id': 'word2id', 'id2word': 'id2word', 'type': '"""one2many"""', 'include_original': '(True)'}), "(test_one2many, word2id=word2id, id2word=id2word, type=\n 'one2many', include_original=True)\n", (18802, 18896), False, 'from pykp.io import KeyphraseDataset\n'), ((19421, 19648), 'pykp.dataloader.KeyphraseDataLoader', 'KeyphraseDataLoader', ([], {'dataset': 'valid_one2many_dataset', 'collate_fn': 'valid_one2many_dataset.collate_fn_one2many', 'num_workers': 'opt.batch_workers', 'max_batch_pair': 'opt.beam_search_batch_size', 'pin_memory': '(True)', 'shuffle': '(False)'}), '(dataset=valid_one2many_dataset, collate_fn=\n valid_one2many_dataset.collate_fn_one2many, num_workers=opt.\n batch_workers, max_batch_pair=opt.beam_search_batch_size, pin_memory=\n True, shuffle=False)\n', (19440, 19648), False, 'from pykp.dataloader import KeyphraseDataLoader\n'), ((19663, 19888), 
'pykp.dataloader.KeyphraseDataLoader', 'KeyphraseDataLoader', ([], {'dataset': 'test_one2many_dataset', 'collate_fn': 'test_one2many_dataset.collate_fn_one2many', 'num_workers': 'opt.batch_workers', 'max_batch_pair': 'opt.beam_search_batch_size', 'pin_memory': '(True)', 'shuffle': '(False)'}), '(dataset=test_one2many_dataset, collate_fn=\n test_one2many_dataset.collate_fn_one2many, num_workers=opt.\n batch_workers, max_batch_pair=opt.beam_search_batch_size, pin_memory=\n True, shuffle=False)\n', (19682, 19888), False, 'from pykp.dataloader import KeyphraseDataLoader\n'), ((20397, 20446), 'logging.info', 'logging.info', (["('#(vocab used)=%d' % opt.vocab_size)"], {}), "('#(vocab used)=%d' % opt.vocab_size)\n", (20409, 20446), False, 'import logging\n'), ((21470, 21544), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {'ignore_index': 'opt.word2id[pykp.io.PAD_WORD]', 'reduce': '(False)'}), '(ignore_index=opt.word2id[pykp.io.PAD_WORD], reduce=False)\n', (21486, 21544), False, 'import torch\n'), ((21658, 21683), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21681, 21683), False, 'import torch\n'), ((21781, 21869), 'logging.info', 'logging.info', (['"""====================== Model Parameters ========================="""'], {}), "(\n '====================== Model Parameters =========================')\n", (21793, 21869), False, 'import logging\n'), ((23774, 23799), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23797, 23799), False, 'import torch\n'), ((24439, 24468), 'utils.tally_parameters', 'utils.tally_parameters', (['model'], {}), '(model)\n', (24461, 24468), False, 'import utils\n'), ((24546, 24654), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train.py"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='train.py', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (24569, 24654), False, 'import argparse\n'), ((24671, 24701), 'config.preprocess_opts', 'config.preprocess_opts', (['parser'], {}), '(parser)\n', (24693, 24701), False, 'import config\n'), ((24706, 24731), 'config.model_opts', 'config.model_opts', (['parser'], {}), '(parser)\n', (24723, 24731), False, 'import config\n'), ((24736, 24761), 'config.train_opts', 'config.train_opts', (['parser'], {}), '(parser)\n', (24753, 24761), False, 'import config\n'), ((24766, 24793), 'config.predict_opts', 'config.predict_opts', (['parser'], {}), '(parser)\n', (24785, 24793), False, 'import config\n'), ((25569, 25618), 'config.init_logging', 'config.init_logging', (["(opt.exp_path + '/output.log')"], {}), "(opt.exp_path + '/output.log')\n", (25588, 25618), False, 'import config\n'), ((25624, 25651), 'logging.info', 'logging.info', (['"""Parameters:"""'], {}), "('Parameters:')\n", (25636, 25651), False, 'import logging\n'), ((861, 872), 'time.time', 'time.time', ([], {}), '()\n', (870, 872), False, 'import time\n'), ((929, 940), 'time.time', 'time.time', ([], {}), '()\n', (938, 940), False, 'import time\n'), ((1645, 1670), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1668, 1670), False, 'import torch\n'), ((4268, 4299), 'logging.info', 'logging.info', (['"""Running on CPU!"""'], {}), "('Running on CPU!')\n", (4280, 4299), False, 'import logging\n'), ((4852, 4912), 'logging.info', 'logging.info', (["('Loading training state from: %s' % state_path)"], {}), "('Loading training state from: %s' % state_path)\n", (4864, 4912), False, 'import logging\n'), ((4924, 4950), 'os.path.exists', 
'os.path.exists', (['state_path'], {}), '(state_path)\n', (4938, 4950), False, 'import os\n'), ((17631, 17680), 'torch.load', 'torch.load', (["(opt.data + '.train.one2many.pt')", '"""wb"""'], {}), "(opt.data + '.train.one2many.pt', 'wb')\n", (17641, 17680), False, 'import torch\n'), ((17714, 17802), 'pykp.io.KeyphraseDataset', 'KeyphraseDataset', (['train_one2many'], {'word2id': 'word2id', 'id2word': 'id2word', 'type': '"""one2many"""'}), "(train_one2many, word2id=word2id, id2word=id2word, type=\n 'one2many')\n", (17730, 17802), False, 'from pykp.io import KeyphraseDataset\n'), ((17831, 18045), 'pykp.dataloader.KeyphraseDataLoader', 'KeyphraseDataLoader', ([], {'dataset': 'train_one2many_dataset', 'collate_fn': 'train_one2many_dataset.collate_fn_one2many', 'num_workers': 'opt.batch_workers', 'max_batch_pair': 'opt.batch_size', 'pin_memory': '(True)', 'shuffle': '(True)'}), '(dataset=train_one2many_dataset, collate_fn=\n train_one2many_dataset.collate_fn_one2many, num_workers=opt.\n batch_workers, max_batch_pair=opt.batch_size, pin_memory=True, shuffle=True\n )\n', (17850, 18045), False, 'from pykp.dataloader import KeyphraseDataLoader\n'), ((21901, 21945), 'logging.info', 'logging.info', (['"""Train a normal seq2seq model"""'], {}), "('Train a normal seq2seq model')\n", (21913, 21945), False, 'import logging\n'), ((21962, 22611), 'pykp.model.Seq2SeqLSTMAttention', 'Seq2SeqLSTMAttention', ([], {'emb_dim': 'opt.word_vec_size', 'vocab_size': 'opt.vocab_size', 'src_hidden_dim': 'opt.rnn_size', 'trg_hidden_dim': 'opt.rnn_size', 'ctx_hidden_dim': 'opt.rnn_size', 'attention_mode': '"""dot"""', 'batch_size': 'opt.batch_size', 'bidirectional': 'opt.bidirectional', 'pad_token_src': 'opt.word2id[pykp.io.PAD_WORD]', 'pad_token_trg': 'opt.word2id[pykp.io.PAD_WORD]', 'nlayers_src': 'opt.enc_layers', 'nlayers_trg': 'opt.dec_layers', 'dropout': 'opt.dropout', 'must_teacher_forcing': 'opt.must_teacher_forcing', 'teacher_forcing_ratio': 'opt.teacher_forcing_ratio', 'scheduled_sampling': 'opt.scheduled_sampling', 'scheduled_sampling_batches': 'opt.scheduled_sampling_batches'}), "(emb_dim=opt.word_vec_size, vocab_size=opt.vocab_size,\n src_hidden_dim=opt.rnn_size, trg_hidden_dim=opt.rnn_size,\n ctx_hidden_dim=opt.rnn_size, attention_mode='dot', batch_size=opt.\n batch_size, bidirectional=opt.bidirectional, pad_token_src=opt.word2id[\n pykp.io.PAD_WORD], pad_token_trg=opt.word2id[pykp.io.PAD_WORD],\n nlayers_src=opt.enc_layers, nlayers_trg=opt.dec_layers, dropout=opt.\n dropout, must_teacher_forcing=opt.must_teacher_forcing,\n teacher_forcing_ratio=opt.teacher_forcing_ratio, scheduled_sampling=opt\n .scheduled_sampling, scheduled_sampling_batches=opt.\n scheduled_sampling_batches)\n", (21982, 22611), False, 'from pykp.model import Seq2SeqLSTMAttention, Seq2SeqLSTMAttentionCopy\n'), ((22808, 22865), 'logging.info', 'logging.info', (['"""Train a seq2seq model with copy mechanism"""'], {}), "('Train a seq2seq model with copy mechanism')\n", (22820, 22865), False, 'import logging\n'), ((22882, 23576), 'pykp.model.Seq2SeqLSTMAttentionCopy', 'Seq2SeqLSTMAttentionCopy', ([], {'emb_dim': 'opt.word_vec_size', 'vocab_size': 'opt.vocab_size', 'src_hidden_dim': 'opt.rnn_size', 'trg_hidden_dim': 'opt.rnn_size', 'ctx_hidden_dim': 'opt.rnn_size', 'attention_mode': '"""dot"""', 'batch_size': 'opt.batch_size', 'bidirectional': 'opt.bidirectional', 'pad_token_src': 'opt.word2id[pykp.io.PAD_WORD]', 'pad_token_trg': 'opt.word2id[pykp.io.PAD_WORD]', 'nlayers_src': 'opt.enc_layers', 'nlayers_trg': 'opt.dec_layers', 'dropout': 
'opt.dropout', 'must_teacher_forcing': 'opt.must_teacher_forcing', 'teacher_forcing_ratio': 'opt.teacher_forcing_ratio', 'scheduled_sampling': 'opt.scheduled_sampling', 'scheduled_sampling_batches': 'opt.scheduled_sampling_batches', 'unk_word': 'opt.word2id[pykp.io.UNK_WORD]'}), "(emb_dim=opt.word_vec_size, vocab_size=opt.\n vocab_size, src_hidden_dim=opt.rnn_size, trg_hidden_dim=opt.rnn_size,\n ctx_hidden_dim=opt.rnn_size, attention_mode='dot', batch_size=opt.\n batch_size, bidirectional=opt.bidirectional, pad_token_src=opt.word2id[\n pykp.io.PAD_WORD], pad_token_trg=opt.word2id[pykp.io.PAD_WORD],\n nlayers_src=opt.enc_layers, nlayers_trg=opt.dec_layers, dropout=opt.\n dropout, must_teacher_forcing=opt.must_teacher_forcing,\n teacher_forcing_ratio=opt.teacher_forcing_ratio, scheduled_sampling=opt\n .scheduled_sampling, scheduled_sampling_batches=opt.\n scheduled_sampling_batches, unk_word=opt.word2id[pykp.io.UNK_WORD])\n", (22906, 23576), False, 'from pykp.model import Seq2SeqLSTMAttention, Seq2SeqLSTMAttentionCopy\n'), ((23862, 23930), 'logging.info', 'logging.info', (["('loading previous checkpoint from %s' % opt.train_from)"], {}), "('loading previous checkpoint from %s' % opt.train_from)\n", (23874, 23930), False, 'import logging\n'), ((23942, 23967), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23965, 23967), False, 'import torch\n'), ((24854, 24881), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (24871, 24881), False, 'import torch\n'), ((24911, 24936), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (24934, 24936), False, 'import torch\n'), ((25423, 25451), 'os.path.exists', 'os.path.exists', (['opt.exp_path'], {}), '(opt.exp_path)\n', (25437, 25451), False, 'import os\n'), ((25461, 25486), 'os.makedirs', 'os.makedirs', (['opt.exp_path'], {}), '(opt.exp_path)\n', (25472, 25486), False, 'import os\n'), ((25498, 25527), 'os.path.exists', 'os.path.exists', (['opt.save_path'], {}), '(opt.save_path)\n', (25512, 25527), False, 'import os\n'), ((25537, 25563), 'os.makedirs', 'os.makedirs', (['opt.save_path'], {}), '(opt.save_path)\n', (25548, 25563), False, 'import os\n'), ((5968, 5993), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5991, 5993), False, 'import torch\n'), ((6661, 6672), 'time.time', 'time.time', ([], {}), '()\n', (6670, 6672), False, 'import time\n'), ((7378, 7403), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7401, 7403), False, 'import torch\n'), ((7569, 7580), 'time.time', 'time.time', ([], {}), '()\n', (7578, 7580), False, 'import time\n'), ((26101, 26129), 'logging.exception', 'logging.exception', (['"""message"""'], {}), "('message')\n", (26118, 26129), False, 'import logging\n'), ((7966, 8027), 'logging.info', 'logging.info', (["('clip grad (%f -> %f)' % (pre_norm, after_norm))"], {}), "('clip grad (%f -> %f)' % (pre_norm, after_norm))\n", (7978, 8027), False, 'import logging\n'), ((8294, 8373), 'logging.info', 'logging.info', (["('====================== %d =========================' % batch_i)"], {}), "('====================== %d =========================' % batch_i)\n", (8306, 8373), False, 'import logging\n'), ((8540, 8635), 'logging.info', 'logging.info', (["('Printing predictions on %d sampled examples by greedy search' % sampled_size)"], {}), "('Printing predictions on %d sampled examples by greedy search' %\n sampled_size)\n", (8552, 8635), False, 'import logging\n'), ((8652, 8677), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (8675, 8677), False, 'import torch\n'), ((11836, 11858), 'logging.info', 'logging.info', (["('*' * 50)"], {}), "('*' * 50)\n", (11848, 11858), False, 'import logging\n'), ((11875, 11971), 'logging.info', 'logging.info', (["('Run validing and testing @Epoch=%d,#(Total batch)=%d' % (epoch, total_batch))"], {}), "('Run validing and testing @Epoch=%d,#(Total batch)=%d' % (\n epoch, total_batch))\n", (11887, 11971), False, 'import logging\n'), ((12161, 12352), 'evaluate.evaluate_beam_search', 'evaluate_beam_search', (['generator', 'valid_data_loader', 'opt'], {'title': '"""valid"""', 'epoch': 'epoch', 'predict_save_path': "(opt.exp_path + '/epoch%d_batch%d_total_batch%d' % (epoch, batch_i,\n total_batch))"}), "(generator, valid_data_loader, opt, title='valid',\n epoch=epoch, predict_save_path=opt.exp_path + \n '/epoch%d_batch%d_total_batch%d' % (epoch, batch_i, total_batch))\n", (12181, 12352), False, 'from evaluate import evaluate_beam_search\n'), ((12380, 12570), 'evaluate.evaluate_beam_search', 'evaluate_beam_search', (['generator', 'test_data_loader', 'opt'], {'title': '"""test"""', 'epoch': 'epoch', 'predict_save_path': "(opt.exp_path + '/epoch%d_batch%d_total_batch%d' % (epoch, batch_i,\n total_batch))"}), "(generator, test_data_loader, opt, title='test', epoch=\n epoch, predict_save_path=opt.exp_path + \n '/epoch%d_batch%d_total_batch%d' % (epoch, batch_i, total_batch))\n", (12400, 12570), False, 'from evaluate import evaluate_beam_search\n'), ((13517, 13801), 'utils.plot_learning_curve_and_write_csv', 'plot_learning_curve_and_write_csv', ([], {'scores': 'scores', 'curve_names': 'curve_names', 'checkpoint_names': 'checkpoint_names', 'title': '"""Training Validation & Test"""', 'save_path': "(opt.exp_path + \n '/[epoch=%d,batch=%d,total_batch=%d]train_valid_test_curve.png' % (\n epoch, batch_i, total_batch))"}), "(scores=scores, curve_names=curve_names,\n checkpoint_names=checkpoint_names, title='Training Validation & Test',\n save_path=opt.exp_path + \n '/[epoch=%d,batch=%d,total_batch=%d]train_valid_test_curve.png' % (\n epoch, batch_i, total_batch))\n", (13550, 13801), False, 'from utils import Progbar, plot_learning_curve_and_write_csv\n'), ((14172, 14235), 'numpy.average', 'np.average', (['valid_history_losses[-1][opt.report_score_names[0]]'], {}), '(valid_history_losses[-1][opt.report_score_names[0]])\n', (14182, 14235), True, 'import numpy as np\n'), ((16458, 16480), 'logging.info', 'logging.info', (["('*' * 50)"], {}), "('*' * 50)\n", (16470, 16480), False, 'import logging\n'), ((10403, 10450), 'numpy.any', 'np.any', (['[(x >= opt.vocab_size) for x in src_wi]'], {}), '([(x >= opt.vocab_size) for x in src_wi])\n', (10409, 10450), True, 'import numpy as np\n'), ((10487, 10533), 'numpy.any', 'np.any', (['[(x >= opt.vocab_size) for x in trg_i]'], {}), '([(x >= opt.vocab_size) for x in trg_i])\n', (10493, 10533), True, 'import numpy as np\n'), ((11319, 11385), 'logging.info', 'logging.info', (['"""=================================================="""'], {}), "('==================================================')\n", (11331, 11385), False, 'import logging\n'), ((12714, 12737), 'copy.copy', 'copy.copy', (['train_losses'], {}), '(train_losses)\n', (12723, 12737), False, 'import copy\n'), ((13428, 13441), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (13438, 13441), True, 'import numpy as np\n'), ((14654, 14798), 'logging.info', 'logging.info', (["('Validation: update best loss (%.4f --> %.4f), rate of change (ROC)=%.2f' %\n (best_loss, 
valid_loss, rate_of_change * 100))"], {}), "(\n 'Validation: update best loss (%.4f --> %.4f), rate of change (ROC)=%.2f' %\n (best_loss, valid_loss, rate_of_change * 100))\n", (14666, 14798), False, 'import logging\n'), ((14857, 15040), 'logging.info', 'logging.info', (["('Validation: best loss is not updated for %d times (%.4f --> %.4f), rate of change (ROC)=%.2f'\n % (stop_increasing, best_loss, valid_loss, rate_of_change * 100))"], {}), "(\n 'Validation: best loss is not updated for %d times (%.4f --> %.4f), rate of change (ROC)=%.2f'\n % (stop_increasing, best_loss, valid_loss, rate_of_change * 100))\n", (14869, 15040), False, 'import logging\n'), ((16284, 16376), 'logging.info', 'logging.info', (["('Have not increased for %d epoches, early stop training' % stop_increasing)"], {}), "('Have not increased for %d epoches, early stop training' %\n stop_increasing)\n", (16296, 16376), False, 'import logging\n'), ((7662, 7673), 'time.time', 'time.time', ([], {}), '()\n', (7671, 7673), False, 'import time\n'), ((8464, 8488), 'numpy.mean', 'np.mean', (['ml_loss.data[0]'], {}), '(ml_loss.data[0])\n', (8471, 8488), True, 'import numpy as np\n'), ((15439, 15582), 'os.path.join', 'os.path.join', (['opt.save_path', "('%s.epoch=%d.batch=%d.total_batch=%d.error=%f' % (opt.exp, epoch, batch_i,\n total_batch, valid_loss) + '.model')"], {}), "(opt.save_path, '%s.epoch=%d.batch=%d.total_batch=%d.error=%f' %\n (opt.exp, epoch, batch_i, total_batch, valid_loss) + '.model')\n", (15451, 15582), False, 'import os\n'), ((15685, 15808), 'os.path.join', 'os.path.join', (['opt.save_path', "('%s.epoch=%d.batch=%d.total_batch=%d' % (opt.exp, epoch, batch_i,\n total_batch) + '.model')"], {}), "(opt.save_path, '%s.epoch=%d.batch=%d.total_batch=%d' % (opt.\n exp, epoch, batch_i, total_batch) + '.model')\n", (15697, 15808), False, 'import os\n'), ((16051, 16174), 'os.path.join', 'os.path.join', (['opt.save_path', "('%s.epoch=%d.batch=%d.total_batch=%d' % (opt.exp, epoch, batch_i,\n total_batch) + '.state')"], {}), "(opt.save_path, '%s.epoch=%d.batch=%d.total_batch=%d' % (opt.\n exp, epoch, batch_i, total_batch) + '.state')\n", (16063, 16174), False, 'import os\n')]
|
import tqdm
import struct
import os
import numpy as np
import pickle
import json
import random
from collections import Counter
#from lightfm import LightFM
from scipy import sparse
from evaluate import evaluate, coverage
from sklearn.preprocessing import LabelBinarizer
from implicit.als import AlternatingLeastSquares
from scipy.linalg import norm
split_folder = 'lastfm'
user_features_playcounts_filename = 'tracks_out_user_playcounts_als_{}.feats'
item_features_playcounts_filename = 'tracks_out_item_playcounts_als_{}.feats'
predictions_playcounts_filename = 'tracks_predicted_playcounts_als_{}.npy'
user_features_file = 'data/LFM-1b/LFM-1b_users.txt'
gender_location = 'data/lastfm/track_gender.json'
def evaluate2(iteration_tracks, items_dict, tracks_pop):
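    # Returns (gini coefficient over recommendation counts, number of distinct
    # recommended items, average popularity of each user's recommendations).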
all_songs = {}
popularity = []
for user in range(len(iteration_tracks)):
if len(iteration_tracks[user]):
curr_pop = 0
for track in iteration_tracks[user]:
curr_pop += tracks_pop[0, track]
if track not in all_songs:
all_songs[track] = 0
all_songs[track] += 1
popularity.append(curr_pop/len(iteration_tracks[user]))
#return len(different_songs)/len(iteration_tracks) #return np.mean(all_songs)
#print (len(different_songs), len(items_dict))
#return len(different_songs)/len(items_dict)#sum(all_songs) #return np.mean(all_songs)
popularity = np.mean(popularity)
different_songs = len(all_songs)
if different_songs > len(items_dict):
np_counts = np.zeros(different_songs, np.dtype('float64'))
else:
np_counts = np.zeros(len(items_dict), np.dtype('float64'))
np_counts[:different_songs] = np.array(list(all_songs.values()))
return gini(np_counts), different_songs, popularity
def gini(array):
# based on bottom eq: http://www.statsdirect.com/help/content/image/stat0206_wmf.gif
# from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
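    # For ascending-sorted values x_1..x_n this computes
    # G = sum_i((2*i - n - 1) * x_i) / (n * sum_i(x_i)).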
array = array.flatten() #all values are treated equally, arrays must be 1d
if np.amin(array) < 0:
array -= np.amin(array) #values cannot be negative
array += 0.0000001 #values cannot be 0
array = np.sort(array) #values must be sorted
index = np.arange(1,array.shape[0]+1) #index per array element
n = array.shape[0]#number of array elements
return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array))) #Gini coefficient
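# Quick sanity check with hypothetical inputs: a uniform distribution gives G ~ 0
# while full concentration on a single item approaches 1, e.g.
#   gini(np.array([1., 1., 1., 1.]))  # -> ~0.0
#   gini(np.array([0., 0., 0., 1.]))  # -> ~0.75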
def load_feats(feat_fname, meta_only=False, nrz=False):
with open(feat_fname, 'rb') as fin:
keys = fin.readline().strip().split()
R, C = struct.unpack('qq', fin.read(16))
if meta_only:
return keys, (R, C)
        feat = np.frombuffer(fin.read(), count=R * C, dtype=np.float32)
feat = feat.reshape((R, C))
if nrz:
feat = feat / np.sqrt((feat ** 2).sum(-1) + 1e-8)[..., np.newaxis]
return keys, feat
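# On-disk .feats layout shared by load_feats()/save(): one whitespace-separated
# header line of keys, two native-order int64s (R, C) packed with struct, then
# the R x C float32 matrix as raw bytes.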
def save(keys, feats, out_fname):
feats = np.array(feats, dtype=np.float32)
with open(out_fname + '.tmp', 'wb') as fout:
fout.write(b' '.join([k.encode() for k in keys]))
fout.write(b'\n')
R, C = feats.shape
fout.write(struct.pack('qq', *(R, C)))
        fout.write(feats.tobytes())
os.rename(out_fname + '.tmp', out_fname)
def train_als(impl_train_data, dims, user_ids, item_ids, user_features_file, item_features_file, save_res=True):
model = AlternatingLeastSquares(factors=dims, iterations=50)
model.fit(impl_train_data.T)
user_vecs_reg = model.user_factors
item_vecs_reg = model.item_factors
print("USER FEAT:", user_vecs_reg.shape)
print("ITEM FEAT:", item_vecs_reg.shape)
if save_res==True:
save(item_ids, item_vecs_reg, item_features_file)
save(user_ids, user_vecs_reg, user_features_file)
return item_ids, item_vecs_reg, user_ids, user_vecs_reg
def train(impl_train_data, dims, user_ids, item_ids, item_features_file, user_features_file, user_features=None, save_res=True):
    from lightfm import LightFM  # imported lazily so the module still loads when LightFM is not installed
    model = LightFM(loss='warp', no_components=dims, max_sampled=30, user_alpha=1e-06)
#model = model.fit(impl_train_data, epochs=50, num_threads=8)
model = model.fit(impl_train_data, user_features=user_features, epochs=50, num_threads=8)
user_biases, user_embeddings = model.get_user_representations(user_features)
#user_biases, user_embeddings = model.get_user_representations()
item_biases, item_embeddings = model.get_item_representations()
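    # Fold the biases into the factors: items become [embedding | bias] and users
    # [embedding | 1], so a plain dot product yields user.item + item_bias.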
item_vecs_reg = np.concatenate((item_embeddings, np.reshape(item_biases, (1, -1)).T), axis=1)
user_vecs_reg = np.concatenate((user_embeddings, np.ones((1, user_biases.shape[0])).T), axis=1)
print("USER FEAT:", user_vecs_reg.shape)
print("ITEM FEAT:", item_vecs_reg.shape)
if save_res==True:
save(item_ids, item_vecs_reg, item_features_file)
save(user_ids, user_vecs_reg, user_features_file)
return item_ids, item_vecs_reg, user_ids, user_vecs_reg
def predict(item_vecs_reg, user_vecs_reg, prediction_file,impl_train_data, N=100, step=1000, save_res=True):
#listened_dict = sparse.dok_matrix(impl_train_data)
listened_dict = impl_train_data
predicted = np.zeros((user_vecs_reg.shape[0],N), dtype=np.uint32)
for u in range(0,user_vecs_reg.shape[0], step):
sims = user_vecs_reg[u:u+step].dot(item_vecs_reg.T)
curr_users = listened_dict[u:u+step].todense() == 0
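        # curr_users is 1 for items the user has not listened to; multiplying by it
        # zeroes the scores of already-seen items so they are not recommended again.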
topn = np.argsort(-np.multiply(sims,curr_users), axis=1)[:,:N]
predicted[u:u+step, :] = topn
if u % 100000 == 0:
print ("Precited users: ", u)
if save_res==True:
np.save(open(prediction_file, 'wb'), predicted)
return predicted
def rerank(predicted, items_gender, lambda1=10):
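    # Demote "Male" items by adding lambda1 to their rank position, then re-sort
    # ascending, which moves female artists up the recommendation list.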
for u in range(0,predicted.shape[0]):
recs_dict = {item:p for p,item in enumerate(predicted[u, :])}
for track in recs_dict.keys():
if items_gender[track] == "Male":
recs_dict[track] += lambda1
predicted[u] = np.array([k for k,v in sorted(recs_dict.items(), key=lambda x: x[1])])
if u % 50000 == 0:
print ("reranked users: ", u)
return predicted
from math import log2
def show_eval(predicted_x, fan_test_data,item_ids,items_gender, sum_listen):
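    # Reports gender-balance diagnostics (coverage, distribution distance, first
    # rank per gender, miss rates) plus ranking metrics split by artist gender.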
topn = predicted_x.shape[1]
print (topn)
fan_test_data_sorted = []
fan_test_data_male = []
fan_test_data_female = []
predicted_male = []
predicted_female = []
    all_res = {'test_fidelity': [], 'test_engagement': [], 'test_awareness': [], 'test_playcounts': [], 'pred_fidelity': {}, 'pred_awareness': {}, 'pred_engagement': {}, 'pred_playcounts': {}}
for cutoff in ('1', '3', '5', '10', '100'):
        for name in ('pred_fidelity', 'pred_awareness', 'pred_engagement', 'pred_playcounts'):
all_res[name][cutoff] = []
_SQRT2 = np.sqrt(2) # sqrt(2) with default precision np.float64
artist_gender_user = []
artist_gender_user_recommend = []
artist_gender_dist = []
artist_gender_first_female = []
artist_gender_first_male = []
reco_set= {}
miss_male = []
miss_female = []
for i in range(len(fan_test_data)):
#fan_test_data_sorted.append(fan_test_data[i])
test_u_sorted_playcount = sorted([(a, p) for a,p in fan_test_data[i]], key=lambda x: x[1])
fan_test_data_sorted.append([a[0] for a in test_u_sorted_playcount])
fan_test_data_male.append([a[0] for a in test_u_sorted_playcount if items_gender[a[0]] == "Male"])
test_male_dict = {a[0]:1 for a in test_u_sorted_playcount if items_gender[a[0]] == "Male"}
test_female_dict = {a[0]:1 for a in test_u_sorted_playcount if items_gender[a[0]] == "Female"}
fan_test_data_female.append([a[0] for a in test_u_sorted_playcount if items_gender[a[0]] == "Female"])
        if len(test_u_sorted_playcount) == 0:  # skip users with no test plays
continue
first_female = None
first_male = None
curr_predict_female = []
curr_predict_male = []
fn_female = len(test_female_dict)
fn_male = len(test_male_dict)
for p,a in enumerate(predicted_x[i]):
            if first_female is None and items_gender[a] == 'Female':
                first_female = p
            if first_male is None and items_gender[a] == 'Male':
                first_male = p
#if first_male != None and first_female != None:
# break
if items_gender[a] == 'Female':
curr_predict_female.append(a)
if a in test_female_dict:
fn_female -=1
elif items_gender[a] == 'Male':
curr_predict_male.append(a)
if a in test_male_dict:
fn_male -=1
if len(test_male_dict):
miss_male.append(fn_male/len(test_male_dict))
if len(test_female_dict):
miss_female.append(fn_female/len(test_female_dict))
predicted_female.append(curr_predict_female)
predicted_male.append(curr_predict_male)
        if first_female is not None:
artist_gender_first_female.append(first_female)
else:
artist_gender_first_female.append(len(predicted_x[i])+1)
        if first_male is not None:
artist_gender_first_male.append(first_male)
else:
artist_gender_first_male.append(len(predicted_x[i])+1)
reco_set.update({a:1 for a in predicted_x[i]})
listened = dict(Counter([items_gender[a[0]] for a in test_u_sorted_playcount]))
female = 0
male = 0
if 'Female' in listened:
female = listened['Female']
if 'Male' in listened:
male = listened['Male']
if (male+female) > 0:
artist_gender_user.append(female / (male+female))
q = [female / (male+female), male/ (male+female)]
        listened = dict(Counter([items_gender[a] for a in predicted_x[i]]))
female = 0
male = 0
if 'Female' in listened:
female = listened['Female']
if 'Male' in listened:
male = listened['Male']
artist_gender_user_recommend.append(female / (male+female))
p = [female / (male+female), male/ (male+female)]
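        # Hellinger distance between the recommended (p) and listened (q) gender
        # distributions: ||sqrt(p) - sqrt(q)||_2 / sqrt(2). Note q comes from the
        # test-plays split above, so this assumes it was set for the current user.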
artist_gender_dist.append(norm(np.sqrt(p) - np.sqrt(q)) / _SQRT2)
reco_set_total = dict(Counter([items_gender[a] for a in reco_set.keys()]))
print ("Coverage@100 gender", reco_set_total)
print ("Distribution", np.mean(artist_gender_dist))
print ("Female listened", np.mean(artist_gender_user))
print ("Female recommended", np.mean(artist_gender_user_recommend))
print ("First Female", np.mean(artist_gender_first_female))
print ("First Male", np.mean(artist_gender_first_male))
metrics = ['map@100', 'precision@1', 'precision@3', 'precision@5', 'precision@10', 'r-precision', 'ndcg@100']
#results = evaluate(metrics, fan_test_data_male, np.array(predicted_male))
results = evaluate(metrics, fan_test_data_male, predicted_x)
#gini_val,cov_val,pop_val = evaluate2(np.array(predicted_male), item_ids, sum_listen)
print ("MALE")
print ('FAN', results)
#print ('GINI@100', gini_val, 'pop@100', pop_val, 'coverage@100', cov_val)
#print ('Coverage@10', coverage(predicted_male, 10), 'Coverage on FAN test set', coverage(fan_test_data_male, 100))
results = evaluate(metrics, fan_test_data_female, predicted_x)
#results = evaluate(metrics, fan_test_data_female, np.array(predicted_female))
#gini_val,cov_val,pop_val = evaluate2(np.array(predicted_female), item_ids, sum_listen)
print ("FEMALE")
print ('FAN', results)
#print ('GINI@100', gini_val, 'pop@100', pop_val, 'coverage@100', cov_val)
#print ('Coverage@10', coverage(predicted_female, 10), 'Coverage on FAN test set', coverage(fan_test_data_female, 100))
print ("MISS RATE: Male", np.mean(miss_male), 'Female: ', np.mean(miss_female))
"""
results = evaluate(metrics, fan_test_data_sorted, predicted_x)
gini_val,cov_val,pop_val = evaluate2(predicted_x, item_ids, sum_listen)
print ("ALL")
print ('FAN', results)
print ('GINI@100', gini_val, 'pop@100', pop_val, 'coverage@100', cov_val)
print ('Coverage@10', coverage(predicted_x.tolist(), 10), 'Coverage on FAN test set', coverage(fan_test_data_sorted, 100))
print ('----------------------------')
"""
def predict_pop(pop_artists, impl_train_data, N=100):
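    # Popularity baseline: recommend the globally most-played items the user has
    # not already listened to.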
predicted = np.zeros((impl_train_data.shape[0],N), dtype=np.uint32)
for u in range(0, impl_train_data.shape[0]):
curr_val = 0
for a in pop_artists:
if impl_train_data[u,a] == 0:
predicted[u,curr_val] = a
curr_val += 1
                if curr_val == N:  # stop once N items have been recommended
break
return predicted
def predict_rnd(item_ids, impl_train_data, N=100):
predicted = np.zeros((impl_train_data.shape[0],N), dtype=np.uint32)
items = range(len(item_ids))
for u in range(0, impl_train_data.shape[0]):
selected = random.sample(items, N)
predicted[u,:] = selected
return predicted
if __name__== "__main__":
artists_gender = json.load(open(gender_location))
fan_train_data = sparse.load_npz(os.path.join('data', split_folder, 'tracks_train_data_playcount.npz')).tocsr()
sum_listen = fan_train_data.sum(axis=0)
fan_test_data = pickle.load(open(os.path.join('data', split_folder, 'tracks_test_data.pkl'), 'rb'))
fan_items_dict = pickle.load(open(os.path.join('data', split_folder, 'tracks_items_dict.pkl'), 'rb'))
items_gender = [0]*len(fan_items_dict)
for a in fan_items_dict.keys():
        items_gender[fan_items_dict[a]] = artists_gender[a]
fan_users_dict = pickle.load(open(os.path.join('data', split_folder,'tracks_users_dict.pkl'), 'rb'))
print ("Item", len(fan_items_dict))
print ("User", len(fan_users_dict))
print (sum_listen.shape)
model_folder = 'models'
#for lambda1 in [40, 30]:
dims = 300
user_features_file = os.path.join(model_folder, split_folder, user_features_playcounts_filename.format(dims))
item_features_file = os.path.join(model_folder, split_folder, item_features_playcounts_filename.format(dims))
item_ids, item_vecs_reg, user_ids, user_vecs_reg = train_als(fan_train_data, dims, fan_users_dict, fan_items_dict, user_features_file, item_features_file, save_res=True)
#item_ids, item_vecs_reg, user_ids, user_vecs_reg = train(fan_train_data_fidelity, 50, fan_users_dict, fan_items_dict, model_folder, save_res=True)
#user_ids, user_vecs_reg = load_feats(user_features_file)
#item_ids, item_vecs_reg = load_feats(item_features_file)
predictions_file = os.path.join(model_folder, split_folder,predictions_playcounts_filename.format(dims))
predicted = predict(item_vecs_reg, user_vecs_reg, predictions_file, fan_train_data, step=500)
#predicted = np.load(predictions_file)
#rerank(predicted, items_gender, lambda1)
print (predicted.shape, len(fan_test_data), user_vecs_reg.shape, len(user_ids))
print ("ALS: -->", dims)#, "Lambda", lambda1)
show_eval(predicted, fan_test_data, fan_items_dict, items_gender, sum_listen)
print ("POP: -->", dims)#, "Lambda", lambda1)
pop_artists = np.argsort(-sum_listen.flatten())[0,:1000].tolist()[0]
predicted_pop = predict_pop(pop_artists, fan_train_data)
#rerank(predicted_pop, items_gender, lambda1)
show_eval(predicted_pop, fan_test_data, fan_items_dict, items_gender, sum_listen)
print ("RND: -->", dims)#, "Lambda", lambda1)
predicted_rnd = predict_rnd(fan_items_dict, fan_train_data)
#rerank(predicted_rnd, items_gender, lambda1)
show_eval(predicted_rnd, fan_test_data, fan_items_dict, items_gender, sum_listen)
|
[
"evaluate.evaluate"
] |
[((1453, 1472), 'numpy.mean', 'np.mean', (['popularity'], {}), '(popularity)\n', (1460, 1472), True, 'import numpy as np\n'), ((2236, 2250), 'numpy.sort', 'np.sort', (['array'], {}), '(array)\n', (2243, 2250), True, 'import numpy as np\n'), ((2286, 2318), 'numpy.arange', 'np.arange', (['(1)', '(array.shape[0] + 1)'], {}), '(1, array.shape[0] + 1)\n', (2295, 2318), True, 'import numpy as np\n'), ((3004, 3037), 'numpy.array', 'np.array', (['feats'], {'dtype': 'np.float32'}), '(feats, dtype=np.float32)\n', (3012, 3037), True, 'import numpy as np\n'), ((3314, 3354), 'os.rename', 'os.rename', (["(out_fname + '.tmp')", 'out_fname'], {}), "(out_fname + '.tmp', out_fname)\n", (3323, 3354), False, 'import os\n'), ((3482, 3534), 'implicit.als.AlternatingLeastSquares', 'AlternatingLeastSquares', ([], {'factors': 'dims', 'iterations': '(50)'}), '(factors=dims, iterations=50)\n', (3505, 3534), False, 'from implicit.als import AlternatingLeastSquares\n'), ((5241, 5295), 'numpy.zeros', 'np.zeros', (['(user_vecs_reg.shape[0], N)'], {'dtype': 'np.uint32'}), '((user_vecs_reg.shape[0], N), dtype=np.uint32)\n', (5249, 5295), True, 'import numpy as np\n'), ((6897, 6907), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6904, 6907), True, 'import numpy as np\n'), ((11036, 11086), 'evaluate.evaluate', 'evaluate', (['metrics', 'fan_test_data_male', 'predicted_x'], {}), '(metrics, fan_test_data_male, predicted_x)\n', (11044, 11086), False, 'from evaluate import evaluate, coverage\n'), ((11436, 11488), 'evaluate.evaluate', 'evaluate', (['metrics', 'fan_test_data_female', 'predicted_x'], {}), '(metrics, fan_test_data_female, predicted_x)\n', (11444, 11488), False, 'from evaluate import evaluate, coverage\n'), ((12522, 12578), 'numpy.zeros', 'np.zeros', (['(impl_train_data.shape[0], N)'], {'dtype': 'np.uint32'}), '((impl_train_data.shape[0], N), dtype=np.uint32)\n', (12530, 12578), True, 'import numpy as np\n'), ((12936, 12992), 'numpy.zeros', 'np.zeros', (['(impl_train_data.shape[0], N)'], {'dtype': 'np.uint32'}), '((impl_train_data.shape[0], N), dtype=np.uint32)\n', (12944, 12992), True, 'import numpy as np\n'), ((2102, 2116), 'numpy.amin', 'np.amin', (['array'], {}), '(array)\n', (2109, 2116), True, 'import numpy as np\n'), ((2139, 2153), 'numpy.amin', 'np.amin', (['array'], {}), '(array)\n', (2146, 2153), True, 'import numpy as np\n'), ((2402, 2437), 'numpy.sum', 'np.sum', (['((2 * index - n - 1) * array)'], {}), '((2 * index - n - 1) * array)\n', (2408, 2437), True, 'import numpy as np\n'), ((10544, 10571), 'numpy.mean', 'np.mean', (['artist_gender_dist'], {}), '(artist_gender_dist)\n', (10551, 10571), True, 'import numpy as np\n'), ((10603, 10630), 'numpy.mean', 'np.mean', (['artist_gender_user'], {}), '(artist_gender_user)\n', (10610, 10630), True, 'import numpy as np\n'), ((10665, 10702), 'numpy.mean', 'np.mean', (['artist_gender_user_recommend'], {}), '(artist_gender_user_recommend)\n', (10672, 10702), True, 'import numpy as np\n'), ((10731, 10766), 'numpy.mean', 'np.mean', (['artist_gender_first_female'], {}), '(artist_gender_first_female)\n', (10738, 10766), True, 'import numpy as np\n'), ((10793, 10826), 'numpy.mean', 'np.mean', (['artist_gender_first_male'], {}), '(artist_gender_first_male)\n', (10800, 10826), True, 'import numpy as np\n'), ((11945, 11963), 'numpy.mean', 'np.mean', (['miss_male'], {}), '(miss_male)\n', (11952, 11963), True, 'import numpy as np\n'), ((11977, 11997), 'numpy.mean', 'np.mean', (['miss_female'], {}), '(miss_female)\n', (11984, 11997), True, 'import numpy as np\n'), ((13093, 
13116), 'random.sample', 'random.sample', (['items', 'N'], {}), '(items, N)\n', (13106, 13116), False, 'import random\n'), ((1598, 1617), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1606, 1617), True, 'import numpy as np\n'), ((1675, 1694), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1683, 1694), True, 'import numpy as np\n'), ((2447, 2460), 'numpy.sum', 'np.sum', (['array'], {}), '(array)\n', (2453, 2460), True, 'import numpy as np\n'), ((3237, 3263), 'struct.pack', 'struct.pack', (['"""qq"""', '*(R, C)'], {}), "('qq', *(R, C))\n", (3248, 3263), False, 'import struct\n'), ((9500, 9562), 'collections.Counter', 'Counter', (['[items_gender[a[0]] for a in test_u_sorted_playcount]'], {}), '([items_gender[a[0]] for a in test_u_sorted_playcount])\n', (9507, 9562), False, 'from collections import Counter\n'), ((13451, 13509), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""tracks_test_data.pkl"""'], {}), "('data', split_folder, 'tracks_test_data.pkl')\n", (13463, 13509), False, 'import os\n'), ((13556, 13615), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""tracks_items_dict.pkl"""'], {}), "('data', split_folder, 'tracks_items_dict.pkl')\n", (13568, 13615), False, 'import os\n'), ((13800, 13859), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""tracks_users_dict.pkl"""'], {}), "('data', split_folder, 'tracks_users_dict.pkl')\n", (13812, 13859), False, 'import os\n'), ((4589, 4621), 'numpy.reshape', 'np.reshape', (['item_biases', '(1, -1)'], {}), '(item_biases, (1, -1))\n', (4599, 4621), True, 'import numpy as np\n'), ((4687, 4721), 'numpy.ones', 'np.ones', (['(1, user_biases.shape[0])'], {}), '((1, user_biases.shape[0]))\n', (4694, 4721), True, 'import numpy as np\n'), ((9921, 9971), 'collections.Counter', 'Counter', (['[items_gender[a] for a in predicted_x[i]]'], {}), '([items_gender[a] for a in predicted_x[i]])\n', (9928, 9971), False, 'from collections import Counter\n'), ((13291, 13360), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""tracks_train_data_playcount.npz"""'], {}), "('data', split_folder, 'tracks_train_data_playcount.npz')\n", (13303, 13360), False, 'import os\n'), ((5494, 5523), 'numpy.multiply', 'np.multiply', (['sims', 'curr_users'], {}), '(sims, curr_users)\n', (5505, 5523), True, 'import numpy as np\n'), ((10352, 10362), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (10359, 10362), True, 'import numpy as np\n'), ((10365, 10375), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (10372, 10375), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from evaluate import strict, loose_macro, loose_micro
def get_true_and_prediction(scores, y_data):
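    # Decision rule: always predict the argmax label, plus any other label whose
    # score exceeds 0.5.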
true_and_prediction = []
for score,true_label in zip(scores,y_data):
predicted_tag = []
true_tag = []
for label_id,label_score in enumerate(list(true_label)):
if label_score > 0:
true_tag.append(label_id)
        lid, ls = max(enumerate(list(score)), key=lambda x: x[1])
predicted_tag.append(lid)
for label_id,label_score in enumerate(list(score)):
if label_score > 0.5:
if label_id != lid:
predicted_tag.append(label_id)
true_and_prediction.append((true_tag, predicted_tag))
return true_and_prediction
def acc_hook(scores, y_data):
true_and_prediction = get_true_and_prediction(scores, y_data)
print(" strict (p,r,f1):",strict(true_and_prediction))
print("loose macro (p,r,f1):",loose_macro(true_and_prediction))
print("loose micro (p,r,f1):",loose_micro(true_and_prediction))
def save_predictions(scores, y_data, id2label, fname):
true_and_prediction = get_true_and_prediction(scores, y_data)
with open(fname,"w") as f:
for t, p in true_and_prediction:
f.write(" ".join([id2label[id] for id in t]) + "\t" + " ".join([id2label[id] for id in p]) + "\n")
f.close()
|
[
"evaluate.loose_macro",
"evaluate.loose_micro",
"evaluate.strict"
] |
[((892, 919), 'evaluate.strict', 'strict', (['true_and_prediction'], {}), '(true_and_prediction)\n', (898, 919), False, 'from evaluate import strict, loose_macro, loose_micro\n'), ((955, 987), 'evaluate.loose_macro', 'loose_macro', (['true_and_prediction'], {}), '(true_and_prediction)\n', (966, 987), False, 'from evaluate import strict, loose_macro, loose_micro\n'), ((1023, 1055), 'evaluate.loose_micro', 'loose_micro', (['true_and_prediction'], {}), '(true_and_prediction)\n', (1034, 1055), False, 'from evaluate import strict, loose_macro, loose_micro\n')]
|
import argparse
from evaluate import evaluate_model
from models import parse_class_weights
parser = argparse.ArgumentParser(
description="Provide a list of model files and a setting of class weights to visualize performance")
parser.add_argument('-m', type=str, nargs='+', help="list of paths to the saved model files")
parser.add_argument('-wtf', help="boolean for redirecting output to file", action='store_true', default=False)
args = parser.parse_args()
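# Example invocation (script and model names here are purely illustrative):
#   python evaluate_models.py -m saved/model_a.pkl saved/model_b.pkl -wtf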
for model_path in args.m:
weights = parse_class_weights(model_path)
evaluate_model(model_path, class_weights=weights, write_to_file=args.wtf)
|
[
"evaluate.evaluate_model"
] |
[((102, 236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Provide a list of model files and a setting of class weights to visualize performance"""'}), "(description=\n 'Provide a list of model files and a setting of class weights to visualize performance'\n )\n", (125, 236), False, 'import argparse\n'), ((506, 537), 'models.parse_class_weights', 'parse_class_weights', (['model_path'], {}), '(model_path)\n', (525, 537), False, 'from models import parse_class_weights\n'), ((542, 615), 'evaluate.evaluate_model', 'evaluate_model', (['model_path'], {'class_weights': 'weights', 'write_to_file': 'args.wtf'}), '(model_path, class_weights=weights, write_to_file=args.wtf)\n', (556, 615), False, 'from evaluate import evaluate_model\n')]
|
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch import optim
import random
import json
from collections import Counter
from torch.utils.data import Dataset, DataLoader, TensorDataset
import model
import pickle
import argparse
import evaluate
test_batch_size=1
gpu_no=0
predict_parent_from_child=False
eval_on_leaf_only=False
parser = argparse.ArgumentParser(description="Code to Inference")
parser.add_argument("-checkpoint_path",help="Path of checkpoint to test", type=str,required=True)
parser.add_argument("-test_data_path",help="Path of test data",type=str,required=True)
parser.add_argument("-concept_embd_path",help="Path of learned concept embeddings",type=str,required=True)
parser.add_argument("-features_dir_path",help="directory path to features path",type=str,required=True)
parser.add_argument("-quantum_emdb_dimension",help="quantum embedding dimension",type=int,default=300)
parser.add_argument("-threshold_level_wise",help="Threshold Parameter (referred as tau in the paper)",type=float, default=0.15)
parser.add_argument("-constant_to_divide",help="Threshold Parameter (referred as delta in the paper)",type=int, default=5)
args = parser.parse_args()
checkpoint_path = args.checkpoint_path
threshold_level_wise=args.threshold_level_wise
constant_to_divide=args.constant_to_divide
test_data_path=args.test_data_path
file_quantam_concept_embd=args.concept_embd_path
features_dir_path=args.features_dir_path
quantum_emdb_dimension=args.quantum_emdb_dimension
with open(test_data_path, 'r') as filehandle:
test_data = json.load(filehandle)
for i in range(len(test_data)):
test_data[i]['entity_id']=i
test_data[i]['mentionid'] = i
with open(file_quantam_concept_embd, 'rb') as infile:
    quantam_concept_embds = pickle.load(infile)
words_train=Counter()
test_left_context=[]
test_right_context=[]
test_labels=[]
test_entity=[]
with open(os.path.join(features_dir_path,'token_list.json'), 'r') as filehandle:
words_train = json.load(filehandle)
with open(os.path.join(features_dir_path,'with_non_leaf_type_name_train_split.json'), 'r') as filehandle:
total_node_list = json.load(filehandle)
with open(os.path.join(features_dir_path,'with_non_leaf_leaf_node_list_train_split.json'), 'r') as filehandle:
leaf_node_list = json.load(filehandle)
non_leaf_node_list=total_node_list[len(leaf_node_list):]
no_total_nodes=len(total_node_list)
no_leaf_nodes = len(leaf_node_list)
with open(os.path.join(features_dir_path,'with_non_leaf_parent_list_train_split.json'), 'r') as filehandle:
parent_of = json.load(filehandle)
childern_of_root = [i for i, x in enumerate(parent_of) if x == -1]
# create word to index dictionary and reverse
word2idx_train = {o:i for i,o in enumerate(words_train)}
UNK_index=word2idx_train['<UNK>']
for instance in test_data:
if instance['left_context']==[]:
instance['left_context']=['<PAD>']
if instance['right_context'] == []:
instance['right_context'] = ['<PAD>']
instance['left_context']= [word2idx_train.get(token, UNK_index) for token in instance['left_context']]
instance['right_context'] = [word2idx_train.get(token, UNK_index) for token in instance['right_context']]
class IRDataset(Dataset):
def __init__(self, test_data):
self.test_data = test_data
def __len__(self):
return len(self.test_data)
    def vectorize(self, tokens, word2idx):
        """Convert a list of tokens to a vector of word ids, mapping unknown tokens to <UNK>
        """
        vec = [word2idx.get(token, word2idx_train['<UNK>']) for token in tokens]
        return vec
def __getitem__(self, index):
tokens=[]
tokens.append(self.test_data[index]['left_context'])
tokens.append(self.test_data[index]['right_context'])
entity=[]
entity.append(self.test_data[index]['entity_id'])
entity.append(self.test_data[index]['entity_id'])
predicted_labels=self.test_data[index]['label']
mentionid=[]
mentionid.append(self.test_data[index]['mentionid'])
mentionid.append(self.test_data[index]['mentionid'])
return tokens,entity,mentionid,predicted_labels
def my_collate_fn(old_data):
"""This function will be used to pad the questions to max length
in the batch and transpose the batch from
batch_size x max_seq_len to max_seq_len x batch_size.
It will return padded vectors, labels and lengths of each tweets (before padding)
It will be used in the Dataloader
"""
data=[]
for i in range(len(old_data)):
data.append((old_data[i][0][0],old_data[i][1][1],old_data[i][2][1],0,old_data[i][3]))
data.append((old_data[i][0][1],old_data[i][1][1],old_data[i][2][1],1,old_data[i][3]))
data.sort(key=lambda x: len(x[0]), reverse=True)
    lens = [len(sent) for sent, entity_id, mentionid, left_right, labels in data]
entity_ids = []
mentionids=[]
left_rights=[]
labels=[]
padded_sents = torch.zeros(len(data), max(lens)).long()
for i, (sent, entity_id,mentionid,left_right,label) in enumerate(data):
padded_sents[i, :lens[i]] = torch.LongTensor(sent)
entity_ids.append(entity_id)
mentionids.append(mentionid)
left_rights.append(left_right)
labels.append(label)
padded_sents = padded_sents.transpose(0, 1)
    return padded_sents, torch.FloatTensor(entity_ids), mentionids, torch.Tensor(left_rights), torch.tensor(lens), labels
test_dataset = IRDataset(test_data)
test_data_loader = DataLoader(
test_dataset,
batch_size=test_batch_size,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=1,
pin_memory=False,
drop_last=False,
timeout=0,
collate_fn=my_collate_fn,
worker_init_fn=None,
)
model_vocab_size=len(words_train)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu') # 'cpu' in this case
print("device {}".format(device))
model = model.BiLSTM(lstm_layer=1, vocab_size=model_vocab_size, hidden_dim=100,quant_embedding_dim=quantum_emdb_dimension,device=device)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint["model_state_dict"])
model=model.to(device)
model.eval()
quantam_concept_embds=np.array(quantam_concept_embds)
if predict_parent_from_child == False:
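    # Make every parent concept the union (logical OR) of its children's binary
    # embeddings, so a parent's subspace covers everything its children cover.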
for internal_node in non_leaf_node_list:
internal_node_index=total_node_list.index(internal_node)
indices = [i for i, x in enumerate(parent_of) if x == internal_node_index]
for i in indices:
quantam_concept_embds[internal_node_index]=np.logical_or(quantam_concept_embds[internal_node_index],quantam_concept_embds[i])
def is_only_leaf(label_list):
is_leaf = False
is_non_leaf = False
    for label in label_list:
if label in leaf_node_list:
is_leaf=True
else:
label_index = total_node_list.index(label)
if label_index not in parent_list:
is_non_leaf = True
if is_leaf == True and is_non_leaf == False:
return 0
if is_leaf == True and is_non_leaf == True:
return 1
if is_leaf == False and is_non_leaf == True:
return 2
def predict_labels(score,threshold,constant_to_divide):
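    # Top-down decoding: always keep the best level-1 type, and descend to its best
    # child only when the relative score drop is below tau (threshold); for nodes
    # with more than 10 children the threshold is tightened to tau/delta.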
score_level_1=score[childern_of_root]
max_level_1_index=childern_of_root[np.argmax(score_level_1)]
score_max_level_1=score[max_level_1_index]
best_level_1_node = total_node_list[max_level_1_index]
predicted_labels_id=[]
predicted_labels_id.append(max_level_1_index)
person_index=total_node_list.index('person')
organization_index=total_node_list.index('organization')
childern_of_max_level_1 = [i for i, x in enumerate(parent_of) if x == max_level_1_index]
if childern_of_max_level_1 == []:
return predicted_labels_id, "root",0.0, best_level_1_node,score_max_level_1
score_level_2=score[childern_of_max_level_1]
max_level_2_index=childern_of_max_level_1[np.argmax(score_level_2)]
best_level_2_node = total_node_list[max_level_2_index]
score_max_level_2=score[max_level_2_index]
if len(childern_of_max_level_1) > 10:
constant = constant_to_divide
else:
constant = 1
if (score_max_level_1-score_max_level_2)/score_max_level_1 < threshold/constant:
predicted_labels_id.append(max_level_2_index)
return predicted_labels_id, best_level_1_node, score_max_level_1,best_level_2_node,score_max_level_2
def predict_labels_simple(score,threshold):
predicted_labels_id = np.argwhere(score > threshold)
# print(predicted_labels_id.transpose())
predicted_labels_id = list(predicted_labels_id[:, 0])
new_predicted_labels_id = set()
for id in predicted_labels_id:
new_predicted_labels_id.add(id)
if parent_of[id] != -1 and predict_parent_from_child == True:
new_predicted_labels_id.add(parent_of[id])
predicted_labels_id = list(new_predicted_labels_id)
return predicted_labels_id,0,0,0,0
complete_gt_labels=[]
complete_predicted_labels=[]
results=[]
print("Starting Inference at threshold level wise {} constant to divide {}".format(threshold_level_wise,constant_to_divide))
true_and_prediction = []
ground_truth_dict={}
prediction_dict={}
total_gt_label_len=[]
total_predicted_label_len=[]
gt_labels_ids=[]
gt_labels_ids_length=[]
for i in range(len(test_data)):
gt_labels_ids.append([total_node_list.index(label) for label in test_data[i]['label']])
gt_labels_ids_length.append(len(test_data[i]['label']))
for epoch in range(1):
for batch_idx, (context,entity,ids,left_right,lens,gt_labels) in enumerate(test_data_loader):
gt_labels_id = [total_node_list.index(i) for i in gt_labels[0]]
parent_list=[parent_of[total_node_list.index(label)] for label in gt_labels[0]]
        if eval_on_leaf_only:
is_leaf=is_only_leaf(gt_labels[0])
if is_leaf != 0:
continue
predict_quant_embd,id_list = model.forward(context.to(device), lens.to(device),ids,left_right)
predict_quant_embd=predict_quant_embd.data.cpu().numpy()
mask_matrix=np.multiply(quantam_concept_embds, predict_quant_embd)
normalizing_constant=np.sum(np.abs(predict_quant_embd)**2,axis=-1)
score=(np.sum(np.abs(mask_matrix) ** 2, axis=-1))/normalizing_constant
predicted_labels_id,best_level_1_node,best_level_1_score,best_level_2_node,best_level_2_score=predict_labels(score,threshold_level_wise,constant_to_divide)
predicted_labels=[total_node_list[i] for i in predicted_labels_id]
temp_dict = {}
temp_dict['gt_label'] = gt_labels[0]
temp_dict['predicted_label'] = predicted_labels
temp_dict['best_level_1_node']=best_level_1_node
temp_dict['best_level_2_node'] = best_level_2_node
temp_dict['best_level_1_score']=best_level_1_score
temp_dict['best_level_2_score']=best_level_2_score
total_gt_label_len.append(len(gt_labels[0]))
total_predicted_label_len.append(len(predicted_labels))
p,r,f=evaluate.loose_macro([(gt_labels_id, predicted_labels_id)])
p1,r1,f1=evaluate.strict([(gt_labels_id, predicted_labels_id)])
true_and_prediction.append((gt_labels_id, predicted_labels_id))
print("strict (p,r,f1):",evaluate.strict(true_and_prediction))
print("loose micro (p,r,f1):",evaluate.loose_micro(true_and_prediction))
print("loose macro (p,r,f1):",evaluate.loose_macro(true_and_prediction))
|
[
"evaluate.strict",
"evaluate.loose_micro",
"evaluate.loose_macro"
] |
[((474, 530), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Code to Inference"""'}), "(description='Code to Inference')\n", (497, 530), False, 'import argparse\n'), ((1874, 1893), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1885, 1893), False, 'import pickle\n'), ((1924, 1933), 'collections.Counter', 'Counter', ([], {}), '()\n', (1931, 1933), False, 'from collections import Counter\n'), ((5618, 5837), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'test_batch_size', 'shuffle': '(False)', 'sampler': 'None', 'batch_sampler': 'None', 'num_workers': '(1)', 'pin_memory': '(False)', 'drop_last': '(False)', 'timeout': '(0)', 'collate_fn': 'my_collate_fn', 'worker_init_fn': 'None'}), '(test_dataset, batch_size=test_batch_size, shuffle=False, sampler\n =None, batch_sampler=None, num_workers=1, pin_memory=False, drop_last=\n False, timeout=0, collate_fn=my_collate_fn, worker_init_fn=None)\n', (5628, 5837), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset\n'), ((5923, 5948), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5946, 5948), False, 'import torch\n'), ((5958, 6001), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (5970, 6001), False, 'import torch\n'), ((6069, 6203), 'model.BiLSTM', 'model.BiLSTM', ([], {'lstm_layer': '(1)', 'vocab_size': 'model_vocab_size', 'hidden_dim': '(100)', 'quant_embedding_dim': 'quantum_emdb_dimension', 'device': 'device'}), '(lstm_layer=1, vocab_size=model_vocab_size, hidden_dim=100,\n quant_embedding_dim=quantum_emdb_dimension, device=device)\n', (6081, 6203), False, 'import model\n'), ((6274, 6327), 'model.load_state_dict', 'model.load_state_dict', (["checkpoint['model_state_dict']"], {}), "(checkpoint['model_state_dict'])\n", (6295, 6327), False, 'import model\n'), ((6335, 6351), 'model.to', 'model.to', (['device'], {}), '(device)\n', (6343, 6351), False, 'import model\n'), ((6352, 6364), 'model.eval', 'model.eval', ([], {}), '()\n', (6362, 6364), False, 'import model\n'), ((6388, 6419), 'numpy.array', 'np.array', (['quantam_concept_embds'], {}), '(quantam_concept_embds)\n', (6396, 6419), True, 'import numpy as np\n'), ((1680, 1701), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (1689, 1701), False, 'import json\n'), ((2109, 2130), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (2118, 2130), False, 'import json\n'), ((2261, 2282), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (2270, 2282), False, 'import json\n'), ((2416, 2437), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (2425, 2437), False, 'import json\n'), ((2694, 2715), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (2703, 2715), False, 'import json\n'), ((6222, 6251), 'os.path.join', 'os.path.join', (['checkpoint_path'], {}), '(checkpoint_path)\n', (6234, 6251), False, 'import os\n'), ((8676, 8706), 'numpy.argwhere', 'np.argwhere', (['(score > threshold)'], {}), '(score > threshold)\n', (8687, 8706), True, 'import numpy as np\n'), ((11460, 11496), 'evaluate.strict', 'evaluate.strict', (['true_and_prediction'], {}), '(true_and_prediction)\n', (11475, 11496), False, 'import evaluate\n'), ((11528, 11569), 'evaluate.loose_micro', 'evaluate.loose_micro', (['true_and_prediction'], {}), '(true_and_prediction)\n', (11548, 11569), False, 'import evaluate\n'), ((11601, 11642), 'evaluate.loose_macro', 'evaluate.loose_macro', 
(['true_and_prediction'], {}), '(true_and_prediction)\n', (11621, 11642), False, 'import evaluate\n'), ((2020, 2070), 'os.path.join', 'os.path.join', (['features_dir_path', '"""token_list.json"""'], {}), "(features_dir_path, 'token_list.json')\n", (2032, 2070), False, 'import os\n'), ((2143, 2218), 'os.path.join', 'os.path.join', (['features_dir_path', '"""with_non_leaf_type_name_train_split.json"""'], {}), "(features_dir_path, 'with_non_leaf_type_name_train_split.json')\n", (2155, 2218), False, 'import os\n'), ((2294, 2379), 'os.path.join', 'os.path.join', (['features_dir_path', '"""with_non_leaf_leaf_node_list_train_split.json"""'], {}), "(features_dir_path, 'with_non_leaf_leaf_node_list_train_split.json'\n )\n", (2306, 2379), False, 'import os\n'), ((2580, 2657), 'os.path.join', 'os.path.join', (['features_dir_path', '"""with_non_leaf_parent_list_train_split.json"""'], {}), "(features_dir_path, 'with_non_leaf_parent_list_train_split.json')\n", (2592, 2657), False, 'import os\n'), ((5226, 5248), 'torch.LongTensor', 'torch.LongTensor', (['sent'], {}), '(sent)\n', (5242, 5248), False, 'import torch\n'), ((5465, 5494), 'torch.FloatTensor', 'torch.FloatTensor', (['entity_ids'], {}), '(entity_ids)\n', (5482, 5494), False, 'import torch\n'), ((5507, 5532), 'torch.Tensor', 'torch.Tensor', (['left_rights'], {}), '(left_rights)\n', (5519, 5532), False, 'import torch\n'), ((5534, 5552), 'torch.tensor', 'torch.tensor', (['lens'], {}), '(lens)\n', (5546, 5552), False, 'import torch\n'), ((7475, 7499), 'numpy.argmax', 'np.argmax', (['score_level_1'], {}), '(score_level_1)\n', (7484, 7499), True, 'import numpy as np\n'), ((8113, 8137), 'numpy.argmax', 'np.argmax', (['score_level_2'], {}), '(score_level_2)\n', (8122, 8137), True, 'import numpy as np\n'), ((10288, 10342), 'numpy.multiply', 'np.multiply', (['quantam_concept_embds', 'predict_quant_embd'], {}), '(quantam_concept_embds, predict_quant_embd)\n', (10299, 10342), True, 'import numpy as np\n'), ((11230, 11289), 'evaluate.loose_macro', 'evaluate.loose_macro', (['[(gt_labels_id, predicted_labels_id)]'], {}), '([(gt_labels_id, predicted_labels_id)])\n', (11250, 11289), False, 'import evaluate\n'), ((11307, 11361), 'evaluate.strict', 'evaluate.strict', (['[(gt_labels_id, predicted_labels_id)]'], {}), '([(gt_labels_id, predicted_labels_id)])\n', (11322, 11361), False, 'import evaluate\n'), ((6734, 6821), 'numpy.logical_or', 'np.logical_or', (['quantam_concept_embds[internal_node_index]', 'quantam_concept_embds[i]'], {}), '(quantam_concept_embds[internal_node_index],\n quantam_concept_embds[i])\n', (6747, 6821), True, 'import numpy as np\n'), ((10379, 10405), 'numpy.abs', 'np.abs', (['predict_quant_embd'], {}), '(predict_quant_embd)\n', (10385, 10405), True, 'import numpy as np\n'), ((10440, 10459), 'numpy.abs', 'np.abs', (['mask_matrix'], {}), '(mask_matrix)\n', (10446, 10459), True, 'import numpy as np\n')]
|
import os
import pandas as pd
import torch
from scipy.spatial import distance
from contextualized_topic_models.models.ctm import CombinedTM
from contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing
from contextualized_topic_models.utils.data_preparation import TopicModelDataPreparation
from contextualized_topic_models.utils.data_preparation import bert_embeddings_from_file
from data import prune_vocabulary2
from evaluate import evaluate_scores, compute_jsd, compute_kld2
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument('--data_path', default='project_dir/datasets/semeval-multilingual-news', type=str)
argparser.add_argument('--articles_file', default='test_split_batch2_translated_mbart.csv', type=str)
argparser.add_argument('--sbert_model', default='multi-qa-mpnet-base-dot-v1', type=str)
argparser.add_argument('--save_dir', default='bin/results/ctm', type=str)
argparser.add_argument('--num_topics', default=100, type=int)
argparser.add_argument('--num_epochs', default=200, type=int)
#argparser.add_argument('--test_articles_file', default='test_articles.csv', type=str)
#argparser.add_argument('--test_pairs_file', default='test.csv', type=str)
args = argparser.parse_args()
print("\n" + "-"*5, "Train Combined CTM - monolingual only", "-"*5)
print("data_path:", args.data_path)
print("articles_file:", args.articles_file)
print("sbert_model:", args.sbert_model)
print("save_dir:", args.save_dir)
print("num_topics:", args.num_topics)
print("num_epochs:", args.num_epochs)
print("-"*50 + "\n")
df = pd.read_csv(os.path.join(args.data_path, args.articles_file))
df = df.dropna()
if 'trg_text' in df.columns:
documents_raw = list(df.trg_text)
else:
documents_raw = list(df.text)
print('documents_raw:', len(documents_raw))
# ----- Preprocessing -----
# articles_unproc, articles_proc = prune_vocabulary2(documents_raw)
# text_for_contextual = articles_unproc
# text_for_bow = articles_proc
# preprocess documents
preproc_pipeline = WhiteSpacePreprocessing(documents=documents_raw, vocabulary_size=5000)
preprocessed_docs, unpreprocessed_docs, vocab = preproc_pipeline.preprocess()
text_for_bow = preprocessed_docs
text_for_contextual = unpreprocessed_docs
print('text_for_contextual:', len(text_for_contextual))
print('text_for_bow:', len(text_for_bow))
print('vocab:', len(vocab))
qt = TopicModelDataPreparation(args.sbert_model)
training_dataset = qt.fit(text_for_contextual=text_for_contextual, text_for_bow=text_for_bow)
#print("-"*10, "final vocab size:", len(qt.vocab), "-"*10)
# ----- Training -----
# initialize model
ctm = CombinedTM(bow_size=len(qt.vocab),
contextual_size=768,
n_components=args.num_topics,
num_epochs=args.num_epochs)
# run model
ctm.fit(train_dataset=training_dataset,
save_dir=args.save_dir)
# see topics
ctm.get_topics()
# ----- Inference -----
# load test articles
test_art_file = "test_split_batch2_translated_mbart.csv"
test_path = os.path.join(args.data_path, test_art_file)
test_df = pd.read_csv(test_path)
if 'text' in test_df.columns:
test_articles = list(test_df['text'])
else:
test_articles = list(test_df['trg_text'])
test_ids = list(test_df['id'])
print("Test articles:", len(test_articles))
# process test docs using the same DataPrep pipeline from training
testing_dataset = qt.transform(text_for_contextual=test_articles, text_for_bow=test_articles)
# get document-topic distribution
doc_topics = ctm.get_doc_topic_distribution(testing_dataset, n_samples=50)
print("doc_topics:", doc_topics.shape)
encdf = pd.DataFrame(doc_topics)
encdf['id'] = test_ids
topics_outfile = os.path.join(args.data_path, "combinedCTM_K" + str(args.num_topics) + "_" + args.sbert_model + ".csv")
encdf.to_csv(topics_outfile, index=False)
print("Saved topic distributions to", topics_outfile, "!")
# make topic distributions more sparse and normalise
#doc_topics[doc_topics < 1/num_topics] = 0
#doc_topics = doc_topics/doc_topics.sum(axis=1)[:, np.newaxis]
# compute JSD or cosine sim between topic distributions
test_pairs_file = "test_split_batch2.csv"
test_pairs_path = os.path.join(args.data_path, test_pairs_file)
test_pairs_df = pd.read_csv(test_pairs_path)
pair_id = list(test_pairs_df['pair_id'])
true_scores = list(test_pairs_df['Overall'])
cosine_pred_scores = []
jsd_pred_scores = []
for i in range(len(pair_id)):
id1 = int(pair_id[i].split("_")[0])
id2 = int(pair_id[i].split("_")[1])
if id1 in test_ids and id2 in test_ids:
topics1 = doc_topics[test_ids.index(id1)]
topics2 = doc_topics[test_ids.index(id2)]
jsd = compute_jsd(topics1, topics2)
cosine_dist = distance.cosine(topics1, topics2)
cosine_pred_scores.append(cosine_dist)
jsd_pred_scores.append(jsd)
else:
cosine_pred_scores.append(0.5)
jsd_pred_scores.append(0.5)
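# compute_jsd is imported from evaluate.py; a minimal sketch of the
# Jensen-Shannon divergence between two topic distributions, assuming that
# is what it computes (scipy's jensenshannon returns the JS *distance*,
# i.e. the square root of the divergence):
def _jsd_sketch(p, q):
    from scipy.spatial.distance import jensenshannon
    return jensenshannon(p, q) ** 2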
# get Pearson-corr between true similarity and predicted
# print("\ntrue_scores:", true_scores)
# print("\ncosine_pred_scores:", cosine_pred_scores)
# print("\njsd_pred_scores:", jsd_pred_scores)
print("\n--- cosine distance ---")
pearson_r, p_val = evaluate_scores(true_scores, cosine_pred_scores)
#print("\n--- JSD ---")
#pearson_r, p_val = evaluate_scores(true_scores, jsd_pred_scores)
|
[
"evaluate.evaluate_scores",
"evaluate.compute_jsd"
] |
[((531, 556), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (554, 556), False, 'import argparse\n'), ((2010, 2080), 'contextualized_topic_models.utils.preprocessing.WhiteSpacePreprocessing', 'WhiteSpacePreprocessing', ([], {'documents': 'documents_raw', 'vocabulary_size': '(5000)'}), '(documents=documents_raw, vocabulary_size=5000)\n', (2033, 2080), False, 'from contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing\n'), ((2368, 2411), 'contextualized_topic_models.utils.data_preparation.TopicModelDataPreparation', 'TopicModelDataPreparation', (['args.sbert_model'], {}), '(args.sbert_model)\n', (2393, 2411), False, 'from contextualized_topic_models.utils.data_preparation import TopicModelDataPreparation\n'), ((3012, 3055), 'os.path.join', 'os.path.join', (['args.data_path', 'test_art_file'], {}), '(args.data_path, test_art_file)\n', (3024, 3055), False, 'import os\n'), ((3067, 3089), 'pandas.read_csv', 'pd.read_csv', (['test_path'], {}), '(test_path)\n', (3078, 3089), True, 'import pandas as pd\n'), ((3613, 3637), 'pandas.DataFrame', 'pd.DataFrame', (['doc_topics'], {}), '(doc_topics)\n', (3625, 3637), True, 'import pandas as pd\n'), ((4160, 4205), 'os.path.join', 'os.path.join', (['args.data_path', 'test_pairs_file'], {}), '(args.data_path, test_pairs_file)\n', (4172, 4205), False, 'import os\n'), ((4224, 4252), 'pandas.read_csv', 'pd.read_csv', (['test_pairs_path'], {}), '(test_pairs_path)\n', (4235, 4252), True, 'import pandas as pd\n'), ((5158, 5206), 'evaluate.evaluate_scores', 'evaluate_scores', (['true_scores', 'cosine_pred_scores'], {}), '(true_scores, cosine_pred_scores)\n', (5173, 5206), False, 'from evaluate import evaluate_scores, compute_jsd, compute_kld2\n'), ((1581, 1629), 'os.path.join', 'os.path.join', (['args.data_path', 'args.articles_file'], {}), '(args.data_path, args.articles_file)\n', (1593, 1629), False, 'import os\n'), ((4652, 4681), 'evaluate.compute_jsd', 'compute_jsd', (['topics1', 'topics2'], {}), '(topics1, topics2)\n', (4663, 4681), False, 'from evaluate import evaluate_scores, compute_jsd, compute_kld2\n'), ((4704, 4737), 'scipy.spatial.distance.cosine', 'distance.cosine', (['topics1', 'topics2'], {}), '(topics1, topics2)\n', (4719, 4737), False, 'from scipy.spatial import distance\n')]
|
# import dependencies
import pickle
import matplotlib as mpl
mpl.use('TKAgg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import tqdm
import visdom
import evaluate
# define function
def train(model, train_loader, train_size, val_loader, val_size, criterion,
optimizer, scheduler, epochs, device, model_path, checkpoint,
          hist_path, resume, use_visdom, environment, matplotlib, pbar_file):
'''train model'''
# setup loss and accuracy visualization
    if use_visdom:
viz = visdom.Visdom()
loss_plot, acc_plot = None, None
else:
train_loss, val_loss, train_acc, val_acc = {}, {}, {}, {}
# load checkpoints, if training to be resumed
if resume:
        checkpoint_dict = torch.load(checkpoint)
        # restore the model/optimizer/scheduler state saved alongside the counters
        model.load_state_dict(checkpoint_dict['model_state_dict'])
        optimizer.load_state_dict(checkpoint_dict['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint_dict['scheduler_state_dict'])
        best_val_loss = checkpoint_dict['best_val_loss']
        no_improvement = checkpoint_dict['no_improvement']
        start_epoch = checkpoint_dict['epoch']
        if not use_visdom:
train_loss = checkpoint_dict['train_loss']
val_loss = checkpoint_dict['val_loss']
train_acc = checkpoint_dict['train_acc']
val_acc = checkpoint_dict['val_acc']
else:
best_val_loss = float('inf')
no_improvement = 0
start_epoch = 1
# train
model.train()
for epoch in range(start_epoch, epochs + 1):
# save checkpoint
torch.save(
{
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'best_val_loss': best_val_loss,
'no_improvement': no_improvement,
                'train_loss': train_loss if not use_visdom else None,
                'val_loss': val_loss if not use_visdom else None,
                'train_acc': train_acc if not use_visdom else None,
                'val_acc': val_acc if not use_visdom else None
}, checkpoint)
# setup progress bar
desc = "ITERATION - loss: {:.2f}"
pbar = tqdm.tqdm(desc=desc.format(0),
total=len(train_loader),
leave=False,
file=pbar_file,
initial=0)
tqdm.tqdm.write('epoch {} of {}'.format(epoch, epochs), file=pbar_file)
# iterate
batch_idx = 0
running_loss = 0
correct = 0
for batch_idx, data in enumerate(train_loader):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
# optimize and save stats
optimizer.zero_grad()
outputs = model(inputs)
preds = torch.argmax(outputs, dim=1)
correct += torch.sum(preds == labels).item()
loss = criterion(outputs, labels)
running_loss += loss.item()
loss.backward()
optimizer.step()
# evaluate model performance and invoke learning rate scheduler
if batch_idx == (len(train_loader) - 1):
avg_val_loss, val_correct = evaluate.evaluate(
model, val_loader, val_size, criterion, device, False,
pbar_file)
scheduler.step(metrics=avg_val_loss)
# update progress bar
pbar.desc = desc.format(loss.item())
pbar.update()
# print epoch end losses and accuracies
        tqdm.tqdm.write('training loss: {:.4f}, val loss: {:.4f}'.format(
            running_loss / (batch_idx + 1), avg_val_loss), file=pbar_file)
        tqdm.tqdm.write('training acc: {:.2f}%, val acc: {:.2f}%\n'.format(
            (correct * 100) / train_size, (val_correct * 100) / val_size),
            file=pbar_file)
# close progress bar
pbar.close()
# plot loss history
        if use_visdom:
if not loss_plot:
loss_plot = viz.line(X=np.array([epoch]),
Y=np.array(
[running_loss / (batch_idx + 1)]),
env=environment,
opts=dict(legend=['train', 'val'],
title='loss hist',
xlabel='epochs',
ylabel='loss'))
else:
viz.line(X=np.array([epoch]),
Y=np.array([running_loss / (batch_idx + 1)]),
env=environment,
win=loss_plot,
name='train',
update='append')
if not acc_plot:
acc_plot = viz.line(X=np.array([epoch]),
Y=np.array([(correct * 100) / train_size]),
env=environment,
opts=dict(legend=['train', 'val'],
title='acc hist',
xlabel='epochs',
ylabel='acc'))
else:
viz.line(X=np.array([epoch]),
Y=np.array([(correct * 100) / train_size]),
env=environment,
win=acc_plot,
name='train',
update='append')
else:
train_loss[epoch] = running_loss / (batch_idx + 1)
val_loss[epoch] = avg_val_loss
train_acc[epoch] = (correct * 100) / train_size
val_acc[epoch] = (val_correct * 100) / val_size
# save model and apply early stopping
if avg_val_loss < best_val_loss:
torch.save(model.state_dict(), model_path)
best_val_loss = avg_val_loss
no_improvement = 0
else:
no_improvement += 1
if no_improvement == 5:
print('applying early stopping')
break
# save training history
    if not use_visdom:
hist = {
'train_loss': train_loss,
'val_loss': val_loss,
'train_acc': train_acc,
'val_acc': val_acc
}
with open(hist_path, 'wb') as hist_file:
pickle.dump(hist, hist_file)
# visualize losses and accuracies
    if not use_visdom and matplotlib:
for subplot in ['loss', 'acc']:
if subplot == 'loss':
plt.subplot(1, 2, 1)
else:
plt.subplot(1, 2, 2)
plt.title(subplot)
            plt.xlabel('epochs')
            plt.ylabel('loss' if subplot == 'loss' else 'acc')
train_plot, = plt.plot(train_loss.values() if subplot == 'loss'
else train_acc.values(),
label='train')
val_plot, = plt.plot(
val_loss.values() if subplot == 'loss' else val_acc.values(),
label='val')
plt.legend(handles=[train_plot, val_plot], loc='best')
plt.tight_layout()
plt.show()
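# A minimal usage sketch (hypothetical model/loader names; the scheduler must
# accept scheduler.step(metrics=...), e.g. torch.optim.lr_scheduler.ReduceLROnPlateau):
#
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
#   train(model, train_loader, train_size, val_loader, val_size,
#         torch.nn.CrossEntropyLoss(), optimizer, scheduler, epochs=50,
#         device='cuda', model_path='best.pt', checkpoint='ckpt.pt',
#         hist_path='hist.pkl', resume=False, use_visdom=False,
#         environment='main', matplotlib=True, pbar_file=sys.stderr)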
|
[
"evaluate.evaluate"
] |
[((62, 78), 'matplotlib.use', 'mpl.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (69, 78), True, 'import matplotlib as mpl\n'), ((523, 538), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (536, 538), False, 'import visdom\n'), ((748, 770), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (758, 770), False, 'import torch\n'), ((7193, 7211), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7209, 7211), True, 'import matplotlib.pyplot as plt\n'), ((7220, 7230), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7228, 7230), True, 'import matplotlib.pyplot as plt\n'), ((2746, 2774), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (2758, 2774), False, 'import torch\n'), ((6391, 6419), 'pickle.dump', 'pickle.dump', (['hist', 'hist_file'], {}), '(hist, hist_file)\n', (6402, 6419), False, 'import pickle\n'), ((6672, 6690), 'matplotlib.pyplot.title', 'plt.title', (['subplot'], {}), '(subplot)\n', (6681, 6690), True, 'import matplotlib.pyplot as plt\n'), ((7129, 7183), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[train_plot, val_plot]', 'loc': '"""best"""'}), "(handles=[train_plot, val_plot], loc='best')\n", (7139, 7183), True, 'import matplotlib.pyplot as plt\n'), ((3151, 3238), 'evaluate.evaluate', 'evaluate.evaluate', (['model', 'val_loader', 'val_size', 'criterion', 'device', '(False)', 'pbar_file'], {}), '(model, val_loader, val_size, criterion, device, False,\n pbar_file)\n', (3168, 3238), False, 'import evaluate\n'), ((6583, 6603), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6594, 6603), True, 'import matplotlib.pyplot as plt\n'), ((6638, 6658), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6649, 6658), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2824), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (2807, 2824), False, 'import torch\n'), ((3984, 4001), 'numpy.array', 'np.array', (['[epoch]'], {}), '([epoch])\n', (3992, 4001), True, 'import numpy as np\n'), ((4042, 4084), 'numpy.array', 'np.array', (['[running_loss / (batch_idx + 1)]'], {}), '([running_loss / (batch_idx + 1)])\n', (4050, 4084), True, 'import numpy as np\n'), ((4492, 4509), 'numpy.array', 'np.array', (['[epoch]'], {}), '([epoch])\n', (4500, 4509), True, 'import numpy as np\n'), ((4538, 4580), 'numpy.array', 'np.array', (['[running_loss / (batch_idx + 1)]'], {}), '([running_loss / (batch_idx + 1)])\n', (4546, 4580), True, 'import numpy as np\n'), ((4813, 4830), 'numpy.array', 'np.array', (['[epoch]'], {}), '([epoch])\n', (4821, 4830), True, 'import numpy as np\n'), ((4870, 4908), 'numpy.array', 'np.array', (['[correct * 100 / train_size]'], {}), '([correct * 100 / train_size])\n', (4878, 4908), True, 'import numpy as np\n'), ((5269, 5286), 'numpy.array', 'np.array', (['[epoch]'], {}), '([epoch])\n', (5277, 5286), True, 'import numpy as np\n'), ((5315, 5353), 'numpy.array', 'np.array', (['[correct * 100 / train_size]'], {}), '([correct * 100 / train_size])\n', (5323, 5353), True, 'import numpy as np\n')]
|
from typing import Dict, List, Any
import chess
import sys
import time
from evaluate import evaluate_board, move_value, check_end_game
debug_info: Dict[str, Any] = {}
def next_move(depth: int, board: chess.Board, debug=True) -> chess.Move:
"""
What is the next best move?
"""
debug_info.clear()
debug_info["nodes"] = 0
t0 = time.time()
move = minimax_root(depth, board)
debug_info["time"] = time.time() - t0
    if debug:
print(f">>> {debug_info}", file=sys.stderr)
return move
def get_ordered_moves(board: chess.Board) -> List[chess.Move]:
"""
Get legal moves.
Attempt to sort moves by best to worst.
Use piece values (and positional gains/losses) to weight captures.
"""
end_game = check_end_game(board)
def orderer(move):
return move_value(board, move, end_game)
in_order = sorted(
board.legal_moves, key=orderer, reverse=(board.turn == chess.WHITE)
)
return list(in_order)
def minimax_root(depth: int, board: chess.Board) -> chess.Move:
# White always wants to maximize (and black to minimize)
# the board score according to evaluate_board()
maximize = board.turn == chess.WHITE
best_move = -float("inf")
if not maximize:
best_move = float("inf")
moves = get_ordered_moves(board)
best_move_found = moves[0]
for move in moves:
board.push(move)
# Checking if draw can be claimed at this level, because the threefold repetition check
# can be expensive. This should help the bot avoid a draw if it's not favorable
# https://python-chess.readthedocs.io/en/latest/core.html#chess.Board.can_claim_draw
if board.can_claim_draw():
value = 0.0
else:
value = minimax(depth - 1, board, -float("inf"), float("inf"), not maximize)
board.pop()
if maximize and value >= best_move:
best_move = value
best_move_found = move
elif not maximize and value <= best_move:
best_move = value
best_move_found = move
return best_move_found
def minimax(
depth: int,
board: chess.Board,
alpha: float,
beta: float,
is_maximising_player: bool,
) -> float:
debug_info["nodes"] += 1
if board.is_checkmate():
# The previous move resulted in checkmate
return -float("inf") if is_maximising_player else float("inf")
# When the game is over and it's not a checkmate it's a draw
# In this case, don't evaluate. Just return a neutral result: zero
elif board.is_game_over():
return 0
if depth == 0:
return evaluate_board(board)
if is_maximising_player:
best_move = -float("inf")
moves = get_ordered_moves(board)
for move in moves:
board.push(move)
best_move = max(
best_move,
minimax(depth - 1, board, alpha, beta, not is_maximising_player),
)
board.pop()
alpha = max(alpha, best_move)
if beta <= alpha:
return best_move
return best_move
else:
best_move = float("inf")
moves = get_ordered_moves(board)
for move in moves:
board.push(move)
best_move = min(
best_move,
minimax(depth - 1, board, alpha, beta, not is_maximising_player),
)
board.pop()
beta = min(beta, best_move)
if beta <= alpha:
return best_move
return best_move
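# Usage sketch (hypothetical search depth; python-chess objects):
#
#   board = chess.Board()
#   move = next_move(3, board)
#   board.push(move)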
|
[
"evaluate.evaluate_board",
"evaluate.move_value",
"evaluate.check_end_game"
] |
[((351, 362), 'time.time', 'time.time', ([], {}), '()\n', (360, 362), False, 'import time\n'), ((767, 788), 'evaluate.check_end_game', 'check_end_game', (['board'], {}), '(board)\n', (781, 788), False, 'from evaluate import evaluate_board, move_value, check_end_game\n'), ((428, 439), 'time.time', 'time.time', ([], {}), '()\n', (437, 439), False, 'import time\n'), ((828, 861), 'evaluate.move_value', 'move_value', (['board', 'move', 'end_game'], {}), '(board, move, end_game)\n', (838, 861), False, 'from evaluate import evaluate_board, move_value, check_end_game\n'), ((2660, 2681), 'evaluate.evaluate_board', 'evaluate_board', (['board'], {}), '(board)\n', (2674, 2681), False, 'from evaluate import evaluate_board, move_value, check_end_game\n')]
|
# ================== Step 1: write the files into raw_path
import argparse
# https://www.openslr.org/resources/93/data_aishell3.tgz  (downloads very quickly via Thunder/Xunlei.)
import yaml
from preprocessor import ljspeech, aishell3, libritts
def main(config):
if "LJSpeech" in config["dataset"]:
ljspeech.prepare_align(config)
if "AISHELL3" in config["dataset"]:
aishell3.prepare_align(config)
if "LibriTTS" in config["dataset"]:
libritts.prepare_align(config)
if 0:
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.config='config/AISHELL3/preprocess.yaml'
config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
config['path']['lexicon_path'] = 'lexicon/madarin_lexicon.txt'
config['path']['corpus_path']='AISHELL-3-Sample'
config['path']['raw_path']='raw_path/AISHELL-3-Sample'
# config['path']['lexicon_path']='AISHELL-3-Sample/train/label_train-set.txt'
config['dataset']='AISHELL3'
main(config)
#===================== All files have now been written into raw_path.
#============ Step 2: run the Montreal Forced Aligner (MFA)
# https://www.bilibili.com/read/cv7351673/
# ./montreal-forced-aligner/bin/mfa_align raw_data/LJSpeech/ lexicon/librispeech-lexicon.txt english preprocessed_data/LJSpeech
# run in cmd
# ./montreal-forced-aligner/bin/mfa_align raw_path/AISHELL-3-Sample/ lexicon/pinyin-lexicon-r.txt english preprocessed_data/LJSpeech
# https://montreal-forced-aligner.readthedocs.io/en/latest/pretrained_models.html
#======== If there is a Chinese (Mandarin) model, use it ========== we can download one.
#===================== Currently using this one ...........
# E:\Users\Administrator\PycharmProjects\fairseq-gec\FastSpeech2\montreal-forced-aligner\bin\mfa_align.exe raw_path/AISHELL-3-Sample/ lexicon/madarin_lexicon.txt mandarin.zip preprocessed_data/atshell/TextGrid
#===================== Other resources.
# Lexicon download: https://github.com/Jackiexiao/MTTS/blob/master/misc/mandarin-for-montreal-forced-aligner-pre-trained-model.lexicon
#===== Buggy; train your own instead: E:\Users\Administrator\PycharmProjects\fairseq-gec\FastSpeech2\montreal-forced-aligner\bin\mfa_train_and_align.exe raw_path/AISHELL-3-Sample/ lexicon/madarin_lexicon.txt preprocessed_data/atshell
#===== Buggy; train your own instead: E:\Users\Administrator\PycharmProjects\fairseq-gec\FastSpeech2\montreal-forced-aligner\bin\mfa_align.exe raw_path/AISHELL-3-Sample/SSB0009 lexicon/madarin_lexicon.txt mandarin_pinyin_g2p.zip test1
#========== Step 3: python3 preprocess.py config/LJSpeech/preprocess.yaml
import argparse
import yaml
from preprocessor.preprocessor import Preprocessor
if 1:
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument("config", type=str, help="path to preprocess.yaml")
args = parser.parse_args()
args.config='config/AISHELL3/preprocess.yaml'
config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
config['path']['corpus_path']='AISHELL-3-Sample'
config['path']['raw_path']='raw_path/AISHELL-3-Sample'
config['path']['preprocessed_path']='preprocessed_data/atshell'
config['path']['lexicon_path']='lexicon/madarin_lexicon.txt'
config["preprocessing"]["val_size"]=0 # 不要测试数据.
# config['path']['lexicon_path']='AISHELL-3-Sample/train/label_train-set.txt'
config['dataset']='AISHELL3'
preprocessor = Preprocessor(config)
preprocessor.build_from_path()
#========================= Final step ...... training!
import argparse
import os
import torch
import yaml
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.model import get_model, get_vocoder, get_param_num
from utils.tools import to_device, log, synth_one_sample
from model import FastSpeech2Loss
from dataset import Dataset
from evaluate import evaluate
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args, configs):
print("Prepare training ...")
preprocess_config, model_config, train_config = configs
# Get dataset
dataset = Dataset(
"train.txt", preprocess_config, train_config, sort=True, drop_last=True
)
batch_size = train_config["optimizer"]["batch_size"]
group_size = 1 # Set this larger than 1 to enable sorting in Dataset
assert batch_size * group_size < len(dataset)
loader = DataLoader(
dataset,
batch_size=batch_size * group_size,
shuffle=True,
collate_fn=dataset.collate_fn,
)
# Prepare model
model, optimizer = get_model(args, configs, device, train=True)
model = nn.DataParallel(model)
num_param = get_param_num(model)
Loss = FastSpeech2Loss(preprocess_config, model_config).to(device)
print("Number of FastSpeech2 Parameters:", num_param)
# Load vocoder
vocoder = get_vocoder(model_config, device)
# Init logger
for p in train_config["path"].values():
os.makedirs(p, exist_ok=True)
train_log_path = os.path.join(train_config["path"]["log_path"], "train")
val_log_path = os.path.join(train_config["path"]["log_path"], "val")
os.makedirs(train_log_path, exist_ok=True)
os.makedirs(val_log_path, exist_ok=True)
train_logger = SummaryWriter(train_log_path)
val_logger = SummaryWriter(val_log_path)
# Training
step = args.restore_step + 1
epoch = 1
grad_acc_step = train_config["optimizer"]["grad_acc_step"]
grad_clip_thresh = train_config["optimizer"]["grad_clip_thresh"]
total_step = train_config["step"]["total_step"]
log_step = train_config["step"]["log_step"]
save_step = train_config["step"]["save_step"]
synth_step = train_config["step"]["synth_step"]
val_step = train_config["step"]["val_step"]
outer_bar = tqdm(total=total_step, desc="Training", position=0)
outer_bar.n = args.restore_step
outer_bar.update()
while True:
inner_bar = tqdm(total=len(loader), desc="Epoch {}".format(epoch), position=1)
for batchs in loader:
for batch in batchs:
batch = to_device(batch, device)
'''
sample = {
                    ids,          # audio file names
                    raw_texts,    # the pinyin transcripts
speakers,
texts,
text_lens,
max(text_lens),
mels,
mel_lens,
max(mel_lens),
pitches,
energies,
durations,
}
'''
# Forward
output = model(*(batch[2:]))
# Cal Loss
losses = Loss(batch, output)
total_loss = losses[0]
# Backward
total_loss = total_loss / grad_acc_step
total_loss.backward()
if step % grad_acc_step == 0:
# Clipping gradients to avoid gradient explosion
nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
                    # optimizer.step() is what actually updates the model, while scheduler.step() only adjusts the learning rate. If gradients were not zeroed, the grad used here would still carry contributions from the previous mini-batch, which is not what we want. Looking back: the optimizer updates the parameter space from the backward gradients, so optimizer.step() belongs with loss.backward().
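                    # Canonical single-update pattern, for reference (illustrative only):
                    #   optimizer.zero_grad()
                    #   loss.backward()
                    #   optimizer.step()
                    # Here gradients are instead accumulated over grad_acc_step
                    # mini-batches before each update, so zero_grad() follows the step.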
# Update weights
optimizer.step_and_update_lr()
optimizer.zero_grad()
if step % log_step == 0:
losses = [l.item() for l in losses]
message1 = "Step {}/{}, ".format(step, total_step)
message2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f}, Pitch Loss: {:.4f}, Energy Loss: {:.4f}, Duration Loss: {:.4f}".format(
*losses
)
with open(os.path.join(train_log_path, "log.txt"), "a") as f:
f.write(message1 + message2 + "\n")
outer_bar.write(message1 + message2)
log(train_logger, step, losses=losses)
if step % synth_step == 0:
fig, wav_reconstruction, wav_prediction, tag = synth_one_sample(
batch,
output,
vocoder,
model_config,
preprocess_config,
)
log(
train_logger,
fig=fig,
tag="Training/step_{}_{}".format(step, tag),
)
sampling_rate = preprocess_config["preprocessing"]["audio"][
"sampling_rate"
]
log(
train_logger,
audio=wav_reconstruction,
sampling_rate=sampling_rate,
tag="Training/step_{}_{}_reconstructed".format(step, tag),
)
log(
train_logger,
audio=wav_prediction,
sampling_rate=sampling_rate,
tag="Training/step_{}_{}_synthesized".format(step, tag),
)
if step % val_step == 0:
model.eval()
message = evaluate(model, step, configs, val_logger, vocoder)
with open(os.path.join(val_log_path, "log.txt"), "a") as f:
f.write(message + "\n")
outer_bar.write(message)
model.train()
if step % save_step == 0:
torch.save(
{
"model": model.module.state_dict(),
"optimizer": optimizer._optimizer.state_dict(),
},
os.path.join(
train_config["path"]["ckpt_path"],
"{}.pth.tar".format(step),
),
)
if step == total_step:
quit()
step += 1
outer_bar.update(1)
inner_bar.update(1)
epoch += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--restore_step", type=int, default=0)
parser.add_argument(
"-p",
"--preprocess_config",
type=str,
required=False,
help="path to preprocess.yaml",
)
parser.add_argument(
"-m", "--model_config", type=str, required=False, help="path to model.yaml"
)
parser.add_argument(
"-t", "--train_config", type=str, required=False, help="path to train.yaml"
)
args = parser.parse_args()
args.preprocess_config='config/AISHELL3/preprocess.yaml'
config = yaml.load(open(args.preprocess_config, "r"), Loader=yaml.FullLoader)
config['path']['corpus_path']='AISHELL-3-Sample'
config['path']['raw_path']='raw_path/AISHELL-3-Sample'
config['path']['preprocessed_path']='preprocessed_data/atshell'
# config['path']['lexicon_path']='AISHELL-3-Sample/train/label_train-set.txt'
config['dataset']='AISHELL3'
config['path']['lexicon_path'] = 'lexicon/madarin_lexicon.txt'
    args.m = 'config/AISHELL3/model.yaml'
    args.t = 'config/AISHELL3/train.yaml'
preprocess_config=config
# Read Config
# preprocess_config = yaml.load(
# open(args.preprocess_config, "r"), Loader=yaml.FullLoader
# )
model_config = yaml.load(open(args.m, "r"), Loader=yaml.FullLoader)
train_config = yaml.load(open(args.t, "r"), Loader=yaml.FullLoader)
    train_config['optimizer']['batch_size'] = 2  # the dataset is small, so use a small batch size.
configs = (preprocess_config, model_config, train_config)
main(args, configs)
|
[
"evaluate.evaluate"
] |
[((4122, 4207), 'dataset.Dataset', 'Dataset', (['"""train.txt"""', 'preprocess_config', 'train_config'], {'sort': '(True)', 'drop_last': '(True)'}), "('train.txt', preprocess_config, train_config, sort=True, drop_last=True\n )\n", (4129, 4207), False, 'from dataset import Dataset\n'), ((4411, 4515), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(batch_size * group_size)', 'shuffle': '(True)', 'collate_fn': 'dataset.collate_fn'}), '(dataset, batch_size=batch_size * group_size, shuffle=True,\n collate_fn=dataset.collate_fn)\n', (4421, 4515), False, 'from torch.utils.data import DataLoader\n'), ((4595, 4639), 'utils.model.get_model', 'get_model', (['args', 'configs', 'device'], {'train': '(True)'}), '(args, configs, device, train=True)\n', (4604, 4639), False, 'from utils.model import get_model, get_vocoder, get_param_num\n'), ((4652, 4674), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (4667, 4674), True, 'import torch.nn as nn\n'), ((4691, 4711), 'utils.model.get_param_num', 'get_param_num', (['model'], {}), '(model)\n', (4704, 4711), False, 'from utils.model import get_model, get_vocoder, get_param_num\n'), ((4875, 4908), 'utils.model.get_vocoder', 'get_vocoder', (['model_config', 'device'], {}), '(model_config, device)\n', (4886, 4908), False, 'from utils.model import get_model, get_vocoder, get_param_num\n'), ((5031, 5086), 'os.path.join', 'os.path.join', (["train_config['path']['log_path']", '"""train"""'], {}), "(train_config['path']['log_path'], 'train')\n", (5043, 5086), False, 'import os\n'), ((5106, 5159), 'os.path.join', 'os.path.join', (["train_config['path']['log_path']", '"""val"""'], {}), "(train_config['path']['log_path'], 'val')\n", (5118, 5159), False, 'import os\n'), ((5164, 5206), 'os.makedirs', 'os.makedirs', (['train_log_path'], {'exist_ok': '(True)'}), '(train_log_path, exist_ok=True)\n', (5175, 5206), False, 'import os\n'), ((5211, 5251), 'os.makedirs', 'os.makedirs', (['val_log_path'], {'exist_ok': '(True)'}), '(val_log_path, exist_ok=True)\n', (5222, 5251), False, 'import os\n'), ((5271, 5300), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['train_log_path'], {}), '(train_log_path)\n', (5284, 5300), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5318, 5345), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['val_log_path'], {}), '(val_log_path)\n', (5331, 5345), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5808, 5859), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_step', 'desc': '"""Training"""', 'position': '(0)'}), "(total=total_step, desc='Training', position=0)\n", (5812, 5859), False, 'from tqdm import tqdm\n'), ((10220, 10245), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10243, 10245), False, 'import argparse\n'), ((271, 301), 'preprocessor.ljspeech.prepare_align', 'ljspeech.prepare_align', (['config'], {}), '(config)\n', (293, 301), False, 'from preprocessor import ljspeech, aishell3, libritts\n'), ((350, 380), 'preprocessor.aishell3.prepare_align', 'aishell3.prepare_align', (['config'], {}), '(config)\n', (372, 380), False, 'from preprocessor import ljspeech, aishell3, libritts\n'), ((429, 459), 'preprocessor.libritts.prepare_align', 'libritts.prepare_align', (['config'], {}), '(config)\n', (451, 459), False, 'from preprocessor import ljspeech, aishell3, libritts\n'), ((515, 540), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (538, 540), False, 'import argparse\n'), ((2632, 2657), 
'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2655, 2657), False, 'import argparse\n'), ((3370, 3390), 'preprocessor.preprocessor.Preprocessor', 'Preprocessor', (['config'], {}), '(config)\n', (3382, 3390), False, 'from preprocessor.preprocessor import Preprocessor\n'), ((3929, 3954), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3952, 3954), False, 'import torch\n'), ((4980, 5009), 'os.makedirs', 'os.makedirs', (['p'], {'exist_ok': '(True)'}), '(p, exist_ok=True)\n', (4991, 5009), False, 'import os\n'), ((4723, 4771), 'model.FastSpeech2Loss', 'FastSpeech2Loss', (['preprocess_config', 'model_config'], {}), '(preprocess_config, model_config)\n', (4738, 4771), False, 'from model import FastSpeech2Loss\n'), ((6110, 6134), 'utils.tools.to_device', 'to_device', (['batch', 'device'], {}), '(batch, device)\n', (6119, 6134), False, 'from utils.tools import to_device, log, synth_one_sample\n'), ((7926, 7964), 'utils.tools.log', 'log', (['train_logger', 'step'], {'losses': 'losses'}), '(train_logger, step, losses=losses)\n', (7929, 7964), False, 'from utils.tools import to_device, log, synth_one_sample\n'), ((8076, 8149), 'utils.tools.synth_one_sample', 'synth_one_sample', (['batch', 'output', 'vocoder', 'model_config', 'preprocess_config'], {}), '(batch, output, vocoder, model_config, preprocess_config)\n', (8092, 8149), False, 'from utils.tools import to_device, log, synth_one_sample\n'), ((9264, 9315), 'evaluate.evaluate', 'evaluate', (['model', 'step', 'configs', 'val_logger', 'vocoder'], {}), '(model, step, configs, val_logger, vocoder)\n', (9272, 9315), False, 'from evaluate import evaluate\n'), ((7735, 7774), 'os.path.join', 'os.path.join', (['train_log_path', '"""log.txt"""'], {}), "(train_log_path, 'log.txt')\n", (7747, 7774), False, 'import os\n'), ((9346, 9383), 'os.path.join', 'os.path.join', (['val_log_path', '"""log.txt"""'], {}), "(val_log_path, 'log.txt')\n", (9358, 9383), False, 'import os\n')]
|
import matplotlib
matplotlib.use('Agg')
import train
import dataset as ds
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from entity_lstm import EntityLSTM
import utils
import os
import conll_to_brat
import glob
import codecs
import shutil
import time
import copy
import evaluate
import random
import pickle
import brat_to_conll
import numpy as np
import utils_nlp
import distutils.util
import configparser
from pprint import pprint
# http://stackoverflow.com/questions/42217532/tensorflow-version-1-0-0-rc2-on-windows-opkernel-op-bestsplits-device-typ
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print('NeuroNER version: {0}'.format('1.0-dev'))
print('TensorFlow version: {0}'.format(tf.__version__))
import warnings
warnings.filterwarnings('ignore')
class NeuroNER(object):
argument_default_value = 'argument_default_dummy_value_please_ignore_d41d8cd98f00b204e9800998ecf8427e'
prediction_count = 0
def _create_stats_graph_folder(self, parameters):
# Initialize stats_graph_folder
experiment_timestamp = utils.get_current_time_in_miliseconds()
dataset_name = utils.get_basename_without_extension(parameters['dataset_text_folder'])
model_name = '{0}_{1}'.format(dataset_name, experiment_timestamp)
utils.create_folder_if_not_exists(parameters['output_folder'])
stats_graph_folder = os.path.join(parameters['output_folder'], model_name) # Folder where to save graphs
utils.create_folder_if_not_exists(stats_graph_folder)
return stats_graph_folder, experiment_timestamp
def _load_parameters(self, parameters_filepath, arguments={}, verbose=True):
'''
Load parameters from the ini file if specified, take into account any command line argument, and ensure that each parameter is cast to the correct type.
Command line arguments take precedence over parameters specified in the parameter file.
'''
parameters = {'pretrained_model_folder':'../trained_models/PharmaCoNERModel',
'dataset_text_folder':'../data/PharmaCoNERCorpus',
'character_embedding_dimension':25,
'character_lstm_hidden_state_dimension':25,
'check_for_digits_replaced_with_zeros':True,
'check_for_lowercase':True,
'debug':False,
'dropout_rate':0.5,
'experiment_name':'test',
'freeze_token_embeddings':False,
'gradient_clipping_value':5.0,
'learning_rate':0.005,
'load_only_pretrained_token_embeddings':False,
'load_all_pretrained_token_embeddings':False,
'main_evaluation_mode':'conll',
'maximum_number_of_epochs':100,
'number_of_cpu_threads':8,
'number_of_gpus':0,
'optimizer':'sgd',
'output_folder':'../output',
'patience':10,
'plot_format':'pdf',
'reload_character_embeddings':True,
'reload_character_lstm':True,
'reload_crf':True,
'reload_feedforward':True,
'reload_token_embeddings':True,
'reload_token_lstm':True,
'remap_unknown_tokens_to_unk':True,
'spacylanguage':'en',
'tagging_format':'bioes',
'token_embedding_dimension':100,
'token_lstm_hidden_state_dimension':100,
'token_pretrained_embedding_filepath':'../data/word_vectors/glove-sbwc.i25.vec',
'tokenizer':'spacy',
'train_model':True,
'use_character_lstm':True,
'use_crf':True,
'use_pretrained_model':False,
'verbose':False,
'use_pos': False,
'freeze_pos': False,
'use_gaz': False,
'freeze_gaz': False,
'gaz_filepath': '../data/gazetteers/gazetteer.txt',
'use_aff': False,
'freeze_aff': False,
'aff_filepath': '../data/affixes/affixes.tsv'}
# If a parameter file is specified, load it
if len(parameters_filepath) > 0:
conf_parameters = configparser.ConfigParser()
conf_parameters.read(parameters_filepath)
nested_parameters = utils.convert_configparser_to_dictionary(conf_parameters)
for k,v in nested_parameters.items():
parameters.update(v)
# Ensure that any arguments the specified in the command line overwrite parameters specified in the parameter file
for k,v in arguments.items():
if arguments[k] != arguments['argument_default_value']:
parameters[k] = v
for k,v in parameters.items():
v = str(v)
# If the value is a list delimited with a comma, choose one element at random.
if ',' in v:
v = random.choice(v.split(','))
parameters[k] = v
# Ensure that each parameter is cast to the correct type
if k in ['character_embedding_dimension','character_lstm_hidden_state_dimension','token_embedding_dimension',
'token_lstm_hidden_state_dimension','patience','maximum_number_of_epochs','maximum_training_time','number_of_cpu_threads','number_of_gpus']:
parameters[k] = int(v)
elif k in ['dropout_rate', 'learning_rate', 'gradient_clipping_value']:
parameters[k] = float(v)
elif k in ['remap_unknown_tokens_to_unk', 'use_character_lstm', 'use_crf', 'train_model', 'use_pretrained_model', 'debug', 'verbose',
'reload_character_embeddings', 'reload_character_lstm', 'reload_token_embeddings', 'reload_token_lstm', 'reload_feedforward', 'reload_crf',
'check_for_lowercase', 'check_for_digits_replaced_with_zeros', 'freeze_token_embeddings', 'load_only_pretrained_token_embeddings', 'load_all_pretrained_token_embeddings','use_pos', 'freeze_pos', 'use_gaz', 'freeze_gaz','use_aff','freeze_aff']:
parameters[k] = distutils.util.strtobool(v)
# If loading pretrained model, set the model hyperparameters according to the pretraining parameters
if parameters['use_pretrained_model']:
pretraining_parameters = self._load_parameters(parameters_filepath=os.path.join(parameters['pretrained_model_folder'], 'parameters.ini'), verbose=False)[0]
for name in ['use_character_lstm', 'character_embedding_dimension', 'character_lstm_hidden_state_dimension', 'token_embedding_dimension', 'token_lstm_hidden_state_dimension', 'use_crf', 'use_pos', 'use_gaz','use_aff']:
if parameters[name] != pretraining_parameters[name]:
print('WARNING: parameter {0} was overwritten from {1} to {2} to be consistent with the pretrained model'.format(name, parameters[name], pretraining_parameters[name]))
parameters[name] = pretraining_parameters[name]
if verbose: pprint(parameters)
# Update conf_parameters to reflect final parameter values
conf_parameters = configparser.ConfigParser()
conf_parameters.read(os.path.join('test', 'test-parameters-training.ini'))
parameter_to_section = utils.get_parameter_to_section_of_configparser(conf_parameters)
for k, v in parameters.items():
conf_parameters.set(parameter_to_section[k], k, str(v))
return parameters, conf_parameters
def _get_valid_dataset_filepaths(self, parameters, dataset_types=['train', 'valid', 'test', 'deploy']):
dataset_filepaths = {}
dataset_brat_folders = {}
for dataset_type in dataset_types:
dataset_filepaths[dataset_type] = os.path.join(parameters['dataset_text_folder'], '{0}.txt'.format(dataset_type))
dataset_brat_folders[dataset_type] = os.path.join(parameters['dataset_text_folder'], dataset_type)
dataset_compatible_with_brat_filepath = os.path.join(parameters['dataset_text_folder'], '{0}_compatible_with_brat.txt'.format(dataset_type))
# Conll file exists
if os.path.isfile(dataset_filepaths[dataset_type]) and os.path.getsize(dataset_filepaths[dataset_type]) > 0:
# Brat text files exist
if os.path.exists(dataset_brat_folders[dataset_type]) and len(glob.glob(os.path.join(dataset_brat_folders[dataset_type], '*.txt'))) > 0:
# Check compatibility between conll and brat files
brat_to_conll.check_brat_annotation_and_text_compatibility(dataset_brat_folders[dataset_type])
if os.path.exists(dataset_compatible_with_brat_filepath):
dataset_filepaths[dataset_type] = dataset_compatible_with_brat_filepath
conll_to_brat.check_compatibility_between_conll_and_brat_text(dataset_filepaths[dataset_type], dataset_brat_folders[dataset_type])
# Brat text files do not exist
else:
# Populate brat text and annotation files based on conll file
conll_to_brat.conll_to_brat(dataset_filepaths[dataset_type], dataset_compatible_with_brat_filepath, dataset_brat_folders[dataset_type], dataset_brat_folders[dataset_type])
dataset_filepaths[dataset_type] = dataset_compatible_with_brat_filepath
# Conll file does not exist
else:
# Brat text files exist
if os.path.exists(dataset_brat_folders[dataset_type]) and len(glob.glob(os.path.join(dataset_brat_folders[dataset_type], '*.txt'))) > 0:
dataset_filepath_for_tokenizer = os.path.join(parameters['dataset_text_folder'], '{0}_{1}.txt'.format(dataset_type, parameters['tokenizer']))
if os.path.exists(dataset_filepath_for_tokenizer):
conll_to_brat.check_compatibility_between_conll_and_brat_text(dataset_filepath_for_tokenizer, dataset_brat_folders[dataset_type])
else:
# Populate conll file based on brat files
brat_to_conll.brat_to_conll(dataset_brat_folders[dataset_type], dataset_filepath_for_tokenizer, parameters['tokenizer'], parameters['spacylanguage'])
dataset_filepaths[dataset_type] = dataset_filepath_for_tokenizer
# Brat text files do not exist
else:
del dataset_filepaths[dataset_type]
del dataset_brat_folders[dataset_type]
continue
if parameters['tagging_format'] == 'bioes':
# Generate conll file with BIOES format
bioes_filepath = os.path.join(parameters['dataset_text_folder'], '{0}_bioes.txt'.format(utils.get_basename_without_extension(dataset_filepaths[dataset_type])))
utils_nlp.convert_conll_from_bio_to_bioes(dataset_filepaths[dataset_type], bioes_filepath)
dataset_filepaths[dataset_type] = bioes_filepath
return dataset_filepaths, dataset_brat_folders
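    # BIO -> BIOES recap (what utils_nlp.convert_conll_from_bio_to_bioes is
    # assumed to do, following the standard scheme): a single-token span's
    # B- tag becomes S-, and the last I- of a multi-token span becomes E-, e.g.
    #   BIO:   B-PER I-PER O B-LOC
    #   BIOES: B-PER E-PER O S-LOC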
def _check_parameter_compatiblity(self, parameters, dataset_filepaths):
# Check mode of operation
if parameters['train_model']:
if 'train' not in dataset_filepaths or 'valid' not in dataset_filepaths:
raise IOError("If train_model is set to True, both train and valid set must exist in the specified dataset folder: {0}".format(parameters['dataset_text_folder']))
elif parameters['use_pretrained_model']:
if 'train' in dataset_filepaths and 'valid' in dataset_filepaths:
print("WARNING: train and valid set exist in the specified dataset folder, but train_model is set to FALSE: {0}".format(parameters['dataset_text_folder']))
            if 'test' not in dataset_filepaths and 'deploy' not in dataset_filepaths:
                raise IOError("For prediction mode, either the test set or the deploy set must exist in the specified dataset folder: {0}".format(parameters['dataset_text_folder']))
else: #if not parameters['train_model'] and not parameters['use_pretrained_model']:
raise ValueError('At least one of train_model and use_pretrained_model must be set to True.')
if parameters['use_pretrained_model']:
if all([not parameters[s] for s in ['reload_character_embeddings', 'reload_character_lstm', 'reload_token_embeddings', 'reload_token_lstm', 'reload_feedforward', 'reload_crf']]):
raise ValueError('If use_pretrained_model is set to True, at least one of reload_character_embeddings, reload_character_lstm, reload_token_embeddings, reload_token_lstm, reload_feedforward, reload_crf must be set to True.')
if parameters['gradient_clipping_value'] < 0:
parameters['gradient_clipping_value'] = abs(parameters['gradient_clipping_value'])
def __init__(self,
parameters_filepath=argument_default_value,
pretrained_model_folder=argument_default_value,
dataset_text_folder=argument_default_value,
character_embedding_dimension=argument_default_value,
character_lstm_hidden_state_dimension=argument_default_value,
check_for_digits_replaced_with_zeros=argument_default_value,
check_for_lowercase=argument_default_value,
debug=argument_default_value,
dropout_rate=argument_default_value,
experiment_name=argument_default_value,
freeze_token_embeddings=argument_default_value,
gradient_clipping_value=argument_default_value,
learning_rate=argument_default_value,
load_only_pretrained_token_embeddings=argument_default_value,
load_all_pretrained_token_embeddings=argument_default_value,
main_evaluation_mode=argument_default_value,
maximum_number_of_epochs=argument_default_value,
number_of_cpu_threads=argument_default_value,
number_of_gpus=argument_default_value,
optimizer=argument_default_value,
output_folder=argument_default_value,
patience=argument_default_value,
plot_format=argument_default_value,
reload_character_embeddings=argument_default_value,
reload_character_lstm=argument_default_value,
reload_crf=argument_default_value,
reload_feedforward=argument_default_value,
reload_token_embeddings=argument_default_value,
reload_token_lstm=argument_default_value,
remap_unknown_tokens_to_unk=argument_default_value,
spacylanguage=argument_default_value,
tagging_format=argument_default_value,
token_embedding_dimension=argument_default_value,
token_lstm_hidden_state_dimension=argument_default_value,
token_pretrained_embedding_filepath=argument_default_value,
tokenizer=argument_default_value,
train_model=argument_default_value,
use_character_lstm=argument_default_value,
use_crf=argument_default_value,
use_pretrained_model=argument_default_value,
verbose=argument_default_value,
use_pos = argument_default_value,
freeze_pos = argument_default_value,
use_gaz = argument_default_value,
freeze_gaz =argument_default_value,
gaz_filepath = argument_default_value,
use_aff = argument_default_value,
freeze_aff =argument_default_value,
aff_filepath = argument_default_value,
argument_default_value=argument_default_value):
# Parse arguments
arguments = dict( (k,str(v)) for k,v in locals().items() if k !='self')
# Initialize parameters
parameters, conf_parameters = self._load_parameters(arguments['parameters_filepath'], arguments=arguments)
dataset_filepaths, dataset_brat_folders = self._get_valid_dataset_filepaths(parameters)
self._check_parameter_compatiblity(parameters, dataset_filepaths)
# Load dataset
dataset = ds.Dataset(verbose=parameters['verbose'], debug=parameters['debug'])
token_to_vector = dataset.load_dataset(dataset_filepaths, parameters)
# Launch session
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=parameters['number_of_cpu_threads'],
inter_op_parallelism_threads=parameters['number_of_cpu_threads'],
device_count={'CPU': 1, 'GPU': parameters['number_of_gpus']},
allow_soft_placement=True, # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
log_device_placement=False
)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Create model and initialize or load pretrained model
### Instantiate the model
model = EntityLSTM(dataset, parameters)
### Initialize the model and restore from pretrained model if needed
sess.run(tf.global_variables_initializer())
if not parameters['use_pretrained_model']:
model.load_pretrained_token_embeddings(sess, dataset, parameters, token_to_vector)
self.transition_params_trained = np.random.rand(len(dataset.unique_labels)+2,len(dataset.unique_labels)+2)
else:
self.transition_params_trained = model.restore_from_pretrained_model(parameters, dataset, sess, token_to_vector=token_to_vector)
del token_to_vector
self.dataset = dataset
self.dataset_brat_folders = dataset_brat_folders
self.dataset_filepaths = dataset_filepaths
self.model = model
self.parameters = parameters
self.conf_parameters = conf_parameters
self.sess = sess
def fit(self):
parameters = self.parameters
conf_parameters = self.conf_parameters
dataset_filepaths = self.dataset_filepaths
dataset = self.dataset
dataset_brat_folders = self.dataset_brat_folders
sess = self.sess
model = self.model
transition_params_trained = self.transition_params_trained
stats_graph_folder, experiment_timestamp = self._create_stats_graph_folder(parameters)
# Initialize and save execution details
start_time = time.time()
results = {}
results['epoch'] = {}
results['execution_details'] = {}
results['execution_details']['train_start'] = start_time
results['execution_details']['time_stamp'] = experiment_timestamp
results['execution_details']['early_stop'] = False
results['execution_details']['keyboard_interrupt'] = False
results['execution_details']['num_epochs'] = 0
results['model_options'] = copy.copy(parameters)
model_folder = os.path.join(stats_graph_folder, 'model')
utils.create_folder_if_not_exists(model_folder)
with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:
conf_parameters.write(parameters_file)
try:
pickle.dump(dataset, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))
except Exception:
print("WARNING: Couldn't dump dataset.pickle due to memory error (probably due to dataset size)")
tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')
utils.create_folder_if_not_exists(tensorboard_log_folder)
tensorboard_log_folders = {}
for dataset_type in dataset_filepaths.keys():
tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder, 'tensorboard_logs', dataset_type)
utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])
# Instantiate the writers for TensorBoard
writers = {}
for dataset_type in dataset_filepaths.keys():
writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type], graph=sess.graph)
embedding_writer = tf.summary.FileWriter(model_folder) # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings
embeddings_projector_config = projector.ProjectorConfig()
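# Attach the token and character embedding tensors to TSV metadata files so
# that the TensorBoard projector can label each embedded point.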
tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')
tensorboard_token_embeddings.metadata_path = os.path.relpath(token_list_file_path, '..')
tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')
tensorboard_character_embeddings.metadata_path = os.path.relpath(character_list_file_path, '..')
projector.visualize_embeddings(embedding_writer, embeddings_projector_config)
# Write metadata for TensorBoard embeddings
token_list_file = codecs.open(token_list_file_path,'w', 'UTF-8')
for token_index in range(dataset.vocabulary_size):
token_list_file.write('{0}\n'.format(dataset.index_to_token[token_index]))
token_list_file.close()
character_list_file = codecs.open(character_list_file_path,'w', 'UTF-8')
for character_index in range(dataset.alphabet_size):
if character_index == dataset.PADDING_CHARACTER_INDEX:
character_list_file.write('PADDING\n')
else:
character_list_file.write('{0}\n'.format(dataset.index_to_character[character_index]))
character_list_file.close()
# Start training + evaluation loop. Each iteration corresponds to 1 epoch.
bad_counter = 0 # number of epochs with no improvement on the validation set in terms of F1-score
previous_best_valid_f1_score = 0
epoch_number = -1
try:
while True:
step = 0
epoch_number += 1
print('\nStarting epoch {0}'.format(epoch_number))
epoch_start_time = time.time()
if epoch_number != 0:
# Train model: loop over all sequences of training set with shuffling
sequence_numbers=list(range(len(dataset.token_indices['train'])))
random.shuffle(sequence_numbers)
for sequence_number in sequence_numbers:
transition_params_trained = train.train_step(sess, dataset, sequence_number, model, parameters)
step += 1
if step % 10 == 0:
print('Training {0:.2f}% done'.format(step/len(sequence_numbers)*100), end='\r', flush=True)
epoch_elapsed_training_time = time.time() - epoch_start_time
print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time), flush=True)
y_pred, y_true, output_filepaths = train.predict_labels(sess, model, transition_params_trained, parameters, dataset, epoch_number, stats_graph_folder, dataset_filepaths)
# Evaluate model: save and plot results
evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder, epoch_number, epoch_start_time, output_filepaths, parameters)
if parameters['use_pretrained_model'] and not parameters['train_model']:
conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder)
break
# Save model
model.saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))
# Save TensorBoard logs
summary = sess.run(model.summary_op, feed_dict=None)
writers['train'].add_summary(summary, epoch_number)
writers['train'].flush()
utils.copytree(writers['train'].get_logdir(), model_folder)
# Early stop
valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
if valid_f1_score > previous_best_valid_f1_score:
bad_counter = 0
previous_best_valid_f1_score = valid_f1_score
conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder, overwrite=True)
self.transition_params_trained = transition_params_trained
else:
bad_counter += 1
print("The last {0} epochs have not shown improvements on the validation set.".format(bad_counter))
if bad_counter >= parameters['patience']:
print('Early Stop!')
results['execution_details']['early_stop'] = True
break
if epoch_number >= parameters['maximum_number_of_epochs']: break
except KeyboardInterrupt:
results['execution_details']['keyboard_interrupt'] = True
print('Training interrupted')
print('Finishing the experiment')
end_time = time.time()
results['execution_details']['train_duration'] = end_time - start_time
results['execution_details']['train_end'] = end_time
evaluate.save_results(results, stats_graph_folder)
for dataset_type in dataset_filepaths.keys():
writers[dataset_type].close()
def predict(self, text):
self.prediction_count += 1
if self.prediction_count == 1:
self.parameters['dataset_text_folder'] = os.path.join('..', 'data', 'temp')
self.stats_graph_folder, _ = self._create_stats_graph_folder(self.parameters)
# Update the deploy folder, file, and dataset
dataset_type = 'deploy'
### Delete all deployment data
for filepath in glob.glob(os.path.join(self.parameters['dataset_text_folder'], '{0}*'.format(dataset_type))):
if os.path.isdir(filepath):
shutil.rmtree(filepath)
else:
os.remove(filepath)
### Create brat folder and file
dataset_brat_deploy_folder = os.path.join(self.parameters['dataset_text_folder'], dataset_type)
utils.create_folder_if_not_exists(dataset_brat_deploy_folder)
dataset_brat_deploy_filepath = os.path.join(dataset_brat_deploy_folder, 'temp_{0}.txt'.format(str(self.prediction_count).zfill(5)))#self._get_dataset_brat_deploy_filepath(dataset_brat_deploy_folder)
with codecs.open(dataset_brat_deploy_filepath, 'w', 'UTF-8') as f:
f.write(text)
### Update deploy filepaths
dataset_filepaths, dataset_brat_folders = self._get_valid_dataset_filepaths(self.parameters, dataset_types=[dataset_type])
self.dataset_filepaths.update(dataset_filepaths)
self.dataset_brat_folders.update(dataset_brat_folders)
### Update the dataset for the new deploy set
self.dataset.update_dataset(self.dataset_filepaths, [dataset_type])
# Predict labels and output brat
output_filepaths = {}
prediction_output = train.prediction_step(self.sess, self.dataset, dataset_type, self.model, self.transition_params_trained, self.stats_graph_folder, self.prediction_count, self.parameters, self.dataset_filepaths)
_, _, output_filepaths[dataset_type] = prediction_output
conll_to_brat.output_brat(output_filepaths, self.dataset_brat_folders, self.stats_graph_folder, overwrite=True)
# Print and output result
text_filepath = os.path.join(self.stats_graph_folder, 'brat', 'deploy', os.path.basename(dataset_brat_deploy_filepath))
annotation_filepath = os.path.join(self.stats_graph_folder, 'brat', 'deploy', '{0}.ann'.format(utils.get_basename_without_extension(dataset_brat_deploy_filepath)))
text2, entities = brat_to_conll.get_entities_from_brat(text_filepath, annotation_filepath, verbose=True)
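# Sanity check: the text recovered from the brat output must be identical to
# the original input text, otherwise the entity offsets would be invalid.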
assert text == text2
return entities
def get_params(self):
return self.parameters
def close(self):
self.__del__()
def __del__(self):
self.sess.close()
|
[
"evaluate.evaluate_model",
"evaluate.save_results"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((753, 786), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (776, 786), False, 'import warnings\n'), ((1071, 1110), 'utils.get_current_time_in_miliseconds', 'utils.get_current_time_in_miliseconds', ([], {}), '()\n', (1108, 1110), False, 'import utils\n'), ((1134, 1205), 'utils.get_basename_without_extension', 'utils.get_basename_without_extension', (["parameters['dataset_text_folder']"], {}), "(parameters['dataset_text_folder'])\n", (1170, 1205), False, 'import utils\n'), ((1288, 1350), 'utils.create_folder_if_not_exists', 'utils.create_folder_if_not_exists', (["parameters['output_folder']"], {}), "(parameters['output_folder'])\n", (1321, 1350), False, 'import utils\n'), ((1380, 1433), 'os.path.join', 'os.path.join', (["parameters['output_folder']", 'model_name'], {}), "(parameters['output_folder'], model_name)\n", (1392, 1433), False, 'import os\n'), ((1472, 1525), 'utils.create_folder_if_not_exists', 'utils.create_folder_if_not_exists', (['stats_graph_folder'], {}), '(stats_graph_folder)\n', (1505, 1525), False, 'import utils\n'), ((7528, 7555), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (7553, 7555), False, 'import configparser\n'), ((7670, 7733), 'utils.get_parameter_to_section_of_configparser', 'utils.get_parameter_to_section_of_configparser', (['conf_parameters'], {}), '(conf_parameters)\n', (7716, 7733), False, 'import utils\n'), ((16887, 16955), 'dataset.Dataset', 'ds.Dataset', ([], {'verbose': "parameters['verbose']", 'debug': "parameters['debug']"}), "(verbose=parameters['verbose'], debug=parameters['debug'])\n", (16897, 16955), True, 'import dataset as ds\n'), ((17091, 17369), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': "parameters['number_of_cpu_threads']", 'inter_op_parallelism_threads': "parameters['number_of_cpu_threads']", 'device_count': "{'CPU': 1, 'GPU': parameters['number_of_gpus']}", 'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), "(intra_op_parallelism_threads=parameters[\n 'number_of_cpu_threads'], inter_op_parallelism_threads=parameters[\n 'number_of_cpu_threads'], device_count={'CPU': 1, 'GPU': parameters[\n 'number_of_gpus']}, allow_soft_placement=True, log_device_placement=False)\n", (17105, 17369), True, 'import tensorflow as tf\n'), ((17538, 17569), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (17548, 17569), True, 'import tensorflow as tf\n'), ((19183, 19194), 'time.time', 'time.time', ([], {}), '()\n', (19192, 19194), False, 'import time\n'), ((19643, 19664), 'copy.copy', 'copy.copy', (['parameters'], {}), '(parameters)\n', (19652, 19664), False, 'import copy\n'), ((19689, 19730), 'os.path.join', 'os.path.join', (['stats_graph_folder', '"""model"""'], {}), "(stats_graph_folder, 'model')\n", (19701, 19730), False, 'import os\n'), ((19739, 19786), 'utils.create_folder_if_not_exists', 'utils.create_folder_if_not_exists', (['model_folder'], {}), '(model_folder)\n', (19772, 19786), False, 'import utils\n'), ((20203, 20255), 'os.path.join', 'os.path.join', (['stats_graph_folder', '"""tensorboard_logs"""'], {}), "(stats_graph_folder, 'tensorboard_logs')\n", (20215, 20255), False, 'import os\n'), ((20264, 20321), 'utils.create_folder_if_not_exists', 'utils.create_folder_if_not_exists', (['tensorboard_log_folder'], {}), '(tensorboard_log_folder)\n', (20297, 
20321), False, 'import utils\n'), ((20901, 20936), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['model_folder'], {}), '(model_folder)\n', (20922, 20936), True, 'import tensorflow as tf\n'), ((21080, 21107), 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (21105, 21107), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((21309, 21370), 'os.path.join', 'os.path.join', (['model_folder', '"""tensorboard_metadata_tokens.tsv"""'], {}), "(model_folder, 'tensorboard_metadata_tokens.tsv')\n", (21321, 21370), False, 'import os\n'), ((21424, 21467), 'os.path.relpath', 'os.path.relpath', (['token_list_file_path', '""".."""'], {}), "(token_list_file_path, '..')\n", (21439, 21467), False, 'import os\n'), ((21686, 21751), 'os.path.join', 'os.path.join', (['model_folder', '"""tensorboard_metadata_characters.tsv"""'], {}), "(model_folder, 'tensorboard_metadata_characters.tsv')\n", (21698, 21751), False, 'import os\n'), ((21809, 21856), 'os.path.relpath', 'os.path.relpath', (['character_list_file_path', '""".."""'], {}), "(character_list_file_path, '..')\n", (21824, 21856), False, 'import os\n'), ((21866, 21943), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['embedding_writer', 'embeddings_projector_config'], {}), '(embedding_writer, embeddings_projector_config)\n', (21896, 21943), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((22023, 22070), 'codecs.open', 'codecs.open', (['token_list_file_path', '"""w"""', '"""UTF-8"""'], {}), "(token_list_file_path, 'w', 'UTF-8')\n", (22034, 22070), False, 'import codecs\n'), ((22279, 22330), 'codecs.open', 'codecs.open', (['character_list_file_path', '"""w"""', '"""UTF-8"""'], {}), "(character_list_file_path, 'w', 'UTF-8')\n", (22290, 22330), False, 'import codecs\n'), ((26194, 26205), 'time.time', 'time.time', ([], {}), '()\n', (26203, 26205), False, 'import time\n'), ((26354, 26404), 'evaluate.save_results', 'evaluate.save_results', (['results', 'stats_graph_folder'], {}), '(results, stats_graph_folder)\n', (26375, 26404), False, 'import evaluate\n'), ((27269, 27335), 'os.path.join', 'os.path.join', (["self.parameters['dataset_text_folder']", 'dataset_type'], {}), "(self.parameters['dataset_text_folder'], dataset_type)\n", (27281, 27335), False, 'import os\n'), ((27344, 27405), 'utils.create_folder_if_not_exists', 'utils.create_folder_if_not_exists', (['dataset_brat_deploy_folder'], {}), '(dataset_brat_deploy_folder)\n', (27377, 27405), False, 'import utils\n'), ((28248, 28450), 'train.prediction_step', 'train.prediction_step', (['self.sess', 'self.dataset', 'dataset_type', 'self.model', 'self.transition_params_trained', 'self.stats_graph_folder', 'self.prediction_count', 'self.parameters', 'self.dataset_filepaths'], {}), '(self.sess, self.dataset, dataset_type, self.model,\n self.transition_params_trained, self.stats_graph_folder, self.\n prediction_count, self.parameters, self.dataset_filepaths)\n', (28269, 28450), False, 'import train\n'), ((28515, 28631), 'conll_to_brat.output_brat', 'conll_to_brat.output_brat', (['output_filepaths', 'self.dataset_brat_folders', 'self.stats_graph_folder'], {'overwrite': '(True)'}), '(output_filepaths, self.dataset_brat_folders, self\n .stats_graph_folder, overwrite=True)\n', (28540, 28631), False, 'import conll_to_brat\n'), ((28996, 29086), 'brat_to_conll.get_entities_from_brat', 'brat_to_conll.get_entities_from_brat', 
(['text_filepath', 'annotation_filepath'], {'verbose': '(True)'}), '(text_filepath, annotation_filepath,\n verbose=True)\n', (29032, 29086), False, 'import brat_to_conll\n'), ((4584, 4611), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (4609, 4611), False, 'import configparser\n'), ((4698, 4755), 'utils.convert_configparser_to_dictionary', 'utils.convert_configparser_to_dictionary', (['conf_parameters'], {}), '(conf_parameters)\n', (4738, 4755), False, 'import utils\n'), ((7416, 7434), 'pprint.pprint', 'pprint', (['parameters'], {}), '(parameters)\n', (7422, 7434), False, 'from pprint import pprint\n'), ((7585, 7637), 'os.path.join', 'os.path.join', (['"""test"""', '"""test-parameters-training.ini"""'], {}), "('test', 'test-parameters-training.ini')\n", (7597, 7637), False, 'import os\n'), ((8286, 8347), 'os.path.join', 'os.path.join', (["parameters['dataset_text_folder']", 'dataset_type'], {}), "(parameters['dataset_text_folder'], dataset_type)\n", (8298, 8347), False, 'import os\n'), ((17736, 17767), 'entity_lstm.EntityLSTM', 'EntityLSTM', (['dataset', 'parameters'], {}), '(dataset, parameters)\n', (17746, 17767), False, 'from entity_lstm import EntityLSTM\n'), ((20465, 20531), 'os.path.join', 'os.path.join', (['stats_graph_folder', '"""tensorboard_logs"""', 'dataset_type'], {}), "(stats_graph_folder, 'tensorboard_logs', dataset_type)\n", (20477, 20531), False, 'import os\n'), ((20544, 20616), 'utils.create_folder_if_not_exists', 'utils.create_folder_if_not_exists', (['tensorboard_log_folders[dataset_type]'], {}), '(tensorboard_log_folders[dataset_type])\n', (20577, 20616), False, 'import utils\n'), ((20795, 20873), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tensorboard_log_folders[dataset_type]'], {'graph': 'sess.graph'}), '(tensorboard_log_folders[dataset_type], graph=sess.graph)\n', (20816, 20873), True, 'import tensorflow as tf\n'), ((26675, 26709), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""temp"""'], {}), "('..', 'data', 'temp')\n", (26687, 26709), False, 'import os\n'), ((27072, 27095), 'os.path.isdir', 'os.path.isdir', (['filepath'], {}), '(filepath)\n', (27085, 27095), False, 'import os\n'), ((27627, 27682), 'codecs.open', 'codecs.open', (['dataset_brat_deploy_filepath', '"""w"""', '"""UTF-8"""'], {}), "(dataset_brat_deploy_filepath, 'w', 'UTF-8')\n", (27638, 27682), False, 'import codecs\n'), ((28750, 28796), 'os.path.basename', 'os.path.basename', (['dataset_brat_deploy_filepath'], {}), '(dataset_brat_deploy_filepath)\n', (28766, 28796), False, 'import os\n'), ((8553, 8600), 'os.path.isfile', 'os.path.isfile', (['dataset_filepaths[dataset_type]'], {}), '(dataset_filepaths[dataset_type])\n', (8567, 8600), False, 'import os\n'), ((11334, 11428), 'utils_nlp.convert_conll_from_bio_to_bioes', 'utils_nlp.convert_conll_from_bio_to_bioes', (['dataset_filepaths[dataset_type]', 'bioes_filepath'], {}), '(dataset_filepaths[dataset_type],\n bioes_filepath)\n', (11375, 11428), False, 'import utils_nlp\n'), ((17870, 17903), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (17901, 17903), True, 'import tensorflow as tf\n'), ((19805, 19849), 'os.path.join', 'os.path.join', (['model_folder', '"""parameters.ini"""'], {}), "(model_folder, 'parameters.ini')\n", (19817, 19849), False, 'import os\n'), ((23128, 23139), 'time.time', 'time.time', ([], {}), '()\n', (23137, 23139), False, 'import time\n'), ((24028, 24166), 'train.predict_labels', 'train.predict_labels', (['sess', 'model', 
'transition_params_trained', 'parameters', 'dataset', 'epoch_number', 'stats_graph_folder', 'dataset_filepaths'], {}), '(sess, model, transition_params_trained, parameters,\n dataset, epoch_number, stats_graph_folder, dataset_filepaths)\n', (24048, 24166), False, 'import train\n'), ((24236, 24383), 'evaluate.evaluate_model', 'evaluate.evaluate_model', (['results', 'dataset', 'y_pred', 'y_true', 'stats_graph_folder', 'epoch_number', 'epoch_start_time', 'output_filepaths', 'parameters'], {}), '(results, dataset, y_pred, y_true,\n stats_graph_folder, epoch_number, epoch_start_time, output_filepaths,\n parameters)\n', (24259, 24383), False, 'import evaluate\n'), ((27114, 27137), 'shutil.rmtree', 'shutil.rmtree', (['filepath'], {}), '(filepath)\n', (27127, 27137), False, 'import shutil\n'), ((27172, 27191), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (27181, 27191), False, 'import os\n'), ((28901, 28967), 'utils.get_basename_without_extension', 'utils.get_basename_without_extension', (['dataset_brat_deploy_filepath'], {}), '(dataset_brat_deploy_filepath)\n', (28937, 28967), False, 'import utils\n'), ((8605, 8653), 'os.path.getsize', 'os.path.getsize', (['dataset_filepaths[dataset_type]'], {}), '(dataset_filepaths[dataset_type])\n', (8620, 8653), False, 'import os\n'), ((8718, 8768), 'os.path.exists', 'os.path.exists', (['dataset_brat_folders[dataset_type]'], {}), '(dataset_brat_folders[dataset_type])\n', (8732, 8768), False, 'import os\n'), ((8948, 9047), 'brat_to_conll.check_brat_annotation_and_text_compatibility', 'brat_to_conll.check_brat_annotation_and_text_compatibility', (['dataset_brat_folders[dataset_type]'], {}), '(dataset_brat_folders\n [dataset_type])\n', (9006, 9047), False, 'import brat_to_conll\n'), ((9066, 9119), 'os.path.exists', 'os.path.exists', (['dataset_compatible_with_brat_filepath'], {}), '(dataset_compatible_with_brat_filepath)\n', (9080, 9119), False, 'import os\n'), ((9237, 9372), 'conll_to_brat.check_compatibility_between_conll_and_brat_text', 'conll_to_brat.check_compatibility_between_conll_and_brat_text', (['dataset_filepaths[dataset_type]', 'dataset_brat_folders[dataset_type]'], {}), '(dataset_filepaths\n [dataset_type], dataset_brat_folders[dataset_type])\n', (9298, 9372), False, 'import conll_to_brat\n'), ((9549, 9729), 'conll_to_brat.conll_to_brat', 'conll_to_brat.conll_to_brat', (['dataset_filepaths[dataset_type]', 'dataset_compatible_with_brat_filepath', 'dataset_brat_folders[dataset_type]', 'dataset_brat_folders[dataset_type]'], {}), '(dataset_filepaths[dataset_type],\n dataset_compatible_with_brat_filepath, dataset_brat_folders[\n dataset_type], dataset_brat_folders[dataset_type])\n', (9576, 9729), False, 'import conll_to_brat\n'), ((9935, 9985), 'os.path.exists', 'os.path.exists', (['dataset_brat_folders[dataset_type]'], {}), '(dataset_brat_folders[dataset_type])\n', (9949, 9985), False, 'import os\n'), ((10254, 10300), 'os.path.exists', 'os.path.exists', (['dataset_filepath_for_tokenizer'], {}), '(dataset_filepath_for_tokenizer)\n', (10268, 10300), False, 'import os\n'), ((19978, 20022), 'os.path.join', 'os.path.join', (['model_folder', '"""dataset.pickle"""'], {}), "(model_folder, 'dataset.pickle')\n", (19990, 20022), False, 'import os\n'), ((23375, 23407), 'random.shuffle', 'random.shuffle', (['sequence_numbers'], {}), '(sequence_numbers)\n', (23389, 23407), False, 'import random\n'), ((23834, 23845), 'time.time', 'time.time', ([], {}), '()\n', (23843, 23845), False, 'import time\n'), ((24486, 24575), 'conll_to_brat.output_brat', 
'conll_to_brat.output_brat', (['output_filepaths', 'dataset_brat_folders', 'stats_graph_folder'], {}), '(output_filepaths, dataset_brat_folders,\n stats_graph_folder)\n', (24511, 24575), False, 'import conll_to_brat\n'), ((25350, 25455), 'conll_to_brat.output_brat', 'conll_to_brat.output_brat', (['output_filepaths', 'dataset_brat_folders', 'stats_graph_folder'], {'overwrite': '(True)'}), '(output_filepaths, dataset_brat_folders,\n stats_graph_folder, overwrite=True)\n', (25375, 25455), False, 'import conll_to_brat\n'), ((6751, 6820), 'os.path.join', 'os.path.join', (["parameters['pretrained_model_folder']", '"""parameters.ini"""'], {}), "(parameters['pretrained_model_folder'], 'parameters.ini')\n", (6763, 6820), False, 'import os\n'), ((10326, 10460), 'conll_to_brat.check_compatibility_between_conll_and_brat_text', 'conll_to_brat.check_compatibility_between_conll_and_brat_text', (['dataset_filepath_for_tokenizer', 'dataset_brat_folders[dataset_type]'], {}), '(\n dataset_filepath_for_tokenizer, dataset_brat_folders[dataset_type])\n', (10387, 10460), False, 'import conll_to_brat\n'), ((10572, 10730), 'brat_to_conll.brat_to_conll', 'brat_to_conll.brat_to_conll', (['dataset_brat_folders[dataset_type]', 'dataset_filepath_for_tokenizer', "parameters['tokenizer']", "parameters['spacylanguage']"], {}), "(dataset_brat_folders[dataset_type],\n dataset_filepath_for_tokenizer, parameters['tokenizer'], parameters[\n 'spacylanguage'])\n", (10599, 10730), False, 'import brat_to_conll\n'), ((11246, 11315), 'utils.get_basename_without_extension', 'utils.get_basename_without_extension', (['dataset_filepaths[dataset_type]'], {}), '(dataset_filepaths[dataset_type])\n', (11282, 11315), False, 'import utils\n'), ((23521, 23588), 'train.train_step', 'train.train_step', (['sess', 'dataset', 'sequence_number', 'model', 'parameters'], {}), '(sess, dataset, sequence_number, model, parameters)\n', (23537, 23588), False, 'import train\n'), ((8787, 8844), 'os.path.join', 'os.path.join', (['dataset_brat_folders[dataset_type]', '"""*.txt"""'], {}), "(dataset_brat_folders[dataset_type], '*.txt')\n", (8799, 8844), False, 'import os\n'), ((10004, 10061), 'os.path.join', 'os.path.join', (['dataset_brat_folders[dataset_type]', '"""*.txt"""'], {}), "(dataset_brat_folders[dataset_type], '*.txt')\n", (10016, 10061), False, 'import os\n')]
|
#run_experiment.py
#Copyright (c) 2020 <NAME> <NAME>
#MIT License
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE
import os
import timeit
import datetime
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch, torch.nn as nn, torch.nn.functional as F
import torchvision
from torchvision import transforms, models, utils
import evaluate
from load_dataset import custom_datasets
#Set seeds
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
class DukeCTModel(object):
def __init__(self, descriptor, custom_net, custom_net_args,
loss, loss_args, num_epochs, patience, batch_size, device, data_parallel,
use_test_set, task, old_params_dir, dataset_class, dataset_args):
"""Variables:
<descriptor>: string describing the experiment
<custom_net>: class defining a model
<custom_net_args>: dictionary where keys correspond to custom net
input arguments, and values are the desired values
<loss>: 'bce' for binary cross entropy
<loss_args>: arguments to pass to the loss function if any
<num_epochs>: int for the maximum number of epochs to train
<patience>: number of epochs for which loss must fail to improve to
cause early stopping
<batch_size>: int for number of examples per batch
<device>: int specifying which device to use, or 'all' for all devices
<data_parallel>: if True then parallelize across available GPUs.
<use_test_set>: if True, then run model on the test set. If False, use
only the training and validation sets.
<task>:
'train_eval': train and evaluate a new model. 'evaluate' will
always imply use of the validation set. if <use_test_set> is
True, then 'evaluate' also includes calculation of test set
performance for the best validation epoch.
'predict_on_test': load a trained model and make predictions on
the test set using that model.
<old_params_dir>: this is only needed if <task>=='predict_on_test'. This
is the path to the parameters that will be loaded in to the model.
<dataset_class>: CT Dataset class for preprocessing the data
<dataset_args>: arguments for the dataset class specifying how
the data should be prepared."""
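# A minimal usage sketch (hypothetical arguments; the custom net and dataset
# classes are project-specific and not defined in this file):
#   DukeCTModel(descriptor='demo', custom_net=MyNet, custom_net_args={'n_outputs': 80},
#       loss='bce', loss_args={}, num_epochs=100, patience=15, batch_size=1,
#       device=0, data_parallel=False, use_test_set=False, task='train_eval',
#       old_params_dir='', dataset_class=MyCTDataset,
#       dataset_args={'label_meanings': 'all', 'crop_type': 'single', ...})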
self.descriptor = descriptor
self.set_up_results_dirs()
self.custom_net = custom_net
self.custom_net_args = custom_net_args
self.loss = loss
self.loss_args = loss_args
self.num_epochs = num_epochs
self.batch_size = batch_size
print('self.batch_size=',self.batch_size)
#num_workers is number of threads to use for data loading
self.num_workers = int(batch_size*4) #batch_size 1 = num_workers 4. batch_size 2 = num workers 8. batch_size 4 = num_workers 16.
print('self.num_workers=',self.num_workers)
if self.num_workers == 1:
print('Warning: Using only one worker will slow down data loading')
#Set Device and Data Parallelism
if device in [0,1,2,3]: #i.e. if a GPU number was specified:
self.device = torch.device('cuda:'+str(device))
print('using device:',str(self.device),'\ndescriptor: ',self.descriptor)
elif device == 'all':
self.device = torch.device('cuda')
self.data_parallel = data_parallel
if self.data_parallel:
assert device == 'all' #use all devices when running data parallel
#Set Task
self.use_test_set = use_test_set
self.task = task
assert self.task in ['train_eval','predict_on_test']
if self.task == 'predict_on_test':
#overwrite the params dir that was created in the call to
#set_up_results_dirs() with the dir you want to load from
self.params_dir = old_params_dir
#Data and Labels
self.CTDatasetClass = dataset_class
self.dataset_args = dataset_args
#Get label meanings, a list of descriptive strings (list elements must
#be strings found in the column headers of the labels file)
self.set_up_label_meanings(self.dataset_args['label_meanings'])
if self.task == 'train_eval':
self.dataset_train = self.CTDatasetClass(setname = 'train', **self.dataset_args)
self.dataset_valid = self.CTDatasetClass(setname = 'valid', **self.dataset_args)
if self.use_test_set:
self.dataset_test = self.CTDatasetClass(setname = 'test', **self.dataset_args)
#Tracking losses and evaluation results
self.train_loss = np.zeros(self.num_epochs)
self.valid_loss = np.zeros(self.num_epochs)
self.eval_results_valid, self.eval_results_test = evaluate.initialize_evaluation_dfs(self.label_meanings, self.num_epochs)
#For early stopping
self.initial_patience = patience
self.patience_remaining = patience
self.best_valid_epoch = 0
self.min_val_loss = np.inf
#Run everything
self.run_model()
### Methods ###
def set_up_label_meanings(self,label_meanings):
if label_meanings == 'all': #get full list of all available labels
temp = custom_datasets.read_in_labels(self.dataset_args['label_type_ld'], 'valid')
self.label_meanings = temp.columns.values.tolist()
else: #use the label meanings that were passed in
self.label_meanings = label_meanings
print('label meanings ('+str(len(self.label_meanings))+' labels total):',self.label_meanings)
def set_up_results_dirs(self):
if not os.path.isdir('results'):
os.mkdir('results')
self.results_dir = os.path.join('results',datetime.datetime.today().strftime('%Y-%m-%d')+'_'+self.descriptor)
if not os.path.isdir(self.results_dir):
os.mkdir(self.results_dir)
self.params_dir = os.path.join(self.results_dir,'params')
if not os.path.isdir(self.params_dir):
os.mkdir(self.params_dir)
self.backup_dir = os.path.join(self.results_dir,'backup')
if not os.path.isdir(self.backup_dir):
os.mkdir(self.backup_dir)
def run_model(self):
if self.data_parallel:
self.model = nn.DataParallel(self.custom_net(**self.custom_net_args)).to(self.device)
else:
self.model = self.custom_net(**self.custom_net_args).to(self.device)
self.sigmoid = torch.nn.Sigmoid()
self.set_up_loss_function()
momentum = 0.99
print('Running with optimizer lr=1e-3, momentum='+str(round(momentum,2))+' and weight_decay=1e-7')
self.optimizer = torch.optim.SGD(self.model.parameters(), lr = 1e-3, momentum=momentum, weight_decay=1e-7)
train_dataloader = DataLoader(self.dataset_train, batch_size=self.batch_size, shuffle=True, num_workers = self.num_workers)
valid_dataloader = DataLoader(self.dataset_valid, batch_size=self.batch_size, shuffle=False, num_workers = self.num_workers)
if self.task == 'train_eval':
for epoch in range(self.num_epochs):
t0 = timeit.default_timer()
self.train(train_dataloader, epoch)
self.valid(valid_dataloader, epoch)
self.save_evals(epoch)
if self.patience_remaining <= 0:
print('No more patience (',self.initial_patience,') left at epoch',epoch)
print('--> Implementing early stopping. Best epoch was:',self.best_valid_epoch)
break
t1 = timeit.default_timer()
self.back_up_model_every_ten(epoch)
print('Epoch',epoch,'time:',round((t1 - t0)/60.0,2),'minutes')
if self.use_test_set: self.test(DataLoader(self.dataset_test, batch_size=self.batch_size, shuffle=False, num_workers = self.num_workers))
self.save_final_summary()
def set_up_loss_function(self):
if self.loss == 'bce':
self.loss_func = nn.BCEWithLogitsLoss() #includes application of sigmoid for numerical stability
def train(self, dataloader, epoch):
model = self.model.train()
epoch_loss, pred_epoch, gr_truth_epoch, volume_accs_epoch = self.iterate_through_batches(model, dataloader, epoch, training=True)
self.train_loss[epoch] = epoch_loss
self.plot_roc_and_pr_curves('train', epoch, pred_epoch, gr_truth_epoch)
print("{:5s} {:<3d} {:11s} {:.3f}".format('Epoch', epoch, 'Train Loss', epoch_loss))
def valid(self, dataloader, epoch):
model = self.model.eval()
with torch.no_grad():
epoch_loss, pred_epoch, gr_truth_epoch, volume_accs_epoch = self.iterate_through_batches(model, dataloader, epoch, training=False)
self.valid_loss[epoch] = epoch_loss
self.eval_results_valid = evaluate.evaluate_all(self.eval_results_valid, epoch,
self.label_meanings, gr_truth_epoch, pred_epoch)
self.early_stopping_check(epoch, pred_epoch, gr_truth_epoch, volume_accs_epoch)
print("{:5s} {:<3d} {:11s} {:.3f}".format('Epoch', epoch, 'Valid Loss', epoch_loss))
def early_stopping_check(self, epoch, val_pred_epoch, val_gr_truth_epoch, val_volume_accs_epoch):
"""Check whether criteria for early stopping are met and update
counters accordingly"""
val_loss = self.valid_loss[epoch]
if (val_loss < self.min_val_loss) or epoch==0: #then save parameters
self.min_val_loss = val_loss
check_point = {'params': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()}
torch.save(check_point, os.path.join(self.params_dir, self.descriptor))
self.best_valid_epoch = epoch
self.patience_remaining = self.initial_patience
print('model saved, val loss',val_loss)
self.plot_roc_and_pr_curves('valid', epoch, val_pred_epoch, val_gr_truth_epoch)
self.save_all_pred_probs('valid', epoch, val_pred_epoch, val_gr_truth_epoch, val_volume_accs_epoch)
else:
self.patience_remaining -= 1
def back_up_model_every_ten(self, epoch):
"""Back up the model parameters every 10 epochs"""
if epoch % 10 == 0:
check_point = {'params': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()}
torch.save(check_point, os.path.join(self.backup_dir, self.descriptor+'_ep_'+str(epoch)))
def test(self, dataloader):
epoch = self.best_valid_epoch
if self.data_parallel:
model = nn.DataParallel(self.custom_net(**self.custom_net_args)).to(self.device).eval()
else:
model = self.custom_net(**self.custom_net_args).to(self.device).eval()
params_path = os.path.join(self.params_dir,self.descriptor)
print('For test set predictions, loading model params from params_path=',params_path)
check_point = torch.load(params_path)
model.load_state_dict(check_point['params'])
with torch.no_grad():
epoch_loss, pred_epoch, gr_truth_epoch, volume_accs_epoch = self.iterate_through_batches(model, dataloader, epoch, training=False)
self.eval_results_test = evaluate.evaluate_all(self.eval_results_test, epoch,
self.label_meanings, gr_truth_epoch, pred_epoch)
self.plot_roc_and_pr_curves('test', epoch, pred_epoch, gr_truth_epoch)
self.save_all_pred_probs('test', epoch, pred_epoch, gr_truth_epoch, volume_accs_epoch)
print("{:5s} {:<3d} {:11s} {:.3f}".format('Epoch', epoch, 'Test Loss', epoch_loss))
def iterate_through_batches(self, model, dataloader, epoch, training):
epoch_loss = 0
#Initialize numpy arrays for storing results. examples x labels
#Do NOT use concatenation, or else you will have memory fragmentation.
num_examples = len(dataloader.dataset)
num_labels = len(self.label_meanings)
pred_epoch = np.zeros([num_examples,num_labels])
gr_truth_epoch = np.zeros([num_examples,num_labels])
volume_accs_epoch = np.empty(num_examples,dtype='U32') #need to use U32 to allow string of length 32
for batch_idx, batch in enumerate(dataloader):
data, gr_truth = self.move_data_to_device(batch)
self.optimizer.zero_grad()
if training:
out = model(data)
else:
with torch.set_grad_enabled(False):
out = model(data)
loss = self.loss_func(out, gr_truth)
if training:
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
torch.cuda.empty_cache()
#Save predictions and ground truth across batches
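#Note: the sigmoid here is applied only to obtain probabilities for
#evaluation; BCEWithLogitsLoss above already consumes the raw logits.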
pred = self.sigmoid(out.data).detach().cpu().numpy()
gr_truth = gr_truth.detach().cpu().numpy()
start_row = batch_idx*self.batch_size
stop_row = min(start_row + self.batch_size, num_examples)
pred_epoch[start_row:stop_row,:] = pred #pred_epoch is e.g. [25355,80] and pred is e.g. [1,80] for a batch size of 1
gr_truth_epoch[start_row:stop_row,:] = gr_truth #gr_truth_epoch has same shape as pred_epoch
volume_accs_epoch[start_row:stop_row] = batch['volume_acc'] #volume_accs_epoch stores the volume accessions in the order they were used
#the following line to empty the cache is necessary in order to
#reduce memory usage and avoid OOM error:
torch.cuda.empty_cache()
return epoch_loss, pred_epoch, gr_truth_epoch, volume_accs_epoch
def move_data_to_device(self, batch):
"""Move data and ground truth to device."""
assert self.dataset_args['crop_type'] == 'single'
if self.dataset_args['crop_type'] == 'single':
data = batch['data'].to(self.device)
#Ground truth to device
gr_truth = batch['gr_truth'].to(self.device)
return data, gr_truth
def plot_roc_and_pr_curves(self, setname, epoch, pred_epoch, gr_truth_epoch):
outdir = os.path.join(self.results_dir,'curves')
if not os.path.isdir(outdir):
os.mkdir(outdir)
evaluate.plot_roc_curve_multi_class(label_meanings=self.label_meanings,
y_test=gr_truth_epoch, y_score=pred_epoch,
outdir = outdir, setname = setname, epoch = epoch)
evaluate.plot_pr_curve_multi_class(label_meanings=self.label_meanings,
y_test=gr_truth_epoch, y_score=pred_epoch,
outdir = outdir, setname = setname, epoch = epoch)
def save_all_pred_probs(self, setname, epoch, pred_epoch, gr_truth_epoch, volume_accs_epoch):
outdir = os.path.join(self.results_dir,'pred_probs')
if not os.path.isdir(outdir):
os.mkdir(outdir)
(pd.DataFrame(pred_epoch,columns=self.label_meanings,index=volume_accs_epoch.tolist())).to_csv(os.path.join(outdir, setname+'_predprob_ep'+str(epoch)+'.csv'))
(pd.DataFrame(gr_truth_epoch,columns=self.label_meanings,index=volume_accs_epoch.tolist())).to_csv(os.path.join(outdir, setname+'_grtruth_ep'+str(epoch)+'.csv'))
def save_evals(self, epoch):
evaluate.save(self.eval_results_valid, self.results_dir, self.descriptor+'_valid')
if self.use_test_set: evaluate.save(self.eval_results_test, self.results_dir, self.descriptor+'_test')
evaluate.plot_learning_curves(self.train_loss, self.valid_loss, self.results_dir, self.descriptor)
def save_final_summary(self):
evaluate.save_final_summary(self.eval_results_valid, self.best_valid_epoch, 'valid', self.results_dir)
if self.use_test_set: evaluate.save_final_summary(self.eval_results_test, self.best_valid_epoch, 'test', self.results_dir)
evaluate.clean_up_output_files(self.best_valid_epoch, self.results_dir)
|
[
"evaluate.clean_up_output_files",
"evaluate.evaluate_all",
"evaluate.plot_roc_curve_multi_class",
"evaluate.save",
"evaluate.plot_learning_curves",
"evaluate.save_final_summary",
"evaluate.plot_pr_curve_multi_class",
"evaluate.initialize_evaluation_dfs"
] |
[((1429, 1446), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1443, 1446), True, 'import numpy as np\n'), ((1447, 1467), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1464, 1467), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((1468, 1493), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (1490, 1493), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((1494, 1523), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(0)'], {}), '(0)\n', (1520, 1523), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((5805, 5830), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (5813, 5830), True, 'import numpy as np\n'), ((5859, 5884), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (5867, 5884), True, 'import numpy as np\n'), ((5945, 6017), 'evaluate.initialize_evaluation_dfs', 'evaluate.initialize_evaluation_dfs', (['self.label_meanings', 'self.num_epochs'], {}), '(self.label_meanings, self.num_epochs)\n', (5979, 6017), False, 'import evaluate\n'), ((7133, 7173), 'os.path.join', 'os.path.join', (['self.results_dir', '"""params"""'], {}), "(self.results_dir, 'params')\n", (7145, 7173), False, 'import os\n'), ((7284, 7324), 'os.path.join', 'os.path.join', (['self.results_dir', '"""backup"""'], {}), "(self.results_dir, 'backup')\n", (7296, 7324), False, 'import os\n'), ((7690, 7708), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (7706, 7708), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((8036, 8142), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset_train'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.num_workers'}), '(self.dataset_train, batch_size=self.batch_size, shuffle=True,\n num_workers=self.num_workers)\n', (8046, 8142), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8168, 8275), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset_valid'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'num_workers': 'self.num_workers'}), '(self.dataset_valid, batch_size=self.batch_size, shuffle=False,\n num_workers=self.num_workers)\n', (8178, 8275), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((10134, 10240), 'evaluate.evaluate_all', 'evaluate.evaluate_all', (['self.eval_results_valid', 'epoch', 'self.label_meanings', 'gr_truth_epoch', 'pred_epoch'], {}), '(self.eval_results_valid, epoch, self.label_meanings,\n gr_truth_epoch, pred_epoch)\n', (10155, 10240), False, 'import evaluate\n'), ((12217, 12263), 'os.path.join', 'os.path.join', (['self.params_dir', 'self.descriptor'], {}), '(self.params_dir, self.descriptor)\n', (12229, 12263), False, 'import os\n'), ((12379, 12402), 'torch.load', 'torch.load', (['params_path'], {}), '(params_path)\n', (12389, 12402), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((12662, 12767), 'evaluate.evaluate_all', 'evaluate.evaluate_all', (['self.eval_results_test', 'epoch', 'self.label_meanings', 'gr_truth_epoch', 'pred_epoch'], {}), '(self.eval_results_test, epoch, self.label_meanings,\n gr_truth_epoch, pred_epoch)\n', (12683, 12767), False, 'import evaluate\n'), ((13419, 13455), 'numpy.zeros', 'np.zeros', (['[num_examples, num_labels]'], {}), '([num_examples, num_labels])\n', (13427, 13455), True, 'import numpy as np\n'), ((13480, 13516), 'numpy.zeros', 'np.zeros', (['[num_examples, num_labels]'], {}), '([num_examples, num_labels])\n', 
(13488, 13516), True, 'import numpy as np\n'), ((13544, 13579), 'numpy.empty', 'np.empty', (['num_examples'], {'dtype': '"""U32"""'}), "(num_examples, dtype='U32')\n", (13552, 13579), True, 'import numpy as np\n'), ((15643, 15683), 'os.path.join', 'os.path.join', (['self.results_dir', '"""curves"""'], {}), "(self.results_dir, 'curves')\n", (15655, 15683), False, 'import os\n'), ((15758, 15926), 'evaluate.plot_roc_curve_multi_class', 'evaluate.plot_roc_curve_multi_class', ([], {'label_meanings': 'self.label_meanings', 'y_test': 'gr_truth_epoch', 'y_score': 'pred_epoch', 'outdir': 'outdir', 'setname': 'setname', 'epoch': 'epoch'}), '(label_meanings=self.label_meanings,\n y_test=gr_truth_epoch, y_score=pred_epoch, outdir=outdir, setname=\n setname, epoch=epoch)\n', (15793, 15926), False, 'import evaluate\n'), ((15972, 16139), 'evaluate.plot_pr_curve_multi_class', 'evaluate.plot_pr_curve_multi_class', ([], {'label_meanings': 'self.label_meanings', 'y_test': 'gr_truth_epoch', 'y_score': 'pred_epoch', 'outdir': 'outdir', 'setname': 'setname', 'epoch': 'epoch'}), '(label_meanings=self.label_meanings,\n y_test=gr_truth_epoch, y_score=pred_epoch, outdir=outdir, setname=\n setname, epoch=epoch)\n', (16006, 16139), False, 'import evaluate\n'), ((16297, 16341), 'os.path.join', 'os.path.join', (['self.results_dir', '"""pred_probs"""'], {}), "(self.results_dir, 'pred_probs')\n", (16309, 16341), False, 'import os\n'), ((16795, 16883), 'evaluate.save', 'evaluate.save', (['self.eval_results_valid', 'self.results_dir', "(self.descriptor + '_valid')"], {}), "(self.eval_results_valid, self.results_dir, self.descriptor +\n '_valid')\n", (16808, 16883), False, 'import evaluate\n'), ((16997, 17100), 'evaluate.plot_learning_curves', 'evaluate.plot_learning_curves', (['self.train_loss', 'self.valid_loss', 'self.results_dir', 'self.descriptor'], {}), '(self.train_loss, self.valid_loss, self.\n results_dir, self.descriptor)\n', (17026, 17100), False, 'import evaluate\n'), ((17154, 17260), 'evaluate.save_final_summary', 'evaluate.save_final_summary', (['self.eval_results_valid', 'self.best_valid_epoch', '"""valid"""', 'self.results_dir'], {}), "(self.eval_results_valid, self.best_valid_epoch,\n 'valid', self.results_dir)\n", (17181, 17260), False, 'import evaluate\n'), ((17396, 17467), 'evaluate.clean_up_output_files', 'evaluate.clean_up_output_files', (['self.best_valid_epoch', 'self.results_dir'], {}), '(self.best_valid_epoch, self.results_dir)\n', (17426, 17467), False, 'import evaluate\n'), ((6437, 6512), 'load_dataset.custom_datasets.read_in_labels', 'custom_datasets.read_in_labels', (["self.dataset_args['label_type_ld']", '"""valid"""'], {}), "(self.dataset_args['label_type_ld'], 'valid')\n", (6467, 6512), False, 'from load_dataset import custom_datasets\n'), ((6844, 6868), 'os.path.isdir', 'os.path.isdir', (['"""results"""'], {}), "('results')\n", (6857, 6868), False, 'import os\n'), ((6882, 6901), 'os.mkdir', 'os.mkdir', (['"""results"""'], {}), "('results')\n", (6890, 6901), False, 'import os\n'), ((7035, 7066), 'os.path.isdir', 'os.path.isdir', (['self.results_dir'], {}), '(self.results_dir)\n', (7048, 7066), False, 'import os\n'), ((7080, 7106), 'os.mkdir', 'os.mkdir', (['self.results_dir'], {}), '(self.results_dir)\n', (7088, 7106), False, 'import os\n'), ((7188, 7218), 'os.path.isdir', 'os.path.isdir', (['self.params_dir'], {}), '(self.params_dir)\n', (7201, 7218), False, 'import os\n'), ((7232, 7257), 'os.mkdir', 'os.mkdir', (['self.params_dir'], {}), '(self.params_dir)\n', (7240, 7257), False, 'import 
os\n'), ((7339, 7369), 'os.path.isdir', 'os.path.isdir', (['self.backup_dir'], {}), '(self.backup_dir)\n', (7352, 7369), False, 'import os\n'), ((7383, 7408), 'os.mkdir', 'os.mkdir', (['self.backup_dir'], {}), '(self.backup_dir)\n', (7391, 7408), False, 'import os\n'), ((9285, 9307), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (9305, 9307), True, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((9896, 9911), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9909, 9911), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((12469, 12484), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12482, 12484), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((14165, 14189), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (14187, 14189), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((15055, 15079), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15077, 15079), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((15698, 15719), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (15711, 15719), False, 'import os\n'), ((15733, 15749), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (15741, 15749), False, 'import os\n'), ((16356, 16377), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (16369, 16377), False, 'import os\n'), ((16391, 16407), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (16399, 16407), False, 'import os\n'), ((16908, 16994), 'evaluate.save', 'evaluate.save', (['self.eval_results_test', 'self.results_dir', "(self.descriptor + '_test')"], {}), "(self.eval_results_test, self.results_dir, self.descriptor +\n '_test')\n", (16921, 16994), False, 'import evaluate\n'), ((17287, 17391), 'evaluate.save_final_summary', 'evaluate.save_final_summary', (['self.eval_results_test', 'self.best_valid_epoch', '"""test"""', 'self.results_dir'], {}), "(self.eval_results_test, self.best_valid_epoch,\n 'test', self.results_dir)\n", (17314, 17391), False, 'import evaluate\n'), ((4483, 4503), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4495, 4503), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((8391, 8413), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8411, 8413), False, 'import timeit\n'), ((8847, 8869), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8867, 8869), False, 'import timeit\n'), ((9043, 9149), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset_test'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'num_workers': 'self.num_workers'}), '(self.dataset_test, batch_size=self.batch_size, shuffle=False,\n num_workers=self.num_workers)\n', (9053, 9149), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((10996, 11042), 'os.path.join', 'os.path.join', (['self.params_dir', 'self.descriptor'], {}), '(self.params_dir, self.descriptor)\n', (11008, 11042), False, 'import os\n'), ((13887, 13916), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (13909, 13916), False, 'import torch, torch.nn as nn, torch.nn.functional as F\n'), ((6952, 6977), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (6975, 6977), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import time
import math
import setting
from evaluate import evaluate
from buildVocab import readVocab
import torch
from ..metrics.metric import metricPair
def getMetirc(ref_str, gen_str):
return metricPair(ref_str, gen_str)
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
plt.savefig(setting.PNG_HOME + time.strftime("/[%Y%m%d %H:%M:%S].png", time.localtime()))
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent+0.01)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def showAttention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
print("atten:", attentions)
cax = ax.matshow(torch.FloatTensor(attentions).numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
plt.savefig(setting.PNG_HOME + time.strftime("/[%Y%m%d %H:%M:%S]attention.png", time.localtime()))
def evalDemo(dataSet, lang, CodeStr):
nlVocab, codeVocab = readVocab(lang, dataSet)
encoder = torch.load(setting.MODEL_HOME + "/%s.%s.encoder.pkl" % (dataSet, lang))
decoder = torch.load(setting.MODEL_HOME + "/%s.%s.decoder.pkl" % (dataSet, lang))
output_words, attentions = evaluate(nlVocab, codeVocab, encoder, decoder, CodeStr)
print('> Code Input: ', CodeStr)
print('< NL generate: ', ' '.join(output_words))
showAttention(CodeStr, output_words, attentions)
def evaluateAndShowAttention(nlVocab, codeVocab, encoder, attn_decoder,input_sentence):
output_words, attentions = evaluate(nlVocab, codeVocab, encoder, attn_decoder, input_sentence)
print('> Code Input: ', input_sentence)
print('< NL generate: ', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions)
|
[
"evaluate.evaluate"
] |
[((49, 63), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (56, 63), True, 'import matplotlib as mpl\n'), ((388, 400), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (398, 400), True, 'import matplotlib.pyplot as plt\n'), ((415, 429), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (427, 429), True, 'import matplotlib.pyplot as plt\n'), ((491, 523), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], {'base': '(0.2)'}), '(base=0.2)\n', (513, 523), True, 'import matplotlib.ticker as ticker\n'), ((564, 580), 'matplotlib.pyplot.plot', 'plt.plot', (['points'], {}), '(points)\n', (572, 580), True, 'import matplotlib.pyplot as plt\n'), ((702, 720), 'math.floor', 'math.floor', (['(s / 60)'], {}), '(s / 60)\n', (712, 720), False, 'import math\n'), ((810, 821), 'time.time', 'time.time', ([], {}), '()\n', (819, 821), False, 'import time\n'), ((1048, 1060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1058, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1547, 1549), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1742), 'buildVocab.readVocab', 'readVocab', (['lang', 'dataSet'], {}), '(lang, dataSet)\n', (1727, 1742), False, 'from buildVocab import readVocab\n'), ((1757, 1828), 'torch.load', 'torch.load', (["(setting.MODEL_HOME + '/%s.%s.encoder.pkl' % (dataSet, lang))"], {}), "(setting.MODEL_HOME + '/%s.%s.encoder.pkl' % (dataSet, lang))\n", (1767, 1828), False, 'import torch\n'), ((1843, 1914), 'torch.load', 'torch.load', (["(setting.MODEL_HOME + '/%s.%s.decoder.pkl' % (dataSet, lang))"], {}), "(setting.MODEL_HOME + '/%s.%s.decoder.pkl' % (dataSet, lang))\n", (1853, 1914), False, 'import torch\n'), ((1947, 2002), 'evaluate.evaluate', 'evaluate', (['nlVocab', 'codeVocab', 'encoder', 'decoder', 'CodeStr'], {}), '(nlVocab, codeVocab, encoder, decoder, CodeStr)\n', (1955, 2002), False, 'from evaluate import evaluate\n'), ((2267, 2334), 'evaluate.evaluate', 'evaluate', (['nlVocab', 'codeVocab', 'encoder', 'attn_decoder', 'input_sentence'], {}), '(nlVocab, codeVocab, encoder, attn_decoder, input_sentence)\n', (2275, 2334), False, 'from evaluate import evaluate\n'), ((1449, 1474), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (1471, 1474), True, 'import matplotlib.ticker as ticker\n'), ((1507, 1532), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (1529, 1532), True, 'import matplotlib.ticker as ticker\n'), ((656, 672), 'time.localtime', 'time.localtime', ([], {}), '()\n', (670, 672), False, 'import time\n'), ((1144, 1173), 'torch.FloatTensor', 'torch.FloatTensor', (['attentions'], {}), '(attentions)\n', (1161, 1173), False, 'import torch\n'), ((1634, 1650), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1648, 1650), False, 'import time\n')]
|
from __future__ import print_function
import torch
import argparse
import sys, os
import os.path as osp
import h5py
from train_utils import save_model_epoch
from models import DSN
from train_eval import train
from train_utils import Logger, read_json, weights_init
from evaluate import evaluate
parser = argparse.ArgumentParser(description="PyTorch code for ultrasound video summarization using reinforcement learning")
parser.add_argument('-s', '--split', type=str, required=False, help="path to split file",
default="../datasets/us_dataset/splits_50.json")
parser.add_argument('--split-id', type=int, default=0, help="split index (default: 0)")
parser.add_argument('-g', '--gtpath', type=str, required=False, help="path to txt gtscores",
default="../datasets/us_dataset/gt_scores/")
parser.add_argument('--train-model', type=str, default='unsup', choices=['sup', 'unsup'], help="training regime: supervised ('sup') or unsupervised ('unsup')")
parser.add_argument('--reward-type', type=str, default='Rall', choices=['Rdet', 'RrepRdet', 'RdivRdet', 'Rall'],
                    help="Reward type (default: Rall)")
parser.add_argument('--comb-score', action='store_false', help="disable combining sononet detection scores (combined by default)")
parser.add_argument('--lr', type=float, default=1e-4, help="learning rate (default: 1e-04)")
parser.add_argument('--proportion', type=float, default=0.15, help="proportion (default: 0.15)")
parser.add_argument('--hidden-dim', type=int, default=256, help="hidden unit dimension of DSN (default: 256)")
parser.add_argument('--num-layers', type=int, default=1, help="number of RNN layers (default: 1)")
parser.add_argument('--rnn-cell', type=str, default='lstm', help="RNN cell type (default: lstm)")
# Optimization options
parser.add_argument('--weight-decay', type=float, default=1e-05, help="weight decay rate (default: 1e-05)")
parser.add_argument('--max-epoch', type=int, default=300, help="maximum epoch for training (default: 300)")
parser.add_argument('--stepsize', type=int, default=60, help="how many steps to decay learning rate (default: 60)")
parser.add_argument('--gamma', type=float, default=0.5, help="learning rate decay (default: 0.5)")
parser.add_argument('--num-episode', type=int, default=5, help="number of episodes (default: 5)")
parser.add_argument('--beta', type=float, default=0.1, help="weight for summary length penalty term (default: 0.1)")
# Misc
parser.add_argument('--seed', type=int, default=1, help="random seed (default: 1)")
parser.add_argument('--gpu', type=str, default='0', help="which gpu devices to use")
parser.add_argument('--save-dir', type=str, default='../output/', help="path to save output (default: 'log')")
parser.add_argument('--resume', type=str, default='', help="path to resume file")
parser.add_argument('--verbose', action='store_true', help="whether to show detailed test results")
args = parser.parse_args()
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if __name__ == '__main__':
BASE_DATA_PATH = '../datasets/us_dataset/h5_files/'
dataset = {}
    # load the video data (features saved in .h5 format)
print("Loading all dataset: ")
for subj in os.listdir(BASE_DATA_PATH):
h5data_path = os.path.join(BASE_DATA_PATH, subj)
if not os.path.exists(h5data_path):
print("The dataset for subj %s doesn't exist. Skipping..." % subj)
continue
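        # key each video by the first 10 characters of its filename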
dataset[subj[:10]] = h5py.File(h5data_path, 'r')
splits = read_json(args.split)
split = splits[args.split_id]
num_train_vids = len(split['train_keys'])
num_test_vids = len(split['test_keys'])
specif_path = 'train_' + str(args.split_id) + '_' + args.reward_type
args.save_path = osp.join(args.save_dir, args.train_model, 'split' + str(args.split_id))
sys.stdout = Logger(osp.join(args.save_path, 'log_' + args.reward_type + str(args.lr)
+ args.train_model + '.txt'))
model = DSN(in_dim=64, hid_dim=args.hidden_dim, num_layers=args.num_layers, cell=args.rnn_cell)
model.apply(weights_init)
print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
print(" ========== \nArgs:{} \n========== ".format(args))
args.train_keys = split['train_keys']
args.test_keys = split['test_keys']
if args.train_model == 'sup':
print("========Supervised Learning========")
else:
args.use_reward = True
print("========Unsupervised Learning========")
args.demo_h5 = osp.join(args.save_path, 'h5_res' + args.reward_type + str(args.lr) +
args.train_model)
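    # reinforcement-learning training over the selected split; returns the trained DSN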
model = train(args, model, dataset)
# Testing
Fscore, Precision, Recall = evaluate(args, model, dataset)
# save model
save_model_epoch(args, model, args.max_epoch)
|
[
"evaluate.evaluate"
] |
[((306, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Pytorch code for ultrasound video summarization using reinforcement learning"""'], {}), "(\n 'Pytorch code for ultrasound video summarization using reinforcement learning'\n )\n", (329, 419), False, 'import argparse\n'), ((2938, 2966), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2955, 2966), False, 'import torch\n'), ((3023, 3048), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3046, 3048), False, 'import torch\n'), ((3262, 3288), 'os.listdir', 'os.listdir', (['BASE_DATA_PATH'], {}), '(BASE_DATA_PATH)\n', (3272, 3288), False, 'import sys, os\n'), ((3563, 3584), 'train_utils.read_json', 'read_json', (['args.split'], {}), '(args.split)\n', (3572, 3584), False, 'from train_utils import Logger, read_json, weights_init\n'), ((4042, 4134), 'models.DSN', 'DSN', ([], {'in_dim': '(64)', 'hid_dim': 'args.hidden_dim', 'num_layers': 'args.num_layers', 'cell': 'args.rnn_cell'}), '(in_dim=64, hid_dim=args.hidden_dim, num_layers=args.num_layers, cell=\n args.rnn_cell)\n', (4045, 4134), False, 'from models import DSN\n'), ((4734, 4761), 'train_eval.train', 'train', (['args', 'model', 'dataset'], {}), '(args, model, dataset)\n', (4739, 4761), False, 'from train_eval import train\n'), ((4809, 4839), 'evaluate.evaluate', 'evaluate', (['args', 'model', 'dataset'], {}), '(args, model, dataset)\n', (4817, 4839), False, 'from evaluate import evaluate\n'), ((4862, 4907), 'train_utils.save_model_epoch', 'save_model_epoch', (['args', 'model', 'args.max_epoch'], {}), '(args, model, args.max_epoch)\n', (4878, 4907), False, 'from train_utils import save_model_epoch\n'), ((3312, 3346), 'os.path.join', 'os.path.join', (['BASE_DATA_PATH', 'subj'], {}), '(BASE_DATA_PATH, subj)\n', (3324, 3346), False, 'import sys, os\n'), ((3521, 3548), 'h5py.File', 'h5py.File', (['h5data_path', '"""r"""'], {}), "(h5data_path, 'r')\n", (3530, 3548), False, 'import h5py\n'), ((3362, 3389), 'os.path.exists', 'os.path.exists', (['h5data_path'], {}), '(h5data_path)\n', (3376, 3389), False, 'import sys, os\n')]
|
from agents import *
from models import *
import copy
from evaluate import evaluate
import multiprocessing as mp
from itertools import product
#
class Tuner():
def __init__(self, model, algorithm, params_dict, passive=True):
self.safety = []
self.efficiency = []
self.collision_cnt = []
self.result = []
self.model = model
self.algorithm = algorithm
self.params = list(params_dict.keys())
self.params_range = list(params_dict.values())
#dT will be reset when evaluating by simulate_data
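        # eval instantiates the model class with a fresh algorithm instance, i.e. <model>(<algorithm>(), 0.02)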
self.robot = eval(model + '(' + algorithm + '(), 0.02)')
self.param_combs = list()
self.robots = list()
self.passive = passive
def dfs(self, idx, param_str, param_set, robot):
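        # Depth-first enumeration of the Cartesian product of all parameter ranges:
        # each leaf records a human-readable param string and a deep-copied robot
        # whose agent attributes are set to that combination.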
if idx == len(self.params):
self.param_combs.append(param_str)
self.robots.append(robot)
return
for p in self.params_range[idx]:
p = round(p, 3)
param_set[self.params[idx]] = p
new_robot = copy.deepcopy(robot)
exec('new_robot.agent.'+self.params[idx]+' = '+str(p))
self.dfs(idx+1, param_str+self.params[idx]+'='+str(p)+'__', param_set, new_robot)
def processInput(self, param_str, robot):
score = evaluate(self.model, self.algorithm, False, robot, param_str[:-2], self.passive)
# self.safety.append(-score['safety'])
# self.efficiency.append(score['efficiency'])
# self.collision_cnt.append(score['collision_cnt'])
# print(-score['safety'])
return (score['safety'], score['efficiency'], score['collision_cnt'], param_str)
def tune(self):
self.dfs(0, '', dict(), self.robot)
num_cores = mp.cpu_count()
pool = mp.Pool(num_cores)
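        # evaluate every parameter combination in parallel; starmap returns results in input order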
        self.result = pool.starmap(self.processInput, zip(self.param_combs, self.robots))
        pool.close()
        pool.join()
        return self.result
|
[
"evaluate.evaluate"
] |
[((1327, 1412), 'evaluate.evaluate', 'evaluate', (['self.model', 'self.algorithm', '(False)', 'robot', 'param_str[:-2]', 'self.passive'], {}), '(self.model, self.algorithm, False, robot, param_str[:-2], self.passive\n )\n', (1335, 1412), False, 'from evaluate import evaluate\n'), ((1811, 1825), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1823, 1825), True, 'import multiprocessing as mp\n'), ((1841, 1859), 'multiprocessing.Pool', 'mp.Pool', (['num_cores'], {}), '(num_cores)\n', (1848, 1859), True, 'import multiprocessing as mp\n'), ((1074, 1094), 'copy.deepcopy', 'copy.deepcopy', (['robot'], {}), '(robot)\n', (1087, 1094), False, 'import copy\n')]
|
# Code adapted from https://github.com/guoyang9/NCF
import os
import time
import argparse
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import model
import evaluate
import data_utils
import adaptdl
import adaptdl.torch as adl
import adaptdl.env
import os.path
parser = argparse.ArgumentParser()
parser.add_argument("--lr",
type=float,
default=0.001,
help="learning rate")
parser.add_argument("--dropout",
type=float,
default=0.1,
help="dropout rate")
parser.add_argument("--batch_size",
type=int,
default=256,
help="batch size for training")
parser.add_argument("--epochs",
type=int,
default=20,
help="training epoches")
parser.add_argument("--top_k",
type=int,
default=10,
help="compute metrics@top_k")
parser.add_argument("--factor_num",
type=int,
default=32,
help="predictive factors numbers in the model")
parser.add_argument("--num_layers",
type=int,
default=3,
help="number of layers in MLP model")
parser.add_argument("--num_ng",
type=int,
default=4,
help="sample negative items for training")
parser.add_argument("--test_num_ng",
type=int,
default=99,
help="sample part of negative items for testing")
parser.add_argument("--out",
default=True,
help="save model or not")
parser.add_argument("--gpu",
type=str,
default="0",
help="gpu card ID")
parser.add_argument("--autoscale-bsz",
dest='autoscale_bsz',
default=False,
action='store_true',
help="Use AdaptDL batchsize autoscaling")
parser.add_argument("--gradient-accumulation",
dest='gradient_accumulation',
default=False,
action='store_true',
help="Use AdaptDL batchsize autoscaling")
parser.add_argument("--dataset",
type=str,
choices=['ml-1m', 'pinterest-20'],
default="ml-1m")
parser.add_argument("--model-type",
dest="model_type",
type=str,
choices=['MLP', 'GMF', 'NeuMF-end', 'NeuMF-pre'],
default="NeuMF-end")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
cudnn.benchmark = True
dataset = args.dataset
model_type = args.model_type
# paths
main_path = adaptdl.env.share_path()
train_rating = os.path.join(main_path, '{}.train.rating'.format(dataset))
test_rating = os.path.join(main_path, '{}.test.rating'.format(dataset))
test_negative = os.path.join(main_path, '{}.test.negative'.format(dataset))
model_path = os.path.join(main_path, 'models')
GMF_model_path = os.path.join(model_path, 'GMF.pth')
MLP_model_path = os.path.join(model_path, 'MLP.pth')
NeuMF_model_path = os.path.join(model_path, 'NeuMF.pth')
############################## PREPARE DATASET ##########################
train_data, test_data, user_num, item_num, train_mat = \
data_utils.load_all(main_path, train_rating, test_negative, dataset)
# construct the train and test datasets
train_dataset = data_utils.NCFData(
train_data, item_num, train_mat, args.num_ng, True)
test_dataset = data_utils.NCFData(
test_data, item_num, train_mat, 0, False)
train_loader = adl.AdaptiveDataLoader(
train_dataset,
batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True)
test_loader = adl.AdaptiveDataLoader(
test_dataset,
batch_size=args.test_num_ng+1, shuffle=False, num_workers=0)
if args.autoscale_bsz:
train_loader.autoscale_batch_size(
8192, local_bsz_bounds=(32, 512),
gradient_accumulation=args.gradient_accumulation)
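    # the global batch size may grow up to 8192 while each replica's local batch stays within [32, 512]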
########################### CREATE MODEL #################################
if model_type == 'NeuMF-pre':
assert os.path.exists(GMF_model_path), 'lack of GMF model'
assert os.path.exists(MLP_model_path), 'lack of MLP model'
GMF_model = torch.load(GMF_model_path)
MLP_model = torch.load(MLP_model_path)
else:
GMF_model = None
MLP_model = None
network = model.NCF(user_num, item_num, args.factor_num, args.num_layers,
args.dropout, model_type, GMF_model, MLP_model)
adaptdl.torch.init_process_group("nccl" if torch.cuda.is_available()
else "gloo")
network.cuda()
loss_function = torch.nn.BCEWithLogitsLoss()
if model_type == 'NeuMF-pre':
optimizer = optim.SGD(network.parameters(), lr=args.lr)
else:
optimizer = optim.Adam(network.parameters(), lr=args.lr)
network = adl.AdaptiveDataParallel(network, optimizer,
find_unused_parameters=True)
########################### TRAINING #####################################
count, best_hr, best_ndcg, best_epoch = 0, 0, 0, 0  # initialize so the final summary print is always defined
tensorboard_dir = os.path.join(os.getenv("ADAPTDL_TENSORBOARD_LOGDIR", "/tmp"),
adaptdl.env.job_id())
with SummaryWriter(tensorboard_dir) as writer:
for epoch in adl.remaining_epochs_until(args.epochs):
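        # remaining_epochs_until lets AdaptDL resume from the last finished epoch after a restart or rescale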
        network.train()  # Enable dropout (if any).
start_time = time.time()
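        # resample negatives: each positive user-item pair gets args.num_ng fresh negative items this epoch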
train_loader.dataset.ng_sample()
gain = 0
for user, item, label in train_loader:
user = user.cuda()
item = item.cuda()
label = label.float().cuda()
network.zero_grad()
prediction = network(user, item)
loss = loss_function(prediction, label)
loss.backward()
optimizer.step()
count += 1
gain = network.gain
batchsize = train_loader.current_batch_size
accumulation_steps = train_loader.accumulation_steps
train_loader.to_tensorboard(writer, epoch, tag_prefix="AdaptDL/Data/")
network.to_tensorboard(writer, epoch, tag_prefix="AdaptDL/Model/")
network.eval()
stats = adl.Accumulator()
HR, NDCG = evaluate.metrics(network, test_loader, args.top_k)
stats['HR'] += HR
stats['replicas'] += 1.0
with stats.synchronized():
writer.add_scalar('Loss/HR', stats['HR'] / stats['replicas'],
epoch)
elapsed_time = time.time() - start_time
print("The time elapse of epoch {:03d}".format(epoch) + " is: " +
time.strftime("%H: %M: %S", time.gmtime(elapsed_time)))
print("HR: {:.3f}\tNDCG: {:.3f}".format(np.mean(HR), np.mean(NDCG)))
if HR > best_hr:
best_hr, best_ndcg, best_epoch = HR, NDCG, epoch
if args.out and adaptdl.env.replica_rank() == 0:
if not os.path.exists(model_path):
os.mkdir(model_path)
torch.save(
network._state.model,
'{}/{}.pth'.format(model_path, model_type))
print("End. Best epoch {:03d}: HR = {:.3f}, NDCG = {:.3f}".format(
best_epoch, best_hr, best_ndcg))
|
[
"evaluate.metrics"
] |
[((391, 416), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (414, 416), False, 'import argparse\n'), ((3009, 3033), 'adaptdl.env.share_path', 'adaptdl.env.share_path', ([], {}), '()\n', (3031, 3033), False, 'import adaptdl\n'), ((3271, 3304), 'os.path.join', 'os.path.join', (['main_path', '"""models"""'], {}), "(main_path, 'models')\n", (3283, 3304), False, 'import os\n'), ((3322, 3357), 'os.path.join', 'os.path.join', (['model_path', '"""GMF.pth"""'], {}), "(model_path, 'GMF.pth')\n", (3334, 3357), False, 'import os\n'), ((3375, 3410), 'os.path.join', 'os.path.join', (['model_path', '"""MLP.pth"""'], {}), "(model_path, 'MLP.pth')\n", (3387, 3410), False, 'import os\n'), ((3430, 3467), 'os.path.join', 'os.path.join', (['model_path', '"""NeuMF.pth"""'], {}), "(model_path, 'NeuMF.pth')\n", (3442, 3467), False, 'import os\n'), ((3604, 3672), 'data_utils.load_all', 'data_utils.load_all', (['main_path', 'train_rating', 'test_negative', 'dataset'], {}), '(main_path, train_rating, test_negative, dataset)\n', (3623, 3672), False, 'import data_utils\n'), ((3730, 3800), 'data_utils.NCFData', 'data_utils.NCFData', (['train_data', 'item_num', 'train_mat', 'args.num_ng', '(True)'], {}), '(train_data, item_num, train_mat, args.num_ng, True)\n', (3748, 3800), False, 'import data_utils\n'), ((3825, 3885), 'data_utils.NCFData', 'data_utils.NCFData', (['test_data', 'item_num', 'train_mat', '(0)', '(False)'], {}), '(test_data, item_num, train_mat, 0, False)\n', (3843, 3885), False, 'import data_utils\n'), ((3910, 4025), 'adaptdl.torch.AdaptiveDataLoader', 'adl.AdaptiveDataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=\n True, num_workers=4, drop_last=True)\n', (3932, 4025), True, 'import adaptdl.torch as adl\n'), ((4044, 4147), 'adaptdl.torch.AdaptiveDataLoader', 'adl.AdaptiveDataLoader', (['test_dataset'], {'batch_size': '(args.test_num_ng + 1)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=args.test_num_ng + 1,\n shuffle=False, num_workers=0)\n', (4066, 4147), True, 'import adaptdl.torch as adl\n'), ((4691, 4807), 'model.NCF', 'model.NCF', (['user_num', 'item_num', 'args.factor_num', 'args.num_layers', 'args.dropout', 'model_type', 'GMF_model', 'MLP_model'], {}), '(user_num, item_num, args.factor_num, args.num_layers, args.\n dropout, model_type, GMF_model, MLP_model)\n', (4700, 4807), False, 'import model\n'), ((4969, 4997), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (4995, 4997), False, 'import torch\n'), ((5166, 5239), 'adaptdl.torch.AdaptiveDataParallel', 'adl.AdaptiveDataParallel', (['network', 'optimizer'], {'find_unused_parameters': '(True)'}), '(network, optimizer, find_unused_parameters=True)\n', (5190, 5239), True, 'import adaptdl.torch as adl\n'), ((4431, 4461), 'os.path.exists', 'os.path.exists', (['GMF_model_path'], {}), '(GMF_model_path)\n', (4445, 4461), False, 'import os\n'), ((4494, 4524), 'os.path.exists', 'os.path.exists', (['MLP_model_path'], {}), '(MLP_model_path)\n', (4508, 4524), False, 'import os\n'), ((4562, 4588), 'torch.load', 'torch.load', (['GMF_model_path'], {}), '(GMF_model_path)\n', (4572, 4588), False, 'import torch\n'), ((4605, 4631), 'torch.load', 'torch.load', (['MLP_model_path'], {}), '(MLP_model_path)\n', (4615, 4631), False, 'import torch\n'), ((5403, 5450), 'os.getenv', 'os.getenv', (['"""ADAPTDL_TENSORBOARD_LOGDIR"""', '"""/tmp"""'], {}), "('ADAPTDL_TENSORBOARD_LOGDIR', '/tmp')\n", (5412, 5450), False, 'import os\n'), ((5483, 5503), 'adaptdl.env.job_id', 'adaptdl.env.job_id', ([], {}), '()\n', (5501, 5503), False, 'import adaptdl\n'), ((5510, 5540), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['tensorboard_dir'], {}), '(tensorboard_dir)\n', (5523, 5540), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5569, 5608), 'adaptdl.torch.remaining_epochs_until', 'adl.remaining_epochs_until', (['args.epochs'], {}), '(args.epochs)\n', (5595, 5608), True, 'import adaptdl.torch as adl\n'), ((4866, 4891), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4889, 4891), False, 'import torch\n'), ((5684, 5695), 'time.time', 'time.time', ([], {}), '()\n', (5693, 5695), False, 'import time\n'), ((6462, 6479), 'adaptdl.torch.Accumulator', 'adl.Accumulator', ([], {}), '()\n', (6477, 6479), True, 'import adaptdl.torch as adl\n'), ((6499, 6549), 'evaluate.metrics', 'evaluate.metrics', (['network', 'test_loader', 'args.top_k'], {}), '(network, test_loader, args.top_k)\n', (6515, 6549), False, 'import evaluate\n'), ((6779, 6790), 'time.time', 'time.time', ([], {}), '()\n', (6788, 6790), False, 'import time\n'), ((6996, 7007), 'numpy.mean', 'np.mean', (['HR'], {}), '(HR)\n', (7003, 7007), True, 'import numpy as np\n'), ((7009, 7022), 'numpy.mean', 'np.mean', (['NDCG'], {}), '(NDCG)\n', (7016, 7022), True, 'import numpy as np\n'), ((6920, 6945), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (6931, 6945), False, 'import time\n'), ((7140, 7166), 'adaptdl.env.replica_rank', 'adaptdl.env.replica_rank', ([], {}), '()\n', (7164, 7166), False, 'import adaptdl\n'), ((7196, 7222), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (7210, 7222), False, 'import os\n'), ((7244, 7264), 'os.mkdir', 'os.mkdir', (['model_path'], {}), '(model_path)\n', (7252, 7264), False, 'import os\n')]
|
#!/usr/bin/env python3
import argparse
import os
import numpy as np
import time
import torch
from torch import optim
from torch import nn
import visual_plt
import utils
import evaluate
from data import get_multitask_experiment
from encoder import Classifier
from vae_models import AutoEncoder
import callbacks as cb
from train import train_cl
from continual_learner import ContinualLearner
parser = argparse.ArgumentParser('./main.py', description='Run individual continual learning experiment.')
parser.add_argument('--seed', type=int, default=0, help='random seed (for each random-module used)')
parser.add_argument('--no-gpus', action='store_false', dest='cuda', help="don't use GPUs")
parser.add_argument('--data-dir', type=str, default='./datasets', dest='d_dir', help="default: %(default)s")
parser.add_argument('--plot-dir', type=str, default='./plots', dest='p_dir', help="default: %(default)s")
parser.add_argument('--results-dir', type=str, default='./results', dest='r_dir', help="default: %(default)s")
# experimental task parameters.
task_params = parser.add_argument_group('Task Parameters')
task_params.add_argument('--experiment', type=str, default='splitMNIST', choices=['permMNIST', 'splitMNIST'])
task_params.add_argument('--scenario', type=str, default='class', choices=['task', 'domain', 'class'])
task_params.add_argument('--tasks', type=int, default=5, help='number of tasks')
# model architecture parameters
model_params = parser.add_argument_group('Model Parameters')
model_params.add_argument('--fc-layers', type=int, default=3, dest='fc_lay', help="# of fully-connected layers")
model_params.add_argument('--fc-units', type=int, default=400, metavar="N", help="# of units in first fc-layers")
model_params.add_argument('--fc-drop', type=float, default=0., help="dropout probability for fc-units")
model_params.add_argument('--fc-bn', type=str, default="no", help="use batch-norm in the fc-layers (no|yes)")
model_params.add_argument('--fc-nl', type=str, default="relu", choices=["relu", "leakyrelu"])
# training hyperparameters / initialization
train_params = parser.add_argument_group('Training Parameters')
train_params.add_argument('--iters', type=int, default=2000, help="# batches to optimize solver")
train_params.add_argument('--lr', type=float, default=0.001, help="learning rate")
train_params.add_argument('--batch', type=int, default=128, help="batch-size")
train_params.add_argument('--optimizer', type=str, choices=['adam', 'adam_reset', 'sgd'], default='adam')
# "memory replay" parameters
replay_params = parser.add_argument_group('Replay Parameters')
replay_params.add_argument('--feedback', action="store_true", help="equip model with feedback connections")
replay_params.add_argument('--replay', type=str, default='none', choices=['offline', 'exact', 'generative', 'none', 'current'])
replay_params.add_argument('--distill', action='store_true', help="use distillation for replay?")
replay_params.add_argument('--temp', type=float, default=2., dest='temp', help="temperature for distillation")
# -generative model parameters
genmodel_params = parser.add_argument_group('Generative Model Parameters')
genmodel_params.add_argument('--z-dim', type=int, default=100, help='size of latent representation (default: 100)')
genmodel_params.add_argument('--g-fc-lay', type=int, help='[fc_layers] in generator (default: same as classifier)')
genmodel_params.add_argument('--g-fc-uni', type=int, help='[fc_units] in generator (default: same as classifier)')
genmodel_params.add_argument('--g-iters', type=int, help="# batches to train generator (default: as classifier)")
# "memory allocation" parameters
cl_params = parser.add_argument_group('Memory Allocation Parameters')
cl_params.add_argument('--ewc', action='store_true', help="use 'EWC' (Kirkpatrick et al, 2017)")
cl_params.add_argument('--lambda', type=float, default=5000.,dest="ewc_lambda", help="--> EWC: regularisation strength")
cl_params.add_argument('--fisher-n', type=int, help="--> EWC: sample size estimating Fisher Information")
cl_params.add_argument('--online', action='store_true', help="--> EWC: perform 'online EWC'")
cl_params.add_argument('--gamma', type=float, default=1., help="--> EWC: forgetting coefficient (for 'online EWC')")
cl_params.add_argument('--emp-fi', action='store_true', help="--> EWC: estimate FI with provided labels")
cl_params.add_argument('--si', action='store_true', help="use 'Synaptic Intelligence' (Zenke, Poole et al, 2017)")
cl_params.add_argument('--c', type=float, default=0.1, dest="si_c", help="--> SI: regularisation strength")
cl_params.add_argument('--epsilon', type=float, default=0.1, dest="epsilon", help="--> SI: dampening parameter")
cl_params.add_argument('--XdG', type=float, default=0., dest="gating_prop",help="XdG: prop neurons per layer to gate")
# evaluation parameters
eval_params = parser.add_argument_group('Evaluation Parameters')
eval_params.add_argument('--pdf', action='store_true', help="generate pdf with results")
eval_params.add_argument('--visdom', action='store_true', help="use visdom for on-the-fly plots")
eval_params.add_argument('--log-per-task', action='store_true', help="set all visdom-logs to [iters]")
eval_params.add_argument('--loss-log', type=int, default=200, metavar="N", help="# iters after which to plot loss")
eval_params.add_argument('--prec-log', type=int, default=200, metavar="N", help="# iters after which to plot precision")
eval_params.add_argument('--prec-n', type=int, default=1024, help="# samples for evaluating solver's precision")
eval_params.add_argument('--sample-log', type=int, default=500, metavar="N", help="# iters after which to plot samples")
eval_params.add_argument('--sample-n', type=int, default=64, help="# images to show")
def run(args):
# Set default arguments
args.g_fc_lay = args.fc_lay if args.g_fc_lay is None else args.g_fc_lay
args.g_fc_uni = args.fc_units if args.g_fc_uni is None else args.g_fc_uni
args.g_iters = args.iters if args.g_iters is None else args.g_iters
# -if [log_per_task], reset all logs
if args.log_per_task:
args.prec_log = args.iters
args.loss_log = args.iters
args.sample_log = args.iters
# -if XdG is selected but not the incremental task learning scenario, give error
if (not args.scenario=="task") and args.gating_prop>0:
raise ValueError("'XdG' only works for the incremental task learning scenario.")
# -if EWC, SI or XdG is selected together with 'feedback', give error
if args.feedback and (args.ewc or args.si or args.gating_prop>0):
raise NotImplementedError("EWC, SI and XdG are not supported with feedback connections.")
# -if XdG is selected together with replay of any kind, give error
if args.gating_prop>0 and (not args.replay=="none"):
raise NotImplementedError("XdG is not supported with '{}' replay.".format(args.replay))
# -create plots- and results-directories if needed
if not os.path.isdir(args.r_dir):
os.mkdir(args.r_dir)
if args.pdf and not os.path.isdir(args.p_dir):
os.mkdir(args.p_dir)
# Use cuda?
cuda = torch.cuda.is_available() and args.cuda
device = torch.device("cuda" if cuda else "cpu")
# Set random seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
#-------------------------------------------------------------------------------------------------#
#----------------#
#----- DATA -----#
#----------------#
# Prepare data for chosen experiment
(train_datasets, test_datasets), config, classes_per_task = get_multitask_experiment(
name=args.experiment, scenario=args.scenario, tasks=args.tasks, data_dir=args.d_dir,
verbose=True, exception=True if args.seed==0 else False,
)
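    # config carries image size/channels/classes; classes_per_task drives the per-task class ranges below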
#-------------------------------------------------------------------------------------------------#
#------------------------------#
#----- MODEL (CLASSIFIER) -----#
#------------------------------#
# Define main model (i.e., classifier, if requested with feedback connections)
if args.feedback:
model = AutoEncoder(
image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
fc_layers=args.fc_lay, fc_units=args.fc_units, z_dim=args.z_dim,
fc_drop=args.fc_drop, fc_bn=True if args.fc_bn=="yes" else False, fc_nl=args.fc_nl,
).to(device)
        model.lamda_pl = 1. #--> so that this VAE is also trained to classify
else:
model = Classifier(
image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
fc_layers=args.fc_lay, fc_units=args.fc_units, fc_drop=args.fc_drop, fc_nl=args.fc_nl,
fc_bn=True if args.fc_bn=="yes" else False, excit_buffer=True if args.gating_prop>0 else False,
).to(device)
# Define optimizer (only include parameters that "requires_grad")
model.optim_list = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': args.lr}]
model.optim_type = args.optimizer
if model.optim_type in ("adam", "adam_reset"):
model.optimizer = optim.Adam(model.optim_list, betas=(0.9, 0.999))
elif model.optim_type=="sgd":
model.optimizer = optim.SGD(model.optim_list)
else:
raise ValueError("Unrecognized optimizer, '{}' is not currently a valid option".format(args.optimizer))
# Set loss-function for reconstruction
if args.feedback:
model.recon_criterion = nn.BCELoss(size_average=True)
#-------------------------------------------------------------------------------------------------#
#-----------------------------------#
#----- CL-STRATEGY: ALLOCATION -----#
#-----------------------------------#
# Elastic Weight Consolidation (EWC)
if isinstance(model, ContinualLearner):
model.ewc_lambda = args.ewc_lambda if args.ewc else 0
model.fisher_n = args.fisher_n
model.gamma = args.gamma
model.online = args.online
model.emp_FI = args.emp_fi
    # Synaptic Intelligence (SI)
if isinstance(model, ContinualLearner):
model.si_c = args.si_c if args.si else 0
model.epsilon = args.epsilon
# XdG: create for every task a "mask" for each hidden fully connected layer
if isinstance(model, ContinualLearner) and args.gating_prop>0:
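        # XdG: for each task, randomly gate (silence) a fixed proportion of units in every hidden fc-layer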
mask_dict = {}
excit_buffer_list = []
for task_id in range(args.tasks):
mask_dict[task_id+1] = {}
for i in range(model.fcE.layers):
layer = getattr(model.fcE, "fcLayer{}".format(i+1)).linear
if task_id==0:
excit_buffer_list.append(layer.excit_buffer)
n_units = len(layer.excit_buffer)
gated_units = np.random.choice(n_units, size=int(args.gating_prop*n_units), replace=False)
mask_dict[task_id+1][i] = gated_units
model.mask_dict = mask_dict
model.excit_buffer_list = excit_buffer_list
#-------------------------------------------------------------------------------------------------#
#-------------------------------#
#----- CL-STRATEGY: REPLAY -----#
#-------------------------------#
# Use distillation loss (i.e., soft targets) for replayed data? (and set temperature)
model.replay_targets = "soft" if args.distill else "hard"
model.KD_temp = args.temp
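    # "soft" replays the previous model's outputs at temperature KD_temp (distillation); "hard" replays class labels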
# If needed, specify separate model for the generator
train_gen = True if (args.replay=="generative" and not args.feedback) else False
if train_gen:
# -specify architecture
generator = AutoEncoder(
image_size=config['size'], image_channels=config['channels'],
fc_layers=args.g_fc_lay, fc_units=args.g_fc_uni, z_dim=args.z_dim, classes=config['classes'],
fc_drop=args.fc_drop, fc_bn=True if args.fc_bn=="yes" else False, fc_nl=args.fc_nl,
).to(device)
# -set optimizer(s)
generator.optim_list = [{'params': filter(lambda p: p.requires_grad, generator.parameters()), 'lr': args.lr}]
generator.optim_type = args.optimizer
if generator.optim_type in ("adam", "adam_reset"):
generator.optimizer = optim.Adam(generator.optim_list, betas=(0.9, 0.999))
elif generator.optim_type == "sgd":
generator.optimizer = optim.SGD(generator.optim_list)
# -set reconstruction criterion
generator.recon_criterion = nn.BCELoss(size_average=True)
else:
generator = None
#-------------------------------------------------------------------------------------------------#
#---------------------#
#----- REPORTING -----#
#---------------------#
# Get parameter-stamp (and print on screen)
param_stamp = utils.get_param_stamp(
args, model.name, verbose=True, replay=True if (not args.replay=="none") else False,
replay_model_name=generator.name if (args.replay=="generative" and not args.feedback) else None,
)
# Print some model-characteristics on the screen
# -main model
print("\n")
utils.print_model_info(model, title="MAIN MODEL")
# -generator
if generator is not None:
utils.print_model_info(generator, title="GENERATOR")
# Prepare for plotting
# -open pdf
pp = visual_plt.open_pdf("{}/{}.pdf".format(args.p_dir, param_stamp)) if args.pdf else None
# -define [precision_dict] to keep track of performance during training for later plotting
precision_dict = evaluate.initiate_precision_dict(args.tasks)
# -visdom-settings
if args.visdom:
env_name = "{exp}{tasks}-{scenario}".format(exp=args.experiment, tasks=args.tasks, scenario=args.scenario)
graph_name = "{fb}{mode}{syn}{ewc}{XdG}".format(
fb="1M-" if args.feedback else "", mode=args.replay,
syn="-si{}".format(args.si_c) if args.si else "",
ewc="-ewc{}{}".format(args.ewc_lambda, "-O{}".format(args.gamma) if args.online else "") if args.ewc else "",
XdG="" if args.gating_prop==0 else "-XdG{}".format(args.gating_prop)
)
visdom = {'env': env_name, 'graph': graph_name}
else:
visdom = None
#-------------------------------------------------------------------------------------------------#
#---------------------#
#----- CALLBACKS -----#
#---------------------#
# Callbacks for reporting on and visualizing loss
generator_loss_cbs = [
cb._VAE_loss_cb(log=args.loss_log, visdom=visdom, model=model if args.feedback else generator, tasks=args.tasks,
iters_per_task=args.g_iters, replay=False if args.replay=="none" else True)
] if (train_gen or args.feedback) else [None]
solver_loss_cbs = [
cb._solver_loss_cb(log=args.loss_log, visdom=visdom, model=model, tasks=args.tasks,
iters_per_task=args.iters, replay=False if args.replay=="none" else True)
] if (not args.feedback) else [None]
# Callbacks for evaluating and plotting generated / reconstructed samples
sample_cbs = [
cb._sample_cb(log=args.sample_log, visdom=visdom, config=config, test_datasets=test_datasets,
sample_size=args.sample_n, iters_per_task=args.g_iters)
] if (train_gen or args.feedback) else [None]
# Callbacks for reporting and visualizing accuracy
# -visdom (i.e., after each [prec_log])
eval_cb = cb._eval_cb(
log=args.prec_log, test_datasets=test_datasets, visdom=visdom, iters_per_task=args.iters,scenario=args.scenario,
collate_fn=utils.label_squeezing_collate_fn, test_size=args.prec_n, classes_per_task=classes_per_task,
task_mask=True if isinstance(model, ContinualLearner) and (args.gating_prop>0) else False
)
# -pdf: for summary plots (i.e, only after each task)
eval_cb_full = cb._eval_cb(
log=args.iters, test_datasets=test_datasets, precision_dict=precision_dict, scenario=args.scenario,
collate_fn=utils.label_squeezing_collate_fn, iters_per_task=args.iters, classes_per_task=classes_per_task,
task_mask = True if isinstance(model, ContinualLearner) and (args.gating_prop > 0) else False
)
# -collect them in <lists>
eval_cbs = [eval_cb, eval_cb_full]
#-------------------------------------------------------------------------------------------------#
#--------------------#
#----- TRAINING -----#
#--------------------#
print("--> Training:")
# Keep track of training-time
start = time.time()
# Train model
train_cl(
model, train_datasets, replay_mode=args.replay, scenario=args.scenario, classes_per_task=classes_per_task,
iters=args.iters, batch_size=args.batch, collate_fn=utils.label_squeezing_collate_fn,
visualize=True if args.visdom else False,
generator=generator, gen_iters=args.g_iters, gen_loss_cbs=generator_loss_cbs,
sample_cbs=sample_cbs, eval_cbs=eval_cbs, loss_cbs=generator_loss_cbs if args.feedback else solver_loss_cbs,
)
# Get total training-time in seconds, and write to file
training_time = time.time() - start
time_file = open("{}/time-{}.txt".format(args.r_dir, param_stamp), 'w')
time_file.write('{}\n'.format(training_time))
time_file.close()
#-------------------------------------------------------------------------------------------------#
#----------------------#
#----- EVALUATION -----#
#----------------------#
print('\n\n--> Evaluation ("incremental {} learning scenario"):'.format(args.scenario))
# Generation (plot in pdf)
if (pp is not None) and train_gen:
evaluate.show_samples(generator, config, size=args.sample_n, pdf=pp)
if (pp is not None) and args.feedback:
evaluate.show_samples(model, config, size=args.sample_n, pdf=pp)
# Reconstruction (plot in pdf)
if (pp is not None) and (train_gen or args.feedback):
for i in range(args.tasks):
if args.feedback:
evaluate.show_reconstruction(model, test_datasets[i], config, pdf=pp, task=i+1)
else:
evaluate.show_reconstruction(generator, test_datasets[i], config, pdf=pp, task=i+1)
# Classifier (print on screen & write to file)
if args.scenario=="task":
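        # task scenario: evaluate each task with predictions restricted to that task's own classes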
precs = [evaluate.validate(
model, test_datasets[i], verbose=False, test_size=None,
task_mask=True if isinstance(model, ContinualLearner) and args.gating_prop>0 else False,
task=i+1, allowed_classes=list(range(classes_per_task*i, classes_per_task*(i+1)))
) for i in range(args.tasks)]
else:
precs = [evaluate.validate(
model, test_datasets[i], verbose=False, test_size=None, task=i+1
) for i in range(args.tasks)]
print("\n Precision on test-set:")
for i in range(args.tasks):
print(" - Task {}: {:.4f}".format(i + 1, precs[i]))
average_precs = sum(precs) / args.tasks
print('=> average precision over all {} tasks: {:.4f}\n'.format(args.tasks, average_precs))
#-------------------------------------------------------------------------------------------------#
#------------------#
#----- OUTPUT -----#
#------------------#
# Average precision on full test set (no restrictions on which nodes can be predicted: "incremental" / "singlehead")
output_file = open("{}/prec-{}.txt".format(args.r_dir, param_stamp), 'w')
output_file.write('{}\n'.format(average_precs))
output_file.close()
# Precision-dictionary
file_name = "{}/dict-{}".format(args.r_dir, param_stamp)
utils.save_object(precision_dict, file_name)
#-------------------------------------------------------------------------------------------------#
#--------------------#
#----- PLOTTING -----#
#--------------------#
# If requested, generate pdf
if pp is not None:
# -create list to store all figures to be plotted.
figure_list = []
# -generate all figures (and store them in [figure_list])
figure = visual_plt.plot_lines(
precision_dict["all_tasks"], x_axes=precision_dict["x_task"],
line_names=['task {}'.format(i + 1) for i in range(args.tasks)]
)
figure_list.append(figure)
figure = visual_plt.plot_lines(
[precision_dict["average"]], x_axes=precision_dict["x_task"],
line_names=['average all tasks so far']
)
figure_list.append(figure)
# -add figures to pdf (and close this pdf).
for figure in figure_list:
pp.savefig(figure)
# Close pdf
if pp is not None:
pp.close()
if __name__ == '__main__':
args = parser.parse_args()
run(args)
|
[
"evaluate.show_reconstruction",
"evaluate.show_samples",
"evaluate.initiate_precision_dict",
"evaluate.validate"
] |
[((401, 503), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""./main.py"""'], {'description': '"""Run individual continual learning experiment."""'}), "('./main.py', description=\n 'Run individual continual learning experiment.')\n", (424, 503), False, 'import argparse\n'), ((7178, 7217), 'torch.device', 'torch.device', (["('cuda' if cuda else 'cpu')"], {}), "('cuda' if cuda else 'cpu')\n", (7190, 7217), False, 'import torch\n'), ((7246, 7271), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (7260, 7271), True, 'import numpy as np\n'), ((7276, 7304), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (7293, 7304), False, 'import torch\n'), ((7642, 7819), 'data.get_multitask_experiment', 'get_multitask_experiment', ([], {'name': 'args.experiment', 'scenario': 'args.scenario', 'tasks': 'args.tasks', 'data_dir': 'args.d_dir', 'verbose': '(True)', 'exception': '(True if args.seed == 0 else False)'}), '(name=args.experiment, scenario=args.scenario,\n tasks=args.tasks, data_dir=args.d_dir, verbose=True, exception=True if \n args.seed == 0 else False)\n', (7666, 7819), False, 'from data import get_multitask_experiment\n'), ((12869, 13082), 'utils.get_param_stamp', 'utils.get_param_stamp', (['args', 'model.name'], {'verbose': '(True)', 'replay': "(True if not args.replay == 'none' else False)", 'replay_model_name': "(generator.name if args.replay == 'generative' and not args.feedback else None)"}), "(args, model.name, verbose=True, replay=True if not \n args.replay == 'none' else False, replay_model_name=generator.name if \n args.replay == 'generative' and not args.feedback else None)\n", (12890, 13082), False, 'import utils\n'), ((13188, 13237), 'utils.print_model_info', 'utils.print_model_info', (['model'], {'title': '"""MAIN MODEL"""'}), "(model, title='MAIN MODEL')\n", (13210, 13237), False, 'import utils\n'), ((13602, 13646), 'evaluate.initiate_precision_dict', 'evaluate.initiate_precision_dict', (['args.tasks'], {}), '(args.tasks)\n', (13634, 13646), False, 'import evaluate\n'), ((16636, 16647), 'time.time', 'time.time', ([], {}), '()\n', (16645, 16647), False, 'import time\n'), ((16670, 17127), 'train.train_cl', 'train_cl', (['model', 'train_datasets'], {'replay_mode': 'args.replay', 'scenario': 'args.scenario', 'classes_per_task': 'classes_per_task', 'iters': 'args.iters', 'batch_size': 'args.batch', 'collate_fn': 'utils.label_squeezing_collate_fn', 'visualize': '(True if args.visdom else False)', 'generator': 'generator', 'gen_iters': 'args.g_iters', 'gen_loss_cbs': 'generator_loss_cbs', 'sample_cbs': 'sample_cbs', 'eval_cbs': 'eval_cbs', 'loss_cbs': '(generator_loss_cbs if args.feedback else solver_loss_cbs)'}), '(model, train_datasets, replay_mode=args.replay, scenario=args.\n scenario, classes_per_task=classes_per_task, iters=args.iters,\n batch_size=args.batch, collate_fn=utils.label_squeezing_collate_fn,\n visualize=True if args.visdom else False, generator=generator,\n gen_iters=args.g_iters, gen_loss_cbs=generator_loss_cbs, sample_cbs=\n sample_cbs, eval_cbs=eval_cbs, loss_cbs=generator_loss_cbs if args.\n feedback else solver_loss_cbs)\n', (16678, 17127), False, 'from train import train_cl\n'), ((19723, 19767), 'utils.save_object', 'utils.save_object', (['precision_dict', 'file_name'], {}), '(precision_dict, file_name)\n', (19740, 19767), False, 'import utils\n'), ((6961, 6986), 'os.path.isdir', 'os.path.isdir', (['args.r_dir'], {}), '(args.r_dir)\n', (6974, 6986), False, 'import os\n'), ((6996, 7016), 'os.mkdir', 'os.mkdir', (['args.r_dir'], {}), '(args.r_dir)\n', (7004, 7016), False, 'import os\n'), ((7076, 7096), 'os.mkdir', 'os.mkdir', (['args.p_dir'], {}), '(args.p_dir)\n', (7084, 7096), False, 'import os\n'), ((7125, 7150), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7148, 7150), False, 'import torch\n'), ((7326, 7359), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (7348, 7359), False, 'import torch\n'), ((9222, 9270), 'torch.optim.Adam', 'optim.Adam', (['model.optim_list'], {'betas': '(0.9, 0.999)'}), '(model.optim_list, betas=(0.9, 0.999))\n', (9232, 9270), False, 'from torch import optim\n'), ((9579, 9608), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'size_average': '(True)'}), '(size_average=True)\n', (9589, 9608), False, 'from torch import nn\n'), ((12546, 12575), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'size_average': '(True)'}), '(size_average=True)\n', (12556, 12575), False, 'from torch import nn\n'), ((13293, 13345), 'utils.print_model_info', 'utils.print_model_info', (['generator'], {'title': '"""GENERATOR"""'}), "(generator, title='GENERATOR')\n", (13315, 13345), False, 'import utils\n'), ((17228, 17239), 'time.time', 'time.time', ([], {}), '()\n', (17237, 17239), False, 'import time\n'), ((17762, 17830), 'evaluate.show_samples', 'evaluate.show_samples', (['generator', 'config'], {'size': 'args.sample_n', 'pdf': 'pp'}), '(generator, config, size=args.sample_n, pdf=pp)\n', (17783, 17830), False, 'import evaluate\n'), ((17882, 17946), 'evaluate.show_samples', 'evaluate.show_samples', (['model', 'config'], {'size': 'args.sample_n', 'pdf': 'pp'}), '(model, config, size=args.sample_n, pdf=pp)\n', (17903, 17946), False, 'import evaluate\n'), ((20414, 20543), 'visual_plt.plot_lines', 'visual_plt.plot_lines', (["[precision_dict['average']]"], {'x_axes': "precision_dict['x_task']", 'line_names': "['average all tasks so far']"}), "([precision_dict['average']], x_axes=precision_dict[\n 'x_task'], line_names=['average all tasks so far'])\n", (20435, 20543), False, 'import visual_plt\n'), ((7041, 7066), 'os.path.isdir', 'os.path.isdir', (['args.p_dir'], {}), '(args.p_dir)\n', (7054, 7066), False, 'import os\n'), ((9331, 9358), 'torch.optim.SGD', 'optim.SGD', (['model.optim_list'], {}), '(model.optim_list)\n', (9340, 9358), False, 'from torch import optim\n'), ((12307, 12359), 'torch.optim.Adam', 'optim.Adam', (['generator.optim_list'], {'betas': '(0.9, 0.999)'}), '(generator.optim_list, betas=(0.9, 0.999))\n', (12317, 12359), False, 'from torch import optim\n'), ((14571, 14770), 'callbacks._VAE_loss_cb', 'cb._VAE_loss_cb', ([], {'log': 'args.loss_log', 'visdom': 'visdom', 'model': '(model if args.feedback else generator)', 'tasks': 'args.tasks', 'iters_per_task': 'args.g_iters', 'replay': "(False if args.replay == 'none' else True)"}), "(log=args.loss_log, visdom=visdom, model=model if args.\n feedback else generator, tasks=args.tasks, iters_per_task=args.g_iters,\n replay=False if args.replay == 'none' else True)\n", (14586, 14770), True, 'import callbacks as cb\n'), ((14866, 15034), 'callbacks._solver_loss_cb', 'cb._solver_loss_cb', ([], {'log': 'args.loss_log', 'visdom': 'visdom', 'model': 'model', 'tasks': 'args.tasks', 'iters_per_task': 'args.iters', 'replay': "(False if args.replay == 'none' else True)"}), "(log=args.loss_log, visdom=visdom, model=model, tasks=\n args.tasks, iters_per_task=args.iters, replay=False if args.replay ==\n 'none' else True)\n", (14884, 15034), True, 'import callbacks as cb\n'), ((15198, 15356), 'callbacks._sample_cb', 'cb._sample_cb', ([], {'log': 'args.sample_log', 'visdom': 'visdom', 'config': 'config', 'test_datasets': 'test_datasets', 'sample_size': 'args.sample_n', 'iters_per_task': 'args.g_iters'}), '(log=args.sample_log, visdom=visdom, config=config,\n test_datasets=test_datasets, sample_size=args.sample_n, iters_per_task=\n args.g_iters)\n', (15211, 15356), True, 'import callbacks as cb\n'), ((18767, 18856), 'evaluate.validate', 'evaluate.validate', (['model', 'test_datasets[i]'], {'verbose': '(False)', 'test_size': 'None', 'task': '(i + 1)'}), '(model, test_datasets[i], verbose=False, test_size=None,\n task=i + 1)\n', (18784, 18856), False, 'import evaluate\n'), ((8172, 8437), 'vae_models.AutoEncoder', 'AutoEncoder', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'classes': "config['classes']", 'fc_layers': 'args.fc_lay', 'fc_units': 'args.fc_units', 'z_dim': 'args.z_dim', 'fc_drop': 'args.fc_drop', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'fc_nl': 'args.fc_nl'}), "(image_size=config['size'], image_channels=config['channels'],\n classes=config['classes'], fc_layers=args.fc_lay, fc_units=args.\n fc_units, z_dim=args.z_dim, fc_drop=args.fc_drop, fc_bn=True if args.\n fc_bn == 'yes' else False, fc_nl=args.fc_nl)\n", (8183, 8437), False, 'from vae_models import AutoEncoder\n'), ((8589, 8893), 'encoder.Classifier', 'Classifier', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'classes': "config['classes']", 'fc_layers': 'args.fc_lay', 'fc_units': 'args.fc_units', 'fc_drop': 'args.fc_drop', 'fc_nl': 'args.fc_nl', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'excit_buffer': '(True if args.gating_prop > 0 else False)'}), "(image_size=config['size'], image_channels=config['channels'],\n classes=config['classes'], fc_layers=args.fc_lay, fc_units=args.\n fc_units, fc_drop=args.fc_drop, fc_nl=args.fc_nl, fc_bn=True if args.\n fc_bn == 'yes' else False, excit_buffer=True if args.gating_prop > 0 else\n False)\n", (8599, 8893), False, 'from encoder import Classifier\n'), ((11712, 11978), 'vae_models.AutoEncoder', 'AutoEncoder', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'fc_layers': 'args.g_fc_lay', 'fc_units': 'args.g_fc_uni', 'z_dim': 'args.z_dim', 'classes': "config['classes']", 'fc_drop': 'args.fc_drop', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'fc_nl': 'args.fc_nl'}), "(image_size=config['size'], image_channels=config['channels'],\n fc_layers=args.g_fc_lay, fc_units=args.g_fc_uni, z_dim=args.z_dim,\n classes=config['classes'], fc_drop=args.fc_drop, fc_bn=True if args.\n fc_bn == 'yes' else False, fc_nl=args.fc_nl)\n", (11723, 11978), False, 'from vae_models import AutoEncoder\n'), ((12438, 12469), 'torch.optim.SGD', 'optim.SGD', (['generator.optim_list'], {}), '(generator.optim_list)\n', (12447, 12469), False, 'from torch import optim\n'), ((18123, 18209), 'evaluate.show_reconstruction', 'evaluate.show_reconstruction', (['model', 'test_datasets[i]', 'config'], {'pdf': 'pp', 'task': '(i + 1)'}), '(model, test_datasets[i], config, pdf=pp, task=\n i + 1)\n', (18151, 18209), False, 'import evaluate\n'), ((18237, 18326), 'evaluate.show_reconstruction', 'evaluate.show_reconstruction', (['generator', 'test_datasets[i]', 'config'], {'pdf': 'pp', 'task': '(i + 1)'}), '(generator, test_datasets[i], config, pdf=pp,\n task=i + 1)\n', (18265, 18326), False, 'import evaluate\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
sys.path.append("..")
sys.path.append("../technical-analysis_python/")
mpl.use('tkagg') # issues with Big Sur
# technical analysis
from strategy.macd_crossover import macdCrossover
from backtest import Backtest
from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR
def backtest(symbol):
price_file = '../../database/microeconomic_data/hkex_ticks_day/hkex_' + symbol + '.csv'
# load price data
df_whole = pd.read_csv(price_file, header=0, index_col='Date', parse_dates=True)
# select time range (for trading)
#start = '2017-01-03'
start = '2020-06-10'
end = '2021-03-03'
start_date = pd.Timestamp(start)
end_date = pd.Timestamp(end)
df = df_whole.loc[start_date:end_date]
ticker = symbol + ".HK"
# load signals csv (output from ML model)
signals_file = './LSTM_output_trend/' + symbol + '_output.csv'
signals = pd.read_csv(signals_file,
header=0, index_col='Date', parse_dates=True)
signals['positions'] = signals['signal'].diff()
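    # the LSTM signal is 0/1, so its first difference marks entries (+1) and exits (-1)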
signals = signals[~signals.index.duplicated(keep='first')]
df = df[~df.index.duplicated(keep='first')]
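    # drop duplicated timestamps so signals and prices align row-for-row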
#print(signals.head())
#print(df.head())
"""
Backtesting & evaluation
"""
portfolio, backtest_fig = Backtest(ticker, signals, df)
plt.close() # hide figure
print("Final total value: {value:.4f} ".format(
value=portfolio['total'][-1]))
portfolio_return = (
((portfolio['total'][-1] - portfolio['total'][0]) / portfolio['total'][0]) * 100)
print("Total return: {value:.4f}%".format(value=portfolio_return))
trade_signals_num = len(signals[signals.positions == 1])
print("No. of trade: {value}".format(
value=trade_signals_num))
"""
Plotting figures
"""
backtest_fig.suptitle('Portfolio value', fontsize=14)
#backtest_fig.savefig('./figures_LSTM-price-only/' + symbol + '-portfolio-value')
#plt.show()
# Evaluate strategy
# 1. Portfolio return
returns_fig = PortfolioReturn(portfolio)
returns_fig.suptitle('Portfolio return')
    #returns_filename = './figures_LSTM-price-only/' + symbol + '-portfolio-return'
#returns_fig.savefig(returns_filename)
#plt.show()
# 2. Sharpe ratio
sharpe_ratio = SharpeRatio(portfolio)
print("Sharpe ratio: {ratio:.4f} ".format(ratio=sharpe_ratio))
# 3. Maximum drawdown
maxDrawdown_fig, max_daily_drawdown, daily_drawdown = MaxDrawdown(df)
maxDrawdown_fig.suptitle('Maximum drawdown', fontsize=14)
#maxDrawdown_filename = './figures/' + symbol + '-LSTM_maximum-drawdown'
#maxDrawdown_fig.savefig(maxDrawdown_filename)
#plt.show()
# 4. Compound Annual Growth Rate
cagr = CAGR(portfolio)
print("CAGR: {cagr:.4f} ".format(cagr=cagr))
# Write to file
f = open("LSTM_trend_results_MACD.csv", "a")
f.write(ticker + ',' + start + ',' + end + ',' + str(portfolio_return) + ',' +
str(sharpe_ratio) + ',' + str(cagr) + ',' + str(trade_signals_num) + '\n')
f.close()
def main():
ticker_list = ['0001', '0002', '0003', '0004', '0005', '0016', '0019', '0168', '0175', '0386', '0669', '0700',
'0762', '0823', '0857', '0868', '0883', '0939', '0941', '0968', '1211', '1299', '1818', '2319', '2382', '2688', '2689', '2899']
#ticker_list = ['0001', '0002', '0003', '0004', '0005']
for ticker in ticker_list:
print("############ Ticker: " + ticker + " ############")
backtest(ticker)
print('\n')
if __name__ == "__main__":
main()
|
[
"evaluate.MaxDrawdown",
"evaluate.SharpeRatio",
"evaluate.PortfolioReturn",
"evaluate.CAGR"
] |
[((108, 129), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (123, 129), False, 'import sys\n'), ((130, 178), 'sys.path.append', 'sys.path.append', (['"""../technical-analysis_python/"""'], {}), "('../technical-analysis_python/')\n", (145, 178), False, 'import sys\n'), ((179, 195), 'matplotlib.use', 'mpl.use', (['"""tkagg"""'], {}), "('tkagg')\n", (186, 195), True, 'import matplotlib as mpl\n'), ((544, 613), 'pandas.read_csv', 'pd.read_csv', (['price_file'], {'header': '(0)', 'index_col': '"""Date"""', 'parse_dates': '(True)'}), "(price_file, header=0, index_col='Date', parse_dates=True)\n", (555, 613), True, 'import pandas as pd\n'), ((744, 763), 'pandas.Timestamp', 'pd.Timestamp', (['start'], {}), '(start)\n', (756, 763), True, 'import pandas as pd\n'), ((779, 796), 'pandas.Timestamp', 'pd.Timestamp', (['end'], {}), '(end)\n', (791, 796), True, 'import pandas as pd\n'), ((999, 1070), 'pandas.read_csv', 'pd.read_csv', (['signals_file'], {'header': '(0)', 'index_col': '"""Date"""', 'parse_dates': '(True)'}), "(signals_file, header=0, index_col='Date', parse_dates=True)\n", (1010, 1070), True, 'import pandas as pd\n'), ((1384, 1413), 'backtest.Backtest', 'Backtest', (['ticker', 'signals', 'df'], {}), '(ticker, signals, df)\n', (1392, 1413), False, 'from backtest import Backtest\n'), ((1418, 1429), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1427, 1429), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2160), 'evaluate.PortfolioReturn', 'PortfolioReturn', (['portfolio'], {}), '(portfolio)\n', (2149, 2160), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((2390, 2412), 'evaluate.SharpeRatio', 'SharpeRatio', (['portfolio'], {}), '(portfolio)\n', (2401, 2412), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((2565, 2580), 'evaluate.MaxDrawdown', 'MaxDrawdown', (['df'], {}), '(df)\n', (2576, 2580), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((2836, 2851), 'evaluate.CAGR', 'CAGR', (['portfolio'], {}), '(portfolio)\n', (2840, 2851), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n')]
|
import tqdm
import struct
import os
import numpy as np
import pickle
import argparse
from scipy import sparse
from evaluate import evaluate
from implicit.als import AlternatingLeastSquares
os.environ["OPENBLAS_NUM_THREADS"] = "1"
user_features_filename = 'out_user_features_{}.feats'
item_features_filename = 'out_item_features_{}.feats'
predictions_filename = 'predicted_{}.npy'
def load_feats(feat_fname, meta_only=False, nrz=False):
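    # feats file layout: one line of space-separated keys, then R and C as two int64, then R*C float32 values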
with open(feat_fname, 'rb') as fin:
keys = fin.readline().strip().split()
R, C = struct.unpack('qq', fin.read(16))
if meta_only:
return keys, (R, C)
        feat = np.frombuffer(fin.read(), count=R * C, dtype=np.float32)
feat = feat.reshape((R, C))
if nrz:
feat = feat / np.sqrt((feat ** 2).sum(-1) + 1e-8)[..., np.newaxis]
return keys, feat
def save(keys, feats, out_fname):
feats = np.array(feats, dtype=np.float32)
with open(out_fname + '.tmp', 'wb') as fout:
fout.write(b' '.join([k.encode() for k in keys]))
fout.write(b'\n')
R, C = feats.shape
fout.write(struct.pack('qq', *(R, C)))
        fout.write(feats.tobytes())
os.rename(out_fname + '.tmp', out_fname)
def train_als(impl_train_data, dims, user_ids, item_ids, user_features_filem, item_features_file, save_res=True):
# Train the Matrix Factorization model using the given dimensions
model = AlternatingLeastSquares(factors=dims, iterations=30)
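    # this version of implicit fits on an item-user matrix, hence the transpose of the user-item data below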
model.fit(impl_train_data.T)
user_vecs_reg = model.user_factors
item_vecs_reg = model.item_factors
if save_res==True:
save(item_ids, item_vecs_reg, item_features_file)
save(user_ids, user_vecs_reg, user_features_file)
return item_ids, item_vecs_reg, user_ids, user_vecs_reg
def predict(item_vecs_reg, user_vecs_reg, prediction_file,impl_train_data, N=100, step=1000, save_res=True):
# Make the predictions given the representations of the items and the users
listened_dict = impl_train_data
predicted = np.zeros((user_vecs_reg.shape[0],N), dtype=np.uint32)
for u in range(0,user_vecs_reg.shape[0], step):
sims = user_vecs_reg[u:u+step].dot(item_vecs_reg.T)
curr_users = listened_dict[u:u+step].todense() == 0
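        # curr_users is a boolean mask: True for items this user has not interacted with;
        # multiplying by it zeroes out already-seen items before ranking the top-N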
        # We remove the items that the users have already listened to
topn = np.argsort(-np.multiply(sims,curr_users), axis=1)[:,:N]
predicted[u:u+step, :] = topn
#if u % 100000 == 0:
# print ("Precited users: ", u)
    if save_res:
np.save(open(prediction_file, 'wb'), predicted)
return predicted
def show_eval(predicted_x, fan_test_data,item_ids, fan_train_data, items_names):
    # Print the evaluation of the recommendations for the following metrics
metrics = ['map@10', 'precision@1', 'precision@3', 'precision@5', 'precision@10', 'r-precision', 'ndcg@10']
results, all_results = evaluate(metrics, fan_test_data, predicted_x)
    print(results)
def show_recs(predicted_x, fan_test_data,item_ids, fan_train_data, items_names, i=10):
    # For a given user, print the items in train and test, plus the first ten recommendations
    print('---------')
    print("Listened (test)", [items_names[a] for a in fan_test_data[i]])
    print('---------')
    print("Listened (train)", [items_names[a] for a in fan_train_data[i, :].nonzero()[1].tolist()])
    print('---------')
    print("Recommended", [(items_names[a], a in fan_test_data[i]) for a in predicted_x[i][:10]])
    print('---------')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run model training and evaluation.')
parser.add_argument('-f', "--split_folder", default=False)
parser.add_argument('-d', "--dims", default=200)
args = parser.parse_args()
split_folder = args.split_folder
dims = int(args.dims)
print ("Dataset:", split_folder, 'Dimension:', dims)
fan_train_data = sparse.load_npz(os.path.join('data', split_folder, 'fan_train_data.npz')).tocsr()
orig_fan_train_data = sparse.load_npz(os.path.join('data', split_folder, 'fan_train_data.npz')).tocsr()
sum_listen = fan_train_data.sum(axis=0)
fan_test_data = pickle.load(open(os.path.join('data', split_folder, 'fan_test_data.pkl'), 'rb'))
fan_items_dict = pickle.load(open(os.path.join('data', split_folder, 'fan_items_dict.pkl'), 'rb'))
    items_ids_names = pickle.load(open(os.path.join('data', split_folder, 'fan_item_ids.pkl'), 'rb'))
    fan_users_dict = pickle.load(open(os.path.join('data', split_folder, 'fan_users_dict.pkl'), 'rb'))
model_folder = 'models'
user_features_file = os.path.join(model_folder, split_folder, user_features_filename)
item_features_file = os.path.join(model_folder, split_folder, item_features_filename)
item_ids, item_vecs_reg, user_ids, user_vecs_reg = train_als(fan_train_data, dims, fan_users_dict, fan_items_dict, user_features_file, item_features_file, save_res=True)
#user_ids, user_vecs_reg = load_feats(user_features_file)
item_ids, item_vecs_reg = load_feats(item_features_file)
    predictions_file = os.path.join(model_folder, split_folder, predictions_filename)
predicted = predict(item_vecs_reg, user_vecs_reg, predictions_file, orig_fan_train_data, step=500)
#predicted = np.load(predictions_file)
show_eval(predicted, fan_test_data, item_ids, fan_train_data, items_ids_names)
show_recs(predicted, fan_test_data, item_ids, fan_train_data, items_ids_names, i=10)
|
[
"evaluate.evaluate"
] |
[((905, 938), 'numpy.array', 'np.array', (['feats'], {'dtype': 'np.float32'}), '(feats, dtype=np.float32)\n', (913, 938), True, 'import numpy as np\n'), ((1215, 1255), 'os.rename', 'os.rename', (["(out_fname + '.tmp')", 'out_fname'], {}), "(out_fname + '.tmp', out_fname)\n", (1224, 1255), False, 'import os\n'), ((1453, 1505), 'implicit.als.AlternatingLeastSquares', 'AlternatingLeastSquares', ([], {'factors': 'dims', 'iterations': '(30)'}), '(factors=dims, iterations=30)\n', (1476, 1505), False, 'from implicit.als import AlternatingLeastSquares\n'), ((2061, 2115), 'numpy.zeros', 'np.zeros', (['(user_vecs_reg.shape[0], N)'], {'dtype': 'np.uint32'}), '((user_vecs_reg.shape[0], N), dtype=np.uint32)\n', (2069, 2115), True, 'import numpy as np\n'), ((2929, 2974), 'evaluate.evaluate', 'evaluate', (['metrics', 'fan_test_data', 'predicted_x'], {}), '(metrics, fan_test_data, predicted_x)\n', (2937, 2974), False, 'from evaluate import evaluate\n'), ((3591, 3664), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run model training and evaluation."""'}), "(description='Run model training and evaluation.')\n", (3614, 3664), False, 'import argparse\n'), ((4652, 4716), 'os.path.join', 'os.path.join', (['model_folder', 'split_folder', 'user_features_filename'], {}), '(model_folder, split_folder, user_features_filename)\n', (4664, 4716), False, 'import os\n'), ((4742, 4806), 'os.path.join', 'os.path.join', (['model_folder', 'split_folder', 'item_features_filename'], {}), '(model_folder, split_folder, item_features_filename)\n', (4754, 4806), False, 'import os\n'), ((5128, 5190), 'os.path.join', 'os.path.join', (['model_folder', 'split_folder', 'predictions_filename'], {}), '(model_folder, split_folder, predictions_filename)\n', (5140, 5190), False, 'import os\n'), ((1138, 1164), 'struct.pack', 'struct.pack', (['"""qq"""', '*(R, C)'], {}), "('qq', *(R, C))\n", (1149, 1164), False, 'import struct\n'), ((4228, 4283), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""fan_test_data.pkl"""'], {}), "('data', split_folder, 'fan_test_data.pkl')\n", (4240, 4283), False, 'import os\n'), ((4330, 4386), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""fan_items_dict.pkl"""'], {}), "('data', split_folder, 'fan_items_dict.pkl')\n", (4342, 4386), False, 'import os\n'), ((4433, 4487), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""fan_item_ids.pkl"""'], {}), "('data', split_folder, 'fan_item_ids.pkl')\n", (4445, 4487), False, 'import os\n'), ((4534, 4590), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""fan_users_dict.pkl"""'], {}), "('data', split_folder, 'fan_users_dict.pkl')\n", (4546, 4590), False, 'import os\n'), ((3971, 4027), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""fan_train_data.npz"""'], {}), "('data', split_folder, 'fan_train_data.npz')\n", (3983, 4027), False, 'import os\n'), ((4079, 4135), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""fan_train_data.npz"""'], {}), "('data', split_folder, 'fan_train_data.npz')\n", (4091, 4135), False, 'import os\n'), ((2376, 2405), 'numpy.multiply', 'np.multiply', (['sims', 'curr_users'], {}), '(sims, curr_users)\n', (2387, 2405), True, 'import numpy as np\n')]
|
import argparse
import functools
import itertools
import os.path
import os
import time
import torch
import torch.nn.functional as F
import numpy as np
from benepar import char_lstm
from benepar import decode_chart
from benepar import nkutil
from benepar import parse_chart
from benepar import InputSentence
from benepar.parse_base import BaseInputExample
import evaluate
import learning_rates
import treebanks
from tree_transforms import collapse_unlabel_binarize, random_parsing_subspan
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = "{}h{:02}m{:02}s".format(hours, minutes, seconds)
if days > 0:
elapsed_string = "{}d{}".format(days, elapsed_string)
return elapsed_string
def make_hparams():
return nkutil.HParams(
# Cycle consistency
back_cycle=False,
back_layers=4,
back_loss_constant=1.0,
back_use_gold_trees=True,
back_loss_type='kl',
# Discrete gumbel
use_vq=False,
vq_decay=0.97,
vq_commitment=0.1,
vq_coreset_size_multiplier=10,
vq_wait_steps=1245,
vq_observe_steps=1245,
vq_interpolate_steps=1245,
discrete_cats=0,
tau=3.0,
anneal_rate=2e-5,
en_tau=3.0,
en_anneal_rate=2e-5,
tau_min=0.05,
pretrained_divide=1.0,
encoder_gum=False,
tags_per_word=1,
tag_combine_start=np.inf,
tag_combine_interval=300,
tag_combine_mask_thres=0.05,
tag_split_thres=1.001, # disabled by default
# Data processing
two_label_subspan=False,
two_label=False,
max_len_train=0, # no length limit
max_len_dev=0, # no length limit
# Optimization
batch_size=32,
novel_learning_rate=0., # don't use separate learning rate
learning_rate=0.00005,
pretrained_lr=0.00005,
learning_rate_warmup_steps=160,
clip_grad_norm=0.0, # no clipping
checks_per_epoch=4,
step_decay_factor=0.5,
step_decay_patience=5,
max_consecutive_decays=3, # establishes a termination criterion
# Clustered Lexicon
use_ft_clustered_lexicon='',
use_w2v_clustered_lexicon='',
# CharLSTM
use_chars_lstm=False,
d_char_emb=64,
char_lstm_input_dropout=0.2,
# BERT and other pre-trained models
use_pretrained=False,
pretrained_model="bert-base-uncased",
bpe_dropout=0.2,
use_forced_lm=False,
# Partitioned transformer encoder
tag_dist='',
uni=False,
all_layers_uni=False,
first_heads=-1,
use_encoder=False,
d_model=1024,
num_layers=8,
num_heads=8,
d_kv=64,
d_ff=2048,
encoder_max_len=512,
# Dropout
morpho_emb_dropout=0.2,
attention_dropout=0.2,
relu_dropout=0.1,
residual_dropout=0.2,
# Output heads and losses
force_root_constituent="false", # "auto",
predict_tags=False,
d_label_hidden=256,
d_tag_hidden=256,
tag_loss_scale=5.0,
)
def run_train(args, hparams):
if args.numpy_seed is not None:
print("Setting numpy random seed to {}...".format(args.numpy_seed))
np.random.seed(args.numpy_seed)
# Make sure that pytorch is actually being initialized randomly.
# On my cluster I was getting highly correlated results from multiple
# runs, but calling reset_parameters() changed that. A brief look at the
# pytorch source code revealed that pytorch initializes its RNG by
# calling std::random_device, which according to the C++ spec is allowed
# to be deterministic.
seed_from_numpy = np.random.randint(2147483648)
print("Manual seed for pytorch:", seed_from_numpy)
torch.manual_seed(seed_from_numpy)
hparams.set_from_args(args)
print("Hyperparameters:")
hparams.print()
print("Loading training trees from {}...".format(args.train_path))
print(args.train_path, args.train_path_text, args.text_processing)
train_treebank = treebanks.load_trees(
args.train_path, args.train_path_text, args.text_processing
)
if hparams.max_len_train > 0:
train_treebank = train_treebank.filter_by_length(hparams.max_len_train)
print("Loaded {:,} training examples.".format(len(train_treebank)))
print("Loading development trees from {}...".format(args.dev_path))
dev_treebank = treebanks.load_trees(
args.dev_path, args.dev_path_text, args.text_processing
)
if hparams.max_len_dev > 0:
dev_treebank = dev_treebank.filter_by_length(hparams.max_len_dev)
print("Loaded {:,} development examples.".format(len(dev_treebank)))
hparams.tree_transform = None
if hparams.two_label:
hparams.tree_transform = collapse_unlabel_binarize
if hparams.tree_transform is not None:
for treebank in [train_treebank, dev_treebank]:
for parsing_example in treebank.examples:
parsing_example.tree = hparams.tree_transform(
parsing_example.tree)
print("Constructing vocabularies...")
op_trees = [tree if hparams.tree_transform is None else hparams.tree_transform(
tree) for tree in train_treebank.trees]
label_vocab = decode_chart.ChartDecoder.build_vocab(op_trees)
if hparams.use_chars_lstm:
char_vocab = char_lstm.RetokenizerForCharLSTM.build_vocab(
train_treebank.sents)
else:
char_vocab = None
print('Label Vocab Size:', len(label_vocab))
tag_vocab = set()
for tree in op_trees:
for _, tag in tree.pos():
tag_vocab.add(tag)
tag_vocab = ["UNK"] + sorted(tag_vocab)
tag_vocab = {label: i for i, label in enumerate(tag_vocab)}
del op_trees
if hparams.two_label_subspan:
for parsing_example in dev_treebank:
parsing_example.tree = collapse_unlabel_binarize(
parsing_example.tree)
hparams.tree_transform = collapse_unlabel_binarize
hparams.two_label_subspan = random_parsing_subspan
if hparams.force_root_constituent.lower() in ("true", "yes", "1"):
hparams.force_root_constituent = True
elif hparams.force_root_constituent.lower() in ("false", "no", "0"):
hparams.force_root_constituent = False
elif hparams.force_root_constituent.lower() == "auto":
hparams.force_root_constituent = (
decode_chart.ChartDecoder.infer_force_root_constituent(
train_treebank.trees)
)
print("Set hparams.force_root_constituent to",
hparams.force_root_constituent)
print("Initializing model...")
parser = parse_chart.ChartParser(
tag_vocab=tag_vocab,
label_vocab=label_vocab,
char_vocab=char_vocab,
hparams=hparams,
)
if args.parallelize:
parser.parallelize()
elif torch.cuda.is_available():
parser.cuda()
else:
print("Not using CUDA!")
print("Initializing optimizer...")
if parser.pretrained_model:
pretrained_weights = list(
params for params in parser.pretrained_model.parameters() if params.requires_grad)
else:
pretrained_weights = []
other_weights = []
for p in parser.parameters():
if p.requires_grad and all(p is not p2 for p2 in pretrained_weights):
other_weights.append(p)
trainable_parameters = [
{'params': pretrained_weights, 'lr': hparams.pretrained_lr},
{'params': other_weights}
]
# trainable_parameters = list(
# params for params in parser.parameters() if params.requires_grad)
if hparams.novel_learning_rate == 0.0:
optimizer = torch.optim.Adam(
trainable_parameters, lr=hparams.learning_rate, betas=(0.9, 0.98), eps=1e-9
)
base_lr = hparams.learning_rate
else:
trainable_parameters = list(
params for params in parser.parameters() if params.requires_grad)
pretrained_param_set = set(
parser.pretrained_model.parameters()) if parser.pretrained_model else set([])
pretrained_params = set(trainable_parameters) & pretrained_param_set
del pretrained_param_set
novel_params = set(trainable_parameters) - pretrained_params
grouped_trainable_parameters = [
{
'params': list(pretrained_params),
'lr': hparams.learning_rate,
},
{
'params': list(novel_params),
'lr': hparams.novel_learning_rate,
},
]
optimizer = torch.optim.Adam(
grouped_trainable_parameters, lr=hparams.learning_rate, betas=(0.9, 0.98), eps=1e-9)
base_lr = min(hparams.learning_rate, hparams.novel_learning_rate)
scheduler = learning_rates.WarmupThenReduceLROnPlateau(
optimizer,
hparams.learning_rate_warmup_steps,
mode="max",
factor=hparams.step_decay_factor,
patience=hparams.step_decay_patience * hparams.checks_per_epoch,
verbose=True,
)
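    # As the class name suggests, this schedule presumably warms the learning
    # rate up linearly for learning_rate_warmup_steps, then applies
    # ReduceLROnPlateau semantics keyed to dev F1 (mode="max"): decay by
    # step_decay_factor whenever F1 fails to improve for
    # step_decay_patience epochs' worth of dev checks.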
clippable_parameters = pretrained_weights + other_weights
grad_clip_threshold = (
np.inf if hparams.clip_grad_norm == 0 else hparams.clip_grad_norm
)
print("Training...")
total_processed = 0
current_processed = 0
check_every = len(train_treebank) / hparams.checks_per_epoch
best_dev_fscore = -np.inf
best_dev_model_path = None
best_dev_processed = 0
start_time = time.time()
all_dists = []
all_dev_accs = []
def check_dev():
nonlocal best_dev_fscore
nonlocal best_dev_model_path
nonlocal best_dev_processed
dev_start_time = time.time()
dev_predicted_and_cats = parser.parse(
dev_treebank.without_gold_annotations(),
subbatch_max_tokens=args.subbatch_max_tokens,
tau=0.0,
en_tau=0.0,
return_cats=hparams.discrete_cats != 0
)
if hparams.discrete_cats == 0:
dev_predicted = dev_predicted_and_cats
dist = None
else:
dist = torch.zeros(hparams.discrete_cats)
dev_predicted = []
for dev_tree, cat in dev_predicted_and_cats:
dev_predicted.append(dev_tree)
if len(cat.shape) == 1:
cat = F.one_hot(torch.tensor(cat), hparams.discrete_cats)
dist += cat.sum(dim=0).cpu()
dist /= dist.sum()
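            # dist now holds the empirical frequency of each discrete category
            # over the dev set; it is printed here and, if hparams.tag_dist is
            # set, accumulated and dumped below for later analysis.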
print(dist)
dev_fscore = evaluate.evalb(
args.evalb_dir, dev_treebank.trees, dev_predicted)
print(
"dev-fscore {} "
"best-dev {} "
"dev-elapsed {} "
"total-elapsed {}".format(
dev_fscore,
best_dev_fscore,
format_elapsed(dev_start_time),
format_elapsed(start_time),
)
)
if hparams.tag_dist:
all_dists.append(dist)
# all_dev_accs.append(dev_fscore.fscore)
# to_save = np.hstack(
# [np.array(all_dev_accs).reshape((-1, 1)), np.array(all_dists)])
with open(hparams.tag_dist, 'wb') as f:
np.save(f, np.vstack(all_dists))
if dev_fscore.fscore > best_dev_fscore:
if best_dev_model_path is not None:
extensions = [".pt"]
for ext in extensions:
path = best_dev_model_path + ext
if os.path.exists(path):
print("Removing previous model file {}...".format(path))
os.remove(path)
best_dev_fscore = dev_fscore.fscore
best_dev_model_path = "{}_dev={:.2f}".format(
args.model_path_base, dev_fscore.fscore
)
best_dev_processed = total_processed
print("Saving new best model to {}...".format(best_dev_model_path))
torch.save(
{
"config": parser.config,
"state_dict": parser.state_dict(),
"optimizer": optimizer.state_dict(),
},
best_dev_model_path + ".pt",
)
if dist is None:
return dist
return dist.cpu().numpy()
data_loader = torch.utils.data.DataLoader(
train_treebank,
batch_size=hparams.batch_size,
shuffle=True,
collate_fn=functools.partial(
parser.encode_and_collate_subbatches,
subbatch_max_tokens=args.subbatch_max_tokens,
),
)
tau = hparams.tau
    en_tau = hparams.en_tau
tag_combine_start = hparams.tag_combine_start
iteration = 0
dist = check_dev()
for epoch in itertools.count(start=1):
epoch_start_time = time.time()
for batch_num, batch in enumerate(data_loader, start=1):
iteration += 1
optimizer.zero_grad()
parser.commit_loss_accum = 0.0
parser.train()
if hparams.use_vq and hparams.vq_interpolate_steps == 0:
tau = 0.0
elif hparams.use_vq:
step = (total_processed // hparams.batch_size) - (
hparams.vq_wait_steps + hparams.vq_observe_steps)
if step < 0:
tau = 1.0
elif step >= hparams.vq_interpolate_steps:
tau = 0.0
else:
tau = max(0.0, 1.0 - step / hparams.vq_interpolate_steps)
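            # Net effect of the VQ schedule: tau holds at 1.0 (fully soft codes)
            # for vq_wait_steps + vq_observe_steps batches, then decays linearly
            # to 0.0 (hard vector quantization) over vq_interpolate_steps batches.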
if hparams.use_vq:
steps_past_warmup = (total_processed // hparams.batch_size
) - hparams.learning_rate_warmup_steps
if steps_past_warmup > 0:
current_lr = min([g["lr"] for g in optimizer.param_groups])
new_vq_decay = 1.0 - (
(1.0 - hparams.vq_decay) * (current_lr / base_lr))
if new_vq_decay != parser.vq.decay:
parser.vq.decay = new_vq_decay
print("Adjusted vq decay to:", new_vq_decay)
batch_loss_value = 0.0
for subbatch_size, subbatch in batch:
loss = parser.compute_loss(subbatch, tau=tau, en_tau=en_tau)
loss_value = float(loss.data.cpu().numpy())
batch_loss_value += loss_value
if loss_value > 0:
loss.backward()
del loss
total_processed += subbatch_size
current_processed += subbatch_size
grad_norm = torch.nn.utils.clip_grad_norm_(
clippable_parameters, grad_clip_threshold
)
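            # clip_grad_norm_ returns the total gradient norm computed before
            # clipping, so grad_norm stays meaningful for logging even when
            # clip_grad_norm == 0 disables clipping (threshold = np.inf).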
optimizer.step()
print(
"epoch {:,} "
"batch {:,}/{:,} "
"processed {:,} "
"batch-loss {:.4f} "
"grad-norm {:.4f} "
"commit-loss {:.4f} "
"epoch-elapsed {} "
"total-elapsed {}".format(
epoch,
batch_num,
int(np.ceil(len(train_treebank) / hparams.batch_size)),
total_processed,
batch_loss_value,
grad_norm,
float(parser.commit_loss_accum),
format_elapsed(epoch_start_time),
format_elapsed(start_time),
)
)
if current_processed >= check_every:
current_processed -= check_every
if not hparams.use_vq or epoch > 2:
                    # fix bug saving models that don't use VQ
dist = check_dev()
scheduler.step(metrics=best_dev_fscore)
if (hparams.discrete_cats > 0 and hparams.use_vq):
ptdist = parser.vq.cluster_size.cpu().numpy()
if np.sum(ptdist) > 0:
ptdist = ptdist / np.sum(ptdist)
num_categories_in_use = np.sum(ptdist > 1e-20)
print("Number of categories in use:", num_categories_in_use)
if hparams.discrete_cats > 0 and not hparams.use_vq:
# Gumbel temperature annealing
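                # tau = max(tau0 * exp(-anneal_rate * iteration), tau_min); with
                # the default anneal_rate of 2e-5 the temperature halves roughly
                # every ln(2) / 2e-5 ~= 34,657 training batches.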
tau = np.maximum(
hparams.tau * np.exp(-hparams.anneal_rate * iteration), hparams.tau_min)
en_tau = np.maximum(
hparams.en_tau * np.exp(-hparams.en_anneal_rate * iteration), hparams.tau_min)
print('setting temperature to: {:.4f}, hard attention tau to {:.3f}'.format(
tau, en_tau))
else:
scheduler.step()
if (total_processed - best_dev_processed) > (
(hparams.step_decay_patience + 1)
* hparams.max_consecutive_decays
* len(train_treebank)
):
print("Terminating due to lack of improvement in dev fscore.")
break
def run_test(args):
print("Loading test trees from {}...".format(args.test_path))
test_treebank = treebanks.load_trees(
args.test_path, args.test_path_text, args.text_processing
)
print("Loaded {:,} test examples.".format(len(test_treebank)))
if len(args.model_path) != 1:
raise NotImplementedError(
"Ensembling multiple parsers is not "
"implemented in this version of the code."
)
model_path = args.model_path[0]
print("Loading model from {}...".format(model_path))
parser = parse_chart.ChartParser.from_trained(model_path)
if args.no_predict_tags and parser.f_tag is not None:
print("Removing part-of-speech tagging head...")
parser.f_tag = None
if args.parallelize:
parser.parallelize()
elif torch.cuda.is_available():
parser.cuda()
print("Parsing test sentences...")
start_time = time.time()
test_predicted = parser.parse(
test_treebank.without_gold_annotations(),
subbatch_max_tokens=args.subbatch_max_tokens,
)
if args.output_path == "-":
for tree in test_predicted:
print(tree.pformat(margin=1e100))
elif args.output_path:
with open(args.output_path, "w") as outfile:
for tree in test_predicted:
outfile.write("{}\n".format(tree.pformat(margin=1e100)))
# The tree loader does some preprocessing to the trees (e.g. stripping TOP
# symbols or SPMRL morphological features). We compare with the input file
# directly to be extra careful about not corrupting the evaluation. We also
# allow specifying a separate "raw" file for the gold trees: the inputs to
# our parser have traces removed and may have predicted tags substituted,
# and we may wish to compare against the raw gold trees to make sure we
# haven't made a mistake. As far as we can tell all of these variations give
# equivalent results.
ref_gold_path = args.test_path
if args.test_path_raw is not None:
print("Comparing with raw trees from", args.test_path_raw)
ref_gold_path = args.test_path_raw
test_fscore = evaluate.evalb(
args.evalb_dir, test_treebank.trees, test_predicted, ref_gold_path=ref_gold_path
)
print(
"test-fscore {} "
"test-elapsed {}".format(
test_fscore,
format_elapsed(start_time),
)
)
def main():
os.environ["TOKENIZERS_PARALLELISM"] = "true"
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
hparams = make_hparams()
subparser = subparsers.add_parser("train")
subparser.set_defaults(callback=lambda args: run_train(args, hparams))
hparams.populate_arguments(subparser)
subparser.add_argument("--numpy-seed", type=int)
subparser.add_argument("--model-path-base", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument(
"--train-path", default="data/wsj/train_02-21.LDC99T42")
subparser.add_argument("--train-path-text", type=str)
subparser.add_argument("--dev-path", default="data/wsj/dev_22.LDC99T42")
subparser.add_argument("--dev-path-text", type=str)
subparser.add_argument("--text-processing", default="default")
subparser.add_argument("--subbatch-max-tokens", type=int, default=2000)
subparser.add_argument("--parallelize", action="store_true")
subparser.add_argument("--print-vocabs", action="store_true")
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
subparser.add_argument("--model-path", nargs="+", required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-path", default="data/wsj/test_23.LDC99T42")
subparser.add_argument("--test-path-text", type=str)
subparser.add_argument("--test-path-raw", type=str)
subparser.add_argument("--text-processing", default="default")
subparser.add_argument("--subbatch-max-tokens", type=int, default=500)
subparser.add_argument("--parallelize", action="store_true")
subparser.add_argument("--output-path", default="")
subparser.add_argument("--no-predict-tags", action="store_true")
args = parser.parse_args()
args.callback(args)
if __name__ == "__main__":
main()
|
[
"evaluate.evalb"
] |
[((908, 2400), 'benepar.nkutil.HParams', 'nkutil.HParams', ([], {'back_cycle': '(False)', 'back_layers': '(4)', 'back_loss_constant': '(1.0)', 'back_use_gold_trees': '(True)', 'back_loss_type': '"""kl"""', 'use_vq': '(False)', 'vq_decay': '(0.97)', 'vq_commitment': '(0.1)', 'vq_coreset_size_multiplier': '(10)', 'vq_wait_steps': '(1245)', 'vq_observe_steps': '(1245)', 'vq_interpolate_steps': '(1245)', 'discrete_cats': '(0)', 'tau': '(3.0)', 'anneal_rate': '(2e-05)', 'en_tau': '(3.0)', 'en_anneal_rate': '(2e-05)', 'tau_min': '(0.05)', 'pretrained_divide': '(1.0)', 'encoder_gum': '(False)', 'tags_per_word': '(1)', 'tag_combine_start': 'np.inf', 'tag_combine_interval': '(300)', 'tag_combine_mask_thres': '(0.05)', 'tag_split_thres': '(1.001)', 'two_label_subspan': '(False)', 'two_label': '(False)', 'max_len_train': '(0)', 'max_len_dev': '(0)', 'batch_size': '(32)', 'novel_learning_rate': '(0.0)', 'learning_rate': '(5e-05)', 'pretrained_lr': '(5e-05)', 'learning_rate_warmup_steps': '(160)', 'clip_grad_norm': '(0.0)', 'checks_per_epoch': '(4)', 'step_decay_factor': '(0.5)', 'step_decay_patience': '(5)', 'max_consecutive_decays': '(3)', 'use_ft_clustered_lexicon': '""""""', 'use_w2v_clustered_lexicon': '""""""', 'use_chars_lstm': '(False)', 'd_char_emb': '(64)', 'char_lstm_input_dropout': '(0.2)', 'use_pretrained': '(False)', 'pretrained_model': '"""bert-base-uncased"""', 'bpe_dropout': '(0.2)', 'use_forced_lm': '(False)', 'tag_dist': '""""""', 'uni': '(False)', 'all_layers_uni': '(False)', 'first_heads': '(-1)', 'use_encoder': '(False)', 'd_model': '(1024)', 'num_layers': '(8)', 'num_heads': '(8)', 'd_kv': '(64)', 'd_ff': '(2048)', 'encoder_max_len': '(512)', 'morpho_emb_dropout': '(0.2)', 'attention_dropout': '(0.2)', 'relu_dropout': '(0.1)', 'residual_dropout': '(0.2)', 'force_root_constituent': '"""false"""', 'predict_tags': '(False)', 'd_label_hidden': '(256)', 'd_tag_hidden': '(256)', 'tag_loss_scale': '(5.0)'}), "(back_cycle=False, back_layers=4, back_loss_constant=1.0,\n back_use_gold_trees=True, back_loss_type='kl', use_vq=False, vq_decay=\n 0.97, vq_commitment=0.1, vq_coreset_size_multiplier=10, vq_wait_steps=\n 1245, vq_observe_steps=1245, vq_interpolate_steps=1245, discrete_cats=0,\n tau=3.0, anneal_rate=2e-05, en_tau=3.0, en_anneal_rate=2e-05, tau_min=\n 0.05, pretrained_divide=1.0, encoder_gum=False, tags_per_word=1,\n tag_combine_start=np.inf, tag_combine_interval=300,\n tag_combine_mask_thres=0.05, tag_split_thres=1.001, two_label_subspan=\n False, two_label=False, max_len_train=0, max_len_dev=0, batch_size=32,\n novel_learning_rate=0.0, learning_rate=5e-05, pretrained_lr=5e-05,\n learning_rate_warmup_steps=160, clip_grad_norm=0.0, checks_per_epoch=4,\n step_decay_factor=0.5, step_decay_patience=5, max_consecutive_decays=3,\n use_ft_clustered_lexicon='', use_w2v_clustered_lexicon='',\n use_chars_lstm=False, d_char_emb=64, char_lstm_input_dropout=0.2,\n use_pretrained=False, pretrained_model='bert-base-uncased', bpe_dropout\n =0.2, use_forced_lm=False, tag_dist='', uni=False, all_layers_uni=False,\n first_heads=-1, use_encoder=False, d_model=1024, num_layers=8,\n num_heads=8, d_kv=64, d_ff=2048, encoder_max_len=512,\n morpho_emb_dropout=0.2, attention_dropout=0.2, relu_dropout=0.1,\n residual_dropout=0.2, force_root_constituent='false', predict_tags=\n False, d_label_hidden=256, d_tag_hidden=256, tag_loss_scale=5.0)\n", (922, 2400), False, 'from benepar import nkutil\n'), ((3919, 3948), 'numpy.random.randint', 'np.random.randint', (['(2147483648)'], {}), '(2147483648)\n', (3936, 
3948), True, 'import numpy as np\n'), ((4008, 4042), 'torch.manual_seed', 'torch.manual_seed', (['seed_from_numpy'], {}), '(seed_from_numpy)\n', (4025, 4042), False, 'import torch\n'), ((4289, 4375), 'treebanks.load_trees', 'treebanks.load_trees', (['args.train_path', 'args.train_path_text', 'args.text_processing'], {}), '(args.train_path, args.train_path_text, args.\n text_processing)\n', (4309, 4375), False, 'import treebanks\n'), ((4663, 4740), 'treebanks.load_trees', 'treebanks.load_trees', (['args.dev_path', 'args.dev_path_text', 'args.text_processing'], {}), '(args.dev_path, args.dev_path_text, args.text_processing)\n', (4683, 4740), False, 'import treebanks\n'), ((5507, 5554), 'benepar.decode_chart.ChartDecoder.build_vocab', 'decode_chart.ChartDecoder.build_vocab', (['op_trees'], {}), '(op_trees)\n', (5544, 5554), False, 'from benepar import decode_chart\n'), ((6916, 7029), 'benepar.parse_chart.ChartParser', 'parse_chart.ChartParser', ([], {'tag_vocab': 'tag_vocab', 'label_vocab': 'label_vocab', 'char_vocab': 'char_vocab', 'hparams': 'hparams'}), '(tag_vocab=tag_vocab, label_vocab=label_vocab,\n char_vocab=char_vocab, hparams=hparams)\n', (6939, 7029), False, 'from benepar import parse_chart\n'), ((9066, 9295), 'learning_rates.WarmupThenReduceLROnPlateau', 'learning_rates.WarmupThenReduceLROnPlateau', (['optimizer', 'hparams.learning_rate_warmup_steps'], {'mode': '"""max"""', 'factor': 'hparams.step_decay_factor', 'patience': '(hparams.step_decay_patience * hparams.checks_per_epoch)', 'verbose': '(True)'}), "(optimizer, hparams.\n learning_rate_warmup_steps, mode='max', factor=hparams.\n step_decay_factor, patience=hparams.step_decay_patience * hparams.\n checks_per_epoch, verbose=True)\n", (9108, 9295), False, 'import learning_rates\n'), ((9754, 9765), 'time.time', 'time.time', ([], {}), '()\n', (9763, 9765), False, 'import time\n'), ((13040, 13064), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (13055, 13064), False, 'import itertools\n'), ((17446, 17525), 'treebanks.load_trees', 'treebanks.load_trees', (['args.test_path', 'args.test_path_text', 'args.text_processing'], {}), '(args.test_path, args.test_path_text, args.text_processing)\n', (17466, 17525), False, 'import treebanks\n'), ((17899, 17947), 'benepar.parse_chart.ChartParser.from_trained', 'parse_chart.ChartParser.from_trained', (['model_path'], {}), '(model_path)\n', (17935, 17947), False, 'from benepar import parse_chart\n'), ((18260, 18271), 'time.time', 'time.time', ([], {}), '()\n', (18269, 18271), False, 'import time\n'), ((19508, 19608), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'test_predicted'], {'ref_gold_path': 'ref_gold_path'}), '(args.evalb_dir, test_treebank.trees, test_predicted,\n ref_gold_path=ref_gold_path)\n', (19522, 19608), False, 'import evaluate\n'), ((19849, 19874), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19872, 19874), False, 'import argparse\n'), ((3469, 3500), 'numpy.random.seed', 'np.random.seed', (['args.numpy_seed'], {}), '(args.numpy_seed)\n', (3483, 3500), True, 'import numpy as np\n'), ((5607, 5673), 'benepar.char_lstm.RetokenizerForCharLSTM.build_vocab', 'char_lstm.RetokenizerForCharLSTM.build_vocab', (['train_treebank.sents'], {}), '(train_treebank.sents)\n', (5651, 5673), False, 'from benepar import char_lstm\n'), ((7128, 7153), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7151, 7153), False, 'import torch\n'), ((7950, 8048), 'torch.optim.Adam', 
'torch.optim.Adam', (['trainable_parameters'], {'lr': 'hparams.learning_rate', 'betas': '(0.9, 0.98)', 'eps': '(1e-09)'}), '(trainable_parameters, lr=hparams.learning_rate, betas=(0.9,\n 0.98), eps=1e-09)\n', (7966, 8048), False, 'import torch\n'), ((8860, 8966), 'torch.optim.Adam', 'torch.optim.Adam', (['grouped_trainable_parameters'], {'lr': 'hparams.learning_rate', 'betas': '(0.9, 0.98)', 'eps': '(1e-09)'}), '(grouped_trainable_parameters, lr=hparams.learning_rate,\n betas=(0.9, 0.98), eps=1e-09)\n', (8876, 8966), False, 'import torch\n'), ((9962, 9973), 'time.time', 'time.time', ([], {}), '()\n', (9971, 9973), False, 'import time\n'), ((10794, 10859), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'dev_treebank.trees', 'dev_predicted'], {}), '(args.evalb_dir, dev_treebank.trees, dev_predicted)\n', (10808, 10859), False, 'import evaluate\n'), ((13093, 13104), 'time.time', 'time.time', ([], {}), '()\n', (13102, 13104), False, 'import time\n'), ((18154, 18179), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18177, 18179), False, 'import torch\n'), ((548, 559), 'time.time', 'time.time', ([], {}), '()\n', (557, 559), False, 'import time\n'), ((6127, 6174), 'tree_transforms.collapse_unlabel_binarize', 'collapse_unlabel_binarize', (['parsing_example.tree'], {}), '(parsing_example.tree)\n', (6152, 6174), False, 'from tree_transforms import collapse_unlabel_binarize, random_parsing_subspan\n'), ((10387, 10421), 'torch.zeros', 'torch.zeros', (['hparams.discrete_cats'], {}), '(hparams.discrete_cats)\n', (10398, 10421), False, 'import torch\n'), ((12738, 12844), 'functools.partial', 'functools.partial', (['parser.encode_and_collate_subbatches'], {'subbatch_max_tokens': 'args.subbatch_max_tokens'}), '(parser.encode_and_collate_subbatches, subbatch_max_tokens\n =args.subbatch_max_tokens)\n', (12755, 12844), False, 'import functools\n'), ((14909, 14982), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['clippable_parameters', 'grad_clip_threshold'], {}), '(clippable_parameters, grad_clip_threshold)\n', (14939, 14982), False, 'import torch\n'), ((6662, 6738), 'benepar.decode_chart.ChartDecoder.infer_force_root_constituent', 'decode_chart.ChartDecoder.infer_force_root_constituent', (['train_treebank.trees'], {}), '(train_treebank.trees)\n', (6716, 6738), False, 'from benepar import decode_chart\n'), ((11505, 11525), 'numpy.vstack', 'np.vstack', (['all_dists'], {}), '(all_dists)\n', (11514, 11525), True, 'import numpy as np\n'), ((11777, 11797), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11791, 11797), False, 'import os\n'), ((16364, 16386), 'numpy.sum', 'np.sum', (['(ptdist > 1e-20)'], {}), '(ptdist > 1e-20)\n', (16370, 16386), True, 'import numpy as np\n'), ((10633, 10650), 'torch.tensor', 'torch.tensor', (['cat'], {}), '(cat)\n', (10645, 10650), False, 'import torch\n'), ((11904, 11919), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (11913, 11919), False, 'import os\n'), ((16243, 16257), 'numpy.sum', 'np.sum', (['ptdist'], {}), '(ptdist)\n', (16249, 16257), True, 'import numpy as np\n'), ((16305, 16319), 'numpy.sum', 'np.sum', (['ptdist'], {}), '(ptdist)\n', (16311, 16319), True, 'import numpy as np\n'), ((16665, 16705), 'numpy.exp', 'np.exp', (['(-hparams.anneal_rate * iteration)'], {}), '(-hparams.anneal_rate * iteration)\n', (16671, 16705), True, 'import numpy as np\n'), ((16806, 16849), 'numpy.exp', 'np.exp', (['(-hparams.en_anneal_rate * iteration)'], {}), '(-hparams.en_anneal_rate * iteration)\n', (16812, 
16849), True, 'import numpy as np\n')]
|
from methods import cusum
from methods import vae
import data_utils
import evaluate
import numpy as np
import pickle
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
def _test_transferability(model_id, test_data_ids):
# TODO Get rid of the copy-paste from main()
    # Accumulators for evaluation metrics aggregated over all test datasets.
cusum_tp, cusum_fp, cusum_tn, cusum_fn = 0, 0, 0, 0
cusum_pc_1_tp, cusum_pc_1_fp, cusum_pc_1_tn, cusum_pc_1_fn = 0, 0, 0, 0
vae_tp, vae_fp, vae_tn, vae_fn = 0, 0, 0, 0
vae_rnn_tp, vae_rnn_fp, vae_rnn_tn, vae_rnn_fn = 0, 0, 0, 0
svc_tp, svc_fp, svc_tn, svc_fn = 0, 0, 0, 0
diff = 50 # For lowering frequency.
print('Model ID:', model_id)
print('Test datasets IDs:', test_data_ids)
for dataset_id in test_data_ids:
print()
print('=====')
print()
print('Test dataset ID:', dataset_id)
data, labels = data_utils.get_data(dataset_id, diff=diff)
# Crop data to match VAE (it needs the data length to be divisible by batch size).
data = data[:-(len(data) % vae.BATCH_SIZE), :]
labels = labels[:-(len(labels) % vae.BATCH_SIZE)]
assert data.shape[0] == labels.shape[0]
data_sum = np.sum(data, axis=1)
# Print number of event and no event points.
event_count = labels.sum()
print('Total entries:', labels.shape[0])
print('Event:', event_count)
print('No event:', labels.shape[0] - event_count)
print()
print('CUSUM')
mu, sigma = 0, 15 # Always set empirically, based on dataset 1.
print('* CUSUM mu and sigma:', mu, sigma)
print('* Data sum mu and sigma:', np.mean(data_sum), np.std(data_sum))
cusum_labels = cusum.cusum(data_sum, mu, sigma)
print()
print('CUSUM on the 1st principal component')
mu_pc_1, sigma_pc_1 = 0, 4 # Set empirically, based on dataset 1.
pca = PCA(n_components=2)
data_pca = pca.fit_transform(data)
data_pc_1 = data_pca[:, 0]
print('* CUSUM 1st PC mu and sigma:', mu_pc_1, sigma_pc_1)
print('* Data 1st PC mu and sigma:', np.mean(data_pc_1),
np.std(data_pc_1))
cusum_pc_1_labels = cusum.cusum(data_pc_1, mu_pc_1, sigma_pc_1)
print()
# Standardise data for easier training of DNNs.
data_mu = np.mean(data, axis=0)
data_sigma = np.std(data, axis=0)
vae_data = (data - data_mu) / data_sigma
print('SVM')
svc = LinearSVC(random_state=0, tol=1e-5, class_weight='balanced')
svc.fit(vae_data, labels)
svc_labels = svc.predict(vae_data)
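        # Caveat: the LinearSVC is fit and evaluated on the same dataset, so in
        # this transfer test it serves as a supervised in-domain reference
        # rather than a transferred model.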
print()
print('Variational Auto-Encoder (Dense)')
variational = vae.VAEClassifier(vae.DenseVAE, input_dim=data_utils.IN_DIM, suffix='bridge{}_diff={}'.format(model_id, diff),
recproba_threshold=-130)
variational.fit(vae_data, shuffle=True)
vae_labels = variational.predict(vae_data)
print()
print('Variational Auto-Encoder (RNN)')
variational_rnn = vae.VAEClassifier(vae.RNNVAE, input_dim=data_utils.IN_DIM, suffix='bridge{}_diff={}'.format(model_id, diff),
recproba_threshold=-200)
variational_rnn.fit(vae_data, shuffle=False)
vae_rnn_labels = variational_rnn.predict(vae_data)
print()
print('Evaluate VAE Dense ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(vae_labels, labels)
vae_tp += tp
vae_fp += fp
vae_tn += tn
vae_fn += fn
print()
print('Evaluate VAE RNN ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(vae_rnn_labels, labels)
vae_rnn_tp += tp
vae_rnn_fp += fp
vae_rnn_tn += tn
vae_rnn_fn += fn
print()
print('Evaluate CUSUM ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(cusum_labels, labels)
cusum_tp += tp
cusum_fp += fp
cusum_tn += tn
cusum_fn += fn
print()
        print('Evaluate CUSUM 1st PC ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(cusum_pc_1_labels, labels)
cusum_pc_1_tp += tp
cusum_pc_1_fp += fp
cusum_pc_1_tn += tn
cusum_pc_1_fn += fn
print()
print('Evaluate SVM ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(svc_labels, labels)
svc_tp += tp
svc_fp += fp
svc_tn += tn
svc_fn += fn
print()
print('=====')
print()
print('Final evaluation:')
print()
print('VAE Dense:')
prec, rec, f1 = evaluate.metrics(vae_tp, vae_fp, vae_tn, vae_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('VAE RNN:')
prec, rec, f1 = evaluate.metrics(vae_rnn_tp, vae_rnn_fp, vae_rnn_tn, vae_rnn_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('CUSUM:')
prec, rec, f1 = evaluate.metrics(cusum_tp, cusum_fp, cusum_tn, cusum_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('CUSUM 1st PC:')
prec, rec, f1 = evaluate.metrics(cusum_pc_1_tp, cusum_pc_1_fp,
cusum_pc_1_tn, cusum_pc_1_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('SVM:')
prec, rec, f1 = evaluate.metrics(svc_tp, svc_fp,
svc_tn, svc_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
def main_transfer():
"""
Evaluate transferability of our models.
Model 1 is tested on datasets 2 and 3.
Model 2 is tested on datasets 1 and 3.
Model 3 is tested on datasets 1 and 2.
"""
_test_transferability(model_id=1, test_data_ids=(2, 3))
_test_transferability(model_id=2, test_data_ids=(1, 3))
_test_transferability(model_id=3, test_data_ids=(1, 2))
def main():
    # Accumulators for evaluation metrics aggregated over all subsets of data.
cusum_tp, cusum_fp, cusum_tn, cusum_fn = 0, 0, 0, 0
cusum_pc_1_tp, cusum_pc_1_fp, cusum_pc_1_tn, cusum_pc_1_fn = 0, 0, 0, 0
vae_tp, vae_fp, vae_tn, vae_fn = 0, 0, 0, 0
vae_rnn_tp, vae_rnn_fp, vae_rnn_tn, vae_rnn_fn = 0, 0, 0, 0
svc_tp, svc_fp, svc_tn, svc_fn = 0, 0, 0, 0
diff = 50 # For lowering frequency.
for dataset_id in [1, 2, 3]:
print()
print('=====')
print()
print('Dataset ID:', dataset_id)
data, labels = data_utils.get_data(dataset_id, diff=diff)
# Crop data to match VAE (it needs the data length to be divisible by batch size).
data = data[:-(len(data) % vae.BATCH_SIZE), :]
labels = labels[:-(len(labels) % vae.BATCH_SIZE)]
assert data.shape[0] == labels.shape[0]
data_sum = np.sum(data, axis=1)
# Print number of event and no event points.
event_count = labels.sum()
print('Total entries:', labels.shape[0])
print('Event:', event_count)
print('No event:', labels.shape[0] - event_count)
print()
print('CUSUM')
mu, sigma = 0, 15 # Set empirically, based on dataset 1.
print('* CUSUM mu and sigma:', mu, sigma)
print('* Data sum mu and sigma:', np.mean(data_sum), np.std(data_sum))
cusum_labels = cusum.cusum(data_sum, mu, sigma)
        ### Plot the raw channel sum - for comparison with the 1st-PC plot below ###
seconds = np.arange(data.shape[0]) * 1. / 250
plt.plot(seconds, data_sum, color='black', linestyle=':')
plt.ticklabel_format(useOffset=False)
plt.xlabel('Second')
plt.ylabel('Microstrain')
plt.savefig('data_sum_dataset{}'.format(dataset_id), dpi=300)
plt.gcf().clear()
###
print()
print('CUSUM on the 1st principal component')
mu_pc_1, sigma_pc_1 = 0, 4 # Set empirically, based on dataset 1.
pca = PCA(n_components=2)
data_pca = pca.fit_transform(data)
data_pc_1 = data_pca[:, 0]
### Plot - to illustrate problems with CUSUM on the 1st PC ###
plt.plot(seconds, data_pc_1, color='black', linestyle=':')
plt.ticklabel_format(useOffset=False)
plt.xlabel('Second')
plt.ylabel('Microstrain')
plt.savefig('data_pc1_dataset{}'.format(dataset_id), dpi=300)
plt.gcf().clear()
###
print('* CUSUM 1st PC mu and sigma:', mu_pc_1, sigma_pc_1)
print('* Data 1st PC mu and sigma:', np.mean(data_pc_1), np.std(data_pc_1))
cusum_pc_1_labels = cusum.cusum(data_pc_1, mu_pc_1, sigma_pc_1)
print()
# Standardise data for easier training of DNNs.
data_mu = np.mean(data, axis=0)
data_sigma = np.std(data, axis=0)
vae_data = (data - data_mu) / data_sigma
print('SVM')
svc = LinearSVC(random_state=0, tol=1e-5, class_weight='balanced')
svc.fit(vae_data, labels)
svc_labels = svc.predict(vae_data)
print()
print('Variational Auto-Encoder (Dense)')
variational = vae.VAEClassifier(vae.DenseVAE, input_dim=data_utils.IN_DIM, suffix='bridge{}_diff={}'.format(dataset_id, diff),
recproba_threshold=-130)
variational.fit(vae_data, shuffle=True, dump_latent=(dataset_id == 1), dump_latent_true_labels=labels)
vae_labels = variational.predict(vae_data)
print()
print('Variational Auto-Encoder (RNN)')
variational_rnn = vae.VAEClassifier(vae.RNNVAE, input_dim=data_utils.IN_DIM, suffix='bridge{}_diff={}'.format(dataset_id, diff),
recproba_threshold=-200)
variational_rnn.fit(vae_data, shuffle=False, dump_latent=(dataset_id == 1), dump_latent_true_labels=labels)
vae_rnn_labels = variational_rnn.predict(vae_data)
# Plot an event, zoomed.
if dataset_id == 1:
with open('figure6.pkl', 'wb') as f:
pickle.dump((data_sum, cusum_labels, cusum_pc_1_labels, vae_labels, vae_rnn_labels, labels), f)
print()
print('Evaluate VAE Dense ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(vae_labels, labels)
vae_tp += tp
vae_fp += fp
vae_tn += tn
vae_fn += fn
print()
print('Evaluate VAE RNN ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(vae_rnn_labels, labels)
vae_rnn_tp += tp
vae_rnn_fp += fp
vae_rnn_tn += tn
vae_rnn_fn += fn
print()
print('Evaluate CUSUM ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(cusum_labels, labels)
cusum_tp += tp
cusum_fp += fp
cusum_tn += tn
cusum_fn += fn
print()
        print('Evaluate CUSUM 1st PC ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(cusum_pc_1_labels, labels)
cusum_pc_1_tp += tp
cusum_pc_1_fp += fp
cusum_pc_1_tn += tn
cusum_pc_1_fn += fn
print()
print('Evaluate SVM ({}):'.format(dataset_id))
tp, fp, tn, fn = evaluate.evaluate(svc_labels, labels)
svc_tp += tp
svc_fp += fp
svc_tn += tn
svc_fn += fn
print()
print('=====')
print()
print('Final evaluation:')
print()
print('VAE Dense:')
prec, rec, f1 = evaluate.metrics(vae_tp, vae_fp, vae_tn, vae_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('VAE RNN:')
prec, rec, f1 = evaluate.metrics(vae_rnn_tp, vae_rnn_fp, vae_rnn_tn, vae_rnn_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('CUSUM:')
prec, rec, f1 = evaluate.metrics(cusum_tp, cusum_fp, cusum_tn, cusum_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('CUSUM 1st PC:')
prec, rec, f1 = evaluate.metrics(cusum_pc_1_tp, cusum_pc_1_fp,
cusum_pc_1_tn, cusum_pc_1_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
print('SVM:')
prec, rec, f1 = evaluate.metrics(svc_tp, svc_fp,
svc_tn, svc_fn)
print('Precision', prec)
print('Recall', rec)
print('F1', f1)
print()
if __name__ == '__main__':
main()
main_transfer()
|
[
"evaluate.metrics",
"evaluate.evaluate"
] |
[((4763, 4811), 'evaluate.metrics', 'evaluate.metrics', (['vae_tp', 'vae_fp', 'vae_tn', 'vae_fn'], {}), '(vae_tp, vae_fp, vae_tn, vae_fn)\n', (4779, 4811), False, 'import evaluate\n'), ((4940, 5004), 'evaluate.metrics', 'evaluate.metrics', (['vae_rnn_tp', 'vae_rnn_fp', 'vae_rnn_tn', 'vae_rnn_fn'], {}), '(vae_rnn_tp, vae_rnn_fp, vae_rnn_tn, vae_rnn_fn)\n', (4956, 5004), False, 'import evaluate\n'), ((5131, 5187), 'evaluate.metrics', 'evaluate.metrics', (['cusum_tp', 'cusum_fp', 'cusum_tn', 'cusum_fn'], {}), '(cusum_tp, cusum_fp, cusum_tn, cusum_fn)\n', (5147, 5187), False, 'import evaluate\n'), ((5321, 5397), 'evaluate.metrics', 'evaluate.metrics', (['cusum_pc_1_tp', 'cusum_pc_1_fp', 'cusum_pc_1_tn', 'cusum_pc_1_fn'], {}), '(cusum_pc_1_tp, cusum_pc_1_fp, cusum_pc_1_tn, cusum_pc_1_fn)\n', (5337, 5397), False, 'import evaluate\n'), ((5559, 5607), 'evaluate.metrics', 'evaluate.metrics', (['svc_tp', 'svc_fp', 'svc_tn', 'svc_fn'], {}), '(svc_tp, svc_fp, svc_tn, svc_fn)\n', (5575, 5607), False, 'import evaluate\n'), ((11584, 11632), 'evaluate.metrics', 'evaluate.metrics', (['vae_tp', 'vae_fp', 'vae_tn', 'vae_fn'], {}), '(vae_tp, vae_fp, vae_tn, vae_fn)\n', (11600, 11632), False, 'import evaluate\n'), ((11761, 11825), 'evaluate.metrics', 'evaluate.metrics', (['vae_rnn_tp', 'vae_rnn_fp', 'vae_rnn_tn', 'vae_rnn_fn'], {}), '(vae_rnn_tp, vae_rnn_fp, vae_rnn_tn, vae_rnn_fn)\n', (11777, 11825), False, 'import evaluate\n'), ((11952, 12008), 'evaluate.metrics', 'evaluate.metrics', (['cusum_tp', 'cusum_fp', 'cusum_tn', 'cusum_fn'], {}), '(cusum_tp, cusum_fp, cusum_tn, cusum_fn)\n', (11968, 12008), False, 'import evaluate\n'), ((12142, 12218), 'evaluate.metrics', 'evaluate.metrics', (['cusum_pc_1_tp', 'cusum_pc_1_fp', 'cusum_pc_1_tn', 'cusum_pc_1_fn'], {}), '(cusum_pc_1_tp, cusum_pc_1_fp, cusum_pc_1_tn, cusum_pc_1_fn)\n', (12158, 12218), False, 'import evaluate\n'), ((12380, 12428), 'evaluate.metrics', 'evaluate.metrics', (['svc_tp', 'svc_fp', 'svc_tn', 'svc_fn'], {}), '(svc_tp, svc_fp, svc_tn, svc_fn)\n', (12396, 12428), False, 'import evaluate\n'), ((969, 1011), 'data_utils.get_data', 'data_utils.get_data', (['dataset_id'], {'diff': 'diff'}), '(dataset_id, diff=diff)\n', (988, 1011), False, 'import data_utils\n'), ((1285, 1305), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (1291, 1305), True, 'import numpy as np\n'), ((1804, 1836), 'methods.cusum.cusum', 'cusum.cusum', (['data_sum', 'mu', 'sigma'], {}), '(data_sum, mu, sigma)\n', (1815, 1836), False, 'from methods import cusum\n'), ((1997, 2016), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2000, 2016), False, 'from sklearn.decomposition import PCA\n'), ((2288, 2331), 'methods.cusum.cusum', 'cusum.cusum', (['data_pc_1', 'mu_pc_1', 'sigma_pc_1'], {}), '(data_pc_1, mu_pc_1, sigma_pc_1)\n', (2299, 2331), False, 'from methods import cusum\n'), ((2423, 2444), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2430, 2444), True, 'import numpy as np\n'), ((2466, 2486), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2472, 2486), True, 'import numpy as np\n'), ((2572, 2633), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)', 'tol': '(1e-05)', 'class_weight': '"""balanced"""'}), "(random_state=0, tol=1e-05, class_weight='balanced')\n", (2581, 2633), False, 'from sklearn.svm import LinearSVC\n'), ((3558, 3595), 'evaluate.evaluate', 'evaluate.evaluate', (['vae_labels', 'labels'], {}), '(vae_labels, labels)\n', (3575, 3595), False, 
'import evaluate\n'), ((3780, 3821), 'evaluate.evaluate', 'evaluate.evaluate', (['vae_rnn_labels', 'labels'], {}), '(vae_rnn_labels, labels)\n', (3797, 3821), False, 'import evaluate\n'), ((4020, 4059), 'evaluate.evaluate', 'evaluate.evaluate', (['cusum_labels', 'labels'], {}), '(cusum_labels, labels)\n', (4037, 4059), False, 'import evaluate\n'), ((4257, 4301), 'evaluate.evaluate', 'evaluate.evaluate', (['cusum_pc_1_labels', 'labels'], {}), '(cusum_pc_1_labels, labels)\n', (4274, 4301), False, 'import evaluate\n'), ((4510, 4547), 'evaluate.evaluate', 'evaluate.evaluate', (['svc_labels', 'labels'], {}), '(svc_labels, labels)\n', (4527, 4547), False, 'import evaluate\n'), ((6696, 6738), 'data_utils.get_data', 'data_utils.get_data', (['dataset_id'], {'diff': 'diff'}), '(dataset_id, diff=diff)\n', (6715, 6738), False, 'import data_utils\n'), ((7011, 7031), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (7017, 7031), True, 'import numpy as np\n'), ((7523, 7555), 'methods.cusum.cusum', 'cusum.cusum', (['data_sum', 'mu', 'sigma'], {}), '(data_sum, mu, sigma)\n', (7534, 7555), False, 'from methods import cusum\n'), ((7689, 7746), 'matplotlib.pyplot.plot', 'plt.plot', (['seconds', 'data_sum'], {'color': '"""black"""', 'linestyle': '""":"""'}), "(seconds, data_sum, color='black', linestyle=':')\n", (7697, 7746), True, 'import matplotlib.pyplot as plt\n'), ((7755, 7792), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (7775, 7792), True, 'import matplotlib.pyplot as plt\n'), ((7801, 7821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Second"""'], {}), "('Second')\n", (7811, 7821), True, 'import matplotlib.pyplot as plt\n'), ((7830, 7855), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Microstrain"""'], {}), "('Microstrain')\n", (7840, 7855), True, 'import matplotlib.pyplot as plt\n'), ((8124, 8143), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (8127, 8143), False, 'from sklearn.decomposition import PCA\n'), ((8301, 8359), 'matplotlib.pyplot.plot', 'plt.plot', (['seconds', 'data_pc_1'], {'color': '"""black"""', 'linestyle': '""":"""'}), "(seconds, data_pc_1, color='black', linestyle=':')\n", (8309, 8359), True, 'import matplotlib.pyplot as plt\n'), ((8368, 8405), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (8388, 8405), True, 'import matplotlib.pyplot as plt\n'), ((8414, 8434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Second"""'], {}), "('Second')\n", (8424, 8434), True, 'import matplotlib.pyplot as plt\n'), ((8443, 8468), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Microstrain"""'], {}), "('Microstrain')\n", (8453, 8468), True, 'import matplotlib.pyplot as plt\n'), ((8756, 8799), 'methods.cusum.cusum', 'cusum.cusum', (['data_pc_1', 'mu_pc_1', 'sigma_pc_1'], {}), '(data_pc_1, mu_pc_1, sigma_pc_1)\n', (8767, 8799), False, 'from methods import cusum\n'), ((8891, 8912), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (8898, 8912), True, 'import numpy as np\n'), ((8934, 8954), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (8940, 8954), True, 'import numpy as np\n'), ((9040, 9101), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)', 'tol': '(1e-05)', 'class_weight': '"""balanced"""'}), "(random_state=0, tol=1e-05, class_weight='balanced')\n", (9049, 9101), False, 'from sklearn.svm import LinearSVC\n'), ((10379, 
10416), 'evaluate.evaluate', 'evaluate.evaluate', (['vae_labels', 'labels'], {}), '(vae_labels, labels)\n', (10396, 10416), False, 'import evaluate\n'), ((10601, 10642), 'evaluate.evaluate', 'evaluate.evaluate', (['vae_rnn_labels', 'labels'], {}), '(vae_rnn_labels, labels)\n', (10618, 10642), False, 'import evaluate\n'), ((10841, 10880), 'evaluate.evaluate', 'evaluate.evaluate', (['cusum_labels', 'labels'], {}), '(cusum_labels, labels)\n', (10858, 10880), False, 'import evaluate\n'), ((11078, 11122), 'evaluate.evaluate', 'evaluate.evaluate', (['cusum_pc_1_labels', 'labels'], {}), '(cusum_pc_1_labels, labels)\n', (11095, 11122), False, 'import evaluate\n'), ((11331, 11368), 'evaluate.evaluate', 'evaluate.evaluate', (['svc_labels', 'labels'], {}), '(svc_labels, labels)\n', (11348, 11368), False, 'import evaluate\n'), ((1744, 1761), 'numpy.mean', 'np.mean', (['data_sum'], {}), '(data_sum)\n', (1751, 1761), True, 'import numpy as np\n'), ((1763, 1779), 'numpy.std', 'np.std', (['data_sum'], {}), '(data_sum)\n', (1769, 1779), True, 'import numpy as np\n'), ((2207, 2225), 'numpy.mean', 'np.mean', (['data_pc_1'], {}), '(data_pc_1)\n', (2214, 2225), True, 'import numpy as np\n'), ((2241, 2258), 'numpy.std', 'np.std', (['data_pc_1'], {}), '(data_pc_1)\n', (2247, 2258), True, 'import numpy as np\n'), ((7463, 7480), 'numpy.mean', 'np.mean', (['data_sum'], {}), '(data_sum)\n', (7470, 7480), True, 'import numpy as np\n'), ((7482, 7498), 'numpy.std', 'np.std', (['data_sum'], {}), '(data_sum)\n', (7488, 7498), True, 'import numpy as np\n'), ((8689, 8707), 'numpy.mean', 'np.mean', (['data_pc_1'], {}), '(data_pc_1)\n', (8696, 8707), True, 'import numpy as np\n'), ((8709, 8726), 'numpy.std', 'np.std', (['data_pc_1'], {}), '(data_pc_1)\n', (8715, 8726), True, 'import numpy as np\n'), ((7645, 7669), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (7654, 7669), True, 'import numpy as np\n'), ((7934, 7943), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7941, 7943), True, 'import matplotlib.pyplot as plt\n'), ((8547, 8556), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8554, 8556), True, 'import matplotlib.pyplot as plt\n'), ((10180, 10279), 'pickle.dump', 'pickle.dump', (['(data_sum, cusum_labels, cusum_pc_1_labels, vae_labels, vae_rnn_labels, labels)', 'f'], {}), '((data_sum, cusum_labels, cusum_pc_1_labels, vae_labels,\n vae_rnn_labels, labels), f)\n', (10191, 10279), False, 'import pickle\n')]
|
"""Train and evaluate the model"""
import os
import torch
import utils
import random
import logging
import argparse
import torch.nn as nn
from tqdm import trange
from evaluate import evaluate
from data_loader import DataLoader
from SequenceTagger import BertForSequenceTagging
from transformers.optimization import get_linear_schedule_with_warmup, AdamW
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='conll', help="Directory containing the dataset")
parser.add_argument('--seed', type=int, default=2020, help="random seed for initialization")
parser.add_argument('--restore_dir', default=None,
help="Optional, name of the directory containing weights to reload before training, e.g., 'experiments/conll/'")
def train_epoch(model, data_iterator, optimizer, scheduler, params):
"""Train the model on `steps` batches"""
# set model to training mode
model.train()
# a running average object for loss
loss_avg = utils.RunningAverage()
# Use tqdm for progress bar
one_epoch = trange(params.train_steps)
for batch in one_epoch:
# fetch the next training batch
batch_data, batch_token_starts, batch_tags = next(data_iterator)
batch_masks = batch_data.gt(0) # get padding mask
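        # token ids greater than 0 are real tokens: the DataLoader below is
        # built with token_pad_idx=0, so gt(0) yields the attention mask directly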
# compute model output and loss
loss = model((batch_data, batch_token_starts), token_type_ids=None, attention_mask=batch_masks, labels=batch_tags)[0]
# clear previous gradients, compute gradients of all variables wrt loss
model.zero_grad()
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=params.clip_grad)
# performs updates using calculated gradients
optimizer.step()
scheduler.step()
# update the average loss
loss_avg.update(loss.item())
one_epoch.set_postfix(loss='{:05.3f}'.format(loss_avg()))
def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):
"""Train the model and evaluate every epoch."""
# reload weights from restore_dir if specified
if restore_dir is not None:
        model = BertForSequenceTagging.from_pretrained(restore_dir)
best_val_f1 = 0.0
patience_counter = 0
for epoch in range(1, params.epoch_num + 1):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch, params.epoch_num))
# Compute number of batches in one epoch
params.train_steps = params.train_size // params.batch_size
params.val_steps = params.val_size // params.batch_size
# data iterator for training
train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)
# Train for one epoch on training set
train_epoch(model, train_data_iterator, optimizer, scheduler, params)
# data iterator for evaluation
# train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)
val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)
# Evaluate for one epoch on training set and validation set
# params.eval_steps = params.train_steps
# train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1
params.eval_steps = params.val_steps
val_metrics = evaluate(model, val_data_iterator, params, mark='Val')
val_f1 = val_metrics['f1']
improve_f1 = val_f1 - best_val_f1
if improve_f1 > 1e-5:
logging.info("- Found new best F1")
best_val_f1 = val_f1
model.save_pretrained(model_dir)
if improve_f1 < params.patience:
patience_counter += 1
else:
patience_counter = 0
else:
patience_counter += 1
# Early stopping and logging best f1
if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:
logging.info("Best val f1: {:05.2f}".format(best_val_f1))
break
if __name__ == '__main__':
args = parser.parse_args()
tagger_model_dir = 'experiments/' + args.dataset
# Load the parameters from json file
json_path = os.path.join(tagger_model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# Use GPUs if available
params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed for reproducible experiments
random.seed(args.seed)
torch.manual_seed(args.seed)
params.seed = args.seed
# Set the logger
utils.set_logger(os.path.join(tagger_model_dir, 'train.log'))
logging.info("device: {}".format(params.device))
# Create the input data pipeline
# Initialize the DataLoader
data_dir = 'data/' + args.dataset
if args.dataset in ["conll"]:
bert_class = 'bert-base-cased' # auto
# bert_class = 'pretrained_bert_models/bert-base-cased/' # manual
elif args.dataset in ["msra"]:
bert_class = 'bert-base-chinese' # auto
# bert_class = 'pretrained_bert_models/bert-base-chinese/' # manual
data_loader = DataLoader(data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1)
logging.info("Loading the datasets...")
# Load training data and test data
train_data = data_loader.load_data('train')
val_data = data_loader.load_data('val')
# Specify the training and validation dataset sizes
params.train_size = train_data['size']
params.val_size = val_data['size']
logging.info("Loading BERT model...")
# Prepare model
model = BertForSequenceTagging.from_pretrained(bert_class, num_labels=len(params.tag2idx))
model.to(params.device)
# Prepare optimizer
if params.full_finetuning:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
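        # Standard BERT fine-tuning recipe: bias and LayerNorm parameters are
        # exempt from weight decay below; all other weights are decayed.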
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': params.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
else: # only finetune the head classifier
param_optimizer = list(model.classifier.named_parameters())
optimizer_grouped_parameters = [{'params': [p for n, p in param_optimizer]}]
optimizer = AdamW(optimizer_grouped_parameters, lr=params.learning_rate, correct_bias=False)
train_steps_per_epoch = params.train_size // params.batch_size
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=train_steps_per_epoch, num_training_steps=params.epoch_num * train_steps_per_epoch)
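    # The schedule ramps the learning rate linearly from 0 over the first
    # epoch's worth of steps, then decays it linearly to 0 by the end of training.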
# Train and evaluate the model
logging.info("Starting training for {} epoch(s)".format(params.epoch_num))
train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, tagger_model_dir, args.restore_dir)
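    # Example invocation (script name and paths are illustrative):
    #   python train.py --dataset conll --seed 2020 --restore_dir experiments/conll/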
|
[
"evaluate.evaluate"
] |
[((410, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (433, 435), False, 'import argparse\n'), ((1027, 1049), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1047, 1049), False, 'import utils\n'), ((1103, 1129), 'tqdm.trange', 'trange', (['params.train_steps'], {}), '(params.train_steps)\n', (1109, 1129), False, 'from tqdm import trange\n'), ((4351, 4396), 'os.path.join', 'os.path.join', (['tagger_model_dir', '"""params.json"""'], {}), "(tagger_model_dir, 'params.json')\n", (4363, 4396), False, 'import os\n'), ((4408, 4433), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (4422, 4433), False, 'import os\n'), ((4507, 4530), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (4519, 4530), False, 'import utils\n'), ((4701, 4723), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (4712, 4723), False, 'import random\n'), ((4728, 4756), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4745, 4756), False, 'import torch\n'), ((5381, 5454), 'data_loader.DataLoader', 'DataLoader', (['data_dir', 'bert_class', 'params'], {'token_pad_idx': '(0)', 'tag_pad_idx': '(-1)'}), '(data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1)\n', (5391, 5454), False, 'from data_loader import DataLoader\n'), ((5464, 5503), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (5476, 5503), False, 'import logging\n'), ((5784, 5821), 'logging.info', 'logging.info', (['"""Loading BERT model..."""'], {}), "('Loading BERT model...')\n", (5796, 5821), False, 'import logging\n'), ((6691, 6776), 'transformers.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'params.learning_rate', 'correct_bias': '(False)'}), '(optimizer_grouped_parameters, lr=params.learning_rate, correct_bias=False\n )\n', (6696, 6776), False, 'from transformers.optimization import get_linear_schedule_with_warmup, AdamW\n'), ((6855, 7007), 'transformers.optimization.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'train_steps_per_epoch', 'num_training_steps': '(params.epoch_num * train_steps_per_epoch)'}), '(optimizer, num_warmup_steps=\n train_steps_per_epoch, num_training_steps=params.epoch_num *\n train_steps_per_epoch)\n', (6886, 7007), False, 'from transformers.optimization import get_linear_schedule_with_warmup, AdamW\n'), ((2259, 2315), 'SequenceTagger.BertForSequenceTagging.from_pretrained', 'BertForSequenceTagging.from_pretrained', (['tagger_model_dir'], {}), '(tagger_model_dir)\n', (2297, 2315), False, 'from SequenceTagger import BertForSequenceTagging\n'), ((3435, 3489), 'evaluate.evaluate', 'evaluate', (['model', 'val_data_iterator', 'params'], {'mark': '"""Val"""'}), "(model, val_data_iterator, params, mark='Val')\n", (3443, 3489), False, 'from evaluate import evaluate\n'), ((4833, 4876), 'os.path.join', 'os.path.join', (['tagger_model_dir', '"""train.log"""'], {}), "(tagger_model_dir, 'train.log')\n", (4845, 4876), False, 'import os\n'), ((3622, 3657), 'logging.info', 'logging.info', (['"""- Found new best F1"""'], {}), "('- Found new best F1')\n", (3634, 3657), False, 'import logging\n'), ((4603, 4628), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4626, 4628), False, 'import torch\n')]
|
from unittest.mock import patch, Mock, PropertyMock
from evaluate.vcf_file import VCFFile
from evaluate.vcf import NullVCFError, VCFFactory
import pytest
import pysam
from io import StringIO
@pytest.fixture
def pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample():
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample = Mock()
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample.samples = ["sample_1"]
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample.chrom = "chrom_1"
return pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample
@pytest.fixture
def pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample():
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample = Mock()
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample.samples = ["sample_1"]
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample.chrom = "chrom_2"
return pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample
@pytest.fixture
def pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples():
pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples = Mock()
pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples.samples = ["sample_1", "sample_2"]
pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples.chrom = "chrom_1"
return pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples
@pytest.fixture
def vcf_record_1_mock():
vcf_record_1_mock = Mock()
return vcf_record_1_mock
@pytest.fixture
def vcf_record_2_mock():
vcf_record_2_mock = Mock()
return vcf_record_2_mock
@pytest.fixture
def vcf_record_3_mock():
vcf_record_3_mock = Mock()
return vcf_record_3_mock
sample_with_some_genes = {
"sample_1": {
"gene_1": [1, 2, 3, 4],
"gene_2": [5, 6],
},
"sample_2": {
"gene_1": [7, 8, 9],
"gene_2": [10],
}
}
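# Toy sample -> gene -> VCF-record mapping; the lookup tests below patch
# VCFFile.sample_to_gene_to_VCFs with it via PropertyMock.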
def chrom_1_raises_NullVCFError_others_are_fine(pysam_variant_record, sample):
if pysam_variant_record.chrom == "chrom_1":
raise NullVCFError()
else:
return vcf_record_2_mock
class pysam_VariantRecord_Mock:
def __init__(self, list_of_records, header=""):
self.list_of_records=list_of_records
self.header=header
def __iter__(self):
return self.list_of_records.__iter__()
def __next__(self):
return self.list_of_records.__next__()
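# pysam_VariantRecord_Mock above is a minimal stand-in for pysam.VariantFile:
# these tests only need VCFFile to iterate over the records (and, when
# writing, to read a header).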
class Test_VCFFile:
def test___constructor___no_records_in_VCF_returns_nothing(self):
vcf_file = VCFFile(pysam_VariantRecord_Mock([]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {}
assert actual == expected
@patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, return_value=vcf_record_1_mock)
def test___constructor___one_record_in_one_sample_and_one_gene(self, from_VariantRecord_and_Sample_Mock,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_1": [vcf_record_1_mock]}}
assert actual == expected
@patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__,
return_value=vcf_record_1_mock)
def test___constructor___one_record_in_two_samples_and_one_gene(self, from_VariantRecord_and_Sample_Mock,
pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_1_and_two_samples]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_1": [vcf_record_1_mock]},
"sample_2": {"chrom_1": [vcf_record_1_mock]}}
assert actual == expected
@patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__,
side_effect=[vcf_record_1_mock, vcf_record_2_mock])
def test___constructor___two_records_in_one_sample_and_two_genes(self, from_VariantRecord_and_Sample_Mock,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_1": [vcf_record_1_mock], "chrom_2": [vcf_record_2_mock]}}
assert actual == expected
@patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__,
side_effect=[vcf_record_1_mock, vcf_record_2_mock])
def test___constructor___two_records_in_one_sample_and_one_gene(self, from_VariantRecord_and_Sample_Mock,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_1": [vcf_record_1_mock, vcf_record_2_mock]}}
assert actual == expected
@patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__,
side_effect=chrom_1_raises_NullVCFError_others_are_fine)
def test___constructor___two_records_in_one_sample_and_two_genes___first_is_null_and_is_not_added(self, from_VariantRecord_and_Sample_Mock,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_2": [vcf_record_2_mock]}}
assert actual == expected
@patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__,
side_effect=chrom_1_raises_NullVCFError_others_are_fine)
def test___constructor___two_records_in_one_sample_and_two_genes___second_is_null_and_is_not_added(self, from_VariantRecord_and_Sample_Mock,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_2_and_one_sample,
pysam_variant_record_mock_that_maps_to_chrom_1_and_one_sample,]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_2": [vcf_record_2_mock]}}
assert actual == expected
def test___constructor___several_records_in_several_samples_and_several_genes(self):
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1 = Mock()
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1.samples = ["sample_1"]
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1.chrom = "chrom_1"
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1_2_3 = Mock()
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1_2_3.samples = ["sample_1", "sample_2", "sample_3"]
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1_2_3.chrom = "chrom_1"
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_1_2 = Mock()
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_1_2.samples = ["sample_1", "sample_2"]
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_1_2.chrom = "chrom_2"
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2 = Mock()
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2.samples = ["sample_2"]
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2.chrom = "chrom_2"
another_pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2 = Mock()
another_pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2.samples = ["sample_2"]
another_pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2.chrom = "chrom_2"
vcf_record_1_mock = Mock(name="vcf_record_1_mock")
vcf_record_2_mock = Mock(name="vcf_record_2_mock")
vcf_record_3_mock = Mock(name="vcf_record_3_mock")
vcf_record_4_mock = Mock(name="vcf_record_4_mock")
vcf_record_5_mock = Mock(name="vcf_record_5_mock")
vcf_record_6_mock = Mock(name="vcf_record_6_mock")
vcf_record_7_mock = Mock(name="vcf_record_7_mock")
vcf_record_8_mock = Mock(name="vcf_record_8_mock")
with patch.object(VCFFactory, VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__,
side_effect=[vcf_record_1_mock, vcf_record_2_mock, vcf_record_3_mock,
vcf_record_4_mock, vcf_record_5_mock, vcf_record_6_mock,
vcf_record_7_mock, vcf_record_8_mock]):
vcf_file = VCFFile(pysam_VariantRecord_Mock([pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1,
pysam_variant_record_mock_that_maps_to_chrom_1_and_sample_1_2_3,
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_1_2,
pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2,
another_pysam_variant_record_mock_that_maps_to_chrom_2_and_sample_2]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.sample_to_gene_to_VCFs
expected = {"sample_1": {"chrom_1": [vcf_record_1_mock, vcf_record_2_mock],
"chrom_2": [vcf_record_5_mock]},
"sample_2": {"chrom_1": [vcf_record_3_mock],
"chrom_2": [vcf_record_6_mock, vcf_record_7_mock, vcf_record_8_mock]},
"sample_3": {"chrom_1": [vcf_record_4_mock]}}
assert actual == expected
@patch.object(VCFFile, "sample_to_gene_to_VCFs", new_callable=PropertyMock, return_value = sample_with_some_genes)
def test___get_VCF_records_given_sample_and_gene___sample_1_gene_1(self, *mocks):
vcf_file = VCFFile(pysam_VariantRecord_Mock([]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.get_VCF_records_given_sample_and_gene("sample_1", "gene_1")
expected = [1,2,3,4]
assert actual == expected
@patch.object(VCFFile, "sample_to_gene_to_VCFs", new_callable=PropertyMock, return_value=sample_with_some_genes)
def test___get_VCF_records_given_sample_and_gene___sample_1_gene_2(self, *mocks):
vcf_file = VCFFile(pysam_VariantRecord_Mock([]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.get_VCF_records_given_sample_and_gene("sample_1", "gene_2")
expected = [5,6]
assert actual == expected
@patch.object(VCFFile, "sample_to_gene_to_VCFs", new_callable=PropertyMock, return_value = sample_with_some_genes)
def test___get_VCF_records_given_sample_and_gene___sample_2_gene_1(self, *mocks):
vcf_file = VCFFile(pysam_VariantRecord_Mock([]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.get_VCF_records_given_sample_and_gene("sample_2", "gene_1")
expected = [7, 8, 9]
assert actual == expected
@patch.object(VCFFile, "sample_to_gene_to_VCFs", new_callable=PropertyMock, return_value=sample_with_some_genes)
def test___get_VCF_records_given_sample_and_gene___sample_2_gene_2(self, *mocks):
vcf_file = VCFFile(pysam_VariantRecord_Mock([]), VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
actual = vcf_file.get_VCF_records_given_sample_and_gene("sample_2", "gene_2")
expected = [10]
assert actual == expected
def test___write(self):
vcf_filepath = "tests/test_cases/test.vcf"
with pysam.VariantFile(vcf_filepath) as pysam_variant_file:
vcf_file = VCFFile(pysam_variant_file=pysam_variant_file, VCF_creator_method=VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
filehandler = StringIO()
vcf_file.write(filehandler)
actual_vcf = filehandler.getvalue()
filehandler.close()
expected_vcf="""##fileformat=VCFv4.3
##FILTER=<ID=PASS,Description="All filters passed">
##fileDate==26/04/19
##ALT=<ID=SNP,Description="SNP">
##ALT=<ID=PH_SNPs,Description="Phased SNPs">
##ALT=<ID=INDEL,Description="Insertion-deletion">
##ALT=<ID=COMPLEX,Description="Complex variant, collection of SNPs and indels">
##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of variant">
##ALT=<ID=SIMPLE,Description="Graph bubble is simple">
##ALT=<ID=NESTED,Description="Variation site was a nested feature in the graph">
##ALT=<ID=TOO_MANY_ALTS,Description="Variation site was a multinested feature with too many alts to include all in the VCF">
##INFO=<ID=GRAPHTYPE,Number=1,Type=String,Description="Type of graph feature">
##contig=<ID=GC00000001_155>
##FORMAT=<ID=GT,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MEAN_FWD_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MEAN_REV_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MED_FWD_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MED_REV_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=SUM_FWD_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=SUM_REV_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=GAPS,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=LIKELIHOOD,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=GT_CONF,Number=1,Type=String,Description="Dummy">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT sample
GC00000001_155 1 . CTGCCCGTTGGC TTGGGGGAAGGCTCTGCACTGCCCGTTGGC,TTGGGGGAAGGCTCTGCACTGCCTGTTGGT . . SVTYPE=COMPLEX;GRAPHTYPE=NESTED GT:MEAN_FWD_COVG:MEAN_REV_COVG:MED_FWD_COVG:MED_REV_COVG:SUM_FWD_COVG:SUM_REV_COVG:GAPS:LIKELIHOOD:GT_CONF 1:24,6,0:30,7,0:24,0,0:30,0,0:24,24,0:30,30,0:0,0.75,1:-63.3221,-326.079,-432.546:262.757
GC00000001_155 1 . ACGT TTGGGGGAAGGCTCTGCACTGCCCGTTGGC,TTGGGGGAAGGCTCTGCACTGCCTGTTGGT . . SVTYPE=COMPLEX;GRAPHTYPE=NESTED GT:MEAN_FWD_COVG:MEAN_REV_COVG:MED_FWD_COVG:MED_REV_COVG:SUM_FWD_COVG:SUM_REV_COVG:GAPS:LIKELIHOOD:GT_CONF 1:6,25,0:7,30,0:0,24,0:0,30,0:24,24,0:30,30,0:0.75,0,1:-326.079,-63.3221,-432.546:262.757
GC00000001_155 1 . CTGCCCGTTGGC TTGGGGGAAGGCTCTGCACTGCCCGTTGGC,TTGGGGGAAGGCTCTGCACTGCCTGTTGGT . . SVTYPE=COMPLEX;GRAPHTYPE=NESTED GT:MEAN_FWD_COVG:MEAN_REV_COVG:MED_FWD_COVG:MED_REV_COVG:SUM_FWD_COVG:SUM_REV_COVG:GAPS:LIKELIHOOD:GT_CONF 0:24,6,0:30,7,0:24,0,0:30,0,0:24,24,0:30,30,0:0,0.75,1:-63.3221,-326.079,-432.546:262.757
"""
assert actual_vcf == expected_vcf
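# The suite runs under pytest; test___write additionally expects the fixture
# file tests/test_cases/test.vcf to exist on disk.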
|
[
"evaluate.vcf.NullVCFError",
"evaluate.vcf_file.VCFFile"
] |
[((346, 352), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (350, 352), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((753, 759), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (757, 759), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((1162, 1168), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1166, 1168), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((1496, 1502), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1500, 1502), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((1598, 1604), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1602, 1604), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((1700, 1706), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1704, 1706), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((2751, 2887), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'return_value': 'vcf_record_1_mock'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, return_value\n =vcf_record_1_mock)\n', (2763, 2887), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((3453, 3589), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'return_value': 'vcf_record_1_mock'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, return_value\n =vcf_record_1_mock)\n', (3465, 3589), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((4243, 4399), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'side_effect': '[vcf_record_1_mock, vcf_record_2_mock]'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, side_effect=\n [vcf_record_1_mock, vcf_record_2_mock])\n', (4255, 4399), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((5242, 5398), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'side_effect': '[vcf_record_1_mock, vcf_record_2_mock]'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, side_effect=\n [vcf_record_1_mock, vcf_record_2_mock])\n', (5254, 5398), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((6094, 6255), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'side_effect': 'chrom_1_raises_NullVCFError_others_are_fine'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, side_effect=\n chrom_1_raises_NullVCFError_others_are_fine)\n', (6106, 6255), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((7100, 7261), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'side_effect': 'chrom_1_raises_NullVCFError_others_are_fine'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, side_effect=\n chrom_1_raises_NullVCFError_others_are_fine)\n', (7112, 7261), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((11453, 11568), 'unittest.mock.patch.object', 'patch.object', (['VCFFile', '"""sample_to_gene_to_VCFs"""'], {'new_callable': 'PropertyMock', 'return_value': 
'sample_with_some_genes'}), "(VCFFile, 'sample_to_gene_to_VCFs', new_callable=PropertyMock,\n return_value=sample_with_some_genes)\n", (11465, 11568), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((11926, 12041), 'unittest.mock.patch.object', 'patch.object', (['VCFFile', '"""sample_to_gene_to_VCFs"""'], {'new_callable': 'PropertyMock', 'return_value': 'sample_with_some_genes'}), "(VCFFile, 'sample_to_gene_to_VCFs', new_callable=PropertyMock,\n return_value=sample_with_some_genes)\n", (11938, 12041), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((12393, 12508), 'unittest.mock.patch.object', 'patch.object', (['VCFFile', '"""sample_to_gene_to_VCFs"""'], {'new_callable': 'PropertyMock', 'return_value': 'sample_with_some_genes'}), "(VCFFile, 'sample_to_gene_to_VCFs', new_callable=PropertyMock,\n return_value=sample_with_some_genes)\n", (12405, 12508), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((12866, 12981), 'unittest.mock.patch.object', 'patch.object', (['VCFFile', '"""sample_to_gene_to_VCFs"""'], {'new_callable': 'PropertyMock', 'return_value': 'sample_with_some_genes'}), "(VCFFile, 'sample_to_gene_to_VCFs', new_callable=PropertyMock,\n return_value=sample_with_some_genes)\n", (12878, 12981), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((2069, 2083), 'evaluate.vcf.NullVCFError', 'NullVCFError', ([], {}), '()\n', (2081, 2083), False, 'from evaluate.vcf import NullVCFError, VCFFactory\n'), ((8262, 8268), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (8266, 8268), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((8521, 8527), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (8525, 8527), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((8810, 8816), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (8814, 8816), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9081, 9087), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (9085, 9087), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9344, 9350), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (9348, 9350), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9573, 9603), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_1_mock"""'}), "(name='vcf_record_1_mock')\n", (9577, 9603), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9632, 9662), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_2_mock"""'}), "(name='vcf_record_2_mock')\n", (9636, 9662), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9691, 9721), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_3_mock"""'}), "(name='vcf_record_3_mock')\n", (9695, 9721), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9750, 9780), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_4_mock"""'}), "(name='vcf_record_4_mock')\n", (9754, 9780), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9809, 9839), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_5_mock"""'}), "(name='vcf_record_5_mock')\n", (9813, 9839), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9868, 9898), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_6_mock"""'}), "(name='vcf_record_6_mock')\n", (9872, 9898), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((9927, 9957), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_7_mock"""'}), "(name='vcf_record_7_mock')\n", (9931, 9957), False, 'from 
unittest.mock import patch, Mock, PropertyMock\n'), ((9986, 10016), 'unittest.mock.Mock', 'Mock', ([], {'name': '"""vcf_record_8_mock"""'}), "(name='vcf_record_8_mock')\n", (9990, 10016), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((13648, 13658), 'io.StringIO', 'StringIO', ([], {}), '()\n', (13656, 13658), False, 'from io import StringIO\n'), ((10033, 10311), 'unittest.mock.patch.object', 'patch.object', (['VCFFactory', 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample.__name__'], {'side_effect': '[vcf_record_1_mock, vcf_record_2_mock, vcf_record_3_mock, vcf_record_4_mock,\n vcf_record_5_mock, vcf_record_6_mock, vcf_record_7_mock, vcf_record_8_mock]'}), '(VCFFactory, VCFFactory.\n create_Pandora_VCF_from_VariantRecord_and_Sample.__name__, side_effect=\n [vcf_record_1_mock, vcf_record_2_mock, vcf_record_3_mock,\n vcf_record_4_mock, vcf_record_5_mock, vcf_record_6_mock,\n vcf_record_7_mock, vcf_record_8_mock])\n', (10045, 10311), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((13420, 13451), 'pysam.VariantFile', 'pysam.VariantFile', (['vcf_filepath'], {}), '(vcf_filepath)\n', (13437, 13451), False, 'import pysam\n'), ((13498, 13629), 'evaluate.vcf_file.VCFFile', 'VCFFile', ([], {'pysam_variant_file': 'pysam_variant_file', 'VCF_creator_method': 'VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample'}), '(pysam_variant_file=pysam_variant_file, VCF_creator_method=\n VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)\n', (13505, 13629), False, 'from evaluate.vcf_file import VCFFile\n')]
|
import argparse
import logging
import sys
from pathlib import Path
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard.writer import SummaryWriter
from tqdm import tqdm
import numpy as np
from utils.data_loading import BasicDataset, MyDataset
from utils.dice_score import dice_loss
from evaluate import evaluate
from model.snl_unet import SNLUNet
from utils.metrics import Evaluator
dir_img = Path('dataset/origindataset/train/')
dir_mask = Path('dataset/origindataset/label/')
dir_checkpoint = Path('checkpoints/SNLUNet/')
def train_net(net,
device,
epochs: int = 10,
batch_size: int = 12,
learning_rate: float = 0.00001,
val_percent: float = 0.1,
save_checkpoint: bool = True,
img_scale: float = 1,
amp: bool = False):
# Create dataset
try:
dataset = MyDataset(dir_img, dir_mask, img_scale)
except (AssertionError, RuntimeError):
dataset = BasicDataset(dir_img, dir_mask, img_scale)
# Split into train / validation partitions
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train_set, val_set = random_split(dataset, [n_train, n_val], generator=torch.Generator().manual_seed(0))
# tensorboard
tb_writer = SummaryWriter(log_dir="runs/SNLUNet")
# number of workers
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    # Create data loaders
loader_args = dict(batch_size=batch_size, num_workers=nw, pin_memory=True)
train_loader = DataLoader(train_set, shuffle=True, **loader_args)
val_loader = DataLoader(val_set, shuffle=False, drop_last=True, **loader_args)
# Set up the optimizer, the loss, the learning rate scheduler and the loss scaling for AMP
optimizer = optim.RMSprop(net.parameters(), lr=learning_rate, weight_decay=1e-8, momentum=0.9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=2) # goal: maximize Dice score
grad_scaler = torch.cuda.amp.GradScaler(enabled=amp)
criterion = nn.CrossEntropyLoss()
global_step = 0
evaluator = Evaluator(2)
evaluator.reset()
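    # Evaluator(2) accumulates per-pixel statistics for the two classes across
    # batches; pixel accuracy and mIoU are read from it once per epoch below.
    # Note that reset() is only called here, so the statistics accumulate
    # across epochs.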
# Begin training
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
images = batch['image']
true_masks = batch['mask']
assert images.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {images.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
images = images.to(device=device, dtype=torch.float32)
true_masks = true_masks.to(device=device, dtype=torch.long)
with torch.cuda.amp.autocast(enabled=amp):
masks_pred = net(images)
loss = criterion(masks_pred, true_masks) \
+ dice_loss(F.softmax(masks_pred, dim=1).float(),
F.one_hot(true_masks, net.n_classes).permute(0, 3, 1, 2).float(),
multiclass=True)
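                # The objective combines pixel-wise cross-entropy with a soft
                # multiclass Dice term (softmax probabilities vs. one-hot
                # masks), a common way to counter foreground/background class
                # imbalance in segmentation.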
# acc
output = masks_pred.data.cpu().numpy()
label = true_masks.float().cpu().numpy()
output = np.argmax(output, axis=1)
# Add batch sample into evaluator
evaluator.add_batch(label, output)
optimizer.zero_grad(set_to_none=True)
grad_scaler.scale(loss).backward()
grad_scaler.step(optimizer)
grad_scaler.update()
pbar.update(images.shape[0])
global_step += 1
epoch_loss += loss.item()
pbar.set_postfix(**{'loss (batch)': loss.item()})
# Evaluation round
if global_step % (n_train // batch_size) == 0:
val_score = evaluate(net, val_loader, device)
scheduler.step(val_score)
logging.info('Validation Dice score: {}'.format(val_score))
Acc = evaluator.Pixel_Accuracy()
mIoU = evaluator.Mean_Intersection_over_Union()
logging.info('epoch{} Acc: {} '.format(epoch + 1, Acc))
logging.info('epoch{} mIoU: {} '.format(epoch + 1, mIoU))
tags = ["loss", "accuracy", "mIoU", "learning_rate"]
tb_writer.add_scalar(tags[0], epoch_loss, epoch)
tb_writer.add_scalar(tags[1], Acc, epoch)
tb_writer.add_scalar(tags[2], mIoU, epoch)
tb_writer.add_scalar(tags[3], optimizer.param_groups[0]["lr"], epoch)
if save_checkpoint:
Path(dir_checkpoint).mkdir(parents=True, exist_ok=True)
torch.save(net.state_dict(), str(dir_checkpoint / 'checkpoint_epoch{}.pth'.format(epoch + 1)))
logging.info(f'Checkpoint {epoch + 1} saved!')
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks')
parser.add_argument('--epochs', '-e', metavar='E', type=int, default=5, help='Number of epochs')
parser.add_argument('--batch-size', '-b', dest='batch_size', metavar='B', type=int, default=16, help='Batch size')
parser.add_argument('--learning-rate', '-l', metavar='LR', type=float, default=0.00001,
help='Learning rate', dest='lr')
parser.add_argument('--load', '-f', type=str, default=False, help='Load model from a .pth file')
parser.add_argument('--scale', '-s', type=float, default=0.5, help='Downscaling factor of the images')
parser.add_argument('--validation', '-v', dest='val', type=float, default=10.0,
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('--amp', action='store_true', default=True, help='Use mixed precision')
parser.add_argument('--num_classes', type=int, default=2)
return parser.parse_args()
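# Example invocation (script name and values are illustrative):
#   python train.py -e 10 -b 12 -l 1e-5 -s 1 -v 10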
if __name__ == '__main__':
args = get_args()
    if not torch.cuda.is_available():
        raise EnvironmentError("no GPU device found for training.")
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {device}')
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
net = SNLUNet(n_channels=3, n_classes=2, bilinear=True)
if args.load:
net.load_state_dict(torch.load(args.load, map_location=device))
net.to(device=device)
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batch_size,
learning_rate=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100,
amp=args.amp)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
sys.exit(0)
|
[
"evaluate.evaluate"
] |
[((528, 564), 'pathlib.Path', 'Path', (['"""dataset/origindataset/train/"""'], {}), "('dataset/origindataset/train/')\n", (532, 564), False, 'from pathlib import Path\n'), ((576, 612), 'pathlib.Path', 'Path', (['"""dataset/origindataset/label/"""'], {}), "('dataset/origindataset/label/')\n", (580, 612), False, 'from pathlib import Path\n'), ((630, 658), 'pathlib.Path', 'Path', (['"""checkpoints/SNLUNet/"""'], {}), "('checkpoints/SNLUNet/')\n", (634, 658), False, 'from pathlib import Path\n'), ((1437, 1474), 'torch.utils.tensorboard.writer.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""runs/SNLUNet"""'}), "(log_dir='runs/SNLUNet')\n", (1450, 1474), False, 'from torch.utils.tensorboard.writer import SummaryWriter\n'), ((1699, 1749), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True, **loader_args)\n', (1709, 1749), False, 'from torch.utils.data import DataLoader, random_split\n'), ((1767, 1832), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'shuffle': '(False)', 'drop_last': '(True)'}), '(val_set, shuffle=False, drop_last=True, **loader_args)\n', (1777, 1832), False, 'from torch.utils.data import DataLoader, random_split\n'), ((2044, 2110), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer', '"""max"""'], {'patience': '(2)'}), "(optimizer, 'max', patience=2)\n", (2080, 2110), False, 'from torch import optim\n'), ((2158, 2196), 'torch.cuda.amp.GradScaler', 'torch.cuda.amp.GradScaler', ([], {'enabled': 'amp'}), '(enabled=amp)\n', (2183, 2196), False, 'import torch\n'), ((2213, 2234), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2232, 2234), True, 'import torch.nn as nn\n'), ((2271, 2283), 'utils.metrics.Evaluator', 'Evaluator', (['(2)'], {}), '(2)\n', (2280, 2283), False, 'from utils.metrics import Evaluator\n'), ((5259, 5344), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the UNet on images and target masks"""'}), "(description='Train the UNet on images and target masks'\n )\n", (5282, 5344), False, 'import argparse\n'), ((6445, 6521), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s: %(message)s"""'}), "(level=logging.INFO, format='%(levelname)s: %(message)s')\n", (6464, 6521), False, 'import logging\n'), ((6600, 6638), 'logging.info', 'logging.info', (['f"""Using device {device}"""'], {}), "(f'Using device {device}')\n", (6612, 6638), False, 'import logging\n'), ((6757, 6806), 'model.snl_unet.SNLUNet', 'SNLUNet', ([], {'n_channels': '(3)', 'n_classes': '(2)', 'bilinear': '(True)'}), '(n_channels=3, n_classes=2, bilinear=True)\n', (6764, 6806), False, 'from model.snl_unet import SNLUNet\n'), ((1018, 1057), 'utils.data_loading.MyDataset', 'MyDataset', (['dir_img', 'dir_mask', 'img_scale'], {}), '(dir_img, dir_mask, img_scale)\n', (1027, 1057), False, 'from utils.data_loading import BasicDataset, MyDataset\n'), ((6336, 6361), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6359, 6361), False, 'import torch\n'), ((1119, 1161), 'utils.data_loading.BasicDataset', 'BasicDataset', (['dir_img', 'dir_mask', 'img_scale'], {}), '(dir_img, dir_mask, img_scale)\n', (1131, 1161), False, 'from utils.data_loading import BasicDataset, MyDataset\n'), ((1514, 1528), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1526, 1528), False, 'import os\n'), ((2417, 2484), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_train', 'desc': 'f"""Epoch {epoch 
+ 1}/{epochs}"""', 'unit': '"""img"""'}), "(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img')\n", (2421, 2484), False, 'from tqdm import tqdm\n'), ((5181, 5227), 'logging.info', 'logging.info', (['f"""Checkpoint {epoch + 1} saved!"""'], {}), "(f'Checkpoint {epoch + 1} saved!')\n", (5193, 5227), False, 'import logging\n'), ((6558, 6583), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6581, 6583), False, 'import torch\n'), ((6854, 6896), 'torch.load', 'torch.load', (['args.load'], {'map_location': 'device'}), '(args.load, map_location=device)\n', (6864, 6896), False, 'import torch\n'), ((7331, 7362), 'logging.info', 'logging.info', (['"""Saved interrupt"""'], {}), "('Saved interrupt')\n", (7343, 7362), False, 'import logging\n'), ((7371, 7382), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7379, 7382), False, 'import sys\n'), ((3630, 3655), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (3639, 3655), True, 'import numpy as np\n'), ((1364, 1381), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (1379, 1381), False, 'import torch\n'), ((3086, 3122), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'amp'}), '(enabled=amp)\n', (3109, 3122), False, 'import torch\n'), ((4263, 4296), 'evaluate.evaluate', 'evaluate', (['net', 'val_loader', 'device'], {}), '(net, val_loader, device)\n', (4271, 4296), False, 'from evaluate import evaluate\n'), ((5006, 5026), 'pathlib.Path', 'Path', (['dir_checkpoint'], {}), '(dir_checkpoint)\n', (5010, 5026), False, 'from pathlib import Path\n'), ((3271, 3299), 'torch.nn.functional.softmax', 'F.softmax', (['masks_pred'], {'dim': '(1)'}), '(masks_pred, dim=1)\n', (3280, 3299), True, 'import torch.nn.functional as F\n'), ((3348, 3384), 'torch.nn.functional.one_hot', 'F.one_hot', (['true_masks', 'net.n_classes'], {}), '(true_masks, net.n_classes)\n', (3357, 3384), True, 'import torch.nn.functional as F\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from evaluate import evaluate
from models import CRISSWrapper, LexiconInducer
cos = nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.save_path = configs.save_path.format(src=configs.src_lang, trg=configs.trg_lang)
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
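        # Three counters over the aligned bitext, per (source word, target word):
        #   coocc              - the words co-occur in an aligned sentence pair
        #   semi_matched_coocc - the pair appears in the word alignment
        #   matched_coocc      - the pair is aligned one-to-one (each word has
        #                        exactly one alignment link in the sentence)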
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']]
            except Exception:  # skip lines that fail to parse or align
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
            except Exception:  # skip malformed bitext lines
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def load_lexicon(path):
lexicon = [regex.split(r'\t| ', x.strip()) for x in open(path)]
return set([tuple(x) for x in lexicon])
def extract_dataset(train_lexicon, test_lexicon, coocc, configs):
cooccs = [coocc]
test_set = set()
pos_training_set = set()
neg_training_set = set()
for tsw in set([x[0] for x in train_lexicon]):
for coocc in cooccs:
ssw = to_simplified(tsw) if configs.src_lang == 'zh_CN' else tsw
for stw in coocc[ssw]:
if stw == ssw:
added_self = True
ttw = to_traditional(stw) if configs.trg_lang == 'zh_CN' else stw
if (tsw, ttw) in train_lexicon:
pos_training_set.add((ssw, stw))
else:
neg_training_set.add((ssw, stw))
if (ssw, ssw) in train_lexicon:
pos_training_set.add((ssw, ssw))
else:
neg_training_set.add((ssw, ssw))
for tsw in set([x[0] for x in test_lexicon]):
for coocc in cooccs:
ssw = to_simplified(tsw) if configs.src_lang == 'zh_CN' else tsw
added_self = False
for stw in coocc[ssw]:
if stw == ssw:
added_self = True
test_set.add((ssw, stw))
test_set.add((ssw, ssw))
pos_training_set = list(pos_training_set)
neg_training_set = list(neg_training_set)
test_set = list(test_set)
return pos_training_set, neg_training_set, test_set
def extract_probs(batch, criss, lexicon_inducer, info, configs):
matched_coocc, semi_matched_coocc, coocc, freq_src, freq_trg = info
all_probs = list()
for i in range(0, len(batch), configs.batch_size):
subbatch = batch[i:i+configs.batch_size]
src_words, trg_words = zip(*subbatch)
src_encodings = criss.word_embed(src_words, configs.src_lang).detach()
trg_encodings = criss.word_embed(trg_words, configs.trg_lang).detach()
cos_sim = cos(src_encodings, trg_encodings).reshape(-1, 1)
dot_prod = (src_encodings * trg_encodings).sum(-1).reshape(-1, 1)
features = torch.tensor(
[
[
matched_coocc[x[0]][x[1]],
semi_matched_coocc[x[0]][x[1]],
coocc[x[0]][x[1]],
freq_src[x[0]],
freq_trg[x[1]],
] for x in subbatch
]
).float().to(configs.device).reshape(-1, 5)
features = torch.cat([cos_sim, dot_prod, features], dim=-1)
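        # 7 features per candidate pair: cosine similarity, dot product, the
        # matched / semi-matched / raw co-occurrence counts, and the two word
        # frequencies -- matching the input size of LexiconInducer(7, ...).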
probs = lexicon_inducer(features).squeeze(-1)
all_probs.append(probs)
return torch.cat(all_probs, dim=0)
def get_test_lexicon(
test_set, test_lexicon, criss, lexicon_inducer, info, configs, best_threshold, best_n_cand
):
induced_lexicon = list()
pred_test_lexicon = collections.defaultdict(collections.Counter)
probs = extract_probs(
test_set, criss, lexicon_inducer, info, configs
)
for i, (x, y) in enumerate(test_set):
pred_test_lexicon[x][y] = max(pred_test_lexicon[x][y], probs[i].item())
possible_predictions = list()
for tsw in set([x[0] for x in test_lexicon]):
ssw = to_simplified(tsw)
for stw in pred_test_lexicon[ssw]:
ttw = to_traditional(stw)
pos = 1 if (tsw, ttw) in test_lexicon else 0
possible_predictions.append([tsw, ttw, pred_test_lexicon[ssw][stw], pos])
possible_predictions = sorted(possible_predictions, key=lambda x:-x[-2])
word_cnt = collections.Counter()
correct_predictions = 0
for i, item in enumerate(possible_predictions):
if item[-2] < best_threshold:
prec = correct_predictions / (sum(word_cnt.values()) + 1) * 100.0
rec = correct_predictions / len(test_lexicon) * 100.0
f1 = 2 * prec * rec / (rec + prec)
print(f'Test F1: {f1:.2f}')
break
if word_cnt[item[0]] == best_n_cand:
continue
word_cnt[item[0]] += 1
if item[-1] == 1:
correct_predictions += 1
induced_lexicon.append(item[:2])
eval_result = evaluate(induced_lexicon, test_lexicon)
return induced_lexicon, eval_result
def get_optimal_parameters(
pos_training_set, neg_training_set, train_lexicon, criss,
lexicon_inducer, info, configs,
):
pred_train_lexicon = collections.defaultdict(collections.Counter)
probs = extract_probs(
pos_training_set + neg_training_set, criss, lexicon_inducer, info, configs
)
for i, (x, y) in enumerate(pos_training_set + neg_training_set):
pred_train_lexicon[x][y] = max(pred_train_lexicon[x][y], probs[i].item())
possible_predictions = list()
for tsw in set([x[0] for x in train_lexicon]):
ssw = to_simplified(tsw)
for stw in pred_train_lexicon[ssw]:
ttw = to_traditional(stw)
pos = 1 if (tsw, ttw) in train_lexicon else 0
possible_predictions.append([tsw, ttw, pred_train_lexicon[ssw][stw], pos])
possible_predictions = sorted(possible_predictions, key=lambda x:-x[-2])
best_f1 = -1e10
best_threshold = best_n_cand = 0
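    # Grid search: for each cap on candidates per source word (1..5), sweep
    # the predictions in order of decreasing probability and keep the
    # (threshold, cap) pair that maximizes F1 against the training lexicon.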
for n_cand in range(1, 6):
word_cnt = collections.Counter()
correct_predictions = 0
bar = tqdm(possible_predictions)
for i, item in enumerate(bar):
if word_cnt[item[0]] == n_cand:
continue
word_cnt[item[0]] += 1
if item[-1] == 1:
correct_predictions += 1
prec = correct_predictions / (sum(word_cnt.values()) + 1) * 100.0
rec = correct_predictions / len(train_lexicon) * 100.0
f1 = 2 * prec * rec / (rec + prec)
if f1 > best_f1:
best_f1 = f1
best_threshold = item[-2]
best_n_cand = n_cand
bar.set_description(
f'Best F1={f1:.1f}, Prec={prec:.1f}, Rec={rec:.1f}, NCand={n_cand}, Threshold={item[-2]}'
)
return best_threshold, best_n_cand
def train_test(configs, logging_steps=50000):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
# prepare feature extractor
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path, configs.src_lang, configs.trg_lang, configs.reversed)
# dataset
train_lexicon = load_lexicon(configs.tuning_set)
sim_train_lexicon = {(to_simplified(x[0]), to_simplified(x[1])) for x in train_lexicon}
all_train_lexicon = train_lexicon.union(sim_train_lexicon)
test_lexicon = load_lexicon(configs.test_set)
pos_training_set, neg_training_set, test_set = extract_dataset(
train_lexicon, test_lexicon, info[2], configs
)
training_set_modifier = max(1, len(neg_training_set) // len(pos_training_set))
training_set = pos_training_set * training_set_modifier + neg_training_set
print(f'Positive training set is repeated {training_set_modifier} times due to data imbalance.')
# model and optimizers
criss = CRISSWrapper(device=configs.device)
lexicon_inducer = LexiconInducer(7, configs.hiddens, 1, 5).to(configs.device)
optimizer = torch.optim.Adam(lexicon_inducer.parameters(), lr=.0005)
# train model
for epoch in range(configs.epochs):
model_path = configs.save_path + f'/{epoch}.model.pt'
if os.path.exists(model_path):
lexicon_inducer.load_state_dict(torch.load(model_path))
continue
random.shuffle(training_set)
bar = tqdm(range(0, len(training_set), configs.batch_size))
total_loss = total_cnt = 0
for i, sid in enumerate(bar):
batch = training_set[sid:sid+configs.batch_size]
probs = extract_probs(batch, criss, lexicon_inducer, info, configs)
targets = torch.tensor(
[1 if tuple(x) in all_train_lexicon else 0 for x in batch]).float().to(configs.device)
optimizer.zero_grad()
loss = nn.BCELoss()(probs, targets)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(batch)
total_cnt += len(batch)
bar.set_description(f'loss={total_loss / total_cnt:.5f}')
if (i + 1) % logging_steps == 0:
print(f'Epoch {epoch}, step {i+1}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(lexicon_inducer.state_dict(), configs.save_path + f'/{epoch}.{i+1}.model.pt')
print(f'Epoch {epoch}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(lexicon_inducer.state_dict(), configs.save_path + f'/model.pt')
best_threshold, best_n_cand = get_optimal_parameters(
pos_training_set, neg_training_set, train_lexicon, criss,
lexicon_inducer, info, configs,
)
induced_test_lexicon, test_eval = get_test_lexicon(
test_set, test_lexicon, criss, lexicon_inducer, info, configs, best_threshold, best_n_cand
)
with open(configs.save_path + '/induced.weaklysup.dict', 'w') as fout:
for item in induced_test_lexicon:
fout.write('\t'.join([str(x) for x in item]) + '\n')
fout.close()
return induced_test_lexicon, test_eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-te', '--test', type=str, help='path to test lexicon')
parser.add_argument('-tr', '--train', type=str, help='path to training lexicon')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'test_set': args.test,
'tuning_set': args.train,
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 50,
'device': args.device,
'hiddens': [8],
'src_lang': args.source,
'trg_lang': args.target
}
)
res = train_test(configs)
print(res[-1])
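    # Example invocation (script name and file names are illustrative):
    #   python train.py -a aligns.txt -b bitext.txt -src zh_CN -trg en_XX \
    #       -te lexicon.test -tr lexicon.train -o ./model/ -d cuda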
|
[
"evaluate.evaluate"
] |
[((402, 429), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(-1)'}), '(dim=-1)\n', (421, 429), True, 'import torch.nn as nn\n'), ((801, 827), 'os.path.exists', 'os.path.exists', (['stats_path'], {}), '(stats_path)\n', (815, 827), False, 'import os\n'), ((2732, 2757), 'os.path.exists', 'os.path.exists', (['freq_path'], {}), '(freq_path)\n', (2746, 2757), False, 'import os\n'), ((6530, 6557), 'torch.cat', 'torch.cat', (['all_probs'], {'dim': '(0)'}), '(all_probs, dim=0)\n', (6539, 6557), False, 'import torch\n'), ((6757, 6801), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (6780, 6801), False, 'import collections\n'), ((7446, 7467), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (7465, 7467), False, 'import collections\n'), ((8054, 8093), 'evaluate.evaluate', 'evaluate', (['induced_lexicon', 'test_lexicon'], {}), '(induced_lexicon, test_lexicon)\n', (8062, 8093), False, 'from evaluate import evaluate\n'), ((8322, 8366), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (8345, 8366), False, 'import collections\n'), ((10124, 10166), 'os.system', 'os.system', (['f"""mkdir -p {configs.save_path}"""'], {}), "(f'mkdir -p {configs.save_path}')\n", (10133, 10166), False, 'import os\n'), ((10171, 10225), 'torch.save', 'torch.save', (['configs', "(configs.save_path + '/configs.pt')"], {}), "(configs, configs.save_path + '/configs.pt')\n", (10181, 10225), False, 'import torch\n'), ((11117, 11152), 'models.CRISSWrapper', 'CRISSWrapper', ([], {'device': 'configs.device'}), '(device=configs.device)\n', (11129, 11152), False, 'from models import CRISSWrapper, LexiconInducer\n'), ((13340, 13365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13363, 13365), False, 'import argparse\n'), ((14109, 14390), 'dotdict.DotDict', 'dotdict.DotDict', (["{'test_set': args.test, 'tuning_set': args.train, 'align_path': args.align,\n 'bitext_path': args.bitext, 'save_path': args.output, 'batch_size': 128,\n 'epochs': 50, 'device': args.device, 'hiddens': [8], 'src_lang': args.\n source, 'trg_lang': args.target}"], {}), "({'test_set': args.test, 'tuning_set': args.train,\n 'align_path': args.align, 'bitext_path': args.bitext, 'save_path': args\n .output, 'batch_size': 128, 'epochs': 50, 'device': args.device,\n 'hiddens': [8], 'src_lang': args.source, 'trg_lang': args.target})\n", (14124, 14390), False, 'import dotdict\n'), ((880, 902), 'torch.load', 'torch.load', (['stats_path'], {}), '(stats_path)\n', (890, 902), False, 'import torch\n'), ((929, 973), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (952, 973), False, 'import collections\n'), ((1003, 1047), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (1026, 1047), False, 'import collections\n'), ((1072, 1116), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (1095, 1116), False, 'import collections\n'), ((1134, 1163), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1161, 1163), False, 'import tempfile\n'), ((1172, 1230), 'os.system', 'os.system', (['f"""cat {bitext_path} > {tmpdir.name}/bitext.txt"""'], {}), "(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')\n", (1181, 1230), False, 'import os\n'), ((1239, 1296), 'os.system', 'os.system', (['f"""cat 
{align_path} > {tmpdir.name}/aligns.txt"""'], {}), "(f'cat {align_path} > {tmpdir.name}/aligns.txt')\n", (1248, 1296), False, 'import os\n'), ((1504, 1516), 'tqdm.tqdm', 'tqdm', (['bitext'], {}), '(bitext)\n', (1508, 1516), False, 'from tqdm import tqdm\n'), ((2658, 2724), 'torch.save', 'torch.save', (['(coocc, semi_matched_coocc, matched_coocc)', 'stats_path'], {}), '((coocc, semi_matched_coocc, matched_coocc), stats_path)\n', (2668, 2724), False, 'import torch\n'), ((2788, 2809), 'torch.load', 'torch.load', (['freq_path'], {}), '(freq_path)\n', (2798, 2809), False, 'import torch\n'), ((2839, 2860), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2858, 2860), False, 'import collections\n'), ((2880, 2901), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2899, 2901), False, 'import collections\n'), ((2919, 2948), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2946, 2948), False, 'import tempfile\n'), ((2957, 3015), 'os.system', 'os.system', (['f"""cat {bitext_path} > {tmpdir.name}/bitext.txt"""'], {}), "(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')\n", (2966, 3015), False, 'import os\n'), ((3118, 3130), 'tqdm.tqdm', 'tqdm', (['bitext'], {}), '(bitext)\n', (3122, 3130), False, 'from tqdm import tqdm\n'), ((3724, 3767), 'torch.save', 'torch.save', (['(freq_src, freq_trg)', 'freq_path'], {}), '((freq_src, freq_trg), freq_path)\n', (3734, 3767), False, 'import torch\n'), ((6384, 6432), 'torch.cat', 'torch.cat', (['[cos_sim, dot_prod, features]'], {'dim': '(-1)'}), '([cos_sim, dot_prod, features], dim=-1)\n', (6393, 6432), False, 'import torch\n'), ((7111, 7129), 'chinese_converter.to_simplified', 'to_simplified', (['tsw'], {}), '(tsw)\n', (7124, 7129), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((8733, 8751), 'chinese_converter.to_simplified', 'to_simplified', (['tsw'], {}), '(tsw)\n', (8746, 8751), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((9163, 9184), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (9182, 9184), False, 'import collections\n'), ((9231, 9257), 'tqdm.tqdm', 'tqdm', (['possible_predictions'], {}), '(possible_predictions)\n', (9235, 9257), False, 'from tqdm import tqdm\n'), ((11439, 11465), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (11453, 11465), False, 'import os\n'), ((11564, 11592), 'random.shuffle', 'random.shuffle', (['training_set'], {}), '(training_set)\n', (11578, 11592), False, 'import random\n'), ((2190, 2232), 'collections.Counter', 'collections.Counter', (['[x[0] for x in align]'], {}), '([x[0] for x in align])\n', (2209, 2232), False, 'import collections\n'), ((2255, 2297), 'collections.Counter', 'collections.Counter', (['[x[1] for x in align]'], {}), '([x[1] for x in align])\n', (2274, 2297), False, 'import collections\n'), ((7191, 7210), 'chinese_converter.to_traditional', 'to_traditional', (['stw'], {}), '(stw)\n', (7205, 7210), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((8814, 8833), 'chinese_converter.to_traditional', 'to_traditional', (['stw'], {}), '(stw)\n', (8828, 8833), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((10508, 10527), 'chinese_converter.to_simplified', 'to_simplified', (['x[0]'], {}), '(x[0])\n', (10521, 10527), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((10529, 10548), 'chinese_converter.to_simplified', 'to_simplified', (['x[1]'], {}), '(x[1])\n', (10542, 10548), False, 
'from chinese_converter import to_traditional, to_simplified\n'), ((11175, 11215), 'models.LexiconInducer', 'LexiconInducer', (['(7)', 'configs.hiddens', '(1)', '(5)'], {}), '(7, configs.hiddens, 1, 5)\n', (11189, 11215), False, 'from models import CRISSWrapper, LexiconInducer\n'), ((1959, 1982), 'chinese_converter.to_simplified', 'to_simplified', (['src_sent'], {}), '(src_sent)\n', (1972, 1982), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((2046, 2069), 'chinese_converter.to_simplified', 'to_simplified', (['trg_sent'], {}), '(trg_sent)\n', (2059, 2069), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((3461, 3484), 'chinese_converter.to_simplified', 'to_simplified', (['src_sent'], {}), '(src_sent)\n', (3474, 3484), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((3548, 3571), 'chinese_converter.to_simplified', 'to_simplified', (['trg_sent'], {}), '(trg_sent)\n', (3561, 3571), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((4248, 4266), 'chinese_converter.to_simplified', 'to_simplified', (['tsw'], {}), '(tsw)\n', (4261, 4266), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((4926, 4944), 'chinese_converter.to_simplified', 'to_simplified', (['tsw'], {}), '(tsw)\n', (4939, 4944), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((11511, 11533), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (11521, 11533), False, 'import torch\n'), ((12067, 12079), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (12077, 12079), True, 'import torch.nn as nn\n'), ((4433, 4452), 'chinese_converter.to_traditional', 'to_traditional', (['stw'], {}), '(stw)\n', (4447, 4452), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((1818, 1839), 'json.loads', 'json.loads', (['aligns[i]'], {}), '(aligns[i])\n', (1828, 1839), False, 'import json\n'), ((6006, 6154), 'torch.tensor', 'torch.tensor', (['[[matched_coocc[x[0]][x[1]], semi_matched_coocc[x[0]][x[1]], coocc[x[0]][x[\n 1]], freq_src[x[0]], freq_trg[x[1]]] for x in subbatch]'], {}), '([[matched_coocc[x[0]][x[1]], semi_matched_coocc[x[0]][x[1]],\n coocc[x[0]][x[1]], freq_src[x[0]], freq_trg[x[1]]] for x in subbatch])\n', (6018, 6154), False, 'import torch\n')]
|
# Author: <NAME>
# Shanghai Jiao Tong University
# Code adapted from PointNetVlad code: https://github.com/jac99/MinkLoc3D.git
# Train on Oxford dataset (from PointNetVLAD paper) using BatchHard hard negative mining.
import os
import numpy as np
import open3d as o3d
import torch
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluate import evaluate
from loss.d_loss import make_d_loss
from loss.metric_loss import make_loss
from misc.log import log_dir, log_string, reg_log_dir
from misc.utils import MinkLocParams
from models.model_factory import model_factory, load_weights
from training.optimizer_factory import optimizer_factory, scheduler_factory
from training.reg_train import trainVCRNet, testVCRNet
class IOStream:
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
"""print to cmd and save to a txt file at the same time
"""
print(text)
self.f.write(text + '\n')
self.f.flush()
def close(self):
self.f.close()
def tensors_to_numbers(stats):
stats = {e: stats[e].item() if torch.is_tensor(stats[e]) else stats[e] for e in stats}
return stats
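# Example (hypothetical values): tensors_to_numbers({'loss': torch.tensor(0.5), 'epoch': 3})
# returns {'loss': 0.5, 'epoch': 3} -- tensors become plain Python numbers, other values pass through.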
class Trainer:
def __init__(self, dataloaders, params: MinkLocParams, checkpoint="", debug=False, visualize=False):
log_string('Model name: {}'.format(params.model_params.model))
self.params = params
self.eval_simple = params.eval_simple
self.lamda = params.lamda
self.lamda_reg = params.lamda_reg
self.domain_adapt = params.domain_adapt
if params.domain_adapt:
self.lamda_gd = params.lamda_gd
self.lamda_d = params.lamda_d
self.repeat_g = params.repeat_g
self.visualize = visualize
self.model, self.device, self.d_model, self.vcr_model = model_factory(self.params)
self.loss_fn = make_loss(self.params)
self.loss_d = make_d_loss(self.params) if self.domain_adapt else None
self.optimizer, self.optimizer_d = optimizer_factory(self.params, self.model, self.d_model)
self.scheduler, self.scheduler_d = scheduler_factory(self.params, self.optimizer, self.optimizer_d)
self.resume = False
if checkpoint == "":
from glob import glob
checkpoint_set = sorted(glob(os.path.join(log_dir, "weights/*")))
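            # The loop below zero-pads the step number in each filename so that a lexicographic
            # sort matches numeric order, then strips the padding from the newest checkpoint.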
if len(checkpoint_set) == 0:
checkpoint = ""
else:
for i in range(len(checkpoint_set)):
dir, filename = os.path.split(checkpoint_set[i])
num = filename[filename.find('-') + 1:filename.find('.')]
checkpoint_set[i] = os.path.join(dir, filename.replace(num, num.zfill(3)))
checkpoint = sorted(checkpoint_set)[-1]
dir, filename = os.path.split(checkpoint)
num = filename[filename.find('-') + 1:filename.find('.')]
checkpoint = os.path.join(dir, filename.replace(num, str(int(num))))
self.resume = True
self.starting_epoch = load_weights(checkpoint, self.model, self.optimizer, self.scheduler)
self.writer = SummaryWriter(log_dir)
self.dataloaders_train = dataloaders['train']
self.total_train = len(self.dataloaders_train) if not debug else 2
if 'val' in dataloaders:
            self.dataloaders_val = dataloaders['val']
            self.total_val = len(self.dataloaders_val)
else:
self.dataloaders_val = None
if params.is_register:
self.params.reg.log_dir = reg_log_dir
self.boardio = SummaryWriter(reg_log_dir)
self.textio = IOStream(os.path.join(reg_log_dir, "run.log"))
self.dataloaders_train_reg = dataloaders['reg_train']
self.dataloaders_test_reg = dataloaders['reg_test']
def do_train(self):
if self.params.is_register:
if not self.resume:
log_string("***********start trainVCRNet*************")
trainVCRNet(self.params.reg, self.vcr_model, self.dataloaders_train_reg, self.dataloaders_test_reg,
self.boardio, self.textio)
saved_state_dict = self.vcr_model.state_dict()
                model_state_dict = self.model.state_dict()  # state_dict of the already-created net
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_state_dict}
model_state_dict.update(saved_state_dict)
self.model.load_state_dict(model_state_dict, strict=True)
self.optimizer, self.optimizer_d = optimizer_factory(self.params, self.model, self.d_model)
self.scheduler, self.scheduler_d = scheduler_factory(self.params, self.optimizer, self.optimizer_d)
self.params.lpd_fixed = True
self.params.is_register = True
self.params.lamda_reg = 0
self.count_samples = 0
self.count_batch = 0
log_string("***********start train vLPD-Net*************")
for epoch in range(self.starting_epoch, self.params.epochs + 1):
if self.domain_adapt:
epoch_stats = self.train_one_epoch_da(epoch)
else:
epoch_stats = self.train_one_epoch(epoch)
self.update_statics(epoch, epoch_stats, 'train')
if self.scheduler is not None:
self.scheduler.step()
# if not self.eval_simple:
# return
eval_stats = self.save_every_epoch(log_dir, epoch)
            # Print and record eval_stats
for database_name in eval_stats:
self.writer.add_scalar('evaluation_{}/Avg. top 1 recall every epoch'.format(database_name),
eval_stats[database_name]['ave_recall'][0], epoch)
self.writer.add_scalar('evaluation_{}/Avg. top 1% recall every epoch'.format(database_name),
eval_stats[database_name]['ave_one_percent_recall'], epoch)
self.writer.add_scalar('evaluation_{}/Avg. similarity recall every epoch'.format(database_name),
eval_stats[database_name]['average_similarity'], epoch)
def prepare_data(self):
        batch, self.positives_mask, self.negatives_mask, da_batch, R_gt, t_gt, source_batch, gt_T = next(self.dataset_iter)
self.source_xyz = source_batch['cloud'].to(self.device) if self.params.is_register else source_batch['cloud']
self.target_xyz = batch['cloud'].to(self.device)
# self.visual_pcl_simple(self.target_xyz[0], da_batch['cloud'].to(self.device)[0], "1")
self.R_gt = R_gt.to(self.device)
self.t_gt = t_gt.to(self.device)
self.gt_T = gt_T.to(self.device)
        # Move tensors to the device; 'coords' must stay on the CPU.
        self.target_batch = {e: batch[e].to(self.device) if e != 'coords' else batch[e] for e in batch}
        self.da_batch = {e: da_batch[e].to(self.device) if e != 'coords' else da_batch[e] for e in
                         da_batch} if da_batch is not None else None
        self.source_batch = {e: source_batch[e].to(self.device)
                             if self.params.is_register and e != 'coords' else source_batch[e]
                             for e in source_batch}
n_positives = torch.sum(self.positives_mask).item()
n_negatives = torch.sum(self.negatives_mask).item()
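        # Metric learning needs at least one positive and one negative pair per batch;
        # returning False tells the training loop to skip this degenerate batch.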
if n_positives == 0 or n_negatives == 0:
return False
else:
return True
def train_one_epoch(self, epoch):
self.model.train()
self.model.emb_nn.eval()
all_stats_in_epoch = [] # running stats for the current epoch
self.rotations_ab = []
self.translations_ab = []
self.rotations_ab_pred = []
self.translations_ab_pred = []
        self.dataset_iter = iter(self.dataloaders_train)
self.total_train = len(self.dataset_iter)
torch.cuda.empty_cache() # Prevent excessive GPU memory consumption by SparseTensors
for i in tqdm(range(self.total_train)):
try:
if not self.prepare_data():
continue
except ValueError:
log_string('dataloader error.')
continue
# Move everything to the device except 'coords' which must stay on CPU
self.temp_stats = {}
self.optimizer.zero_grad()
metric_loss, out_states = self.metric_train(self.source_batch, self.target_batch, self.positives_mask,
self.negatives_mask, self.gt_T)
loss = self.lamda * metric_loss
loss.backward()
self.optimizer.step()
temp_stats = tensors_to_numbers(self.temp_stats)
all_stats_in_epoch.append(temp_stats)
self.count_samples = self.count_samples + self.negatives_mask.shape[0]
self.count_batch += self.negatives_mask.shape[0]
for key, value in temp_stats.items():
self.writer.add_scalar('{}/batch_train'.format(key), value, self.count_samples)
# Compute mean stats for the epoch
epoch_stats = {'epoch': epoch}
for key in all_stats_in_epoch[0].keys():
temp = [e[key] for e in all_stats_in_epoch]
epoch_stats[key] = np.mean(temp)
return epoch_stats
def visual_pcl_simple(self, pcl1, pcl2, name='Open3D Origin'):
pcl1 = pcl1.detach().cpu().numpy()
pcl2 = pcl2.detach().cpu().numpy()
pcd1 = o3d.geometry.PointCloud()
pcd1.points = o3d.utility.Vector3dVector(pcl1[:, :3])
pcd2 = o3d.geometry.PointCloud()
pcd2.points = o3d.utility.Vector3dVector(pcl2[:, :3])
pcd1.paint_uniform_color([1, 0.706, 0])
pcd2.paint_uniform_color([0, 0.651, 0.929])
o3d.visualization.draw_geometries([pcd1, pcd2], window_name=name, width=1920, height=1080,
left=50,
top=50,
point_show_normal=False, mesh_show_wireframe=False,
mesh_show_back_face=False)
def metric_train(self, source_batch, target_batch, positives_mask, negatives_mask, gt_T):
# Compute embeddings of all elements
out_states = self.model(source_batch, target_batch, gt_T)
if self.lamda <= 0:
return 0.0, out_states
metric_loss, temp_stats, self.hard_triplets = self.loss_fn(out_states['embeddings'], positives_mask,
negatives_mask)
self.temp_stats.update(temp_stats)
self.temp_stats.update({'metric_loss_lamda': self.lamda * metric_loss.detach().cpu().item()})
return metric_loss, out_states
def backward_G(self, batch, positives_mask, negatives_mask):
self.temp_stats = {}
self.optimizer.zero_grad()
out_states = self.model(None, batch, None)
metric_loss, temp_stats, self.hard_triplets = self.loss_fn(out_states['embeddings'], positives_mask,
negatives_mask)
adv_gen_loss = self.adv_gen_train(out_states['embeddings'])
loss = self.lamda * metric_loss + self.lamda_gd * adv_gen_loss
loss.backward()
self.optimizer.step()
def adv_gen_train(self, embeddings):
if not self.domain_adapt:
return 0.0
pred_syn = self.d_model(embeddings)
adv_gen_loss, temp_stats = self.loss_d(pred_syn, self.hard_triplets)
self.temp_stats.update(temp_stats)
return adv_gen_loss
def backward_D(self, batch, da_batch, positives_mask, negatives_mask):
if not self.domain_adapt:
return
self.optimizer_d.zero_grad()
# Compute embeddings of all elements
out_states = self.model(None, batch, None)
out_states_da = self.model(None, da_batch, None)
pred_syn = self.d_model(out_states['embeddings'])
pred_real = self.d_model(out_states_da['embeddings'])
# embeddings [B,256]
d_loss, d_stats = self.loss_d(pred_syn, self.hard_triplets, pred_real)
self.temp_stats.update(d_stats)
d_loss = self.lamda_d * d_loss
d_loss.backward()
self.optimizer_d.step()
def update_statics(self, epoch, epoch_stats, phase):
log_string('{} epoch {}. '.format(phase, epoch_stats['epoch']), end='')
for key in epoch_stats:
if key != 'epoch':
log_string("{}: {}. ".format(key, epoch_stats[key]), end='')
self.writer.add_scalar('{}/epoch_{}/'.format(key, phase), epoch_stats[key], epoch)
log_string('')
if self.params.batch_expansion_th is not None:
# Dynamic batch expansion
epoch_train_stats = epoch_stats
if 'num_non_zero_triplets' not in epoch_train_stats:
pass
# log_string('WARNING: Batch size expansion is enabled, but the loss function is not supported')
else:
# Ratio of non-zero triplets
rnz = epoch_train_stats['num_non_zero_triplets'] / epoch_train_stats['num_triplets']
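                # e.g., with batch_expansion_th=0.7 (hypothetical), the batch is expanded once
                # fewer than 70% of the mined triplets still yield a non-zero loss.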
if rnz < self.params.batch_expansion_th:
self.dataloaders_train.batch_sampler.expand_batch()
if phase != 'train':
return
eval_stats = self.save_every_epoch(log_dir, epoch)
        # Print and record eval_stats
for database_name in eval_stats:
self.writer.add_scalar('evaluation_{}/Avg. top 1 recall every epoch'.format(database_name),
eval_stats[database_name]['ave_recall'][0], epoch)
self.writer.add_scalar('evaluation_{}/Avg. top 1% recall every epoch'.format(database_name),
eval_stats[database_name]['ave_one_percent_recall'], epoch)
self.writer.add_scalar('evaluation_{}/Avg. similarity recall every epoch'.format(database_name),
eval_stats[database_name]['average_similarity'], epoch)
    def save_batch_model(self, batch, epoch):
        return  # Per-batch checkpointing is intentionally disabled; drop this early return to re-enable it.
weights_path = os.path.join(log_dir, 'weights')
if not os.path.exists(weights_path):
os.mkdir(weights_path)
model_path = self.params.model_params.model + '-{}_{}.pth'.format(epoch, batch)
model_path = os.path.join(weights_path, model_path)
if isinstance(self.model, torch.nn.DataParallel):
model_to_save = self.model.module
else:
model_to_save = self.model
        tqdm.write('Model saved. Epoch: {}, Batch: {}'.format(epoch, batch))
torch.save({
'epoch': epoch,
'state_dict': model_to_save.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
}, model_path)
def save_every_epoch(self, log_dir, epoch):
# Save final model weights
weights_path = os.path.join(log_dir, 'weights')
if not os.path.exists(weights_path):
os.mkdir(weights_path)
model_path = self.params.model_params.model + '-{}.pth'.format(epoch)
model_path = os.path.join(weights_path, model_path)
if isinstance(self.model, torch.nn.DataParallel):
model_to_save = self.model.module
else:
model_to_save = self.model
torch.save({
'epoch': epoch,
'state_dict': model_to_save.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
}, model_path)
# Evaluate the final model
# if self.eval_simple:
# return {}
self.model.eval()
eval_stats = evaluate(self.model, self.device, self.params)
return eval_stats
def train_one_epoch_da(self, epoch):
self.model.train()
all_stats_in_epoch = [] # running stats for the current epoch
        self.dataset_iter = iter(self.dataloaders_train)
self.total_train = len(self.dataset_iter)
torch.cuda.empty_cache() # Prevent excessive GPU memory consumption by SparseTensors
for i in tqdm(range(self.total_train)):
try:
if not self.prepare_data():
continue
except ValueError:
log_string('dataloader error.')
continue
# Move everything to the device except 'coords' which must stay on CPU
self.backward_G(self.target_batch, self.positives_mask, self.negatives_mask)
self.backward_D(self.target_batch, self.da_batch, self.positives_mask, self.negatives_mask)
if self.count_batch > 2000:
self.save_batch_model(self.count_batch, epoch)
# evaluate(self.model, self.device, self.params)
self.count_batch -= 2000
temp_stats = tensors_to_numbers(self.temp_stats)
all_stats_in_epoch.append(temp_stats)
self.count_samples = self.count_samples + self.negatives_mask.shape[0]
self.count_batch += self.negatives_mask.shape[0]
for key, value in temp_stats.items():
self.writer.add_scalar('{}/batch_train'.format(key), value, self.count_samples)
# Compute mean stats for the epoch
epoch_stats = {'epoch': epoch}
for key in all_stats_in_epoch[0].keys():
temp = [e[key] for e in all_stats_in_epoch]
epoch_stats[key] = np.mean(temp)
return epoch_stats
|
[
"evaluate.evaluate"
] |
[((1851, 1877), 'models.model_factory.model_factory', 'model_factory', (['self.params'], {}), '(self.params)\n', (1864, 1877), False, 'from models.model_factory import model_factory, load_weights\n'), ((1902, 1924), 'loss.metric_loss.make_loss', 'make_loss', (['self.params'], {}), '(self.params)\n', (1911, 1924), False, 'from loss.metric_loss import make_loss\n'), ((2048, 2104), 'training.optimizer_factory.optimizer_factory', 'optimizer_factory', (['self.params', 'self.model', 'self.d_model'], {}), '(self.params, self.model, self.d_model)\n', (2065, 2104), False, 'from training.optimizer_factory import optimizer_factory, scheduler_factory\n'), ((2149, 2213), 'training.optimizer_factory.scheduler_factory', 'scheduler_factory', (['self.params', 'self.optimizer', 'self.optimizer_d'], {}), '(self.params, self.optimizer, self.optimizer_d)\n', (2166, 2213), False, 'from training.optimizer_factory import optimizer_factory, scheduler_factory\n'), ((3110, 3178), 'models.model_factory.load_weights', 'load_weights', (['checkpoint', 'self.model', 'self.optimizer', 'self.scheduler'], {}), '(checkpoint, self.model, self.optimizer, self.scheduler)\n', (3122, 3178), False, 'from models.model_factory import model_factory, load_weights\n'), ((3202, 3224), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (3215, 3224), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5029, 5087), 'misc.log.log_string', 'log_string', (['"""***********start train vLPD-Net*************"""'], {}), "('***********start train vLPD-Net*************')\n", (5039, 5087), False, 'from misc.log import log_dir, log_string, reg_log_dir\n'), ((7960, 7984), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7982, 7984), False, 'import torch\n'), ((9589, 9614), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (9612, 9614), True, 'import open3d as o3d\n'), ((9637, 9676), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pcl1[:, :3]'], {}), '(pcl1[:, :3])\n', (9663, 9676), True, 'import open3d as o3d\n'), ((9692, 9717), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (9715, 9717), True, 'import open3d as o3d\n'), ((9740, 9779), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pcl2[:, :3]'], {}), '(pcl2[:, :3])\n', (9766, 9779), True, 'import open3d as o3d\n'), ((9888, 10083), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[pcd1, pcd2]'], {'window_name': 'name', 'width': '(1920)', 'height': '(1080)', 'left': '(50)', 'top': '(50)', 'point_show_normal': '(False)', 'mesh_show_wireframe': '(False)', 'mesh_show_back_face': '(False)'}), '([pcd1, pcd2], window_name=name, width=\n 1920, height=1080, left=50, top=50, point_show_normal=False,\n mesh_show_wireframe=False, mesh_show_back_face=False)\n', (9921, 10083), True, 'import open3d as o3d\n'), ((12833, 12847), 'misc.log.log_string', 'log_string', (['""""""'], {}), "('')\n", (12843, 12847), False, 'from misc.log import log_dir, log_string, reg_log_dir\n'), ((14331, 14363), 'os.path.join', 'os.path.join', (['log_dir', '"""weights"""'], {}), "(log_dir, 'weights')\n", (14343, 14363), False, 'import os\n'), ((14553, 14591), 'os.path.join', 'os.path.join', (['weights_path', 'model_path'], {}), '(weights_path, model_path)\n', (14565, 14591), False, 'import os\n'), ((15164, 15196), 'os.path.join', 'os.path.join', (['log_dir', '"""weights"""'], {}), "(log_dir, 'weights')\n", (15176, 15196), False, 'import os\n'), 
((15376, 15414), 'os.path.join', 'os.path.join', (['weights_path', 'model_path'], {}), '(weights_path, model_path)\n', (15388, 15414), False, 'import os\n'), ((15946, 15992), 'evaluate.evaluate', 'evaluate', (['self.model', 'self.device', 'self.params'], {}), '(self.model, self.device, self.params)\n', (15954, 15992), False, 'from evaluate import evaluate\n'), ((16282, 16306), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (16304, 16306), False, 'import torch\n'), ((1119, 1144), 'torch.is_tensor', 'torch.is_tensor', (['stats[e]'], {}), '(stats[e])\n', (1134, 1144), False, 'import torch\n'), ((1948, 1972), 'loss.d_loss.make_d_loss', 'make_d_loss', (['self.params'], {}), '(self.params)\n', (1959, 1972), False, 'from loss.d_loss import make_d_loss\n'), ((3667, 3693), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['reg_log_dir'], {}), '(reg_log_dir)\n', (3680, 3693), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((9378, 9391), 'numpy.mean', 'np.mean', (['temp'], {}), '(temp)\n', (9385, 9391), True, 'import numpy as np\n'), ((14379, 14407), 'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (14393, 14407), False, 'import os\n'), ((14421, 14443), 'os.mkdir', 'os.mkdir', (['weights_path'], {}), '(weights_path)\n', (14429, 14443), False, 'import os\n'), ((15212, 15240), 'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (15226, 15240), False, 'import os\n'), ((15254, 15276), 'os.mkdir', 'os.mkdir', (['weights_path'], {}), '(weights_path)\n', (15262, 15276), False, 'import os\n'), ((17720, 17733), 'numpy.mean', 'np.mean', (['temp'], {}), '(temp)\n', (17727, 17733), True, 'import numpy as np\n'), ((2859, 2884), 'os.path.split', 'os.path.split', (['checkpoint'], {}), '(checkpoint)\n', (2872, 2884), False, 'import os\n'), ((3729, 3765), 'os.path.join', 'os.path.join', (['reg_log_dir', '"""run.log"""'], {}), "(reg_log_dir, 'run.log')\n", (3741, 3765), False, 'import os\n'), ((4006, 4061), 'misc.log.log_string', 'log_string', (['"""***********start trainVCRNet*************"""'], {}), "('***********start trainVCRNet*************')\n", (4016, 4061), False, 'from misc.log import log_dir, log_string, reg_log_dir\n'), ((4078, 4208), 'training.reg_train.trainVCRNet', 'trainVCRNet', (['self.params.reg', 'self.vcr_model', 'self.dataloaders_train_reg', 'self.dataloaders_test_reg', 'self.boardio', 'self.textio'], {}), '(self.params.reg, self.vcr_model, self.dataloaders_train_reg,\n self.dataloaders_test_reg, self.boardio, self.textio)\n', (4089, 4208), False, 'from training.reg_train import trainVCRNet, testVCRNet\n'), ((4665, 4721), 'training.optimizer_factory.optimizer_factory', 'optimizer_factory', (['self.params', 'self.model', 'self.d_model'], {}), '(self.params, self.model, self.d_model)\n', (4682, 4721), False, 'from training.optimizer_factory import optimizer_factory, scheduler_factory\n'), ((4773, 4837), 'training.optimizer_factory.scheduler_factory', 'scheduler_factory', (['self.params', 'self.optimizer', 'self.optimizer_d'], {}), '(self.params, self.optimizer, self.optimizer_d)\n', (4790, 4837), False, 'from training.optimizer_factory import optimizer_factory, scheduler_factory\n'), ((7316, 7346), 'torch.sum', 'torch.sum', (['self.positives_mask'], {}), '(self.positives_mask)\n', (7325, 7346), False, 'import torch\n'), ((7376, 7406), 'torch.sum', 'torch.sum', (['self.negatives_mask'], {}), '(self.negatives_mask)\n', (7385, 7406), False, 'import torch\n'), ((2347, 2381), 'os.path.join', 
'os.path.join', (['log_dir', '"""weights/*"""'], {}), "(log_dir, 'weights/*')\n", (2359, 2381), False, 'import os\n'), ((2564, 2596), 'os.path.split', 'os.path.split', (['checkpoint_set[i]'], {}), '(checkpoint_set[i])\n', (2577, 2596), False, 'import os\n'), ((8231, 8262), 'misc.log.log_string', 'log_string', (['"""dataloader error."""'], {}), "('dataloader error.')\n", (8241, 8262), False, 'from misc.log import log_dir, log_string, reg_log_dir\n'), ((16553, 16584), 'misc.log.log_string', 'log_string', (['"""dataloader error."""'], {}), "('dataloader error.')\n", (16563, 16584), False, 'from misc.log import log_dir, log_string, reg_log_dir\n')]
|
"""Entry point for training HSD-scripted.
Trains high-level role assignment policy with environment reward
Trains low-level action policies with role-specific rewards given by environment
"""
import json
import os
import random
import sys
import time
sys.path.append('../env/')
import numpy as np
import tensorflow as tf
import alg_hsd_scripted
import alg_qmix
import env_wrapper
import evaluate
import replay_buffer
def train_function(config):
config_env = config['env']
config_main = config['main']
config_alg = config['alg']
config_h = config['h_params']
seed = config_main['seed']
np.random.seed(seed)
random.seed(seed)
tf.set_random_seed(seed)
alg_name = config_main['alg_name']
dir_name = config_main['dir_name']
model_name = config_main['model_name']
summarize = config_main['summarize']
save_period = config_main['save_period']
os.makedirs('../results/%s'%dir_name, exist_ok=True)
N_train = config_alg['N_train']
N_eval = config_alg['N_eval']
period = config_alg['period']
buffer_size = config_alg['buffer_size']
batch_size = config_alg['batch_size']
pretrain_episodes = config_alg['pretrain_episodes']
steps_per_train = config_alg['steps_per_train']
epsilon_start = config_alg['epsilon_start']
epsilon_end = config_alg['epsilon_end']
epsilon_div = config_alg['epsilon_div']
epsilon_step = (epsilon_start - epsilon_end)/float(epsilon_div)
epsilon = epsilon_start
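    # Linear decay: e.g., epsilon_start=0.5, epsilon_end=0.05, epsilon_div=9000 (hypothetical
    # values) gives a drop of 5e-5 per episode after pretraining, until epsilon_end is reached.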
N_roles = config_h['N_roles']
steps_per_assign = config_h['steps_per_assign']
# Each <steps_per_assign> is one "step" for the high-level policy
# This means we train the high-level policy once for every
# <steps_per_train> high-level steps
steps_per_train_h = steps_per_assign * steps_per_train
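    # e.g., steps_per_assign=10 and steps_per_train=4 (hypothetical values) means roles are
    # reassigned every 10 env steps and the high-level policy trains every 40 env steps.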
env = env_wrapper.Env(config_env, config_main)
l_state = env.state_dim
l_action = env.action_dim
l_obs = env.obs_dim
N_home = config_env['num_home_players']
    if alg_name == 'qmix':
alg = alg_qmix.Alg(config_alg, N_home, l_state, l_obs, l_action, config['nn_qmix'])
    elif alg_name == 'hsd-scripted' or alg_name == 'mara-c':
        alg = alg_hsd_scripted.Alg(alg_name, config_alg, N_home, l_state, l_obs, l_action, N_roles, config['nn_hsd_scripted'])
    else:
        raise ValueError("Unknown alg_name: %s" % alg_name)
config_proto = tf.ConfigProto()
config_proto.gpu_options.allow_growth = True
sess = tf.Session(config=config_proto)
sess.run(tf.global_variables_initializer())
sess.run(alg.list_initialize_target_ops)
if summarize:
writer = tf.summary.FileWriter('../results/%s' % dir_name, sess.graph)
saver = tf.train.Saver(max_to_keep=config_main['max_to_keep'])
# Buffer for high level role assignment policy
buf_high = replay_buffer.Replay_Buffer(size=buffer_size)
# Buffer for low level agent policy
buf_low = replay_buffer.Replay_Buffer(size=buffer_size)
# Logging
header = "Episode,Step,Step_train,R_avg,R_eval,Steps_per_eps,Opp_win_rate,Win_rate,T_env,T_alg\n"
with open("../results/%s/log.csv" % dir_name, 'w') as f:
f.write(header)
t_start = time.time()
t_env = 0
t_alg = 0
reward_period = 0
step = 0
step_train = 0
step_h = 0
for idx_episode in range(1, N_train+1):
state_home, state_away, list_obs_home, list_obs_away, done = env.reset()
# Variables with suffix _h are high-level quantities for training the role assignment policy
# These are the high-level equivalent of the s_t in a usual transition tuple (s_t, a_t, s_{t+1})
state_home_h, state_away_h, list_obs_home_h, list_obs_away_h = state_home, state_away, list_obs_home, list_obs_away
# Cumulative discounted reward for high-level policy
reward_h = 0
# Action taken by high-level role assignment policy
roles_int = np.random.randint(0, N_roles, N_home)
reward_episode = 0
summarized = 0
summarized_h = 0
step_episode = 0 # steps within an episode
while not done:
if step_episode % steps_per_assign == 0:
if step_episode != 0:
# The environment state at this point, e.g. <state_home>,
# acts like the "next state" for the high-level policy
# All of the intervening environment steps act as a single step for the high-level policy
r_discounted = reward_h * (config_alg['gamma']**steps_per_assign)
if alg_name == 'hsd-scripted':
buf_high.add( np.array([ state_home_h, np.array(list_obs_home_h), roles_int, r_discounted, state_home, np.array(list_obs_home), done ]) )
elif alg_name == 'mara-c':
buf_high.add( np.array([ state_home_h, idx_action_centralized, r_discounted, state_home, done ]) )
step_h += 1
# Get new role assignment, i.e. take high-level action
if idx_episode < pretrain_episodes:
roles_int = np.random.randint(0, N_roles, N_home)
if alg_name == 'mara-c':
idx_action_centralized = np.random.randint(0, alg.dim_role_space)
else:
t_alg_start = time.time()
if alg_name == 'hsd-scripted':
roles_int = alg.assign_roles(list_obs_home, epsilon, sess)
elif alg_name == 'mara-c':
roles_int, idx_action_centralized = alg.assign_roles_centralized(state_home, epsilon, sess)
t_alg += time.time() - t_alg_start
roles = np.zeros([N_home, N_roles])
roles[np.arange(N_home), roles_int] = 1
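                # roles is now a one-hot matrix: row i has a single 1 in the column of agent i's assigned role.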
if (idx_episode >= pretrain_episodes) and (step_h % steps_per_train == 0):
# Conduct training of high-level policy
batch = buf_high.sample_batch(batch_size)
t_alg_start = time.time()
if summarize and idx_episode % period == 0 and not summarized_h:
alg.train_step(sess, batch, step_train, summarize=True, writer=writer)
summarized_h = True
else:
alg.train_step(sess, batch, step_train, summarize=False, writer=None)
step_train += 1
t_alg += time.time() - t_alg_start
# Update high-level state
state_home_h, state_away_h, list_obs_home_h, list_obs_away_h = state_home, state_away, list_obs_home, list_obs_away
reward_h = 0
# Take low-level actions, conditioned on roles
if idx_episode < pretrain_episodes:
actions_int = env.random_actions()
else:
t_alg_start = time.time()
actions_int = alg.run_actor(list_obs_home, roles, epsilon, sess)
t_alg += time.time() - t_alg_start
t_env_start = time.time()
state_home_next, state_away_next, list_obs_home_next, list_obs_away_next, reward, local_rewards, done, info = env.step(actions_int, roles_int)
t_env += time.time() - t_env_start
step += 1
step_episode += 1
l_temp = [np.array(list_obs_home), actions_int, local_rewards, np.array(list_obs_home_next), roles]
a_temp = np.empty(len(l_temp), dtype=object)
a_temp[:] = l_temp
buf_low.add( a_temp )
if (idx_episode >= pretrain_episodes) and (step % steps_per_train == 0):
# Train low-level policies
batch = buf_low.sample_batch(batch_size)
t_alg_start = time.time()
if summarize and idx_episode % period == 0 and not summarized:
alg.train_step_low(sess, batch, step_train, summarize=True, writer=writer)
summarized = True
else:
alg.train_step_low(sess, batch, step_train, summarize=False, writer=None)
step_train += 1
t_alg += time.time() - t_alg_start
state_home = state_home_next
list_obs_home = list_obs_home_next
reward_episode += reward
reward_h += reward
if done:
# Since the episode is done, we also terminate the current role assignment period,
# even if not all <steps_per_assign> have been completed
r_discounted = reward_h * config_alg['gamma']**(step_episode % steps_per_assign)
if alg_name == 'hsd-scripted':
buf_high.add( np.array([ state_home_h, np.array(list_obs_home_h), roles_int, r_discounted, state_home, np.array(list_obs_home), done]) )
elif alg_name == 'mara-c':
buf_high.add( np.array([ state_home_h, idx_action_centralized, r_discounted, state_home, done ]) )
if idx_episode >= pretrain_episodes and epsilon > epsilon_end:
epsilon -= epsilon_step
reward_period += reward_episode
if idx_episode == 1 or idx_episode % (5*period) == 0:
print('{:>10s}{:>10s}{:>12s}{:>8s}{:>8s}{:>15s}{:>15s}{:>10s}{:>12s}{:>12s}'.format(*(header.strip().split(','))))
if idx_episode % period == 0:
# Evaluation episodes
r_avg_eval, steps_per_episode, win_rate, win_rate_opponent = evaluate.test_hierarchy(alg_name, N_eval, env, sess, alg, steps_per_assign)
if win_rate >= config_main['save_threshold']:
saver.save(sess, '../results/%s/%s-%d' % (dir_name, "model_good.ckpt", idx_episode))
s = '%d,%d,%d,%.2f,%.2f,%d,%.2f,%.2f,%.5e,%.5e\n' % (idx_episode, step, step_train, reward_period/float(period), r_avg_eval, steps_per_episode, win_rate_opponent, win_rate, t_env, t_alg)
with open('../results/%s/log.csv' % dir_name, 'a') as f:
f.write(s)
print('{:10d}{:10d}{:12d}{:8.2f}{:8.2f}{:15d}{:15.2f}{:10.2f}{:12.5e}{:12.5e}\n'.format(idx_episode, step, step_train, reward_period/float(period), r_avg_eval, int(steps_per_episode), win_rate_opponent, win_rate, t_env, t_alg))
reward_period = 0
if idx_episode % save_period == 0:
saver.save(sess, '../results/%s/%s-%d' % (dir_name, "model.ckpt", idx_episode))
saver.save(sess, '../results/%s/%s' % (dir_name, model_name))
with open('../results/%s/time.txt' % dir_name, 'a') as f:
f.write('t_env_total,t_env_per_step,t_alg_total,t_alg_per_step\n')
f.write('%.5e,%.5e,%.5e,%.5e' % (t_env, t_env/step, t_alg, t_alg/step))
if __name__ == '__main__':
with open('config.json', 'r') as f:
config = json.load(f)
train_function(config)
|
[
"evaluate.test_hierarchy"
] |
[((254, 280), 'sys.path.append', 'sys.path.append', (['"""../env/"""'], {}), "('../env/')\n", (269, 280), False, 'import sys\n'), ((622, 642), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (636, 642), True, 'import numpy as np\n'), ((647, 664), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (658, 664), False, 'import random\n'), ((669, 693), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (687, 693), True, 'import tensorflow as tf\n'), ((915, 969), 'os.makedirs', 'os.makedirs', (["('../results/%s' % dir_name)"], {'exist_ok': '(True)'}), "('../results/%s' % dir_name, exist_ok=True)\n", (926, 969), False, 'import os\n'), ((1847, 1887), 'env_wrapper.Env', 'env_wrapper.Env', (['config_env', 'config_main'], {}), '(config_env, config_main)\n', (1862, 1887), False, 'import env_wrapper\n'), ((2370, 2386), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2384, 2386), True, 'import tensorflow as tf\n'), ((2447, 2478), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config_proto'}), '(config=config_proto)\n', (2457, 2478), True, 'import tensorflow as tf\n'), ((2691, 2745), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': "config_main['max_to_keep']"}), "(max_to_keep=config_main['max_to_keep'])\n", (2705, 2745), True, 'import tensorflow as tf\n'), ((2817, 2862), 'replay_buffer.Replay_Buffer', 'replay_buffer.Replay_Buffer', ([], {'size': 'buffer_size'}), '(size=buffer_size)\n', (2844, 2862), False, 'import replay_buffer\n'), ((2917, 2962), 'replay_buffer.Replay_Buffer', 'replay_buffer.Replay_Buffer', ([], {'size': 'buffer_size'}), '(size=buffer_size)\n', (2944, 2962), False, 'import replay_buffer\n'), ((3188, 3199), 'time.time', 'time.time', ([], {}), '()\n', (3197, 3199), False, 'import time\n'), ((2080, 2157), 'alg_qmix.Alg', 'alg_qmix.Alg', (['config_alg', 'N_home', 'l_state', 'l_obs', 'l_action', "config['nn_qmix']"], {}), "(config_alg, N_home, l_state, l_obs, l_action, config['nn_qmix'])\n", (2092, 2157), False, 'import alg_qmix\n'), ((2492, 2525), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2523, 2525), True, 'import tensorflow as tf\n'), ((2617, 2678), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["('../results/%s' % dir_name)", 'sess.graph'], {}), "('../results/%s' % dir_name, sess.graph)\n", (2638, 2678), True, 'import tensorflow as tf\n'), ((3934, 3971), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N_roles', 'N_home'], {}), '(0, N_roles, N_home)\n', (3951, 3971), True, 'import numpy as np\n'), ((10987, 10999), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10996, 10999), False, 'import json\n'), ((2233, 2349), 'alg_hsd_scripted.Alg', 'alg_hsd_scripted.Alg', (['alg_name', 'config_alg', 'N_home', 'l_state', 'l_obs', 'l_action', 'N_roles', "config['nn_hsd_scripted']"], {}), "(alg_name, config_alg, N_home, l_state, l_obs, l_action,\n N_roles, config['nn_hsd_scripted'])\n", (2253, 2349), False, 'import alg_hsd_scripted\n'), ((7178, 7189), 'time.time', 'time.time', ([], {}), '()\n', (7187, 7189), False, 'import time\n'), ((9654, 9729), 'evaluate.test_hierarchy', 'evaluate.test_hierarchy', (['alg_name', 'N_eval', 'env', 'sess', 'alg', 'steps_per_assign'], {}), '(alg_name, N_eval, env, sess, alg, steps_per_assign)\n', (9677, 9729), False, 'import evaluate\n'), ((5784, 5811), 'numpy.zeros', 'np.zeros', (['[N_home, N_roles]'], {}), '([N_home, N_roles])\n', (5792, 5811), True, 'import numpy as np\n'), ((6991, 7002), 'time.time', 
'time.time', ([], {}), '()\n', (7000, 7002), False, 'import time\n'), ((7366, 7377), 'time.time', 'time.time', ([], {}), '()\n', (7375, 7377), False, 'import time\n'), ((7476, 7499), 'numpy.array', 'np.array', (['list_obs_home'], {}), '(list_obs_home)\n', (7484, 7499), True, 'import numpy as np\n'), ((7529, 7557), 'numpy.array', 'np.array', (['list_obs_home_next'], {}), '(list_obs_home_next)\n', (7537, 7557), True, 'import numpy as np\n'), ((7908, 7919), 'time.time', 'time.time', ([], {}), '()\n', (7917, 7919), False, 'import time\n'), ((5167, 5204), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N_roles', 'N_home'], {}), '(0, N_roles, N_home)\n', (5184, 5204), True, 'import numpy as np\n'), ((5396, 5407), 'time.time', 'time.time', ([], {}), '()\n', (5405, 5407), False, 'import time\n'), ((6120, 6131), 'time.time', 'time.time', ([], {}), '()\n', (6129, 6131), False, 'import time\n'), ((7109, 7120), 'time.time', 'time.time', ([], {}), '()\n', (7118, 7120), False, 'import time\n'), ((8305, 8316), 'time.time', 'time.time', ([], {}), '()\n', (8314, 8316), False, 'import time\n'), ((5299, 5339), 'numpy.random.randint', 'np.random.randint', (['(0)', 'alg.dim_role_space'], {}), '(0, alg.dim_role_space)\n', (5316, 5339), True, 'import numpy as np\n'), ((5734, 5745), 'time.time', 'time.time', ([], {}), '()\n', (5743, 5745), False, 'import time\n'), ((5834, 5851), 'numpy.arange', 'np.arange', (['N_home'], {}), '(N_home)\n', (5843, 5851), True, 'import numpy as np\n'), ((6541, 6552), 'time.time', 'time.time', ([], {}), '()\n', (6550, 6552), False, 'import time\n'), ((9068, 9153), 'numpy.array', 'np.array', (['[state_home_h, idx_action_centralized, r_discounted, state_home, done]'], {}), '([state_home_h, idx_action_centralized, r_discounted, state_home, done]\n )\n', (9076, 9153), True, 'import numpy as np\n'), ((4878, 4963), 'numpy.array', 'np.array', (['[state_home_h, idx_action_centralized, r_discounted, state_home, done]'], {}), '([state_home_h, idx_action_centralized, r_discounted, state_home, done]\n )\n', (4886, 4963), True, 'import numpy as np\n'), ((8893, 8918), 'numpy.array', 'np.array', (['list_obs_home_h'], {}), '(list_obs_home_h)\n', (8901, 8918), True, 'import numpy as np\n'), ((8957, 8980), 'numpy.array', 'np.array', (['list_obs_home'], {}), '(list_obs_home)\n', (8965, 8980), True, 'import numpy as np\n'), ((4694, 4719), 'numpy.array', 'np.array', (['list_obs_home_h'], {}), '(list_obs_home_h)\n', (4702, 4719), True, 'import numpy as np\n'), ((4758, 4781), 'numpy.array', 'np.array', (['list_obs_home'], {}), '(list_obs_home)\n', (4766, 4781), True, 'import numpy as np\n')]
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, division
import argparse
import copy
import evaluate
import tensorflow as tf
from nlp_architect.data.fasttext_emb import FastTextEmb
from nlp_architect.models.crossling_emb import WordTranslator
from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size
if __name__ == "__main__":
print("\t\t" + 40 * "=")
print("\t\t= Unsupervised Crosslingual Embeddings =")
print("\t\t" + 40 * "=")
# Parsing arguments for model parameters
parser = argparse.ArgumentParser()
parser.add_argument(
"--emb_dim", type=int, default=300, help="Embedding Dimensions", action=check_size(1, 1024)
)
parser.add_argument(
"--vocab_size",
type=int,
default=200000,
help="Vocabulary Size",
action=check_size(1, 1000000),
)
parser.add_argument(
"--lr", type=float, default=0.1, help="Learning Rate", action=check_size(0.00001, 2.0)
)
parser.add_argument(
"--beta",
type=float,
default=0.001,
help="Beta for W orthogornaliztion",
action=check_size(0.0000001, 5.0),
)
parser.add_argument(
"--smooth_val",
type=float,
default=0.1,
help="Label smoother for\
discriminator",
action=check_size(0.0001, 0.2),
)
parser.add_argument(
"--batch_size", type=int, default=32, help="Batch size", action=check_size(8, 1024)
)
parser.add_argument(
"--epochs", type=int, default=5, help="Number of epochs to run", action=check_size(1, 20)
)
parser.add_argument(
"--iters_epoch",
type=int,
default=1000000,
help="Iterations to run\
each epoch",
action=check_size(1, 2000000),
)
parser.add_argument(
"--disc_runs",
type=int,
default=5,
help="Number of times\
discriminator is run each iteration",
action=check_size(1, 20),
)
parser.add_argument(
"--most_freq",
type=int,
default=75000,
help="Number of words to\
show discriminator",
action=check_size(1, 1000000),
)
parser.add_argument(
"--src_lang", type=str, default="en", help="Source Language", action=check_size(1, 3)
)
parser.add_argument(
"--tgt_lang", type=str, default="fr", help="Target Language", action=check_size(1, 3)
)
parser.add_argument(
"--data_dir",
default=None,
help="Data path for training and\
and evaluation data",
type=validate_existing_directory,
)
parser.add_argument(
"--eval_dir", default=None, help="Path for eval words", type=validate_existing_directory
)
parser.add_argument(
"--weight_dir",
default=None,
help="path to save mapping\
weights",
type=validate_parent_exists,
)
hparams = parser.parse_args()
# Load Source Embeddings
src = FastTextEmb(hparams.data_dir, hparams.src_lang, hparams.vocab_size)
src_dict, src_vec = src.load_embeddings()
# Load Target Embeddings
tgt = FastTextEmb(hparams.data_dir, hparams.tgt_lang, hparams.vocab_size)
tgt_dict, tgt_vec = tgt.load_embeddings()
# GAN instance
train_model = WordTranslator(hparams, src_vec, tgt_vec, hparams.vocab_size)
# Copy embeddings
src_vec_eval = copy.deepcopy(src_vec)
tgt_vec_eval = copy.deepcopy(tgt_vec)
# Evaluator instance
eval_model = evaluate.Evaluate(
train_model.generator.W,
src_vec_eval,
tgt_vec_eval,
src_dict,
tgt_dict,
hparams.src_lang,
hparams.tgt_lang,
hparams.eval_dir,
hparams.vocab_size,
)
# Tensorflow session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
local_lr = hparams.lr
for epoch in range(hparams.epochs):
# Train the model
train_model.run(sess, local_lr)
# Evaluate using nearest neighbors measure
eval_model.calc_nn_acc(sess)
# Evaluate using CSLS similarity measure
eval_model.run_csls_metrics(sess)
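            # CSLS (Conneau et al., 2018): CSLS(x, y) = 2*cos(x, y) - r_T(x) - r_S(y), where
            # r(.) is the mean cosine similarity to the K nearest neighbors in the other
            # space; it penalizes hub words relative to plain nearest-neighbor retrieval.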
# Drop learning rate
local_lr = train_model.set_lr(local_lr, eval_model.drop_lr)
# Save model if it is good
train_model.save_model(eval_model.save_model, sess)
print("End of epoch " + str(epoch))
# Apply procrustes to improve CSLS score
final_pairs = eval_model.generate_dictionary(sess, dict_type="S2T&T2S")
train_model.apply_procrustes(sess, final_pairs)
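        # Procrustes refinement has a closed form: for paired d x n embedding matrices X, Y,
        # the orthogonal W minimizing ||W X - Y||_F is W = U V^T. A minimal NumPy sketch
        # (x, y are assumed, hypothetical variables):
        #     u, s, vt = np.linalg.svd(y @ x.T)
        #     w = u @ vt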
# Run metrics to see improvement
eval_model.run_csls_metrics(sess)
        # Save the model if there is improvement
train_model.save_model(eval_model.save_model, sess)
# Write cross lingual embeddings to file
train_model.generate_xling_embed(sess, src_dict, tgt_dict, tgt_vec)
print("Completed Training")
|
[
"evaluate.Evaluate"
] |
[((1288, 1313), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1311, 1313), False, 'import argparse\n'), ((3860, 3927), 'nlp_architect.data.fasttext_emb.FastTextEmb', 'FastTextEmb', (['hparams.data_dir', 'hparams.src_lang', 'hparams.vocab_size'], {}), '(hparams.data_dir, hparams.src_lang, hparams.vocab_size)\n', (3871, 3927), False, 'from nlp_architect.data.fasttext_emb import FastTextEmb\n'), ((4013, 4080), 'nlp_architect.data.fasttext_emb.FastTextEmb', 'FastTextEmb', (['hparams.data_dir', 'hparams.tgt_lang', 'hparams.vocab_size'], {}), '(hparams.data_dir, hparams.tgt_lang, hparams.vocab_size)\n', (4024, 4080), False, 'from nlp_architect.data.fasttext_emb import FastTextEmb\n'), ((4165, 4226), 'nlp_architect.models.crossling_emb.WordTranslator', 'WordTranslator', (['hparams', 'src_vec', 'tgt_vec', 'hparams.vocab_size'], {}), '(hparams, src_vec, tgt_vec, hparams.vocab_size)\n', (4179, 4226), False, 'from nlp_architect.models.crossling_emb import WordTranslator\n'), ((4269, 4291), 'copy.deepcopy', 'copy.deepcopy', (['src_vec'], {}), '(src_vec)\n', (4282, 4291), False, 'import copy\n'), ((4311, 4333), 'copy.deepcopy', 'copy.deepcopy', (['tgt_vec'], {}), '(tgt_vec)\n', (4324, 4333), False, 'import copy\n'), ((4377, 4550), 'evaluate.Evaluate', 'evaluate.Evaluate', (['train_model.generator.W', 'src_vec_eval', 'tgt_vec_eval', 'src_dict', 'tgt_dict', 'hparams.src_lang', 'hparams.tgt_lang', 'hparams.eval_dir', 'hparams.vocab_size'], {}), '(train_model.generator.W, src_vec_eval, tgt_vec_eval,\n src_dict, tgt_dict, hparams.src_lang, hparams.tgt_lang, hparams.\n eval_dir, hparams.vocab_size)\n', (4394, 4550), False, 'import evaluate\n'), ((4656, 4668), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4666, 4668), True, 'import tensorflow as tf\n'), ((1419, 1438), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(1024)'], {}), '(1, 1024)\n', (1429, 1438), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((1583, 1605), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(1000000)'], {}), '(1, 1000000)\n', (1593, 1605), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((1708, 1730), 'nlp_architect.utils.io.check_size', 'check_size', (['(1e-05)', '(2.0)'], {}), '(1e-05, 2.0)\n', (1718, 1730), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((1885, 1907), 'nlp_architect.utils.io.check_size', 'check_size', (['(1e-07)', '(5.0)'], {}), '(1e-07, 5.0)\n', (1895, 1907), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((2098, 2121), 'nlp_architect.utils.io.check_size', 'check_size', (['(0.0001)', '(0.2)'], {}), '(0.0001, 0.2)\n', (2108, 2121), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((2226, 2245), 'nlp_architect.utils.io.check_size', 'check_size', (['(8)', '(1024)'], {}), '(8, 1024)\n', (2236, 2245), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((2357, 2374), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(20)'], {}), '(1, 20)\n', (2367, 2374), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((2559, 2581), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(2000000)'], {}), '(1, 
2000000)\n', (2569, 2581), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((2782, 2799), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(20)'], {}), '(1, 20)\n', (2792, 2799), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((2990, 3012), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(1000000)'], {}), '(1, 1000000)\n', (3000, 3012), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((3122, 3138), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(3)'], {}), '(1, 3)\n', (3132, 3138), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((3247, 3263), 'nlp_architect.utils.io.check_size', 'check_size', (['(1)', '(3)'], {}), '(1, 3)\n', (3257, 3263), False, 'from nlp_architect.utils.io import validate_existing_directory, validate_parent_exists, check_size\n'), ((4695, 4728), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4726, 4728), True, 'import tensorflow as tf\n')]
|
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import BaseDataset
import torch
import torch.nn.functional as F
import time
import numpy as np
from config import model_name
from tqdm import tqdm
import os
from pathlib import Path
from evaluate import evaluate
import importlib
import datetime
try:
Model = getattr(importlib.import_module(f"model.{model_name}"), model_name)
Config = getattr(importlib.import_module('config'), f"{model_name}Config")
except (AttributeError, ModuleNotFoundError):
print(f"{model_name} not included!")
exit()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def latest_checkpoint(directory):
if not os.path.exists(directory):
return None
all_checkpoints = {
int(x.split('.')[-2].split('-')[-1]): x
for x in os.listdir(directory)
}
if not all_checkpoints:
return None
return os.path.join(directory,
all_checkpoints[max(all_checkpoints.keys())])
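# Example (hypothetical files): with ckpt-9.pth and ckpt-10.pth on disk,
# latest_checkpoint('./checkpoint/NRMS') returns './checkpoint/NRMS/ckpt-10.pth'.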
def train():
writer = SummaryWriter(
log_dir=f"./runs/{model_name}/{datetime.datetime.now().replace(microsecond=0).isoformat()}{'-' + os.environ['REMARK'] if 'REMARK' in os.environ else ''}")
if not os.path.exists('checkpoint'):
os.makedirs('checkpoint')
try:
pretrained_word_embedding = torch.from_numpy(
np.load('./data/train/pretrained_word_embedding.npy')).float()
except FileNotFoundError:
pretrained_word_embedding = None
if model_name == 'DKN':
try:
pretrained_entity_embedding = torch.from_numpy(
np.load(
'./data/train/pretrained_entity_embedding.npy')).float()
except FileNotFoundError:
pretrained_entity_embedding = None
# TODO: currently context is not available
try:
pretrained_context_embedding = torch.from_numpy(
np.load(
'./data/train/pretrained_context_embedding.npy')).float()
except FileNotFoundError:
pretrained_context_embedding = None
model = Model(Config, pretrained_word_embedding,
pretrained_entity_embedding,
pretrained_context_embedding, writer).to(device)
else:
model = Model(Config, pretrained_word_embedding, writer).to(device)
print(model)
dataset = BaseDataset('data/train/behaviors_parsed.tsv',
'data/train/news_parsed.tsv',
Config.dataset_attributes)
print(f"Load training dataset with size {len(dataset)}.")
dataloader = iter(
DataLoader(dataset,
batch_size=Config.batch_size,
shuffle=True,
num_workers=Config.num_workers,
drop_last=True))
optimizer = torch.optim.Adam(model.parameters(), lr=Config.learning_rate)
start_time = time.time()
loss_full = []
exhaustion_count = 0
step = 0
checkpoint_dir = os.path.join('./checkpoint', model_name)
Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
if Config.load_checkpoint:
checkpoint_path = latest_checkpoint(checkpoint_dir)
if checkpoint_path is not None:
print(f"Load saved parameters in {checkpoint_path}")
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
step = checkpoint['step']
model.train()
with tqdm(total=Config.num_batches, desc="Training") as pbar:
for i in range(1, Config.num_batches + 1):
try:
minibatch = next(dataloader)
except StopIteration:
exhaustion_count += 1
tqdm.write(
f"Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."
)
dataloader = iter(
DataLoader(dataset,
batch_size=Config.batch_size,
shuffle=True,
num_workers=Config.num_workers,
drop_last=True))
minibatch = next(dataloader)
step += 1
if model_name == 'LSTUR':
y_pred = model(minibatch["user"],
minibatch["clicked_news_length"],
minibatch["candidate_news"],
minibatch["clicked_news"])
elif model_name == 'HiFiArk':
y_pred, regularizer_loss = model(minibatch["candidate_news"],
minibatch["clicked_news"])
elif model_name == 'TANR':
y_pred, topic_classification_loss = model(
minibatch["candidate_news"], minibatch["clicked_news"])
else:
y_pred = model(minibatch["candidate_news"],
minibatch["clicked_news"])
            loss = torch.stack([x[0] for x in -F.log_softmax(y_pred, dim=1)]).mean()
if model_name == 'HiFiArk':
writer.add_scalar('Train/BaseLoss', loss.item(), step)
writer.add_scalar('Train/RegularizerLoss',
regularizer_loss.item(), step)
loss += Config.regularizer_loss_weight * regularizer_loss
elif model_name == 'TANR':
writer.add_scalar('Train/BaseLoss', loss.item(), step)
writer.add_scalar('Train/TopicClassificationLoss',
topic_classification_loss.item(), step)
loss += Config.topic_classification_loss_weight * topic_classification_loss
loss_full.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
writer.add_scalar('Train/Loss', loss.item(), step)
if i % Config.num_batches_save_checkpoint == 0:
torch.save(
{
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'step': step
}, f"./checkpoint/{model_name}/ckpt-{step}.pth")
if i % Config.num_batches_show_loss == 0:
tqdm.write(
f"Time {time_since(start_time)}, batches {i}, current loss {loss.item():.4f}, average loss: {np.mean(loss_full):.4f}"
)
if i % Config.num_batches_validate == 0:
val_auc, val_mrr, val_ndcg5, val_ndcg10 = evaluate(
model, './data/val')
writer.add_scalar('Validation/AUC', val_auc, step)
writer.add_scalar('Validation/MRR', val_mrr, step)
writer.add_scalar('Validation/nDCG@5', val_ndcg5, step)
writer.add_scalar('Validation/nDCG@10', val_ndcg10, step)
tqdm.write(
f"Time {time_since(start_time)}, batches {i}, validation AUC: {val_auc:.4f}, validation MRR: {val_mrr:.4f}, validation nDCG@5: {val_ndcg5:.4f}, validation nDCG@10: {val_ndcg10:.4f}, "
)
pbar.update(1)
if Config.num_batches % Config.num_batches_save_checkpoint != 0:
torch.save(
{
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'step': step
}, f"./checkpoint/{model_name}/ckpt-{step}.pth")
def time_since(since):
"""
Format elapsed time string.
"""
now = time.time()
elapsed_time = now - since
return time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if __name__ == '__main__':
print('Using device:', device)
print(f'Training model {model_name}')
train()
|
[
"evaluate.evaluate"
] |
[((2435, 2542), 'dataset.BaseDataset', 'BaseDataset', (['"""data/train/behaviors_parsed.tsv"""', '"""data/train/news_parsed.tsv"""', 'Config.dataset_attributes'], {}), "('data/train/behaviors_parsed.tsv', 'data/train/news_parsed.tsv',\n Config.dataset_attributes)\n", (2446, 2542), False, 'from dataset import BaseDataset\n'), ((2971, 2982), 'time.time', 'time.time', ([], {}), '()\n', (2980, 2982), False, 'import time\n'), ((3062, 3102), 'os.path.join', 'os.path.join', (['"""./checkpoint"""', 'model_name'], {}), "('./checkpoint', model_name)\n", (3074, 3102), False, 'import os\n'), ((7843, 7854), 'time.time', 'time.time', ([], {}), '()\n', (7852, 7854), False, 'import time\n'), ((374, 420), 'importlib.import_module', 'importlib.import_module', (['f"""model.{model_name}"""'], {}), "(f'model.{model_name}')\n", (397, 420), False, 'import importlib\n'), ((455, 488), 'importlib.import_module', 'importlib.import_module', (['"""config"""'], {}), "('config')\n", (478, 488), False, 'import importlib\n'), ((646, 671), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (669, 671), False, 'import torch\n'), ((731, 756), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (745, 756), False, 'import os\n'), ((1266, 1294), 'os.path.exists', 'os.path.exists', (['"""checkpoint"""'], {}), "('checkpoint')\n", (1280, 1294), False, 'import os\n'), ((1304, 1329), 'os.makedirs', 'os.makedirs', (['"""checkpoint"""'], {}), "('checkpoint')\n", (1315, 1329), False, 'import os\n'), ((2686, 2802), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'Config.batch_size', 'shuffle': '(True)', 'num_workers': 'Config.num_workers', 'drop_last': '(True)'}), '(dataset, batch_size=Config.batch_size, shuffle=True, num_workers\n =Config.num_workers, drop_last=True)\n', (2696, 2802), False, 'from torch.utils.data import DataLoader\n'), ((3626, 3673), 'tqdm.tqdm', 'tqdm', ([], {'total': 'Config.num_batches', 'desc': '"""Training"""'}), "(total=Config.num_batches, desc='Training')\n", (3630, 3673), False, 'from tqdm import tqdm\n'), ((7923, 7948), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (7934, 7948), False, 'import time\n'), ((867, 888), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (877, 888), False, 'import os\n'), ((3107, 3127), 'pathlib.Path', 'Path', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3111, 3127), False, 'from pathlib import Path\n'), ((3384, 3411), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3394, 3411), False, 'import torch\n'), ((6837, 6866), 'evaluate.evaluate', 'evaluate', (['model', '"""./data/val"""'], {}), "(model, './data/val')\n", (6845, 6866), False, 'from evaluate import evaluate\n'), ((1406, 1459), 'numpy.load', 'np.load', (['"""./data/train/pretrained_word_embedding.npy"""'], {}), "('./data/train/pretrained_word_embedding.npy')\n", (1413, 1459), True, 'import numpy as np\n'), ((3884, 3999), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."""'], {}), "(\n f'Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset.'\n )\n", (3894, 3999), False, 'from tqdm import tqdm\n'), ((1658, 1713), 'numpy.load', 'np.load', (['"""./data/train/pretrained_entity_embedding.npy"""'], {}), "('./data/train/pretrained_entity_embedding.npy')\n", (1665, 1713), True, 'import numpy as np\n'), ((1967, 2023), 'numpy.load', 'np.load', (['"""./data/train/pretrained_context_embedding.npy"""'], {}), "('./data/train/pretrained_context_embedding.npy')\n", (1974, 2023), True, 'import numpy as np\n'), ((4083, 4199), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'Config.batch_size', 'shuffle': '(True)', 'num_workers': 'Config.num_workers', 'drop_last': '(True)'}), '(dataset, batch_size=Config.batch_size, shuffle=True, num_workers\n =Config.num_workers, drop_last=True)\n', (4093, 4199), False, 'from torch.utils.data import DataLoader\n'), ((6682, 6700), 'numpy.mean', 'np.mean', (['loss_full'], {}), '(loss_full)\n', (6689, 6700), True, 'import numpy as np\n'), ((1130, 1153), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1151, 1153), False, 'import datetime\n'), ((5246, 5274), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['y_pred'], {'dim': '(1)'}), '(y_pred, dim=1)\n', (5259, 5274), True, 'import torch.nn.functional as F\n')]
|
from alfred.utils.config import parse_bool, load_dict_from_json, save_dict_to_json, parse_log_level
from alfred.utils.misc import create_logger, select_storage_dirs
from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists
from alfred.utils.recorder import Recorder
from alfred.utils.plots import create_fig, bar_chart, plot_curves, plot_vertical_densities
from alfred.utils.stats import get_95_confidence_interval_of_sequence, get_95_confidence_interval
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import argparse
import pickle
import os
import logging
from shutil import copyfile
from collections import OrderedDict
import seaborn as sns
import pathlib
from pathlib import Path
import shutil
import math
sns.set()
sns.set_style('whitegrid')
def get_benchmark_args():
parser = argparse.ArgumentParser()
parser.add_argument('--benchmark_type', type=str, choices=['compare_models', 'compare_searches'], required=True)
parser.add_argument('--log_level', type=parse_log_level, default=logging.INFO)
parser.add_argument('--from_file', type=str, default=None)
parser.add_argument('--storage_names', type=str, nargs='+', default=None)
parser.add_argument('--x_metric', default="episode", type=str)
parser.add_argument('--y_metric', default="eval_return", type=str)
parser.add_argument('--y_error_bars', default="bootstrapped_CI",
choices=["bootstrapped_CI", "stderr", "10th_quantile"])
parser.add_argument('--re_run_if_exists', type=parse_bool, default=False,
help="Whether to re-compute seed_scores if 'seed_scores.pkl' already exists")
parser.add_argument('--n_eval_runs', type=int, default=100,
help="Only used if performance_metric=='evaluation_runs'")
parser.add_argument('--performance_metric', type=str, default='eval_return',
help="Can fall into either of two categories: "
"(1) 'evaluation_runs': evaluate() will be called on model in seed_dir for 'n_eval_runs'"
"(2) OTHER METRIC: this metric must have been recorded in training and be a key of train_recorder")
parser.add_argument('--performance_aggregation', type=str, choices=['min', 'max', 'avg', 'last',
'mean_on_last_20_percents'],
default='mean_on_last_20_percents',
help="How gathered 'performance_metric' should be aggregated to quantify performance of seed_dir")
parser.add_argument('--root_dir', default="./storage", type=str)
return parser.parse_args()
# utility functions for curves (should not be called alone) -------------------------------------------------------
def _compute_seed_scores(storage_dir, performance_metric, performance_aggregation, group_key, bar_key,
re_run_if_exists, save_dir, logger, root_dir, n_eval_runs):
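    """Computes one score per training seed for every experiment under storage_dir.

    Scores are collected as scores[bar_key value][group_key value] -> np.array over
    seeds and pickled to storage_dir/save_dir/{save_dir}_seed_scores.pkl, alongside
    a json describing how they were computed. The computation is skipped if the
    pickle already exists, unless re_run_if_exists is True.
    """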
if (storage_dir / save_dir / f"{save_dir}_seed_scores.pkl").exists() and not re_run_if_exists:
logger.info(f" SKIPPING {storage_dir} - {save_dir}_seed_scores.pkl already exists")
return
else:
logger.info(f"Benchmarking {storage_dir}...")
assert group_key in ['task_name', 'storage_name', 'experiment_num', 'alg_name']
assert bar_key in ['task_name', 'storage_name', 'experiment_num', 'alg_name']
# Initialize container
scores = OrderedDict()
# Get all experiment directories
all_experiments = DirectoryTree.get_all_experiments(storage_dir=storage_dir)
for experiment_dir in all_experiments:
# For that experiment, get all seed directories
experiment_seeds = DirectoryTree.get_all_seeds(experiment_dir=experiment_dir)
# Initialize container
all_seeds_scores = []
for i, seed_dir in enumerate(experiment_seeds):
# Prints which seed directory is being treated
logger.debug(f"{seed_dir}")
# Loads training config
config_dict = load_dict_from_json(str(seed_dir / "config.json"))
# Selects how data will be identified
keys = {
"task_name": config_dict["task_name"],
"storage_name": seed_dir.parents[1].name,
"alg_name": config_dict["alg_name"],
"experiment_num": seed_dir.parents[0].name.strip('experiment')
}
outer_key = keys[bar_key]
inner_key = keys[group_key]
# Evaluation phase
if performance_metric == 'evaluation_runs':
assert n_eval_runs is not None
try:
from evaluate import evaluate, get_evaluation_args
except ImportError as e:
raise ImportError(
f"{e}\nTo evaluate models based on --performance_metric='evaluation_runs' "
f"alfred.benchmark assumes the following structure that the working directory contains "
f"a file called evaluate.py containing two functions: "
f"\n\t1. a function evaluate() that returns a score for each evaluation run"
f"\n\t2. a function get_evaluation_args() that returns a Namespace of arguments for evaluate()"
)
# Sets config for evaluation phase
eval_config = get_evaluation_args(overwritten_args="")
eval_config.storage_name = seed_dir.parents[1].name
eval_config.experiment_num = int(seed_dir.parents[0].name.strip("experiment"))
eval_config.seed_num = int(seed_dir.name.strip("seed"))
eval_config.render = False
eval_config.n_episodes = n_eval_runs
eval_config.root_dir = root_dir
# Evaluates agent and stores the return
performance_data = evaluate(eval_config)
else:
# Loads training data
loaded_recorder = Recorder.init_from_pickle_file(
filename=str(seed_dir / 'recorders' / 'train_recorder.pkl'))
performance_data = loaded_recorder.tape[performance_metric]
# Aggregation phase
if performance_aggregation == 'min':
score = np.min(performance_data)
elif performance_aggregation == 'max':
score = np.max(performance_data)
elif performance_aggregation == 'avg':
score = np.mean(performance_data)
elif performance_aggregation == 'last':
score = performance_data[-1]
elif performance_aggregation == 'mean_on_last_20_percents':
eighty_percent_index = int(0.8*len(performance_data))
score = np.mean(performance_data[eighty_percent_index:])
else:
raise NotImplementedError
all_seeds_scores.append(score)
if outer_key not in scores.keys():
scores[outer_key] = OrderedDict()
scores[outer_key][inner_key] = np.stack(all_seeds_scores)
os.makedirs(storage_dir / save_dir, exist_ok=True)
with open(storage_dir / save_dir / f"{save_dir}_seed_scores.pkl", "wb") as f:
pickle.dump(scores, f)
scores_info = {'n_eval_runs': n_eval_runs,
'performance_metric': performance_metric,
'performance_aggregation': performance_aggregation}
save_dict_to_json(scores_info, filename=str(storage_dir / save_dir / f"{save_dir}_seed_scores_info.json"))
def _gather_scores(storage_dirs, save_dir, y_error_bars, logger, normalize_with_first_model=True, sort_bars=False):
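    """Loads the seed scores pickled by _compute_seed_scores for every storage_dir
    and turns them into means and error bars (stderr, 10th quantiles, or
    bootstrapped 95% CI), normalized by the mean scores of the first storage_dir
    when normalize_with_first_model is True."""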
# Initialize containers
scores_means = OrderedDict()
scores_err_up = OrderedDict()
scores_err_down = OrderedDict()
# Loads performance benchmark data
individual_scores = OrderedDict()
for storage_dir in storage_dirs:
with open(storage_dir / save_dir / f"{save_dir}_seed_scores.pkl", "rb") as f:
individual_scores[storage_dir.name] = pickle.load(f)
# Print keys so that user can verify all these benchmarks make sense to compare (e.g. same tasks)
for storage_name, idv_score in individual_scores.items():
logger.debug(storage_name)
for outer_key in idv_score.keys():
logger.debug(f"{outer_key}: {list(idv_score[outer_key].keys())}")
logger.debug(f"\n")
# Reorganize all individual_scores in a single dictionary
scores = OrderedDict()
for storage_name, idv_score in individual_scores.items():
for outer_key in idv_score:
if outer_key not in list(scores.keys()):
scores[outer_key] = OrderedDict()
            for inner_key in idv_score[outer_key]:
                scores[outer_key][inner_key] = idv_score[outer_key][inner_key]
# First storage_dir will serve as reference if normalize_with_first_model is True
reference_key = list(scores.keys())[0]
reference_means = OrderedDict()
for inner_key in scores[reference_key].keys():
if normalize_with_first_model:
reference_means[inner_key] = scores[reference_key][inner_key].mean()
else:
reference_means[inner_key] = 1.
# Sorts inner_keys (bars among groups)
sorted_inner_keys = list(reversed(sorted(reference_means.keys(),
key=lambda item: (scores[reference_key][item].mean(), item))))
if sort_bars:
inner_keys = sorted_inner_keys
else:
inner_keys = scores[reference_key].keys()
# Computes means and error bars
for inner_key in inner_keys:
for outer_key in scores.keys():
if outer_key not in scores_means.keys():
scores_means[outer_key] = OrderedDict()
scores_err_up[outer_key] = OrderedDict()
scores_err_down[outer_key] = OrderedDict()
if y_error_bars == "stderr":
scores_means[outer_key][inner_key] = np.mean(
scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-8))
scores_err_down[outer_key][inner_key] = np.std(
scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-8)) / len(
scores[outer_key][inner_key]) ** 0.5
scores_err_up[outer_key][inner_key] = scores_err_down[outer_key][inner_key]
elif y_error_bars == "10th_quantiles":
scores_means[outer_key][inner_key] = np.mean(
scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-8))
quantile = 0.10
scores_err_down[outer_key][inner_key] = np.abs(
np.quantile(a=scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-8), q=0. + quantile) \
- scores_means[outer_key][inner_key])
scores_err_up[outer_key][inner_key] = np.abs(
np.quantile(a=scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-8), q=1. - quantile) \
- scores_means[outer_key][inner_key])
elif y_error_bars == "bootstrapped_CI":
scores_samples = scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-8)
mean, err_up, err_down = get_95_confidence_interval(samples=scores_samples, method=y_error_bars)
scores_means[outer_key][inner_key] = mean
scores_err_up[outer_key][inner_key] = err_up
scores_err_down[outer_key][inner_key] = err_down
else:
raise NotImplementedError
return scores, scores_means, scores_err_up, scores_err_down, sorted_inner_keys, reference_key
def _make_benchmark_performance_figure(storage_dirs, save_dir, y_error_bars, logger, normalize_with_first_model=True,
sort_bars=False):
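    """Draws one bar chart of the aggregated seed scores across storage_dirs and
    saves it (plus a sources json) into every storage_dir/save_dir. Returns the
    inner keys sorted by decreasing mean score of the reference storage_dir."""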
scores, scores_means, scores_err_up, scores_err_down, sorted_inner_keys, reference_key = _gather_scores(
storage_dirs=storage_dirs,
save_dir=save_dir,
y_error_bars=y_error_bars,
logger=logger,
normalize_with_first_model=normalize_with_first_model,
sort_bars=sort_bars)
# Creates the graph
n_bars_per_group = len(scores_means.keys())
n_groups = len(scores_means[reference_key].keys())
fig, ax = create_fig((1, 1), figsize=(n_bars_per_group * n_groups, n_groups))
bar_chart(ax,
scores=scores_means,
err_up=scores_err_up,
err_down=scores_err_down,
group_names=scores_means[reference_key].keys(),
title="Average Return"
)
n_training_seeds = scores[reference_key][list(scores_means[reference_key].keys())[0]].shape[0]
scores_info = load_dict_from_json(filename=str(storage_dirs[0] / save_dir / f"{save_dir}_seed_scores_info.json"))
info_str = f"{n_training_seeds} training seeds" \
f"\nn_eval_runs={scores_info['n_eval_runs']}" \
f"\nperformance_metric={scores_info['performance_metric']}" \
f"\nperformance_aggregation={scores_info['performance_aggregation']}"
ax.text(0.80, 0.95, info_str, transform=ax.transAxes, fontsize=12,
verticalalignment='top', bbox=dict(facecolor='gray', alpha=0.1))
plt.tight_layout()
# Saves storage_dirs from which the graph was created for traceability
for storage_dir in storage_dirs:
os.makedirs(storage_dir / save_dir, exist_ok=True)
fig.savefig(storage_dir / save_dir / f'{save_dir}_performance.png')
        save_dict_to_json({'sources': [str(d) for d in storage_dirs],
'n_training_seeds': n_training_seeds,
'n_eval_runs': scores_info['n_eval_runs'],
'performance_metric': scores_info['performance_metric'],
'performance_aggregation': scores_info['performance_aggregation']
},
storage_dir / save_dir / f'{save_dir}_performance_sources.json')
plt.close(fig)
    # SANITY-CHECK: no seed may have a NaN score, otherwise the best experiment could be selected based on it
    expe_with_nan_scores = []
    for outer_key in scores.keys():
        for inner_key, indiv_score in scores[outer_key].items():
            if math.isnan(indiv_score.mean()):
                expe_with_nan_scores.append(outer_key + "/experiment" + inner_key)
    if len(expe_with_nan_scores) > 0:
        raise ValueError(f'Some experiments have NaN scores. Remove them from storage and clean the summary folder to continue.\n'
                         f'Experiments with NaN scores:\n' + '\n'.join(expe_with_nan_scores))
return sorted_inner_keys
def _gather_experiments_training_curves(storage_dir, graph_key, curve_key, logger, x_metric, y_metric,
x_data=None, y_data=None):
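    """Walks every seed_dir under storage_dir and loads its (x_metric, y_metric)
    training curves from train_recorder.pkl, grouped as
    data[graph_key value][curve_key value] -> list of one array per seed."""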
# Initialize containers
if x_data is None:
x_data = OrderedDict()
else:
assert type(x_data) is OrderedDict
if y_data is None:
y_data = OrderedDict()
else:
assert type(y_data) is OrderedDict
# Get all experiment directories
all_experiments = DirectoryTree.get_all_experiments(storage_dir=storage_dir)
for experiment_dir in all_experiments:
# For that experiment, get all seed directories
experiment_seeds = DirectoryTree.get_all_seeds(experiment_dir=experiment_dir)
for i, seed_dir in enumerate(experiment_seeds):
# Prints which seed directory is being treated
logger.debug(f"{seed_dir}")
# Loads training config
config_dict = load_dict_from_json(str(seed_dir / "config.json"))
# Keys can be any information stored in config.json
# We also handle a few special cases (e.g. "experiment_num")
keys = config_dict.copy()
keys['experiment_num'] = seed_dir.parent.stem.strip('experiment')
keys['storage_name'] = seed_dir.parents[1]
outer_key = keys[graph_key] # number of graphs to be made
inner_key = keys[curve_key] # number of curves per graph
# Loads training data
loaded_recorder = Recorder.init_from_pickle_file(
filename=str(seed_dir / 'recorders' / 'train_recorder.pkl'))
# Stores the data
if outer_key not in y_data.keys():
x_data[outer_key] = OrderedDict()
y_data[outer_key] = OrderedDict()
if inner_key not in y_data[outer_key].keys():
x_data[outer_key][inner_key] = []
y_data[outer_key][inner_key] = []
x_data[outer_key][inner_key].append(loaded_recorder.tape[x_metric])
y_data[outer_key][inner_key].append(loaded_recorder.tape[y_metric]) # TODO: make sure that this is a scalar metric, even for eval_return (and not 10 points for every eval_step). All metrics saved in the recorder should be scalars for every time point.
return x_data, y_data
def _make_benchmark_learning_figure(x_data, y_data, x_metric, y_metric, y_error_bars, storage_dirs, save_dir, logger,
n_labels=np.inf, visuals_file=None, additional_curves_file=None):
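    """Plots one learning-curve panel per outer_key: one mean curve per inner_key
    with shaded error bands (stderr or bootstrapped 95% CI across seeds), styled
    from the optional visuals and additional-curves json files."""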
# Initialize containers
y_data_means = OrderedDict()
y_data_err_up = OrderedDict()
y_data_err_down = OrderedDict()
long_labels = OrderedDict()
titles = OrderedDict()
x_axis_titles = OrderedDict()
y_axis_titles = OrderedDict()
labels = OrderedDict()
colors = OrderedDict()
markers = OrderedDict()
for outer_key in y_data:
y_data_means[outer_key] = OrderedDict()
y_data_err_up[outer_key] = OrderedDict()
y_data_err_down[outer_key] = OrderedDict()
# Initialize figure
n_graphs = len(y_data.keys())
if n_graphs == 3:
axes_shape = (1, 3)
elif n_graphs > 1:
i_max = int(np.ceil(np.sqrt(len(y_data.keys()))))
axes_shape = (int(np.ceil(len(y_data.keys()) / i_max)), i_max)
else:
axes_shape = (1, 1)
# Creates figure
gs = gridspec.GridSpec(*axes_shape)
fig = plt.figure(figsize=(8 * axes_shape[1], 4 * axes_shape[0]))
# Compute means and stds for all inner_key curve from raw data
for i, outer_key in enumerate(y_data.keys()):
for inner_key in y_data[outer_key].keys():
x_data[outer_key][inner_key] = x_data[outer_key][inner_key][0] # assumes all x_data are the same
if y_error_bars == "stderr":
y_data_means[outer_key][inner_key] = np.stack(y_data[outer_key][inner_key], axis=-1).mean(-1)
y_data_err_up[outer_key][inner_key] = np.stack(y_data[outer_key][inner_key], axis=-1).std(-1) \
                                                      / len(y_data[outer_key][inner_key]) ** 0.5
y_data_err_down = y_data_err_up
elif y_error_bars == "bootstrapped_CI":
y_data_samples = np.stack(y_data[outer_key][inner_key],
                                         axis=-1)  # dim=0 is across time (n_time_steps, n_samples)
mean, err_up, err_down = get_95_confidence_interval_of_sequence(list_of_samples=y_data_samples,
method=y_error_bars)
y_data_means[outer_key][inner_key] = mean
y_data_err_up[outer_key][inner_key] = err_up
y_data_err_down[outer_key][inner_key] = err_down
else:
raise NotImplementedError
long_labels[outer_key] = list(y_data_means[outer_key].keys())
# Limits the number of labels to be displayed (only displays labels of n_labels best experiments)
if n_labels < np.inf:
mean_over_entire_curves = np.array([array.mean() for array in y_data_means[outer_key].values()])
n_max_idxs = (-mean_over_entire_curves).argsort()[:n_labels]
for k in range(len(long_labels[outer_key])):
if k in n_max_idxs:
continue
else:
long_labels[outer_key][k] = None
# Selects right ax object
if axes_shape == (1, 1):
current_ax = fig.add_subplot(gs[0, 0])
elif any(np.array(axes_shape) == 1):
current_ax = fig.add_subplot(gs[0, i])
else:
current_ax = fig.add_subplot(gs[i // axes_shape[1], i % axes_shape[1]])
# Collect algorithm names
if all([type(long_label) is pathlib.PosixPath for long_label in long_labels[outer_key]]):
algs = []
for path in long_labels[outer_key]:
_, _, alg, _, _ = DirectoryTree.extract_info_from_storage_name(path.name)
algs.append(alg)
# Loads visuals dictionaries
if visuals_file is not None:
visuals = load_dict_from_json(visuals_file)
else:
visuals = None
# Loads additional curves file
if additional_curves_file is not None:
additional_curves = load_dict_from_json(additional_curves_file)
else:
additional_curves = None
# Sets visuals
if type(visuals) is dict and 'titles_dict' in visuals.keys():
titles[outer_key] = visuals['titles_dict'][outer_key]
else:
titles[outer_key] = outer_key
if type(visuals) is dict and 'axis_titles_dict' in visuals.keys():
x_axis_titles[outer_key] = visuals['axis_titles_dict'][x_metric]
y_axis_titles[outer_key] = visuals['axis_titles_dict'][y_metric]
else:
x_axis_titles[outer_key] = x_metric
y_axis_titles[outer_key] = y_metric
if type(visuals) is dict and 'labels_dict' in visuals.keys():
labels[outer_key] = [visuals['labels_dict'][inner_key] for inner_key in y_data_means[outer_key].keys()]
else:
labels[outer_key] = long_labels[outer_key]
if type(visuals) is dict and 'colors_dict' in visuals.keys():
colors[outer_key] = [visuals['colors_dict'][inner_key] for inner_key in y_data_means[outer_key].keys()]
else:
colors[outer_key] = [None for _ in long_labels[outer_key]]
if type(visuals) is dict and 'markers_dict' in visuals.keys():
markers[outer_key] = [visuals['markers_dict'][inner_key] for inner_key in y_data_means[outer_key].keys()]
else:
markers[outer_key] = [None for _ in long_labels[outer_key]]
logger.info(f"Graph for {outer_key}:\n\tlabels={labels}\n\tcolors={colors}\n\tmarkers={markers}")
if additional_curves_file is not None:
hlines = additional_curves['hlines'][outer_key]
n_lines = len(hlines)
else:
hlines = None
n_lines = 0
# Plots the curves
plot_curves(current_ax,
xs=list(x_data[outer_key].values()),
ys=list(y_data_means[outer_key].values()),
fill_up=list(y_data_err_up[outer_key].values()),
fill_down=list(y_data_err_down[outer_key].values()),
labels=labels[outer_key],
colors=colors[outer_key],
markers=markers[outer_key],
xlabel=x_axis_titles[outer_key],
ylabel=y_axis_titles[outer_key] if i == 0 else "",
title=titles[outer_key].upper(),
add_legend=True if i == (len(list(y_data.keys())) - 1) else False,
legend_outside=True,
legend_loc="upper right",
legend_pos=(0.95, -0.2),
legend_n_columns=len(list(y_data_means[outer_key].values())) + n_lines,
hlines=hlines,
tick_font_size=22,
axis_font_size=26,
legend_font_size=26,
title_font_size=28)
plt.tight_layout()
for storage_dir in storage_dirs:
os.makedirs(storage_dir / save_dir, exist_ok=True)
fig.savefig(storage_dir / save_dir / f'{save_dir}_learning.pdf', bbox_inches='tight')
plt.close(fig)
def _make_vertical_densities_figure(storage_dirs, visuals_file, additional_curves_file, make_box_plot, queried_performance_metric,
queried_performance_aggregation, save_dir, load_dir, logger):
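    """Plots, for each task, the distribution (vertical densities or box plots) of
    the per-experiment mean scores saved by summarize_search, one density per
    storage_dir, and saves the figure into every storage_dir/save_dir."""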
# Initialize container
all_means = OrderedDict()
long_labels = OrderedDict()
titles = OrderedDict()
labels = OrderedDict()
colors = OrderedDict()
markers = OrderedDict()
all_performance_metrics = []
all_performance_aggregation = []
# Gathers data
for storage_dir in storage_dirs:
logger.debug(storage_dir)
# Loads the scores and scores_info saved by summarize_search
with open(str(storage_dir / load_dir / f"{load_dir}_seed_scores.pkl"), "rb") as f:
scores = pickle.load(f)
scores_info = load_dict_from_json(str(storage_dir / "summary" / f"summary_seed_scores_info.json"))
all_performance_metrics.append(scores_info['performance_metric'])
all_performance_aggregation.append(scores_info['performance_aggregation'])
x = list(scores.keys())[0]
storage_name = storage_dir.name
        # Adding the task_name the first time it is encountered
_, _, _, outer_key, _ = DirectoryTree.extract_info_from_storage_name(storage_name)
if outer_key not in list(all_means.keys()):
all_means[outer_key] = OrderedDict()
        # Taking the mean across evaluations and seeds (outer_key was extracted above)
all_means[outer_key][storage_name] = [array.mean() for array in scores[x].values()]
if outer_key not in long_labels.keys():
long_labels[outer_key] = [storage_dir]
else:
long_labels[outer_key].append(storage_dir)
# Security checks
assert len(set(all_performance_metrics)) == 1 and len(set(all_performance_aggregation)) == 1, \
"Error: all seeds do not have scores computed using the same performance metric or performance aggregation. " \
"You should benchmark with --re_run_if_exists=True using the desired --performance_aggregation and " \
"--performance_metric so that all seeds that you want to compare have the same metrics."
actual_performance_metric = all_performance_metrics.pop()
actual_performance_aggregation = all_performance_aggregation.pop()
assert queried_performance_metric == actual_performance_metric and \
queried_performance_aggregation == actual_performance_aggregation, \
"Error: The performance_metric or performance_aggregation that was queried for the vertical_densities " \
"is not the same as what was saved by summarize_search. You should benchmark with --re_run_if_exists=True " \
"using the desired --performance_aggregation and --performance_metric so that all seeds that you want " \
"to compare have the same metrics."
# Initialize figure
n_graphs = len(all_means.keys())
if n_graphs == 3:
axes_shape = (1, 3)
elif n_graphs > 1:
i_max = int(np.ceil(np.sqrt(len(all_means.keys()))))
axes_shape = (int(np.ceil(len(all_means.keys()) / i_max)), i_max)
else:
axes_shape = (1, 1)
# Creates figure
gs = gridspec.GridSpec(*axes_shape)
fig = plt.figure(figsize=(12 * axes_shape[1], 5 * axes_shape[0]))
for i, outer_key in enumerate(all_means.keys()):
# Selects right ax object
if axes_shape == (1, 1):
current_ax = fig.add_subplot(gs[0, 0])
elif any(np.array(axes_shape) == 1):
current_ax = fig.add_subplot(gs[0, i])
else:
current_ax = fig.add_subplot(gs[i // axes_shape[1], i % axes_shape[1]])
# Collect algorithm names
if all([type(long_label) is pathlib.PosixPath for long_label in long_labels[outer_key]]):
algs = []
for path in long_labels[outer_key]:
_, _, alg, _, _ = DirectoryTree.extract_info_from_storage_name(path.name)
algs.append(alg)
# Loads visuals dictionaries
if visuals_file is not None:
visuals = load_dict_from_json(visuals_file)
else:
visuals = None
# Loads additional curves file
if additional_curves_file is not None:
additional_curves = load_dict_from_json(additional_curves_file)
else:
additional_curves = None
# Sets visuals
if type(visuals) is dict and 'titles_dict' in visuals.keys():
titles[outer_key] = visuals['titles_dict'][outer_key]
else:
titles[outer_key] = outer_key
if type(visuals) is dict and 'labels_dict' in visuals.keys():
labels[outer_key] = [visuals['labels_dict'][alg] for alg in algs]
else:
labels[outer_key] = long_labels[outer_key]
if type(visuals) is dict and 'colors_dict' in visuals.keys():
colors[outer_key] = [visuals['colors_dict'][alg] for alg in algs]
else:
colors[outer_key] = [None for _ in long_labels[outer_key]]
if type(visuals) is dict and 'markers_dict' in visuals.keys():
markers[outer_key] = [visuals['markers_dict'][alg] for alg in algs]
else:
markers[outer_key] = [None for _ in long_labels[outer_key]]
logger.info(f"Graph for {outer_key}:\n\tlabels={labels}\n\tcolors={colors}\n\tmarkers={markers}")
if additional_curves_file is not None:
hlines = additional_curves['hlines'][outer_key]
else:
hlines = None
# Makes the plots
plot_vertical_densities(ax=current_ax,
ys=list(all_means[outer_key].values()),
labels=labels[outer_key],
colors=colors[outer_key],
make_boxplot=make_box_plot,
title=titles[outer_key].upper(),
ylabel=f"{actual_performance_aggregation}-{actual_performance_metric}",
hlines=hlines)
# Saves the figure
plt.tight_layout()
filename_addon = "boxplot" if make_box_plot else ""
for storage_dir in storage_dirs:
os.makedirs(storage_dir / save_dir, exist_ok=True)
fig.savefig(storage_dir / save_dir / f'{save_dir}_vertical_densities_{filename_addon}.pdf', bbox_inches="tight")
        save_dict_to_json([str(d) for d in storage_dirs],
storage_dir / save_dir / f'{save_dir}_vertical_densities_sources.json')
plt.close(fig)
# benchmark interface ---------------------------------------------------------------------------------------------
def compare_models(storage_names, n_eval_runs, re_run_if_exists, logger, root_dir, x_metric, y_metric, y_error_bars,
visuals_file, additional_curves_file, performance_metric, performance_aggregation,
make_performance_chart=True, make_learning_plots=True):
"""
    compare_models compares the models from several storage_dirs
"""
assert type(storage_names) is list
if make_learning_plots:
logger.debug(f'\n{"benchmark_learning".upper()}:')
x_data = OrderedDict()
y_data = OrderedDict()
storage_dirs = []
for storage_name in storage_names:
x_data, y_data = _gather_experiments_training_curves(
storage_dir=get_root(root_dir) / storage_name,
graph_key="task_name",
curve_key="storage_name" if logger.level == 10 else "alg_name",
logger=logger,
x_metric=x_metric,
y_metric=y_metric,
x_data=x_data,
y_data=y_data)
storage_dirs.append(get_root(root_dir) / storage_name)
_make_benchmark_learning_figure(x_data=x_data,
y_data=y_data,
x_metric=x_metric,
y_metric=y_metric,
y_error_bars=y_error_bars,
storage_dirs=storage_dirs,
n_labels=np.inf,
save_dir="benchmark",
logger=logger,
visuals_file=visuals_file,
additional_curves_file=additional_curves_file)
if make_performance_chart:
logger.debug(f'\n{"benchmark_performance".upper()}:')
storage_dirs = []
for storage_name in storage_names:
_compute_seed_scores(storage_dir=get_root(root_dir) / storage_name,
performance_metric=performance_metric,
performance_aggregation=performance_aggregation,
n_eval_runs=n_eval_runs,
group_key="task_name",
bar_key="storage_name" if logger.level == 10 else "alg_name",
re_run_if_exists=re_run_if_exists,
save_dir="benchmark",
logger=logger,
root_dir=root_dir)
storage_dirs.append(get_root(root_dir) / storage_name)
_make_benchmark_performance_figure(storage_dirs=storage_dirs,
logger=logger,
normalize_with_first_model=True,
sort_bars=False,
y_error_bars=y_error_bars,
save_dir="benchmark")
return
def summarize_search(storage_name, n_eval_runs, re_run_if_exists, logger, root_dir, x_metric, y_metric, y_error_bars,
performance_metric, performance_aggregation, make_performance_chart=True,
make_learning_plots=True):
"""
    Summarizes the experiments inside a single storage_dir
"""
assert type(storage_name) is str
storage_dir = get_root(root_dir) / storage_name
if re_run_if_exists and (storage_dir / "summary").exists():
shutil.rmtree(storage_dir / "summary")
if make_learning_plots:
logger.debug(f'\n{"benchmark_learning".upper()}:')
x_data, y_data = _gather_experiments_training_curves(storage_dir=storage_dir,
graph_key="task_name",
curve_key="experiment_num",
logger=logger,
x_metric=x_metric,
y_metric=y_metric)
_make_benchmark_learning_figure(x_data=x_data,
y_data=y_data,
x_metric=x_metric,
y_metric=y_metric,
y_error_bars=y_error_bars,
storage_dirs=[storage_dir],
n_labels=10,
save_dir="summary",
logger=logger,
visuals_file=None)
if make_performance_chart:
logger.debug(f'\n{"benchmark_performance".upper()}:')
_compute_seed_scores(storage_dir=storage_dir,
n_eval_runs=n_eval_runs,
performance_metric=performance_metric,
performance_aggregation=performance_aggregation,
group_key="experiment_num",
bar_key="storage_name" if logger.level == 10 else "alg_name",
re_run_if_exists=re_run_if_exists,
save_dir="summary",
logger=logger,
root_dir=root_dir)
sorted_inner_keys = _make_benchmark_performance_figure(storage_dirs=[storage_dir],
logger=logger,
normalize_with_first_model=False,
sort_bars=True,
y_error_bars=y_error_bars,
save_dir="summary")
best_experiment_num = sorted_inner_keys[0]
seed_dirs_for_best_exp = [path for path in (storage_dir / f"experiment{best_experiment_num}").iterdir()]
copyfile(src=seed_dirs_for_best_exp[0] / "config.json",
dst=storage_dir / "summary" / f"bestConfig_exp{best_experiment_num}.json")
return
def compare_searches(storage_names, x_metric, y_metric, y_error_bars, performance_metric, performance_aggregation,
                     visuals_file, additional_curves_file, re_run_if_exists, logger, root_dir, n_eval_runs):
"""
    compare_searches compares the searches from several storage_dirs
"""
assert type(storage_names) is list
logger.debug(f'\n{"benchmark_vertical_densities".upper()}:')
storage_dirs = []
for storage_name in storage_names:
storage_dirs.append(get_root(root_dir) / storage_name)
for storage_dir in storage_dirs:
if not (storage_dir / "summary" / f"summary_seed_scores.pkl").exists() or re_run_if_exists:
summarize_search(storage_name=storage_dir.name,
n_eval_runs=n_eval_runs,
x_metric=x_metric,
y_metric=y_metric,
y_error_bars=y_error_bars,
performance_metric=performance_metric,
performance_aggregation=performance_aggregation,
re_run_if_exists=re_run_if_exists,
make_performance_chart=True,
make_learning_plots=True,
logger=logger,
root_dir=root_dir)
_make_vertical_densities_figure(storage_dirs=storage_dirs,
visuals_file=visuals_file,
additional_curves_file=additional_curves_file,
make_box_plot=True,
queried_performance_metric=performance_metric,
queried_performance_aggregation=performance_aggregation,
load_dir="summary",
save_dir="benchmark",
logger=logger)
return
if __name__ == '__main__':
benchmark_args = get_benchmark_args()
logger = create_logger(name="BENCHMARK - MAIN", loglevel=benchmark_args.log_level)
# Gets storage_dirs list
storage_dirs = select_storage_dirs(from_file=benchmark_args.from_file,
storage_name=benchmark_args.storage_names,
root_dir=benchmark_args.root_dir)
# Sanity-check that storages exist
storage_dirs = [storage_dir for storage_dir in storage_dirs if sanity_check_exists(storage_dir, logger)]
# convert them to storage_name to be compatible with the function called down the line
benchmark_args.storage_names = [storage_dir_path.name for storage_dir_path in storage_dirs]
# Gets visuals_file for plotting
if benchmark_args.from_file is not None:
# Gets path of visuals_file
schedule_name = Path(benchmark_args.from_file).parent.stem
visuals_file = Path(benchmark_args.from_file).parent / f"visuals_{schedule_name}.json"
additional_curves_file = Path(benchmark_args.from_file).parent / f"additional_curves_{schedule_name}.json"
if not visuals_file.exists():
visuals_file = None
if not additional_curves_file.exists():
additional_curves_file = None
else:
visuals_file = None
additional_curves_file = None
# Launches the requested benchmark type (comparing searches [vertical densities] or comparing final models [learning curves])
if benchmark_args.benchmark_type == "compare_models":
compare_models(storage_names=benchmark_args.storage_names,
x_metric=benchmark_args.x_metric,
y_metric=benchmark_args.y_metric,
y_error_bars=benchmark_args.y_error_bars,
visuals_file=visuals_file,
additional_curves_file=additional_curves_file,
n_eval_runs=benchmark_args.n_eval_runs,
performance_metric=benchmark_args.performance_metric,
performance_aggregation=benchmark_args.performance_aggregation,
make_performance_chart=False, # TODO: add support for that chart in a compare_models context
make_learning_plots=True,
re_run_if_exists=benchmark_args.re_run_if_exists,
logger=logger,
root_dir=get_root(benchmark_args.root_dir))
elif benchmark_args.benchmark_type == "compare_searches":
compare_searches(storage_names=benchmark_args.storage_names,
x_metric=benchmark_args.x_metric,
y_metric=benchmark_args.y_metric,
y_error_bars=benchmark_args.y_error_bars,
performance_metric=benchmark_args.performance_metric,
performance_aggregation=benchmark_args.performance_aggregation,
n_eval_runs=benchmark_args.n_eval_runs,
visuals_file=visuals_file,
                         additional_curves_file=additional_curves_file,
re_run_if_exists=benchmark_args.re_run_if_exists,
logger=logger,
root_dir=get_root(benchmark_args.root_dir))
else:
raise NotImplementedError
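    # Example invocation (hypothetical file name and storage names):
    #   python benchmark.py --benchmark_type compare_models \
    #       --storage_names storage_A storage_B --y_error_bars bootstrapped_CI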
|
[
"evaluate.get_evaluation_args",
"evaluate.evaluate"
] |
[((781, 790), 'seaborn.set', 'sns.set', ([], {}), '()\n', (788, 790), True, 'import seaborn as sns\n'), ((791, 817), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (804, 817), True, 'import seaborn as sns\n'), ((859, 884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (882, 884), False, 'import argparse\n'), ((3518, 3531), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3529, 3531), False, 'from collections import OrderedDict\n'), ((3593, 3651), 'alfred.utils.directory_tree.DirectoryTree.get_all_experiments', 'DirectoryTree.get_all_experiments', ([], {'storage_dir': 'storage_dir'}), '(storage_dir=storage_dir)\n', (3626, 3651), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((7248, 7298), 'os.makedirs', 'os.makedirs', (['(storage_dir / save_dir)'], {'exist_ok': '(True)'}), '(storage_dir / save_dir, exist_ok=True)\n', (7259, 7298), False, 'import os\n'), ((7871, 7884), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7882, 7884), False, 'from collections import OrderedDict\n'), ((7905, 7918), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7916, 7918), False, 'from collections import OrderedDict\n'), ((7941, 7954), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7952, 7954), False, 'from collections import OrderedDict\n'), ((8020, 8033), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8031, 8033), False, 'from collections import OrderedDict\n'), ((8649, 8662), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8660, 8662), False, 'from collections import OrderedDict\n'), ((9368, 9381), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9379, 9381), False, 'from collections import OrderedDict\n'), ((12759, 12826), 'alfred.utils.plots.create_fig', 'create_fig', (['(1, 1)'], {'figsize': '(n_bars_per_group * n_groups, n_groups)'}), '((1, 1), figsize=(n_bars_per_group * n_groups, n_groups))\n', (12769, 12826), False, 'from alfred.utils.plots import create_fig, bar_chart, plot_curves, plot_vertical_densities\n'), ((13725, 13743), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13741, 13743), True, 'import matplotlib.pyplot as plt\n'), ((14503, 14517), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14512, 14517), True, 'import matplotlib.pyplot as plt\n'), ((15626, 15684), 'alfred.utils.directory_tree.DirectoryTree.get_all_experiments', 'DirectoryTree.get_all_experiments', ([], {'storage_dir': 'storage_dir'}), '(storage_dir=storage_dir)\n', (15659, 15684), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((17754, 17767), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17765, 17767), False, 'from collections import OrderedDict\n'), ((17788, 17801), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17799, 17801), False, 'from collections import OrderedDict\n'), ((17824, 17837), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17835, 17837), False, 'from collections import OrderedDict\n'), ((17856, 17869), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17867, 17869), False, 'from collections import OrderedDict\n'), ((17883, 17896), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17894, 17896), False, 'from collections import OrderedDict\n'), ((17917, 17930), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17928, 17930), False, 'from collections import OrderedDict\n'), ((17951, 17964), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17962, 17964), False, 'from collections import OrderedDict\n'), ((17978, 17991), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17989, 17991), False, 'from collections import OrderedDict\n'), ((18005, 18018), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18016, 18018), False, 'from collections import OrderedDict\n'), ((18033, 18046), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18044, 18046), False, 'from collections import OrderedDict\n'), ((18559, 18589), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['*axes_shape'], {}), '(*axes_shape)\n', (18576, 18589), True, 'import matplotlib.gridspec as gridspec\n'), ((18600, 18658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8 * axes_shape[1], 4 * axes_shape[0])'}), '(figsize=(8 * axes_shape[1], 4 * axes_shape[0]))\n', (18610, 18658), True, 'import matplotlib.pyplot as plt\n'), ((24481, 24499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24497, 24499), True, 'import matplotlib.pyplot as plt\n'), ((24696, 24710), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (24705, 24710), True, 'import matplotlib.pyplot as plt\n'), ((24986, 24999), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24997, 24999), False, 'from collections import OrderedDict\n'), ((25018, 25031), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25029, 25031), False, 'from collections import OrderedDict\n'), ((25045, 25058), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25056, 25058), False, 'from collections import OrderedDict\n'), ((25072, 25085), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25083, 25085), False, 'from collections import OrderedDict\n'), ((25099, 25112), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25110, 25112), False, 'from collections import OrderedDict\n'), ((25127, 25140), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25138, 25140), False, 'from collections import OrderedDict\n'), ((27977, 28007), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['*axes_shape'], {}), '(*axes_shape)\n', (27994, 28007), True, 'import matplotlib.gridspec as gridspec\n'), ((28018, 28077), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12 * axes_shape[1], 5 * axes_shape[0])'}), '(figsize=(12 * axes_shape[1], 5 * axes_shape[0]))\n', (28028, 28077), True, 'import matplotlib.pyplot as plt\n'), ((30891, 30909), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30907, 30909), True, 'import matplotlib.pyplot as plt\n'), ((31352, 31366), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (31361, 31366), True, 'import matplotlib.pyplot as plt\n'), ((39890, 39963), 'alfred.utils.misc.create_logger', 'create_logger', ([], {'name': '"""BENCHMARK - MAIN"""', 'loglevel': 'benchmark_args.log_level'}), "(name='BENCHMARK - MAIN', loglevel=benchmark_args.log_level)\n", (39903, 39963), False, 'from alfred.utils.misc import create_logger, select_storage_dirs\n'), ((40014, 40151), 'alfred.utils.misc.select_storage_dirs', 'select_storage_dirs', ([], {'from_file': 'benchmark_args.from_file', 'storage_name': 'benchmark_args.storage_names', 'root_dir': 'benchmark_args.root_dir'}), '(from_file=benchmark_args.from_file, storage_name=\n benchmark_args.storage_names, root_dir=benchmark_args.root_dir)\n', (40033, 40151), False, 'from alfred.utils.misc import create_logger, select_storage_dirs\n'), ((3781, 3839), 'alfred.utils.directory_tree.DirectoryTree.get_all_seeds', 'DirectoryTree.get_all_seeds', ([], {'experiment_dir': 'experiment_dir'}), '(experiment_dir=experiment_dir)\n', (3808, 3839), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((7216, 7242), 'numpy.stack', 'np.stack', (['all_seeds_scores'], {}), '(all_seeds_scores)\n', (7224, 7242), True, 'import numpy as np\n'), ((7390, 7412), 'pickle.dump', 'pickle.dump', (['scores', 'f'], {}), '(scores, f)\n', (7401, 7412), False, 'import pickle\n'), ((13866, 13916), 'os.makedirs', 'os.makedirs', (['(storage_dir / save_dir)'], {'exist_ok': '(True)'}), '(storage_dir / save_dir, exist_ok=True)\n', (13877, 13916), False, 'import os\n'), ((15390, 15403), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15401, 15403), False, 'from collections import OrderedDict\n'), ((15498, 15511), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15509, 15511), False, 'from collections import OrderedDict\n'), ((15814, 15872), 'alfred.utils.directory_tree.DirectoryTree.get_all_seeds', 'DirectoryTree.get_all_seeds', ([], {'experiment_dir': 'experiment_dir'}), '(experiment_dir=experiment_dir)\n', (15841, 15872), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((18111, 18124), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18122, 18124), False, 'from collections import OrderedDict\n'), ((18160, 18173), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18171, 18173), False, 'from collections import OrderedDict\n'), ((18211, 18224), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18222, 18224), False, 'from collections import OrderedDict\n'), ((24546, 24596), 'os.makedirs', 'os.makedirs', (['(storage_dir / save_dir)'], {'exist_ok': '(True)'}), '(storage_dir / save_dir, exist_ok=True)\n', (24557, 24596), False, 'import os\n'), ((25935, 25993), 'alfred.utils.directory_tree.DirectoryTree.extract_info_from_storage_name', 'DirectoryTree.extract_info_from_storage_name', (['storage_name'], {}), '(storage_name)\n', (25979, 25993), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((26184, 26242), 'alfred.utils.directory_tree.DirectoryTree.extract_info_from_storage_name', 'DirectoryTree.extract_info_from_storage_name', (['storage_name'], {}), '(storage_name)\n', (26228, 26242), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((31013, 31063), 'os.makedirs', 'os.makedirs', (['(storage_dir / save_dir)'], {'exist_ok': '(True)'}), '(storage_dir / save_dir, exist_ok=True)\n', (31024, 31063), False, 'import os\n'), ((31990, 32003), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (32001, 32003), False, 'from collections import OrderedDict\n'), ((32021, 32034), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (32032, 32034), False, 'from collections import OrderedDict\n'), ((34968, 34986), 'alfred.utils.directory_tree.get_root', 'get_root', (['root_dir'], {}), '(root_dir)\n', (34976, 34986), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((35075, 35113), 'shutil.rmtree', 'shutil.rmtree', (["(storage_dir / 'summary')"], {}), "(storage_dir / 'summary')\n", (35088, 35113), False, 'import shutil\n'), ((37684, 37818), 'shutil.copyfile', 'copyfile', ([], {'src': "(seed_dirs_for_best_exp[0] / 'config.json')", 'dst': "(storage_dir /\n 'summary' / f'bestConfig_exp{best_experiment_num}.json')"}), "(src=seed_dirs_for_best_exp[0] / 'config.json', dst=storage_dir /\n 'summary' / f'bestConfig_exp{best_experiment_num}.json')\n", (37692, 37818), False, 'from shutil import copyfile\n'), ((7162, 7175), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7173, 7175), False, 'from collections import OrderedDict\n'), ((8207, 8221), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8218, 8221), False, 'import pickle\n'), ((21367, 21400), 'alfred.utils.config.load_dict_from_json', 'load_dict_from_json', (['visuals_file'], {}), '(visuals_file)\n', (21386, 21400), False, 'from alfred.utils.config import parse_bool, load_dict_from_json, save_dict_to_json, parse_log_level\n'), ((21562, 21605), 'alfred.utils.config.load_dict_from_json', 'load_dict_from_json', (['additional_curves_file'], {}), '(additional_curves_file)\n', (21581, 21605), False, 'from alfred.utils.config import parse_bool, load_dict_from_json, save_dict_to_json, parse_log_level\n'), ((25486, 25500), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (25497, 25500), False, 'import pickle\n'), ((26081, 26094), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26092, 26094), False, 'from collections import OrderedDict\n'), ((28871, 28904), 'alfred.utils.config.load_dict_from_json', 'load_dict_from_json', (['visuals_file'], {}), '(visuals_file)\n', (28890, 28904), False, 'from alfred.utils.config import parse_bool, load_dict_from_json, save_dict_to_json, parse_log_level\n'), ((29066, 29109), 'alfred.utils.config.load_dict_from_json', 'load_dict_from_json', (['additional_curves_file'], {}), '(additional_curves_file)\n', (29085, 29109), False, 'from alfred.utils.config import parse_bool, load_dict_from_json, save_dict_to_json, parse_log_level\n'), ((40333, 40373), 'alfred.utils.directory_tree.sanity_check_exists', 'sanity_check_exists', (['storage_dir', 'logger'], {}), '(storage_dir, logger)\n', (40352, 40373), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((5515, 5555), 'evaluate.get_evaluation_args', 'get_evaluation_args', ([], {'overwritten_args': '""""""'}), "(overwritten_args='')\n", (5534, 5555), False, 'from evaluate import evaluate, get_evaluation_args\n'), ((6028, 6049), 'evaluate.evaluate', 'evaluate', (['eval_config'], {}), '(eval_config)\n', (6036, 6049), False, 'from evaluate import evaluate, get_evaluation_args\n'), ((6440, 6464), 'numpy.min', 'np.min', (['performance_data'], {}), '(performance_data)\n', (6446, 6464), True, 'import numpy as np\n'), ((8850, 8863), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8861, 8863), False, 'from collections import OrderedDict\n'), ((9077, 9135), 'alfred.utils.directory_tree.DirectoryTree.extract_info_from_storage_name', 'DirectoryTree.extract_info_from_storage_name', (['storage_name'], {}), '(storage_name)\n', (9121, 9135), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((10157, 10170), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10168, 10170), False, 'from collections import OrderedDict\n'), ((10214, 10227), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10225, 10227), False, 'from collections import OrderedDict\n'), ((10273, 10286), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10284, 10286), False, 'from collections import OrderedDict\n'), ((10382, 10458), 'numpy.mean', 'np.mean', (['(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))'], {}), '(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))\n', (10389, 10458), True, 'import numpy as np\n'), ((16888, 16901), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16899, 16901), False, 'from collections import OrderedDict\n'), ((16938, 16951), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16949, 16951), False, 'from collections import OrderedDict\n'), ((21180, 21235), 'alfred.utils.directory_tree.DirectoryTree.extract_info_from_storage_name', 'DirectoryTree.extract_info_from_storage_name', (['path.name'], {}), '(path.name)\n', (21224, 21235), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((28684, 28739), 'alfred.utils.directory_tree.DirectoryTree.extract_info_from_storage_name', 'DirectoryTree.extract_info_from_storage_name', (['path.name'], {}), '(path.name)\n', (28728, 28739), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((38333, 38351), 'alfred.utils.directory_tree.get_root', 'get_root', (['root_dir'], {}), '(root_dir)\n', (38341, 38351), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((40710, 40740), 'pathlib.Path', 'Path', (['benchmark_args.from_file'], {}), '(benchmark_args.from_file)\n', (40714, 40740), False, 'from pathlib import Path\n'), ((40776, 40806), 'pathlib.Path', 'Path', (['benchmark_args.from_file'], {}), '(benchmark_args.from_file)\n', (40780, 40806), False, 'from pathlib import Path\n'), ((40881, 40911), 'pathlib.Path', 'Path', (['benchmark_args.from_file'], {}), '(benchmark_args.from_file)\n', (40885, 40911), False, 'from pathlib import Path\n'), ((42292, 42325), 'alfred.utils.directory_tree.get_root', 'get_root', (['benchmark_args.root_dir'], {}), '(benchmark_args.root_dir)\n', (42300, 42325), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((6541, 6565), 'numpy.max', 'np.max', (['performance_data'], {}), '(performance_data)\n', (6547, 6565), True, 'import numpy as np\n'), ((9023, 9036), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9034, 9036), False, 'from collections import OrderedDict\n'), ((10536, 10611), 'numpy.std', 'np.std', (['(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))'], {}), '(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))\n', (10542, 10611), True, 'import numpy as np\n'), ((10893, 10969), 'numpy.mean', 'np.mean', (['(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))'], {}), '(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))\n', (10900, 10969), True, 'import numpy as np\n'), ((19440, 19487), 'numpy.stack', 'np.stack', (['y_data[outer_key][inner_key]'], {'axis': '(-1)'}), '(y_data[outer_key][inner_key], axis=-1)\n', (19448, 19487), True, 'import numpy as np\n'), ((19622, 19717), 'alfred.utils.stats.get_95_confidence_interval_of_sequence', 'get_95_confidence_interval_of_sequence', ([], {'list_of_samples': 'y_data_samples', 'method': 'y_error_bars'}), '(list_of_samples=y_data_samples,\n method=y_error_bars)\n', (19660, 19717), False, 'from alfred.utils.stats import get_95_confidence_interval_of_sequence, get_95_confidence_interval\n'), ((20765, 20785), 'numpy.array', 'np.array', (['axes_shape'], {}), '(axes_shape)\n', (20773, 20785), True, 'import numpy as np\n'), ((28269, 28289), 'numpy.array', 'np.array', (['axes_shape'], {}), '(axes_shape)\n', (28277, 28289), True, 'import numpy as np\n'), ((32549, 32567), 'alfred.utils.directory_tree.get_root', 'get_root', (['root_dir'], {}), '(root_dir)\n', (32557, 32567), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((34139, 34157), 'alfred.utils.directory_tree.get_root', 'get_root', (['root_dir'], {}), '(root_dir)\n', (34147, 34157), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((43151, 43184), 'alfred.utils.directory_tree.get_root', 'get_root', (['benchmark_args.root_dir'], {}), '(benchmark_args.root_dir)\n', (43159, 43184), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((6642, 6667), 'numpy.mean', 'np.mean', (['performance_data'], {}), '(performance_data)\n', (6649, 6667), True, 'import numpy as np\n'), ((11702, 11773), 'alfred.utils.stats.get_95_confidence_interval', 'get_95_confidence_interval', ([], {'samples': 'scores_samples', 'method': 'y_error_bars'}), '(samples=scores_samples, method=y_error_bars)\n', (11728, 11773), False, 'from alfred.utils.stats import get_95_confidence_interval_of_sequence, get_95_confidence_interval\n'), ((19034, 19081), 'numpy.stack', 'np.stack', (['y_data[outer_key][inner_key]'], {'axis': '(-1)'}), '(y_data[outer_key][inner_key], axis=-1)\n', (19042, 19081), True, 'import numpy as np\n'), ((32199, 32217), 'alfred.utils.directory_tree.get_root', 'get_root', (['root_dir'], {}), '(root_dir)\n', (32207, 32217), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((33485, 33503), 'alfred.utils.directory_tree.get_root', 'get_root', (['root_dir'], {}), '(root_dir)\n', (33493, 33503), False, 'from alfred.utils.directory_tree import DirectoryTree, get_root, sanity_check_exists\n'), ((11107, 11212), 'numpy.quantile', 'np.quantile', ([], {'a': '(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))', 'q': '(0.0 + quantile)'}), '(a=scores[outer_key][inner_key] / (reference_means[inner_key] + \n 1e-08), q=0.0 + quantile)\n', (11118, 11212), True, 'import numpy as np\n'), ((11348, 11453), 'numpy.quantile', 'np.quantile', ([], {'a': '(scores[outer_key][inner_key] / (reference_means[inner_key] + 1e-08))', 'q': '(1.0 - quantile)'}), '(a=scores[outer_key][inner_key] / (reference_means[inner_key] + \n 1e-08), q=1.0 - quantile)\n', (11359, 11453), True, 'import numpy as np\n'), ((19145, 19192), 'numpy.stack', 'np.stack', (['y_data[outer_key][inner_key]'], {'axis': '(-1)'}), '(y_data[outer_key][inner_key], axis=-1)\n', (19153, 19192), True, 'import numpy as np\n'), ((6933, 6981), 'numpy.mean', 'np.mean', (['performance_data[eighty_percent_index:]'], {}), '(performance_data[eighty_percent_index:])\n', (6940, 6981), True, 'import numpy as np\n')]
|
import json
import os
from argparse import ArgumentParser
import pandas as pd
import time
import torch
from torch.utils.data import DataLoader
from torch import nn, optim
import learn2learn as l2l
from AmazonDataset import AmazonDataset
from Model import Model
from evaluate import metrics
if __name__ == '__main__':
parser = ArgumentParser()
# ------------------------------------Dataset Parameters------------------------------------
parser.add_argument('--dataset',
default='Musical_Instruments',
help='name of the dataset')
parser.add_argument('--processed_path',
default='/home/yxk/share/yinxiangkun/processed/cold_start/ordinary/Musical_Instruments/',
help="preprocessed path of the raw data")
# ------------------------------------Experiment Setup------------------------------------
parser.add_argument('--device',
default='cuda:0',
help="using device(cpu, cuda:0, cuda:1, ...)")
parser.add_argument('--epochs',
default=20,
type=int,
help="training epochs")
    parser.add_argument('--fast_lr',
                        default=0.5,
                        type=float,
                        help='learning rate for fast adaptation')
    parser.add_argument('--meta_lr',
                        default=0.01,
                        type=float,
                        help='learning rate for meta learning')
parser.add_argument('--batch_size',
default=100,
type=int,
help='batch size for training')
# ------------------------------------Model Hyper Parameters------------------------------------
parser.add_argument('--word_embedding_size',
default=128,
type=int,
help="word embedding size")
parser.add_argument('--doc_embedding_size',
default=128,
type=int,
help="LSTM hidden size")
parser.add_argument('--attention_hidden_dim',
default=384,
type=int,
help="LSTM hidden size")
parser.add_argument('--margin',
default=1.,
type=float,
help="Margin Loss margin")
# ------------------------------------Data Preparation------------------------------------
config = parser.parse_args()
train_path = os.path.join(config.processed_path, "{}_train.csv".format(config.dataset))
test_path = os.path.join(config.processed_path, "{}_test.csv".format(config.dataset))
query_path = os.path.join(config.processed_path, '{}_query.json'.format(config.dataset))
asin_sample_path = config.processed_path + '{}_asin_sample.json'.format(config.dataset)
word_dict_path = os.path.join(config.processed_path, '{}_word_dict.json'.format(config.dataset))
query_dict = json.load(open(query_path, 'r'))
asin_dict = json.load(open(asin_sample_path, 'r'))
word_dict = json.load(open(word_dict_path, 'r'))
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
full_df: pd.DataFrame = pd.concat([train_df, test_df], ignore_index=True)
init = AmazonDataset.init(full_df)
train_support = full_df[full_df["metaFilter"] == "TrainSupport"]
train_query = full_df[full_df["metaFilter"] == "TrainQuery"]
test_support = full_df[full_df["metaFilter"] == "TestSupport"]
test_query = full_df[full_df["metaFilter"] == "TestQuery"]
train_dataset = AmazonDataset(train_support, train_query, train_df, query_dict, asin_dict, init['item_map'], config.device)
test_dataset = AmazonDataset(test_support, test_query, train_df, query_dict, asin_dict, init['item_map'], config.device)
train_loader = DataLoader(train_dataset, drop_last=True, batch_size=1, shuffle=True, num_workers=0,
collate_fn=AmazonDataset.collect_fn)
# valid_loader = DataLoader(valid_dataset, batch_size=config['dataset']['batch_size'], shuffle=False, num_workers=0)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0,
collate_fn=AmazonDataset.collect_fn)
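    # Note: batch_size=1 because each DataLoader element is one user's meta-task
    # (its support and query interactions); the --batch_size argument above
    # appears to be unused in this script.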
# ------------------------------------Model Construction------------------------------------
# word_dict starts from 1
model = Model(len(init['users']), len(init['items']), config.word_embedding_size)
model.to(config.device)
meta = l2l.algorithms.MAML(model, lr=config.fast_lr)
criterion = nn.MSELoss()
optimizer = optim.Adagrad(model.parameters(), lr=config.meta_lr)
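    # Meta-learning setup: meta.clone() yields a per-task learner whose adapt()
    # calls take inner-loop gradient steps at fast_lr, while `optimizer` updates
    # the shared weights at meta_lr from the query-set loss (outer loop).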
# ------------------------------------Train------------------------------------
step = 0
loss = torch.tensor(0.).cuda()
for epoch in range(config.epochs):
start_time = time.time()
for _, (user_id, support_item_ids, support_negative_item_ids, query_item_ids, query_negative_item_ids)\
in enumerate(train_loader):
learner = meta.clone(allow_nograd=True)
learner.module.set_local()
# ---------Local Update---------
for i in range(len(support_item_ids)):
# ---------Construct Batch---------
score = learner(user_id, support_item_ids[i])
learner.adapt(criterion(score, torch.tensor(1.).cuda()))
# print(learner.module.global_parameters[0].grad)
# ---------Global Update---------
learner.module.set_global()
loss = torch.tensor(0.).cuda()
for i in range(len(query_item_ids)):
# ---------Construct Batch---------
score = learner(user_id, query_item_ids[i])
loss += criterion(score, torch.tensor(1.).to(config.device))
step += 1
loss = loss / len(query_item_ids)
optimizer.zero_grad()
loss.backward()
# print(model.global_parameters[0].grad.sum())
optimizer.step()
print("loss:{:.3f}".format(float(loss)))
Mrr, Hr, Ndcg = metrics(meta, test_dataset, test_loader, 20, criterion) # TODO
print(
"Running Epoch {:03d}/{:03d}".format(epoch + 1, config.epochs),
"loss:{:.3f}".format(float(loss)),
"Mrr {:.3f}, Hr {:.3f}, Ndcg {:.3f}".format(Mrr, Hr, Ndcg),
"costs:", time.strftime("%H: %M: %S", time.gmtime(time.time() - start_time)))
|
[
"evaluate.metrics"
] |
[((335, 351), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (349, 351), False, 'from argparse import ArgumentParser\n'), ((3204, 3227), 'pandas.read_csv', 'pd.read_csv', (['train_path'], {}), '(train_path)\n', (3215, 3227), True, 'import pandas as pd\n'), ((3242, 3264), 'pandas.read_csv', 'pd.read_csv', (['test_path'], {}), '(test_path)\n', (3253, 3264), True, 'import pandas as pd\n'), ((3293, 3342), 'pandas.concat', 'pd.concat', (['[train_df, test_df]'], {'ignore_index': '(True)'}), '([train_df, test_df], ignore_index=True)\n', (3302, 3342), True, 'import pandas as pd\n'), ((3355, 3382), 'AmazonDataset.AmazonDataset.init', 'AmazonDataset.init', (['full_df'], {}), '(full_df)\n', (3373, 3382), False, 'from AmazonDataset import AmazonDataset\n'), ((3669, 3780), 'AmazonDataset.AmazonDataset', 'AmazonDataset', (['train_support', 'train_query', 'train_df', 'query_dict', 'asin_dict', "init['item_map']", 'config.device'], {}), "(train_support, train_query, train_df, query_dict, asin_dict,\n init['item_map'], config.device)\n", (3682, 3780), False, 'from AmazonDataset import AmazonDataset\n'), ((3796, 3905), 'AmazonDataset.AmazonDataset', 'AmazonDataset', (['test_support', 'test_query', 'train_df', 'query_dict', 'asin_dict', "init['item_map']", 'config.device'], {}), "(test_support, test_query, train_df, query_dict, asin_dict,\n init['item_map'], config.device)\n", (3809, 3905), False, 'from AmazonDataset import AmazonDataset\n'), ((3921, 4046), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'drop_last': '(True)', 'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': '(0)', 'collate_fn': 'AmazonDataset.collect_fn'}), '(train_dataset, drop_last=True, batch_size=1, shuffle=True,\n num_workers=0, collate_fn=AmazonDataset.collect_fn)\n', (3931, 4046), False, 'from torch.utils.data import DataLoader\n'), ((4212, 4321), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)', 'collate_fn': 'AmazonDataset.collect_fn'}), '(test_dataset, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=AmazonDataset.collect_fn)\n', (4222, 4321), False, 'from torch.utils.data import DataLoader\n'), ((4600, 4645), 'learn2learn.algorithms.MAML', 'l2l.algorithms.MAML', (['model'], {'lr': 'config.fast_lr'}), '(model, lr=config.fast_lr)\n', (4619, 4645), True, 'import learn2learn as l2l\n'), ((4663, 4675), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4673, 4675), False, 'from torch import nn, optim\n'), ((4938, 4949), 'time.time', 'time.time', ([], {}), '()\n', (4947, 4949), False, 'import time\n'), ((6209, 6264), 'evaluate.metrics', 'metrics', (['meta', 'test_dataset', 'test_loader', '(20)', 'criterion'], {}), '(meta, test_dataset, test_loader, 20, criterion)\n', (6216, 6264), False, 'from evaluate import metrics\n'), ((4854, 4871), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4866, 4871), False, 'import torch\n'), ((5649, 5666), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5661, 5666), False, 'import torch\n'), ((6545, 6556), 'time.time', 'time.time', ([], {}), '()\n', (6554, 6556), False, 'import time\n'), ((5876, 5893), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (5888, 5893), False, 'import torch\n'), ((5455, 5472), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (5467, 5472), False, 'import torch\n')]
|
from train import train
from test import test
from evaluate import evaluate
if __name__ == "__main__":
#groupings = ['normal','abnormal']
groupings = ['normal','entering','abnormal']
cfg = {
'experiment': 'normal',
'train_folder': 'data/train1715/normal/',
'test_folder': 'data/test500/',
'val_folder': 'data/val143/',
'image_size': (64,192),
'max_epoch': 200,
'gpus': 1,
'lr': 0.0005,
'batch_size': 32,
'nc': 1,
'nz': 8,
'nfe': 32,
'nfd': 32,
'device': 'cuda',
}
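    # Config keys (inferred from usage, not documented here): nc is likely the
    # image channel count, nz the latent dimension, and nfe/nfd the encoder/
    # decoder feature widths of the autoencoder-style model.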
#train(cfg)
test(cfg,dataset='test',groupings=groupings,save=False)
test(cfg,dataset='val', groupings=groupings)
cfg['train_folder'] = 'data/train1715/'
test(cfg,dataset='train', groupings=groupings)
th = evaluate(cfg, dataset='train', groupings=groupings) # find a suitable threshold using training set
evaluate(cfg, dataset='test', threshold=th, groupings=groupings)
cfg = {
'experiment': 'all',
'train_folder': 'data/train1715/',
'test_folder': 'data/test500/',
'val_folder': 'data/val143/',
'image_size': (64,192),
'max_epoch': 200,
'gpus': 1,
'lr': 0.0005,
'batch_size': 32,
'nc': 1,
'nz': 8,
'nfe': 32,
'nfd': 32,
'device': 'cuda',
}
#train(cfg)
test(cfg,dataset='test', groupings=groupings)
test(cfg,dataset='val', groupings=groupings)
test(cfg,dataset='train', groupings=groupings)
th = evaluate(cfg, dataset='train', groupings=groupings)
evaluate(cfg, dataset='test', threshold=th, groupings=groupings)
'''
#
cfg = {
'experiment': 'extensive',
'train_folder': 'data/train1715/',
'test_folder': 'data/test500/',
'val_folder': 'data/val143/',
'image_size': (64,192),
'max_epoch': 50,
'gpus': 1,
'lr': 0.0005,
'batch_size': 32,
'nc': 1,
'nz': 8,
'nfe': 32,
'nfd': 32,
'device': 'cuda',
}
#train(cfg)
test(cfg,dataset='test', groupings=groupings)
test(cfg,dataset='val', groupings=groupings)
#test(cfg,dataset='extensive', groupings=groupings)
test(cfg,dataset='train', groupings=groupings)
th = evaluate(cfg, dataset='train', groupings=groupings)
evaluate(cfg, dataset='test', threshold=th, groupings=groupings)
'''
|
[
"evaluate.evaluate"
] |
[((663, 721), 'test.test', 'test', (['cfg'], {'dataset': '"""test"""', 'groupings': 'groupings', 'save': '(False)'}), "(cfg, dataset='test', groupings=groupings, save=False)\n", (667, 721), False, 'from test import test\n'), ((723, 768), 'test.test', 'test', (['cfg'], {'dataset': '"""val"""', 'groupings': 'groupings'}), "(cfg, dataset='val', groupings=groupings)\n", (727, 768), False, 'from test import test\n'), ((816, 863), 'test.test', 'test', (['cfg'], {'dataset': '"""train"""', 'groupings': 'groupings'}), "(cfg, dataset='train', groupings=groupings)\n", (820, 863), False, 'from test import test\n'), ((872, 923), 'evaluate.evaluate', 'evaluate', (['cfg'], {'dataset': '"""train"""', 'groupings': 'groupings'}), "(cfg, dataset='train', groupings=groupings)\n", (880, 923), False, 'from evaluate import evaluate\n'), ((975, 1039), 'evaluate.evaluate', 'evaluate', (['cfg'], {'dataset': '"""test"""', 'threshold': 'th', 'groupings': 'groupings'}), "(cfg, dataset='test', threshold=th, groupings=groupings)\n", (983, 1039), False, 'from evaluate import evaluate\n'), ((1502, 1548), 'test.test', 'test', (['cfg'], {'dataset': '"""test"""', 'groupings': 'groupings'}), "(cfg, dataset='test', groupings=groupings)\n", (1506, 1548), False, 'from test import test\n'), ((1552, 1597), 'test.test', 'test', (['cfg'], {'dataset': '"""val"""', 'groupings': 'groupings'}), "(cfg, dataset='val', groupings=groupings)\n", (1556, 1597), False, 'from test import test\n'), ((1601, 1648), 'test.test', 'test', (['cfg'], {'dataset': '"""train"""', 'groupings': 'groupings'}), "(cfg, dataset='train', groupings=groupings)\n", (1605, 1648), False, 'from test import test\n'), ((1657, 1708), 'evaluate.evaluate', 'evaluate', (['cfg'], {'dataset': '"""train"""', 'groupings': 'groupings'}), "(cfg, dataset='train', groupings=groupings)\n", (1665, 1708), False, 'from evaluate import evaluate\n'), ((1713, 1777), 'evaluate.evaluate', 'evaluate', (['cfg'], {'dataset': '"""test"""', 'threshold': 'th', 'groupings': 'groupings'}), "(cfg, dataset='test', threshold=th, groupings=groupings)\n", (1721, 1777), False, 'from evaluate import evaluate\n')]
|
import os
import re
import sys
sys.path.append('.')
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from lib.network.rtpose_vgg import get_model
from lib.network import im_transform
from lib.config import update_config, cfg
from evaluate.coco_eval import get_outputs, handle_paf_and_heat
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans
from lib.utils.paf_to_pose import paf_to_pose_cpp
import ffmpeg
def check_rotation(path_video_file):
    # ffmpeg.probe returns the video file's meta-data as a dictionary;
    # meta_dict['streams'][0]['tags']['rotate'] is the key we are looking for
    meta_dict = ffmpeg.probe(path_video_file)
    # Videos without a rotate tag need no correction, so default to 0 degrees
    # instead of raising a KeyError, and parse the tag once.
    rotation = int(meta_dict['streams'][0].get('tags', {}).get('rotate', 0))
    rotateCode = None
    if rotation == 90:
        rotateCode = cv2.ROTATE_90_CLOCKWISE
    elif rotation == 180:
        rotateCode = cv2.ROTATE_180
    elif rotation == 270:
        rotateCode = cv2.ROTATE_90_COUNTERCLOCKWISE
    return rotateCode
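# Illustrative (hypothetical) shape of the probe output the function indexes:
#   {'streams': [{'codec_type': 'video', 'tags': {'rotate': '90', ...}, ...}], ...}
# Phones often record sideways and store the correction as this 'rotate' tag.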
def correct_rotation(frame, rotateCode):
return cv2.rotate(frame, rotateCode)
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', help='experiment configure file name',
default='./experiments/vgg19_368x368_sgd.yaml', type=str)
parser.add_argument('--weight', type=str,
default='pose_model.pth')
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# update config file
update_config(cfg, args)
model = get_model('vgg19')
model.load_state_dict(torch.load(args.weight))
model.cuda()
model.float()
model.eval()
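# Per-frame inference pipeline: get_outputs() runs the VGG19-based rtpose
# network to produce part-affinity fields (paf) and keypoint heatmaps,
# paf_to_pose_cpp() groups them into Human objects, and draw_humans() renders
# the skeletons onto the frame.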
rotate_code = cv2.ROTATE_180
if __name__ == "__main__":
    video_path = input("Enter video path: ")
video_capture_dummy = cv2.VideoCapture(video_path)
ret,oriImg = video_capture_dummy.read()
shape_tuple = tuple(oriImg.shape[1::-1])
print("Shape of image is ",shape_tuple)
rotate_code = check_rotation(video_path)
video_capture_dummy.release()
video_capture = cv2.VideoCapture(video_path)
##New stuff
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vid_out = cv2.VideoWriter('output.avi', fourcc, 20.0, shape_tuple)
###
proc_frame_list = []
oriImg_list = []
    while True:
        # Capture frame-by-frame; read() returns ret=False once the stream ends
        ret, oriImg = video_capture.read()
        if not ret:
            break
        if rotate_code is not None:
            oriImg = correct_rotation(oriImg, rotate_code)
        oriImg_list.append(oriImg)
        cv2.imshow('Video', oriImg)
        # vid_out.write(out)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
video_capture.release()
cv2.destroyAllWindows()
print("Number of frames",len(oriImg_list))
count = 0
for oriImg in oriImg_list:
count+=1
if count%50 == 0:
print(count, "frames processed")
        # Frames in oriImg_list were validated on capture, so no bare except is needed
        shape_dst = np.min(oriImg.shape[0:2])  # shorter image side (currently unused)
with torch.no_grad():
paf, heatmap, imscale = get_outputs(
oriImg, model, 'rtpose')
humans = paf_to_pose_cpp(heatmap, paf, cfg)
out = draw_humans(oriImg, humans)
vid_out.write(out)
    # When everything is done, release the output writer (the capture was
    # already released above)
    vid_out.release()
|
[
"evaluate.coco_eval.get_outputs"
] |
[((31, 51), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (46, 51), False, 'import sys\n'), ((1519, 1544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1542, 1544), False, 'import argparse\n'), ((2009, 2033), 'lib.config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (2022, 2033), False, 'from lib.config import update_config, cfg\n'), ((2046, 2064), 'lib.network.rtpose_vgg.get_model', 'get_model', (['"""vgg19"""'], {}), "('vgg19')\n", (2055, 2064), False, 'from lib.network.rtpose_vgg import get_model\n'), ((920, 949), 'ffmpeg.probe', 'ffmpeg.probe', (['path_video_file'], {}), '(path_video_file)\n', (932, 949), False, 'import ffmpeg\n'), ((1478, 1507), 'cv2.rotate', 'cv2.rotate', (['frame', 'rotateCode'], {}), '(frame, rotateCode)\n', (1488, 1507), False, 'import cv2\n'), ((2092, 2115), 'torch.load', 'torch.load', (['args.weight'], {}), '(args.weight)\n', (2102, 2115), False, 'import torch\n'), ((2282, 2310), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (2298, 2310), False, 'import cv2\n'), ((2548, 2576), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (2564, 2576), False, 'import cv2\n'), ((2611, 2642), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2633, 2642), False, 'import cv2\n'), ((2658, 2714), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.avi"""', 'fourcc', '(20.0)', 'shape_tuple'], {}), "('output.avi', fourcc, 20.0, shape_tuple)\n", (2673, 2714), False, 'import cv2\n'), ((3263, 3286), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3284, 3286), False, 'import cv2\n'), ((3779, 3813), 'lib.utils.paf_to_pose.paf_to_pose_cpp', 'paf_to_pose_cpp', (['heatmap', 'paf', 'cfg'], {}), '(heatmap, paf, cfg)\n', (3794, 3813), False, 'from lib.utils.paf_to_pose import paf_to_pose_cpp\n'), ((3849, 3876), 'lib.utils.common.draw_humans', 'draw_humans', (['oriImg', 'humans'], {}), '(oriImg, humans)\n', (3860, 3876), False, 'from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans\n'), ((3051, 3078), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'oriImg'], {}), "('Video', oriImg)\n", (3061, 3078), False, 'import cv2\n'), ((3519, 3544), 'numpy.min', 'np.min', (['oriImg.shape[0:2]'], {}), '(oriImg.shape[0:2])\n', (3525, 3544), True, 'import numpy as np\n'), ((3624, 3639), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3637, 3639), False, 'import torch\n'), ((3681, 3717), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['oriImg', 'model', '"""rtpose"""'], {}), "(oriImg, model, 'rtpose')\n", (3692, 3717), False, 'from evaluate.coco_eval import get_outputs, handle_paf_and_heat\n'), ((3139, 3153), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3150, 3153), False, 'import cv2\n')]
|
"""
This script demonstrates initialisation, training, evaluation, and forecasting of ForecastNet. The dataset from the
time-invariance test in section 6.1 of the ForecastNet paper is used for this demonstration.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead Time-Series Forecasting"
by <NAME>, <NAME>, and <NAME>
Link to the paper: https://arxiv.org/abs/2002.04155
"""
import numpy as np
import matplotlib.pyplot as plt
from forecastNet import forecastNet
from train import train
from evaluate import evaluate
from dataHelpers import generate_data
#Use a fixed seed for reproducible results
np.random.seed(1)
# Generate the dataset
train_x, train_y, test_x, test_y, valid_x, valid_y, period = generate_data(T=2750, period = 50, n_seqs = 4)
# train_data, test_data, valid_data, period = generate_data(T=1000, period = 10)
# Model parameters
model_type = 'dense2' #'dense' or 'conv', 'dense2' or 'conv2'
in_seq_length = 2 * period
out_seq_length = period
hidden_dim = 24
input_dim = 1
output_dim = 1
learning_rate = 0.0001
n_epochs= 100
batch_size = 16
# Initialise model
fcstnet = forecastNet(in_seq_length=in_seq_length, out_seq_length=out_seq_length, input_dim=input_dim,
hidden_dim=hidden_dim, output_dim=output_dim, model_type = model_type, batch_size = batch_size,
n_epochs = n_epochs, learning_rate = learning_rate, save_file = './forecastnet.pt')
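# The network conditions on a window of two full periods (in_seq_length) and
# forecasts one period ahead (out_seq_length), matching the synthetic data above.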
# Train the model
training_costs, validation_costs = train(fcstnet, train_x, train_y, valid_x, valid_y, restore_session=False)
# Plot the training curves
plt.figure()
plt.plot(training_costs)
plt.plot(validation_costs)
# Evaluate the model
mase, smape, nrmse = evaluate(fcstnet, test_x, test_y, return_lists=False)
print('')
print('MASE:', mase)
print('SMAPE:', smape)
print('NRMSE:', nrmse)
# Generate and plot forecasts for various samples from the test dataset
samples = [0, 500, 1039]
# Models with a Gaussian Mixture Density Component output
if model_type == 'dense' or model_type == 'conv':
# Generate a set of n_samples forecasts (Monte Carlo Forecasts)
num_forecasts = 10
y_pred = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
mu = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
sigma = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
for i in range(num_forecasts):
y_pred[:, :, :, i], mu[:, :, :, i], sigma[:, :, :, i] = fcstnet.forecast(test_x[:, samples, :])
s_mean = np.mean(y_pred, axis=3)
s_std = np.std(y_pred, axis=3)
botVarLine = s_mean - s_std
topVarLine = s_mean + s_std
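    # botVarLine/topVarLine bound a +/- one-standard-deviation band around the
    # Monte Carlo forecast mean; it is drawn below as the shaded uncertainty region.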
for i in range(len(samples)):
plt.figure()
plt.plot(np.arange(0, in_seq_length), test_x[:, samples[i], 0],
'-o', label='input')
plt.plot(np.arange(in_seq_length, in_seq_length + out_seq_length), test_y[:, samples[i], 0],
'-o', label='data')
plt.plot(np.arange(in_seq_length, in_seq_length + out_seq_length), s_mean[:, i, 0],
'-*', label='forecast')
plt.fill_between(np.arange(in_seq_length, in_seq_length + out_seq_length),
botVarLine[:, i, 0], topVarLine[:, i, 0],
color='gray', alpha=0.3, label='Uncertainty')
plt.legend()
# Models with a linear output
elif model_type == 'dense2' or model_type == 'conv2':
# Generate a forecast
y_pred = fcstnet.forecast(test_x[:,samples,:])
for i in range(len(samples)):
# Plot the forecast
plt.figure()
plt.plot(np.arange(0, fcstnet.in_seq_length),
test_x[:, samples[i], 0],
'o-', label='test_data')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
test_y[:, samples[i], 0],
'o-')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
y_pred[:, i, 0],
'*-', linewidth=0.7, label='mean')
plt.show()
|
[
"evaluate.evaluate"
] |
[((656, 673), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (670, 673), True, 'import numpy as np\n'), ((759, 801), 'dataHelpers.generate_data', 'generate_data', ([], {'T': '(2750)', 'period': '(50)', 'n_seqs': '(4)'}), '(T=2750, period=50, n_seqs=4)\n', (772, 801), False, 'from dataHelpers import generate_data\n'), ((1148, 1422), 'forecastNet.forecastNet', 'forecastNet', ([], {'in_seq_length': 'in_seq_length', 'out_seq_length': 'out_seq_length', 'input_dim': 'input_dim', 'hidden_dim': 'hidden_dim', 'output_dim': 'output_dim', 'model_type': 'model_type', 'batch_size': 'batch_size', 'n_epochs': 'n_epochs', 'learning_rate': 'learning_rate', 'save_file': '"""./forecastnet.pt"""'}), "(in_seq_length=in_seq_length, out_seq_length=out_seq_length,\n input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim,\n model_type=model_type, batch_size=batch_size, n_epochs=n_epochs,\n learning_rate=learning_rate, save_file='./forecastnet.pt')\n", (1159, 1422), False, 'from forecastNet import forecastNet\n'), ((1523, 1596), 'train.train', 'train', (['fcstnet', 'train_x', 'train_y', 'valid_x', 'valid_y'], {'restore_session': '(False)'}), '(fcstnet, train_x, train_y, valid_x, valid_y, restore_session=False)\n', (1528, 1596), False, 'from train import train\n'), ((1624, 1636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1634, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1661), 'matplotlib.pyplot.plot', 'plt.plot', (['training_costs'], {}), '(training_costs)\n', (1645, 1661), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1688), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_costs'], {}), '(validation_costs)\n', (1670, 1688), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1785), 'evaluate.evaluate', 'evaluate', (['fcstnet', 'test_x', 'test_y'], {'return_lists': '(False)'}), '(fcstnet, test_x, test_y, return_lists=False)\n', (1740, 1785), False, 'from evaluate import evaluate\n'), ((4105, 4115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4113, 4115), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2591), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(3)'}), '(y_pred, axis=3)\n', (2575, 2591), True, 'import numpy as np\n'), ((2604, 2626), 'numpy.std', 'np.std', (['y_pred'], {'axis': '(3)'}), '(y_pred, axis=3)\n', (2610, 2626), True, 'import numpy as np\n'), ((2734, 2746), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2744, 2746), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3369), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3367, 3369), True, 'import matplotlib.pyplot as plt\n'), ((2764, 2791), 'numpy.arange', 'np.arange', (['(0)', 'in_seq_length'], {}), '(0, in_seq_length)\n', (2773, 2791), True, 'import numpy as np\n'), ((2874, 2930), 'numpy.arange', 'np.arange', (['in_seq_length', '(in_seq_length + out_seq_length)'], {}), '(in_seq_length, in_seq_length + out_seq_length)\n', (2883, 2930), True, 'import numpy as np\n'), ((3012, 3068), 'numpy.arange', 'np.arange', (['in_seq_length', '(in_seq_length + out_seq_length)'], {}), '(in_seq_length, in_seq_length + out_seq_length)\n', (3021, 3068), True, 'import numpy as np\n'), ((3153, 3209), 'numpy.arange', 'np.arange', (['in_seq_length', '(in_seq_length + out_seq_length)'], {}), '(in_seq_length, in_seq_length + out_seq_length)\n', (3162, 3209), True, 'import numpy as np\n'), ((3602, 3614), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3612, 3614), True, 'import matplotlib.pyplot as plt\n'), ((3632, 3667), 'numpy.arange', 'np.arange', (['(0)', 'fcstnet.in_seq_length'], {}), '(0, fcstnet.in_seq_length)\n', (3641, 3667), True, 'import numpy as np\n'), ((3771, 3856), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (3780, 3856), True, 'import numpy as np\n'), ((3936, 4021), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (3945, 4021), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
This script launches model training in separate processes, one for each GPU.
"""
import fire
import logging
import multiprocessing
import os
try:
    import streamlit
except ImportError:  # optional; only needed for the find_lr command
    streamlit = None
import sys
import tarfile
import torch
from config import parse_config
from corpus import (
LanguageCorpus, BertCorpus, LowResolutionEmbeddingCorpus,
KeepRandomPercentCorpus, DropNthTokenCorpus)
from evaluate import beam_search
from train import build_learner, train_worker, restore
logger = logging.getLogger('fr2en')
def extract(fn, output_dir):
"""
Extract a .tar, .tgz or .gz file to `output_dir`.
"""
    if fn.endswith('.tar.gz') or fn.endswith('.tgz'):
        with tarfile.open(fn, 'r:gz') as f:
            f.extractall(output_dir)
    elif fn.endswith('.tar'):
        with tarfile.open(fn, 'r') as f:
            f.extractall(output_dir)
    elif fn.endswith('.gz'):
        # Plain gzip files (e.g. the .xliff.gz news corpora) are decompressed
        # into output_dir, dropping the .gz suffix.
        import gzip
        import shutil
        out_fn = os.path.join(output_dir, os.path.basename(fn)[:-3])
        with gzip.open(fn, 'rb') as f_in, open(out_fn, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
class PrepareData:
"""
This class encapsulates the commands for preparing various datasets.
"""
_urls = {
'ca-parliament-house':
'http://www.isi.edu/natural-language/download/hansard/'
'hansard.36.r2001-1a.house.debates.training.tar',
'ca-parliament-senate':
'http://www.isi.edu/natural-language/download/hansard/'
'hansard.36.r2001-1a.senate.debates.training.tar',
'commoncrawl':
'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz',
'europarl-de-en':
'http://www.statmt.org/europarl/v7/de-en.tgz',
'europarl-es-en':
'http://www.statmt.org/europarl/v7/es-en.tgz',
'europarl-it-en':
'http://www.statmt.org/europarl/v7/it-en.tgz',
'europarl-fr-en':
'http://www.statmt.org/europarl/v7/fr-en.tgz',
'europarl-sl-en':
'http://www.statmt.org/europarl/v7/sl-en.tgz',
'news2014':
'http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz',
'news2016-de-en':
'http://www.casmacat.eu/corpus/news-commentary/'
'news-commentary-v11.de-en.xliff.gz',
'news2016-fr-en':
'http://www.casmacat.eu/corpus/news-commentary/'
'news-commentary-v11.fr-en.xliff.gz',
'phrases-fr-en':
'https://expz.github.io/files/phrases.tgz',
}
_corpora = {
'ca-parliament-house-fr-en': {
'en': 'hansard.36/Release-2001.1a/sentence-pairs/house/debates/'
'development/training/hansard.36.1.house.debates',
'fr': 'hansard.36/Release-2001.1a/sentence-pairs/house/debates/'
'development/training/hansard.36.1.house.debates'
},
'ca-parliament-senate-fr-en': {
'en': 'hansard.36/Release-2001.1a/sentence-pairs/senate/debates/'
'development/training/hansard.36.1.senate.debates',
'fr': 'hansard.36/Release-2001.1a/sentence-pairs/senate/debates/'
'development/training/hansard.36.1.senate.debates'
},
'commoncrawl-fr-en': {
'en': 'commoncrawl.fr-en.en',
'fr': 'commoncrawl.fr-en.fr'
},
'europarl-de-en': {
'en': 'europarl-v7.de-en.en',
'de': 'europarl-v7.de-en.de'
},
'europarl-es-en': {
'en': 'europarl-v7.es-en.en',
'es': 'europarl-v7.es-en.es'
},
'europarl-it-en': {
'en': 'europarl-v7.it-en.en',
'it': 'europarl-v7.it-en.it'
},
'europarl-fr-en': {
'en': 'europarl-v7.fr-en.en',
'fr': 'europarl-v7.fr-en.fr'
},
'europarl-sl-en': {
'en': 'europarl-v7.sl-en.en',
'sl': 'europarl-v7.sl-en.sl'
},
'news2014-fr-en': {
'en': 'training/news-commentary-v9.fr-en.en',
'fr': 'training/news-commentary-v9.fr-en.fr'
},
'news2016-fr-en': {
'en': 'news-commentary-v11.fr-en.xliff',
'fr': 'news-commentary-v11.fr-en.xliff'
},
'phrases-fr-en': {
'en': 'phrases.en',
'fr': 'phrases.fr',
},
}
_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
_tmp_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data', 'downloads', 'tmp')
def _download(self, data, use_cache=False):
"""
Downloads the datasets listed in `data`. They should be strings which
are keys of the `self._urls` dictionary.
Datasets are downloaded to a directory common to all models. If they
already exist, they are not re-downloaded.
Extracts datasets as well.
"""
os.makedirs(self._tmp_dir, exist_ok=True)
for dataset in data:
if dataset not in self._urls:
raise ValueError(f'Unknown dataset: {dataset}')
fn = self._urls[dataset].split('/')[-1]
out_fn = os.path.join(self._data_dir, 'downloads', fn)
if not os.path.isfile(out_fn):
logger.info(f'Downloading dataset {self._urls[dataset]}.')
os.system(f'wget -4 -O {out_fn} {self._urls[dataset]}')
logger.info(f'Extracting dataset {out_fn}.')
extract(out_fn, self._tmp_dir)
elif not use_cache:
logger.info(f'Extracting dataset {out_fn}.')
extract(out_fn, self._tmp_dir)
else:
logger.info(f'Using cached dataset {out_fn}.')
def _datafiles(self, lang, data):
"""
Returns a list of dictionaries which have the dataset file paths
for each language.
"""
# TODO: Implement special handling of Canadian Parliament proceedings.
# The zip files extract to hundreds of small *.gz files.
datafiles = []
for dataset in data:
if 'ca-parliament' in dataset:
raise NotImplementedError(
'Canadian Parliament datasets are not yet supported.')
if dataset[-3:] != '-en':
dataset = f'{dataset}-{lang}-en'
datafiles.append({
'en':
os.path.join(self._tmp_dir, self._corpora[dataset]['en']),
lang:
os.path.join(self._tmp_dir, self._corpora[dataset][lang])
})
return datafiles
def list_datasets(self):
"""Lists the available datasets."""
print('\n'.join(self._urls.keys()))
def standard(self, lang='fr', name=None, data=['news2014'], max_length=200,
max_size=None, shuffle=True, joint_vocab_size=40000,
valid_size=0, use_cache=False):
"""
Creates a dataset of sequences of indices into a joint BPE vocabulary
generated by `subword-nmt`.
"""
name = name if name else f'standard-{lang}-en'
self._download(data, use_cache)
datafiles = self._datafiles(lang, data)
ds = LanguageCorpus(name, shuffle=shuffle, max_length=max_length)
ds.create(datafiles, joint_vocab_size, max_size=max_size,
valid_size=valid_size, use_cache=use_cache)
def bert(self, lang='fr', name=None, data=['news2014'], max_length=200,
max_size=None, shuffle=True, valid_size=0, use_cache=False):
"""
Creates a dataset of sequences of indices into the 100-language
multilingual, cased BERT BPE vocabulary.
"""
name = name if name else f'bert-{lang}-en'
self._download(data, use_cache)
datafiles = self._datafiles(lang, data)
ds = BertCorpus(name, shuffle=shuffle, max_length=max_length)
ds.create(datafiles, max_size, valid_size, use_cache)
def low_res_embed(self, step, size, lang='fr', name=None, data=['news2014'],
max_length=200, max_size=None, shuffle=True,
valid_size=0, use_cache=False):
"""
Creates a dataset of BERT embeddings averaged using a window of size
`size` moving `step` tokens per step.
"""
name = name if name else f'embed-{lang}-en'
self._download(data, use_cache)
datafiles = self._datafiles(lang, data)
ds = LowResolutionEmbeddingCorpus(
name, step, size, shuffle=shuffle, max_length=max_length)
ds.create(datafiles, max_size, valid_size, use_cache)
def drop_nth_token(self, n, lang='fr', name=None, data=['news2014'],
max_length=200, max_size=None, shuffle=True,
valid_size=0, use_cache=False):
"""
Creates a dataset of BERT tokens with every nth one dropped.
"""
name = name if name else f'drop-nth-{lang}-en'
self._download(data, use_cache)
datafiles = self._datafiles(lang, data)
ds = DropNthTokenCorpus(
name, n, shuffle=shuffle, max_length=max_length)
ds.create(datafiles, max_size, valid_size, use_cache)
def keep_random(self, p, lang='fr', name=None, data=['news2014'],
max_length=200, max_size=None, shuffle=True,
valid_size=0, use_cache=False):
"""
Creates a dataset of BERT tokens keeping a `p` percent at random and
dropping the rest.
"""
name = name if name else f'drop-random-{lang}-en'
self._download(data, use_cache)
datafiles = self._datafiles(lang, data)
ds = KeepRandomPercentCorpus(
name, p, shuffle=shuffle, max_length=max_length)
ds.create(datafiles, max_size, valid_size, use_cache)
class PervasiveApp(object):
"""
This is the command line app that the `fire` packages exposes
using command line arguments.
"""
def __init__(self):
self.prepare_data = PrepareData()
def train(self,
config,
device_ids=None,
lr=None,
restore=None,
batch=None,
epochs=None,
epoch_size=None,
freeze=False,
dilate=False):
"""
Train the model described in file `config` on devices `device_ids`.
"""
params, project_dir = parse_config(config, device_ids, lr, batch,
epochs, epoch_size, freeze)
# Prepare a place for the shared process communication file.
model_name = params['model_name']
pid = os.getpid()
comm_file = f'{project_dir}/model/{model_name}/pgroup_shared_{pid}'
os.makedirs(f'{project_dir}/model/{model_name}', exist_ok=True)
try:
os.remove(comm_file)
except FileNotFoundError:
pass
# Variables used for distributed processing.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '3892'
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'
m = multiprocessing.Manager()
nprocs = max(1, len(params['gpu_ids']))
qs = [m.Queue() for _ in range(nprocs)]
torch.multiprocessing.spawn(train_worker,
args=(project_dir, params, comm_file,
restore, dilate, qs),
nprocs=nprocs,
join=True)
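        # One worker process is spawned per GPU (or a single CPU process); the
        # workers coordinate through MASTER_ADDR/MASTER_PORT and the shared
        # comm_file above when forming the distributed process group.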
def example(self, config, gpu_id=0, model=None, batch=None):
"""
Print a list of `batch` many example translations. This function
requires the `restore` argument to be most useful. `restore` should be
the path of the saved model relative to the current model's folder.
The current model is specified by the `name` field in the config.
"""
params, project_dir = parse_config(config, [gpu_id], batch_size=batch)
learn, src_vocab, tgt_vocab = build_learner(params, project_dir)
restore(learn, model)
batch, tgt = next(iter(learn.data.valid_dl))
src_data, tgt_data = \
batch.split([learn.model.Ts, learn.model.Tt], dim=1)
src_text = src_vocab.to_text(src_data)
out_data = beam_search(
learn.model, src_data, 5, params['data']['max_length'])
out_text = tgt_vocab.to_text(out_data)
for src, out in zip(src_text, out_text):
print(f'IN: {src}')
print(f'OUT: {out}')
def find_lr(self, config, gpu_id=0):
"""
Search for an optimal learning rate and print plot to Streamlit.
"""
        if streamlit is None:
            print('Please install streamlit (see dev-requirements.txt) '
                  'to use this feature.')
            sys.exit(1)
print('\nDue to a limitation of streamlit, this command can result in '
'an error about running out of inotify watches.')
print('\n************************************************************')
print('TESTING LEARNING RATES: THIS WILL RUN FOR UNDER 100 BATCHES.')
print('************************************************************\n')
params, project_dir = parse_config(config, [gpu_id])
params['gpu_ids'] = [gpu_id] if gpu_id is not None else []
learn, _, _ = build_learner(params, project_dir)
streamlit.title('Find the best learning rate')
streamlit.header(f'Model {params["model_name"]}')
streamlit.text(
'Choose the learning rate where the graph has its steepest decline.'
)
learn.lr_find()
learn.recorder.plot(return_fig=True)
streamlit.pyplot()
def summary(self, config):
"""
Print a summary of the model architecture described by file `config`.
"""
        params, project_dir = parse_config(config, None)
learn, _, _ = build_learner(params, project_dir)
print(learn.summary())
if __name__ == '__main__':
fire.Fire(PervasiveApp)
|
[
"evaluate.beam_search"
] |
[((488, 514), 'logging.getLogger', 'logging.getLogger', (['"""fr2en"""'], {}), "('fr2en')\n", (505, 514), False, 'import logging\n'), ((13935, 13958), 'fire.Fire', 'fire.Fire', (['PervasiveApp'], {}), '(PervasiveApp)\n', (13944, 13958), False, 'import fire\n'), ((4703, 4744), 'os.makedirs', 'os.makedirs', (['self._tmp_dir'], {'exist_ok': '(True)'}), '(self._tmp_dir, exist_ok=True)\n', (4714, 4744), False, 'import os\n'), ((7015, 7075), 'corpus.LanguageCorpus', 'LanguageCorpus', (['name'], {'shuffle': 'shuffle', 'max_length': 'max_length'}), '(name, shuffle=shuffle, max_length=max_length)\n', (7029, 7075), False, 'from corpus import LanguageCorpus, BertCorpus, LowResolutionEmbeddingCorpus, KeepRandomPercentCorpus, DropNthTokenCorpus\n'), ((7652, 7708), 'corpus.BertCorpus', 'BertCorpus', (['name'], {'shuffle': 'shuffle', 'max_length': 'max_length'}), '(name, shuffle=shuffle, max_length=max_length)\n', (7662, 7708), False, 'from corpus import LanguageCorpus, BertCorpus, LowResolutionEmbeddingCorpus, KeepRandomPercentCorpus, DropNthTokenCorpus\n'), ((8274, 8365), 'corpus.LowResolutionEmbeddingCorpus', 'LowResolutionEmbeddingCorpus', (['name', 'step', 'size'], {'shuffle': 'shuffle', 'max_length': 'max_length'}), '(name, step, size, shuffle=shuffle, max_length=\n max_length)\n', (8302, 8365), False, 'from corpus import LanguageCorpus, BertCorpus, LowResolutionEmbeddingCorpus, KeepRandomPercentCorpus, DropNthTokenCorpus\n'), ((8882, 8949), 'corpus.DropNthTokenCorpus', 'DropNthTokenCorpus', (['name', 'n'], {'shuffle': 'shuffle', 'max_length': 'max_length'}), '(name, n, shuffle=shuffle, max_length=max_length)\n', (8900, 8949), False, 'from corpus import LanguageCorpus, BertCorpus, LowResolutionEmbeddingCorpus, KeepRandomPercentCorpus, DropNthTokenCorpus\n'), ((9500, 9572), 'corpus.KeepRandomPercentCorpus', 'KeepRandomPercentCorpus', (['name', 'p'], {'shuffle': 'shuffle', 'max_length': 'max_length'}), '(name, p, shuffle=shuffle, max_length=max_length)\n', (9523, 9572), False, 'from corpus import LanguageCorpus, BertCorpus, LowResolutionEmbeddingCorpus, KeepRandomPercentCorpus, DropNthTokenCorpus\n'), ((10257, 10328), 'config.parse_config', 'parse_config', (['config', 'device_ids', 'lr', 'batch', 'epochs', 'epoch_size', 'freeze'], {}), '(config, device_ids, lr, batch, epochs, epoch_size, freeze)\n', (10269, 10328), False, 'from config import parse_config\n'), ((10498, 10509), 'os.getpid', 'os.getpid', ([], {}), '()\n', (10507, 10509), False, 'import os\n'), ((10594, 10657), 'os.makedirs', 'os.makedirs', (['f"""{project_dir}/model/{model_name}"""'], {'exist_ok': '(True)'}), "(f'{project_dir}/model/{model_name}', exist_ok=True)\n", (10605, 10657), False, 'import os\n'), ((10975, 11000), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (10998, 11000), False, 'import multiprocessing\n'), ((11105, 11236), 'torch.multiprocessing.spawn', 'torch.multiprocessing.spawn', (['train_worker'], {'args': '(project_dir, params, comm_file, restore, dilate, qs)', 'nprocs': 'nprocs', 'join': '(True)'}), '(train_worker, args=(project_dir, params,\n comm_file, restore, dilate, qs), nprocs=nprocs, join=True)\n', (11132, 11236), False, 'import torch\n'), ((11805, 11853), 'config.parse_config', 'parse_config', (['config', '[gpu_id]'], {'batch_size': 'batch'}), '(config, [gpu_id], batch_size=batch)\n', (11817, 11853), False, 'from config import parse_config\n'), ((11892, 11926), 'train.build_learner', 'build_learner', (['params', 'project_dir'], {}), '(params, project_dir)\n', (11905, 11926), False, 'from train import build_learner, train_worker, restore\n'), ((11935, 11956), 'train.restore', 'restore', (['learn', 'model'], {}), '(learn, model)\n', (11942, 11956), False, 'from train import build_learner, train_worker, restore\n'), ((12172, 12239), 'evaluate.beam_search', 'beam_search', (['learn.model', 'src_data', '(5)', "params['data']['max_length']"], {}), "(learn.model, src_data, 5, params['data']['max_length'])\n", (12183, 12239), False, 'from evaluate import beam_search\n'), ((13144, 13174), 'config.parse_config', 'parse_config', (['config', '[gpu_id]'], {}), '(config, [gpu_id])\n', (13156, 13174), False, 'from config import parse_config\n'), ((13264, 13298), 'train.build_learner', 'build_learner', (['params', 'project_dir'], {}), '(params, project_dir)\n', (13277, 13298), False, 'from train import build_learner, train_worker, restore\n'), ((13308, 13354), 'streamlit.title', 'streamlit.title', (['"""Find the best learning rate"""'], {}), "('Find the best learning rate')\n", (13323, 13354), False, 'import streamlit\n'), ((13363, 13412), 'streamlit.header', 'streamlit.header', (['f"""Model {params[\'model_name\']}"""'], {}), '(f"Model {params[\'model_name\']}")\n', (13379, 13412), False, 'import streamlit\n'), ((13421, 13510), 'streamlit.text', 'streamlit.text', (['"""Choose the learning rate where the graph has its steepest decline."""'], {}), "(\n 'Choose the learning rate where the graph has its steepest decline.')\n", (13435, 13510), False, 'import streamlit\n'), ((13606, 13624), 'streamlit.pyplot', 'streamlit.pyplot', ([], {}), '()\n', (13622, 13624), False, 'import streamlit\n'), ((13836, 13870), 'train.build_learner', 'build_learner', (['params', 'project_dir'], {}), '(params, project_dir)\n', (13849, 13870), False, 'from train import build_learner, train_worker, restore\n'), ((683, 707), 'tarfile.open', 'tarfile.open', (['fn', '"""r:gz"""'], {}), "(fn, 'r:gz')\n", (695, 707), False, 'import tarfile\n'), ((4177, 4202), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4192, 4202), False, 'import os\n'), ((4266, 4291), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4281, 4291), False, 'import os\n'), ((4953, 4998), 'os.path.join', 'os.path.join', (['self._data_dir', '"""downloads"""', 'fn'], {}), "(self._data_dir, 'downloads', fn)\n", (4965, 4998), False, 'import os\n'), ((10683, 10703), 'os.remove', 'os.remove', (['comm_file'], {}), '(comm_file)\n', (10692, 10703), False, 'import os\n'), ((12720, 12731), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12728, 12731), False, 'import sys\n'), ((794, 815), 'tarfile.open', 'tarfile.open', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (806, 815), False, 'import tarfile\n'), ((5018, 5040), 'os.path.isfile', 'os.path.isfile', (['out_fn'], {}), '(out_fn)\n', (5032, 5040), False, 'import os\n'), ((5133, 5188), 'os.system', 'os.system', (['f"""wget -4 -O {out_fn} {self._urls[dataset]}"""'], {}), "(f'wget -4 -O {out_fn} {self._urls[dataset]}')\n", (5142, 5188), False, 'import os\n'), ((6204, 6261), 'os.path.join', 'os.path.join', (['self._tmp_dir', "self._corpora[dataset]['en']"], {}), "(self._tmp_dir, self._corpora[dataset]['en'])\n", (6216, 6261), False, 'import os\n'), ((6305, 6362), 'os.path.join', 'os.path.join', (['self._tmp_dir', 'self._corpora[dataset][lang]'], {}), '(self._tmp_dir, self._corpora[dataset][lang])\n', (6317, 6362), False, 'import os\n')]
|
import itertools
import json
import os
from datetime import date, datetime
from queue import Queue
from typing import List
import numpy as np
from qiskit import execute
from qiskit.providers.aer import Aer, AerJob
import config.load_config as cfg
import ibmq_account
import logger
from evaluate.circuit_gen import circ_gen
from evaluate.util import dict_to_array, sv_to_probability
from execution_handler.execution_handler import ExecutionHandler
from partitioner.partition_result_processing import (ResultProcessing,
ResultWriter)
from partitioner.partitioner import Partitioner
from quantum_execution_job import QuantumExecutionJob
from resource_mapping.backend_chooser import Backend_Data
from resource_mapping.result_analyzer import ResultAnalyzer
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, complex):
return str(obj)
raise TypeError("Type %s not serializable" % type(obj))
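# Used below as json.dump(..., default=json_serial) in write_file, so datetimes
# serialize to ISO strings and complex statevector amplitudes to str, e.g.
#   json.dumps({'t': datetime.now(), 'amp': 1 + 2j}, default=json_serial)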
def get_all_permutations(input_list):
return list(itertools.chain(*itertools.permutations(input_list)))
def write_file(dir_path, backend, results, part_results, sv_res_prob: List[np.ndarray], n_qubits: int, circuits, circuit_type, permute, shots):
res_prob = [dict_to_array(r, n_qubits) for r in results]
part_res_prob = [dict_to_array(r, n_qubits) for r in part_results]
data = []
n_circuits = len(circuits)
for i in range(n_circuits):
data.append({"circuit": circuits[i].qasm(), "sv-result": sv_res_prob[i].tolist(
), "result": res_prob[i].tolist(), "part-result": part_res_prob[i].tolist()})
backend_dict = {"name": backend.name()}
if backend.configuration() != None:
backend_dict["config"] = backend.configuration().to_dict()
if backend.status() != None:
backend_dict["status"] = backend.status().to_dict()
if backend.properties() != None:
backend_dict["properties"] = backend.properties().to_dict()
now = datetime.now()
now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
with open(f'{dir_path}/{backend.name()}.json', 'w') as f:
json.dump({"date": now_str, "circuit_type": circuit_type, "n_circuits": n_circuits, "n_qubits": n_qubits,
"permute": permute, "shots": shots, "backend": backend_dict, "data": data}, f, indent=4, default=json_serial)
log.info("Wrote results to file.")
if __name__ == "__main__":
"""
Configure the evaluation here:
"""
# backend_names = ['ibmq_qasm_simulator' , 'ibmq_athens', 'ibmq_santiago', 'ibmq_belem']
# backend_names = ['ibmq_qasm_simulator' , 'ibmq_athens', 'ibmq_santiago', 'ibmq_quito', 'ibmq_lima', 'ibmq_belem']
backend_names = ['ibmq_qasm_simulator']
shots = 8192
n_circuits = 1
n_qubits = 5
subcircuit_max_qubits = 3
circuit_type = "adder"
permute = False
"""
Configuration End
"""
config = cfg.load_or_create()
logger.set_log_level_from_config(config)
provider = ibmq_account.get_provider(config)
log = logger.get_logger("Evaluate")
now = datetime.now()
now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
dir_path = f"part_data/{circuit_type}_{n_qubits}_{subcircuit_max_qubits}_{now_str}"
os.makedirs(dir_path)
log.info(f"Created directory {dir_path}")
circuits, n_circuits = circ_gen(circuit_type, n_qubits, n_circuits)
log.info(f"Generated {n_circuits} circuits")
print(circuits[0])
statevector_backend = Aer.get_backend('statevector_simulator')
sv_job: AerJob = execute(circuits, statevector_backend)
sv_res = sv_job.result()
sv_results = [sv_res.get_statevector(circ) for circ in circuits]
sv_res_prob = [sv_to_probability(sv) for sv in sv_results]
log.info("Executed the circuits with local statevector simulator")
if permute:
circuits = get_all_permutations(circuits)
sv_res_prob = get_all_permutations(sv_res_prob)
n_circuits = len(circuits)
log.info(
f"Generated all permutations. Now there are {n_circuits} circuits")
backend_data_list = []
backends = {}
for backend_name in backend_names:
backend = provider.get_backend(backend_name)
backend_data = Backend_Data(backend)
backend_data_list.append(backend_data)
backends[backend_name] = {
"backend": backend, "backend_data": backend_data}
input_pipeline = Queue()
input_exec = Queue()
output_exec = Queue()
part_results = Queue()
all_results_are_available = Queue()
output_pipline = Queue()
errors = Queue()
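    # Pipeline wiring: Partitioner (input_pipeline -> input_exec) cuts circuits,
    # ExecutionHandler (input_exec -> output_exec) runs them, ResultAnalyzer
    # routes partitioned results to part_results, and ResultWriter /
    # ResultProcessing reassemble them onto output_pipline.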
for backend_data in backend_data_list:
for circ in circuits:
input_pipeline.put(QuantumExecutionJob(circuit=circ.measure_all(inplace=False), shots=shots, backend_data=backend_data, config={
"partitioner": {"subcircuit_max_qubits": subcircuit_max_qubits}}))
input_exec.put(QuantumExecutionJob(circuit=circ.measure_all(
inplace=False), shots=shots, backend_data=backend_data))
partition_dict = {}
partitioner = Partitioner(input=input_pipeline, output=input_exec,
partition_dict=partition_dict, error_queue=errors, **config["partitioner"])
partitioner.start()
exec_handler = ExecutionHandler(
provider, input=input_exec, output=output_exec)
exec_handler.start()
result_analyzer = ResultAnalyzer(
input=output_exec, output=output_pipline, output_agg=None, output_part=part_results)
result_analyzer.start()
partition_result_writer = ResultWriter(
input=part_results, completed_jobs=all_results_are_available, partition_dict=partition_dict)
partition_result_writer.start()
partition_result_processor = ResultProcessing(
input=all_results_are_available, output=output_pipline, partition_dict=partition_dict)
partition_result_processor.start()
log.info("Started the partition pipeline")
results = {}
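    # Note: `part_results` is rebound below from the Queue above to a plain dict;
    # the ResultWriter thread keeps its reference to the original Queue.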
part_results = {}
n_results = 2*n_circuits*len(backend_names)
for backend_name in backend_names:
results[backend_name] = []
part_results[backend_name] = []
i = 0
while i < n_results:
job = output_pipline.get()
i += 1
r = job.result_prob
backend_name = job.backend_data.name
log.debug(
f"{i}: Got job {job.id},type {job.type}, from backend {backend_name}")
if len(results[backend_name]) < n_circuits:
results[backend_name].append(r)
else:
part_results[backend_name].append(r)
if len(results[backend_name]) == n_circuits and len(part_results[backend_name]) == 0:
log.info(
f"All results for not partitioned circuits are available for backend {backend_name}")
elif len(part_results[backend_name]) == n_circuits:
log.info(
f"All results for partitioned circuits are available for backend {backend_name}")
write_file(dir_path, backends[backend_name]["backend"], results.pop(backend_name), part_results.pop(
backend_name), sv_res_prob, n_qubits, circuits, circuit_type, permute, shots)
|
[
"evaluate.util.dict_to_array",
"evaluate.util.sv_to_probability",
"evaluate.circuit_gen.circ_gen"
] |
[((2101, 2115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2113, 2115), False, 'from datetime import date, datetime\n'), ((3030, 3050), 'config.load_config.load_or_create', 'cfg.load_or_create', ([], {}), '()\n', (3048, 3050), True, 'import config.load_config as cfg\n'), ((3055, 3095), 'logger.set_log_level_from_config', 'logger.set_log_level_from_config', (['config'], {}), '(config)\n', (3087, 3095), False, 'import logger\n'), ((3111, 3144), 'ibmq_account.get_provider', 'ibmq_account.get_provider', (['config'], {}), '(config)\n', (3136, 3144), False, 'import ibmq_account\n'), ((3156, 3185), 'logger.get_logger', 'logger.get_logger', (['"""Evaluate"""'], {}), "('Evaluate')\n", (3173, 3185), False, 'import logger\n'), ((3198, 3212), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3210, 3212), False, 'from datetime import date, datetime\n'), ((3354, 3375), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (3365, 3375), False, 'import os\n'), ((3451, 3495), 'evaluate.circuit_gen.circ_gen', 'circ_gen', (['circuit_type', 'n_qubits', 'n_circuits'], {}), '(circuit_type, n_qubits, n_circuits)\n', (3459, 3495), False, 'from evaluate.circuit_gen import circ_gen\n'), ((3597, 3637), 'qiskit.providers.aer.Aer.get_backend', 'Aer.get_backend', (['"""statevector_simulator"""'], {}), "('statevector_simulator')\n", (3612, 3637), False, 'from qiskit.providers.aer import Aer, AerJob\n'), ((3660, 3698), 'qiskit.execute', 'execute', (['circuits', 'statevector_backend'], {}), '(circuits, statevector_backend)\n', (3667, 3698), False, 'from qiskit import execute\n'), ((4536, 4543), 'queue.Queue', 'Queue', ([], {}), '()\n', (4541, 4543), False, 'from queue import Queue\n'), ((4561, 4568), 'queue.Queue', 'Queue', ([], {}), '()\n', (4566, 4568), False, 'from queue import Queue\n'), ((4587, 4594), 'queue.Queue', 'Queue', ([], {}), '()\n', (4592, 4594), False, 'from queue import Queue\n'), ((4614, 4621), 'queue.Queue', 'Queue', ([], {}), '()\n', (4619, 4621), False, 'from queue import Queue\n'), ((4654, 4661), 'queue.Queue', 'Queue', ([], {}), '()\n', (4659, 4661), False, 'from queue import Queue\n'), ((4683, 4690), 'queue.Queue', 'Queue', ([], {}), '()\n', (4688, 4690), False, 'from queue import Queue\n'), ((4704, 4711), 'queue.Queue', 'Queue', ([], {}), '()\n', (4709, 4711), False, 'from queue import Queue\n'), ((5215, 5348), 'partitioner.partitioner.Partitioner', 'Partitioner', ([], {'input': 'input_pipeline', 'output': 'input_exec', 'partition_dict': 'partition_dict', 'error_queue': 'errors'}), "(input=input_pipeline, output=input_exec, partition_dict=\n partition_dict, error_queue=errors, **config['partitioner'])\n", (5226, 5348), False, 'from partitioner.partitioner import Partitioner\n'), ((5418, 5482), 'execution_handler.execution_handler.ExecutionHandler', 'ExecutionHandler', (['provider'], {'input': 'input_exec', 'output': 'output_exec'}), '(provider, input=input_exec, output=output_exec)\n', (5434, 5482), False, 'from execution_handler.execution_handler import ExecutionHandler\n'), ((5540, 5643), 'resource_mapping.result_analyzer.ResultAnalyzer', 'ResultAnalyzer', ([], {'input': 'output_exec', 'output': 'output_pipline', 'output_agg': 'None', 'output_part': 'part_results'}), '(input=output_exec, output=output_pipline, output_agg=None,\n output_part=part_results)\n', (5554, 5643), False, 'from resource_mapping.result_analyzer import ResultAnalyzer\n'), ((5708, 5817), 'partitioner.partition_result_processing.ResultWriter', 'ResultWriter', ([], {'input': 'part_results', 'completed_jobs': 'all_results_are_available', 'partition_dict': 'partition_dict'}), '(input=part_results, completed_jobs=all_results_are_available,\n partition_dict=partition_dict)\n', (5720, 5817), False, 'from partitioner.partition_result_processing import ResultProcessing, ResultWriter\n'), ((5892, 5999), 'partitioner.partition_result_processing.ResultProcessing', 'ResultProcessing', ([], {'input': 'all_results_are_available', 'output': 'output_pipline', 'partition_dict': 'partition_dict'}), '(input=all_results_are_available, output=output_pipline,\n partition_dict=partition_dict)\n', (5908, 5999), False, 'from partitioner.partition_result_processing import ResultProcessing, ResultWriter\n'), ((1370, 1396), 'evaluate.util.dict_to_array', 'dict_to_array', (['r', 'n_qubits'], {}), '(r, n_qubits)\n', (1383, 1396), False, 'from evaluate.util import dict_to_array, sv_to_probability\n'), ((1436, 1462), 'evaluate.util.dict_to_array', 'dict_to_array', (['r', 'n_qubits'], {}), '(r, n_qubits)\n', (1449, 1462), False, 'from evaluate.util import dict_to_array, sv_to_probability\n'), ((2234, 2457), 'json.dump', 'json.dump', (["{'date': now_str, 'circuit_type': circuit_type, 'n_circuits': n_circuits,\n 'n_qubits': n_qubits, 'permute': permute, 'shots': shots, 'backend':\n backend_dict, 'data': data}", 'f'], {'indent': '(4)', 'default': 'json_serial'}), "({'date': now_str, 'circuit_type': circuit_type, 'n_circuits':\n n_circuits, 'n_qubits': n_qubits, 'permute': permute, 'shots': shots,\n 'backend': backend_dict, 'data': data}, f, indent=4, default=json_serial)\n", (2243, 2457), False, 'import json\n'), ((3816, 3837), 'evaluate.util.sv_to_probability', 'sv_to_probability', (['sv'], {}), '(sv)\n', (3833, 3837), False, 'from evaluate.util import dict_to_array, sv_to_probability\n'), ((4348, 4369), 'resource_mapping.backend_chooser.Backend_Data', 'Backend_Data', (['backend'], {}), '(backend)\n', (4360, 4369), False, 'from resource_mapping.backend_chooser import Backend_Data\n'), ((1171, 1205), 'itertools.permutations', 'itertools.permutations', (['input_list'], {}), '(input_list)\n', (1193, 1205), False, 'import itertools\n')]
|
from __future__ import print_function
import torch
import os.path as osp
import time
import datetime
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from torch.distributions import Bernoulli
from train_utils import save_model_epoch
from rewards import compute_reward_det_coff
import numpy as np
from evaluate import evaluate
def train(args, model, dataset):
lr = args.lr
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=args.weight_decay)
if args.stepsize > 0:
scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint)
    # The checkpoint stores only model weights, so training always starts at
    # epoch 0 whether or not a checkpoint was loaded.
    start_epoch = 0
use_gpu = torch.cuda.is_available()
if use_gpu:
print("Currently using GPU {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
model = nn.DataParallel(model).cuda()
else:
print("Currently using CPU")
print("=====> Start training <===== ")
start_time = time.time()
model.train()
train_keys = args.train_keys
baselines = {key: 0. for key in train_keys} # baseline rewards for videos
reward_writers = {key: [] for key in train_keys} # record reward changes for each video
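    # REINFORCE with a per-video baseline: the policy-gradient term is
    # log_prob * (reward - baseline), where each baseline is an exponential
    # moving average of that video's past episode rewards.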
for epoch in range(start_epoch, args.max_epoch):
idxs = np.arange(len(train_keys))
np.random.shuffle(idxs) # shuffle indices
for idx in idxs:
key = train_keys[idx]
video_info = dataset[key][u'video_1']
seq = video_info['features'][...] # sequence of features, (seq_len, dim)
seq = torch.from_numpy(seq).unsqueeze(0) # input shape (1, seq_len, dim)
if use_gpu:
seq = seq.cuda()
sig_probs = model(seq)
reg_loss = args.beta * (sig_probs.mean() - 0.5) ** 2
m = Bernoulli(sig_probs)
epis_rewards = []
if args.train_model == 'sup':
# loading label for supervised training
gtscore = np.loadtxt(osp.join(args.gtpath, 'gtscore_' + key + '_5fps.txt'))
positions = video_info['picks'][...]
label = gtscore[positions]
sum_loss = sum_loss_MSE(sig_probs, label)
else: # unsupervised mode
sum_loss = 0
cost = reg_loss + sum_loss
for episode in range(args.num_episode):
actions = m.sample()
log_probs = m.log_prob(actions)
                if args.reward_type in ('Rdet', 'Rall', 'RrepRdet', 'RdivRdet'):
det_scores = dataset[key][u'video_1']['det_scores'][...]
det_class = dataset[key][u'video_1']['det_class'][...]
                if args.reward_type == 'Rdet': # include the detection reward
                    div_coff = 0.0
                    rep_coff = 0.0
                    det_coff = 1.0
                elif args.reward_type == 'RrepRdet':
                    div_coff = 0.0
                    rep_coff = 1.0
                    det_coff = 1.0
                elif args.reward_type == 'RdivRdet':
                    div_coff = 1.0
                    det_coff = 1.0
                    rep_coff = 0.0
                elif args.reward_type == 'Rall':
                    rep_coff = 1.0
                    det_coff = 1.0
                    div_coff = 1.0
reward = compute_reward_det_coff(seq, actions, det_scores, det_class, episode,
use_gpu=use_gpu, div_coff=div_coff, rep_coff=rep_coff, det_coff=det_coff)
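                # REINFORCE with baseline: scale the log-probability of the sampled actions by (reward - baseline)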
expected_reward = log_probs.mean() * (reward - baselines[key])
cost = cost - 10 * expected_reward # minimize negative expected reward
epis_rewards.append(reward.item())
baselines[key] = 0.9 * baselines[key] + 0.1 * np.mean(epis_rewards) # update baseline reward via moving average
reward_writers[key].append(np.mean(epis_rewards))
optimizer.zero_grad()
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
epoch_reward = np.mean([reward_writers[key][epoch] for key in train_keys])
if (epoch + 1) % 5 == 0:
if args.train_model == 'sup':
print("epoch {}/{}\t reward {}\t sum_loss {}\t reg_loss{} \t cost {}\t".format(epoch + 1,
args.max_epoch, epoch_reward, sum_loss, reg_loss, cost))
else:
print("epoch {}/{}\t reward {}\t reg_loss {} \t cost {}\t".format(epoch + 1,
args.max_epoch, epoch_reward, reg_loss, cost))
if (epoch + 1) % 50 == 0:
Fscore, Precision, Recall = evaluate(args, model, dataset, args.demo_h5, epoch)
print("epoch:{:0>3d}\t F45:{:.2%}\t P45:{:.2%}\t R45:{:.2%}\t ".format(epoch + 1,
Fscore[3], Precision[3], Recall[3]))
# save_model_epoch(args, model, epoch, Fscore[0]*100)
if epoch + 1 == args.max_epoch or epoch + 1 == 50 or epoch + 1 == 100:
save_model_epoch(args, model, epoch, Fscore[0]*100)
model.train()
        if args.stepsize > 0:
            scheduler.step()
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
return model
def sum_loss_MSE(pred_score, gt_labels):
gt_labels = gt_labels.reshape(-1)
    gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
    if torch.cuda.is_available():
        gt_labels = gt_labels.cuda()
if pred_score.dim() > 1:
pred_score = pred_score.squeeze(0).squeeze(1)
criterion = nn.MSELoss()
loss = criterion(pred_score, gt_labels)
return loss
|
[
"evaluate.evaluate"
] |
[((861, 886), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (884, 886), False, 'import torch\n'), ((1191, 1202), 'time.time', 'time.time', ([], {}), '()\n', (1200, 1202), False, 'import time\n'), ((5936, 5980), 'torch.tensor', 'torch.tensor', (['gt_labels'], {'dtype': 'torch.float32'}), '(gt_labels, dtype=torch.float32)\n', (5948, 5980), False, 'import torch\n'), ((6114, 6126), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6124, 6126), True, 'import torch.nn as nn\n'), ((564, 637), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': 'args.stepsize', 'gamma': 'args.gamma'}), '(optimizer, step_size=args.stepsize, gamma=args.gamma)\n', (583, 637), False, 'from torch.optim import lr_scheduler\n'), ((746, 769), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (756, 769), False, 'import torch\n'), ((999, 1036), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (1025, 1036), False, 'import torch\n'), ((1529, 1552), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (1546, 1552), True, 'import numpy as np\n'), ((4517, 4576), 'numpy.mean', 'np.mean', (['[reward_writers[key][epoch] for key in train_keys]'], {}), '([reward_writers[key][epoch] for key in train_keys])\n', (4524, 4576), True, 'import numpy as np\n'), ((5716, 5751), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'elapsed'}), '(seconds=elapsed)\n', (5734, 5751), False, 'import datetime\n'), ((2028, 2048), 'torch.distributions.Bernoulli', 'Bernoulli', (['sig_probs'], {}), '(sig_probs)\n', (2037, 2048), False, 'from torch.distributions import Bernoulli\n'), ((5152, 5203), 'evaluate.evaluate', 'evaluate', (['args', 'model', 'dataset', 'args.demo_h5', 'epoch'], {}), '(args, model, dataset, args.demo_h5, epoch)\n', (5160, 5203), False, 'from evaluate import evaluate\n'), ((5552, 5605), 'train_utils.save_model_epoch', 'save_model_epoch', (['args', 'model', 'epoch', '(Fscore[0] * 100)'], {}), '(args, model, epoch, Fscore[0] * 100)\n', (5568, 5605), False, 'from train_utils import save_model_epoch\n'), ((5672, 5683), 'time.time', 'time.time', ([], {}), '()\n', (5681, 5683), False, 'import time\n'), ((1053, 1075), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (1068, 1075), True, 'import torch.nn as nn\n'), ((4310, 4331), 'numpy.mean', 'np.mean', (['epis_rewards'], {}), '(epis_rewards)\n', (4317, 4331), True, 'import numpy as np\n'), ((1785, 1806), 'torch.from_numpy', 'torch.from_numpy', (['seq'], {}), '(seq)\n', (1801, 1806), False, 'import torch\n'), ((2215, 2268), 'os.path.join', 'osp.join', (['args.gtpath', "('gtscore_' + key + '_5fps.txt')"], {}), "(args.gtpath, 'gtscore_' + key + '_5fps.txt')\n", (2223, 2268), True, 'import os.path as osp\n'), ((3739, 3886), 'rewards.compute_reward_det_coff', 'compute_reward_det_coff', (['seq', 'actions', 'det_scores', 'det_class', 'episode'], {'use_gpu': 'use_gpu', 'div_coff': 'div_coff', 'rep_coff': 'rep_coff', 'det_coff': 'det_coff'}), '(seq, actions, det_scores, det_class, episode,\n use_gpu=use_gpu, div_coff=div_coff, rep_coff=rep_coff, det_coff=det_coff)\n', (3762, 3886), False, 'from rewards import compute_reward_det_coff\n'), ((4205, 4226), 'numpy.mean', 'np.mean', (['epis_rewards'], {}), '(epis_rewards)\n', (4212, 4226), True, 'import numpy as np\n')]
|
import json
import logging
import os
import shutil
import numpy as np
import pandas as pd
import click
import torch
from torch import nn
from torch.nn import functional as F
import torch.optim as optim
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm, trange
from pytorch_transformers import *
from train import train_model
from evaluate import evaluate_model
from utils import accuracy_recall_precision_f1, save_checkpoint, load_checkpoint
from data_loader import get_data
#from data_loader import get_data_bert
import models
import warnings
warnings.filterwarnings('ignore')
#Sacred
#Sources
#https://github.com/gereleth/kaggle-telstra/blob/master/Automatic%20model%20tuning%20with%20Sacred%20and%20Hyperopt.ipynb
#https://github.com/maartjeth/sacred-example-pytorch
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
from sacred.observers import SlackObserver
from sacred.utils import apply_backspaces_and_linefeeds
EXPERIMENT_NAME = 'experiment'
DATABASE_NAME = 'experiments'
URL_NAME = 'mongodb://localhost:27017/'
ex = Experiment()
#ex.observers.append(MongoObserver.create(url=URL_NAME, db_name=DATABASE_NAME))
ex.captured_out_filter = apply_backspaces_and_linefeeds
#Send a message to slack if the run is succesfull or if it failed
slack_obs = SlackObserver.from_config('slack.json')
ex.observers.append(slack_obs)
#Device
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
def log_scalars(results, name_dataset):
"""Log scalars of the results for MongoDB and Omniboard
Args:
results: Results with the loss, accuracy, recall, precision and f1-score
        name_dataset: The name of the dataset so it can store the scalars by name
"""
ex.log_scalar(name_dataset+'.loss', float(results['loss']))
ex.log_scalar(name_dataset+'.accuracy', float(results['accuracy']))
ex.log_scalar(name_dataset+'.recall.OFF', float(results['recall'][0]))
ex.log_scalar(name_dataset+'.recall.NOT', float(results['recall'][1]))
ex.log_scalar(name_dataset+'.precision.OFF', float(results['precision'][0]))
ex.log_scalar(name_dataset+'.precision.NOT', float(results['precision'][1]))
ex.log_scalar(name_dataset+'.f1.OFF', float(results['f1'][0]))
ex.log_scalar(name_dataset+'.f1.NOT', float(results['f1'][1]))
@ex.capture
def train_and_evaluate(num_epochs, model, optimizer, loss_fn, train_dataloader, val_dataloader, early_stopping_criteria, directory, use_bert, use_mongo):
"""Train on training set and evaluate on evaluation set
Args:
num_epochs: Number of epochs to run the training and evaluation
model: Model
optimizer: Optimizer
loss_fn: Loss function
        train_dataloader: Dataloader for the training set
        val_dataloader: Dataloader for the validation set
        early_stopping_criteria: Number of epochs without improvement before training stops early
        directory: Directory path name to store the logging files
        use_bert: Whether a BERT-based model is used (passed through to train/evaluate)
        use_mongo: Whether to log scalars to the MongoDB observer
Returns train and evaluation metrics with epoch, loss, accuracy, recall, precision and f1-score
"""
train_metrics = pd.DataFrame(columns=['epoch', 'loss', 'accuracy', 'recall', 'precision', 'f1'])
val_metrics = pd.DataFrame(columns=['epoch', 'loss', 'accuracy', 'recall', 'precision', 'f1'])
best_val_loss = float("inf")
early_stop_step = 0
for epoch in trange(num_epochs, desc="Epoch"):
### TRAINING ###
train_results = train_model(model, optimizer, loss_fn, train_dataloader, device, use_bert)
train_metrics.loc[len(train_metrics)] = {'epoch':epoch, 'loss':train_results['loss'], 'accuracy':train_results['accuracy'], 'recall':train_results['recall'], 'precision':train_results['precision'], 'f1':train_results['f1']}
if use_mongo: log_scalars(train_results, "Train")
### EVALUATION ###
val_results = evaluate_model(model, optimizer, loss_fn, val_dataloader, device, use_bert)
val_metrics.loc[len(val_metrics)] = {'epoch':epoch, 'loss':val_results['loss'], 'accuracy':val_results['accuracy'], 'recall':val_results['recall'], 'precision':val_results['precision'], 'f1':val_results['f1']}
if use_mongo: log_scalars(val_results, "Validation")
#Save best and latest state
best_model = val_results['loss'] < best_val_loss
#last_model = epoch == num_epochs-1
if best_model:
save_checkpoint({'epoch': epoch+1,
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict()},
directory=directory,
checkpoint='best_model.pth.tar')
#Early stopping
if val_results['loss'] >= best_val_loss:
early_stop_step += 1
print("Early stop step:", early_stop_step)
else:
best_val_loss = val_results['loss']
early_stop_step = 0
stop_early = early_stop_step >= early_stopping_criteria
if stop_early:
print("Stopping early at epoch {}".format(epoch))
return train_metrics, val_metrics
print('\n')
print('Train Loss: {} | Train Acc: {}'.format(train_results['loss'], train_results['accuracy']))
print('Valid Loss: {} | Valid Acc: {}'.format(val_results['loss'], val_results['accuracy']))
print('Train recall: {} | Train precision: {} | Train f1: {}'.format(train_results['recall'], train_results['precision'], train_results['f1']))
print('Valid recall: {} | Valid precision: {} | Valid f1 {}'.format(val_results['recall'], val_results['precision'], val_results['f1']))
return train_metrics, val_metrics
#embedding_file = 'data/GloVe/glove.twitter.27B.200d.txt'
#embedding_file = 'data/Word2Vec/GoogleNews-vectors-negative300.bin'
@ex.config
def config():
"""Configuration"""
output_dim = 2 #Number of labels (default=2)
batch_size = 64 #Batch size (default=32)
num_epochs = 50 #Number of epochs (default=100)
max_seq_length = 45 #Maximum sequence length of the sentences (default=40)
learning_rate = 3e-3 #Learning rate for the model (default=3e-5)
warmup_proportion = 0.1 #Warmup proportion (default=0.1)
early_stopping_criteria = 10 #Early stopping criteria (default=5)
num_layers = 2 #Number of layers (default=2)
hidden_dim = 128 #Hidden layers dimension (default=128)
bidirectional = False #Left and right LSTM
dropout = 0.5 #Dropout percentage
filter_sizes = [2, 3, 4] #CNN
embedding_file = 'data/GloVe/glove.twitter.27B.200d.txt'
model_name = "MLP_Features" #Model name: LSTM, BERT, MLP, CNN
use_mongo = False
subtask = "a" #Subtask name: a, b or c
use_features = True
#ex.observers.append(MongoObserver.create(url=URL_NAME, db_name=DATABASE_NAME))
if model_name == "MLP":
ex.observers.append(FileStorageObserver.create('results-mlp'))
elif model_name == "LSTM":
ex.observers.append(FileStorageObserver.create('results-lstm'))
elif model_name == "LSTMAttention":
ex.observers.append(FileStorageObserver.create('results-lstmattention'))
elif model_name == "CNN":
ex.observers.append(FileStorageObserver.create('results-cnn'))
elif "BERT" in model_name:
#use_bert = True
ex.observers.append(FileStorageObserver.create('results-bert'))
@ex.automain
def main(output_dim,
batch_size,
num_epochs,
max_seq_length,
learning_rate,
warmup_proportion,
early_stopping_criteria,
num_layers,
hidden_dim,
bidirectional,
dropout,
filter_sizes,
embedding_file,
model_name,
use_mongo,
subtask,
use_features,
_run):
#Logger
#directory_checkpoints = f"results/checkpoints/{_run._id}/"
#directory = f"results/{_run._id}/"
id_nummer = f'{_run._id}'
if "BERT" in model_name: #Default = False, if BERT model is used then use_bert is set to True
use_bert = True
directory = f"results-bert/{_run._id}/"
directory_checkpoints = f"results-bert/checkpoints/{_run._id}/"
else:
use_bert = False
directory = f"results-"+model_name.lower()+"/"+id_nummer+"/"
directory_checkpoints = f"results-"+model_name.lower()+"/checkpoints"+"/"+id_nummer+"/"
#Data
if use_bert:
train_dataloader, val_dataloader, test_dataloader = get_data_bert(int(max_seq_length), batch_size, subtask)
else:
embedding_dim, vocab_size, embedding_matrix, train_dataloader, val_dataloader, test_dataloader = get_data(int(max_seq_length), embedding_file, batch_size, use_features, subtask)
#Model
if model_name=="MLP":
model = models.MLP(embedding_matrix, embedding_dim, vocab_size, int(hidden_dim), dropout, output_dim)
if model_name=="MLP_Features":
model = models.MLP_Features(embedding_matrix, embedding_dim, vocab_size, int(hidden_dim), dropout, output_dim)
elif model_name=="CNN":
model = models.CNN(embedding_matrix, embedding_dim, vocab_size, dropout, filter_sizes, output_dim)
elif model_name=="LSTM":
model = models.LSTM(embedding_matrix, embedding_dim, vocab_size, int(hidden_dim), dropout, int(num_layers), bidirectional, output_dim)
elif model_name=="LSTMAttention":
model = models.LSTMAttention(embedding_matrix, embedding_dim, vocab_size, int(hidden_dim), dropout, int(num_layers), bidirectional, output_dim)
# elif model_name=="BERTFreeze":
# model = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_dim)
# for param in model.bert.parameters():
# param.requires_grad = False
# print(param)
# print(param.requires_grad)
# print(model)
# elif model_name=="BERT":
# model = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_dim)
# #print(model)
# elif model_name=="BERTLinear":
# model = models.BertLinear(hidden_dim, dropout, output_dim)
# #print(model)
# elif model_name=="BERTLinearFreeze":
# model = models.BertLinearFreeze(hidden_dim, dropout, output_dim)
# #print(model)
# elif model_name=="BERTLinearFreezeEmbeddings":
# model = models.BertLinearFreezeEmbeddings(hidden_dim, dropout, output_dim)
# #print(model)
# elif model_name=="BERTLSTM":
# model = models.BertLSTM(hidden_dim, dropout, bidirectional, output_dim)
# #print(model)
# elif model_name=="BERTNonLinear":
# model = models.BertNonLinear(dropout, output_dim)
# #print(model)
# elif model_name=="BERTNorm":
# model = models.BertNorm(dropout, output_dim)
# #print(model)
# elif model_name=="BERTPooling":
# model = models.BertPooling(dropout, output_dim)
# elif model_name=="BERTExtractEmbeddings":
# model = models.BertExtractEmbeddings(dropout, output_dim)
model = model.to(device)
#Loss and optimizer
#optimizer = optim.Adam([{'params': model.parameters(), 'weight_decay': 0.1}], lr=learning_rate)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss_fn = F.cross_entropy
#Scheduler
#scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 50], gamma=0.1)
#Training and evaluation
print('Training and evaluation for {} epochs...'.format(num_epochs))
train_metrics, val_metrics = train_and_evaluate(num_epochs, model, optimizer, loss_fn, train_dataloader, val_dataloader, early_stopping_criteria, directory_checkpoints, use_bert, use_mongo)
    train_metrics.to_csv(directory+"train_metrics.csv")
    val_metrics.to_csv(directory+"val_metrics.csv")
#Test
print('Testing...')
load_checkpoint(directory_checkpoints+"best_model.pth.tar", model)
test_metrics = evaluate_model(model, optimizer, loss_fn, test_dataloader, device, use_bert)
if use_mongo: log_scalars(test_metrics,"Test")
test_metrics_df = pd.DataFrame(test_metrics)
print(test_metrics)
test_metrics_df.to_csv(directory+"test_metrics.csv")
results = {
'id': id_nummer,
#'loss': np.round(np.mean(val_metrics['loss']), 4),
'loss': 1-test_metrics['accuracy'],
'accuracy': test_metrics['accuracy'],
'recall': test_metrics['recall'],
'precision': test_metrics['precision'],
'f1': test_metrics['f1'],
'learning_rate': learning_rate,
'hidden_dim': hidden_dim,
'dropout': dropout,
'max_seq_length': max_seq_length,
'status': 'ok'
}
return results
|
[
"evaluate.evaluate_model"
] |
[((758, 791), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (781, 791), False, 'import warnings\n'), ((1314, 1326), 'sacred.Experiment', 'Experiment', ([], {}), '()\n', (1324, 1326), False, 'from sacred import Experiment\n'), ((1542, 1581), 'sacred.observers.SlackObserver.from_config', 'SlackObserver.from_config', (['"""slack.json"""'], {}), "('slack.json')\n", (1567, 1581), False, 'from sacred.observers import SlackObserver\n'), ((1625, 1650), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1648, 1650), False, 'import torch\n'), ((1665, 1685), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1677, 1685), False, 'import torch\n'), ((1705, 1724), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1717, 1724), False, 'import torch\n'), ((3318, 3403), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['epoch', 'loss', 'accuracy', 'recall', 'precision', 'f1']"}), "(columns=['epoch', 'loss', 'accuracy', 'recall', 'precision', 'f1']\n )\n", (3330, 3403), True, 'import pandas as pd\n'), ((3417, 3502), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['epoch', 'loss', 'accuracy', 'recall', 'precision', 'f1']"}), "(columns=['epoch', 'loss', 'accuracy', 'recall', 'precision', 'f1']\n )\n", (3429, 3502), True, 'import pandas as pd\n'), ((3575, 3607), 'tqdm.trange', 'trange', (['num_epochs'], {'desc': '"""Epoch"""'}), "(num_epochs, desc='Epoch')\n", (3581, 3607), False, 'from tqdm import tqdm, trange\n'), ((12033, 12101), 'utils.load_checkpoint', 'load_checkpoint', (["(directory_checkpoints + 'best_model.pth.tar')", 'model'], {}), "(directory_checkpoints + 'best_model.pth.tar', model)\n", (12048, 12101), False, 'from utils import accuracy_recall_precision_f1, save_checkpoint, load_checkpoint\n'), ((12120, 12196), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'optimizer', 'loss_fn', 'test_dataloader', 'device', 'use_bert'], {}), '(model, optimizer, loss_fn, test_dataloader, device, use_bert)\n', (12134, 12196), False, 'from evaluate import evaluate_model\n'), ((12271, 12297), 'pandas.DataFrame', 'pd.DataFrame', (['test_metrics'], {}), '(test_metrics)\n', (12283, 12297), True, 'import pandas as pd\n'), ((3659, 3733), 'train.train_model', 'train_model', (['model', 'optimizer', 'loss_fn', 'train_dataloader', 'device', 'use_bert'], {}), '(model, optimizer, loss_fn, train_dataloader, device, use_bert)\n', (3670, 3733), False, 'from train import train_model\n'), ((4074, 4149), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'optimizer', 'loss_fn', 'val_dataloader', 'device', 'use_bert'], {}), '(model, optimizer, loss_fn, val_dataloader, device, use_bert)\n', (4088, 4149), False, 'from evaluate import evaluate_model\n'), ((7114, 7155), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['"""results-mlp"""'], {}), "('results-mlp')\n", (7140, 7155), False, 'from sacred.observers import FileStorageObserver\n'), ((9291, 9385), 'models.CNN', 'models.CNN', (['embedding_matrix', 'embedding_dim', 'vocab_size', 'dropout', 'filter_sizes', 'output_dim'], {}), '(embedding_matrix, embedding_dim, vocab_size, dropout,\n filter_sizes, output_dim)\n', (9301, 9385), False, 'import models\n'), ((7216, 7258), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['"""results-lstm"""'], {}), "('results-lstm')\n", (7242, 7258), False, 'from sacred.observers import FileStorageObserver\n'), ((7328, 7379), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['"""results-lstmattention"""'], {}), "('results-lstmattention')\n", (7354, 7379), False, 'from sacred.observers import FileStorageObserver\n'), ((7439, 7480), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['"""results-cnn"""'], {}), "('results-cnn')\n", (7465, 7480), False, 'from sacred.observers import FileStorageObserver\n'), ((7566, 7608), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['"""results-bert"""'], {}), "('results-bert')\n", (7592, 7608), False, 'from sacred.observers import FileStorageObserver\n')]
|
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import BaseDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import numpy as np
from config import model_name
from tqdm import tqdm
import os
from pathlib import Path
from evaluate import evaluate
import importlib
import datetime
try:
Model = getattr(importlib.import_module(f"model.{model_name}"), model_name)
config = getattr(importlib.import_module('config'), f"{model_name}Config")
except AttributeError:
print(f"{model_name} not included!")
exit()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class EarlyStopping:
def __init__(self, patience=5):
self.patience = patience
self.counter = 0
self.best_loss = np.Inf
def __call__(self, val_loss):
"""
if you use other metrics where a higher value is better, e.g. accuracy,
call this with its corresponding negative value
"""
if val_loss < self.best_loss:
early_stop = False
get_better = True
self.counter = 0
self.best_loss = val_loss
else:
get_better = False
self.counter += 1
if self.counter >= self.patience:
early_stop = True
else:
early_stop = False
return early_stop, get_better
def latest_checkpoint(directory):
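    # checkpoints are named ckpt-<step>.pth (see the torch.save call below); return the one with the largest step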
if not os.path.exists(directory):
return None
all_checkpoints = {
int(x.split('.')[-2].split('-')[-1]): x
for x in os.listdir(directory)
}
if not all_checkpoints:
return None
return os.path.join(directory,
all_checkpoints[max(all_checkpoints.keys())])
def train():
writer = SummaryWriter(
log_dir=
f"./runs/{model_name}/{datetime.datetime.now().replace(microsecond=0).isoformat()}{'-' + os.environ['REMARK'] if 'REMARK' in os.environ else ''}"
)
if not os.path.exists('checkpoint'):
os.makedirs('checkpoint')
try:
pretrained_word_embedding = torch.from_numpy(
np.load('./data/train/pretrained_word_embedding.npy')).float()
except FileNotFoundError:
pretrained_word_embedding = None
if model_name == 'DKN':
try:
pretrained_entity_embedding = torch.from_numpy(
np.load(
'./data/train/pretrained_entity_embedding.npy')).float()
except FileNotFoundError:
pretrained_entity_embedding = None
try:
pretrained_context_embedding = torch.from_numpy(
np.load(
'./data/train/pretrained_context_embedding.npy')).float()
except FileNotFoundError:
pretrained_context_embedding = None
model = Model(config, pretrained_word_embedding,
pretrained_entity_embedding,
pretrained_context_embedding).to(device)
elif model_name == 'Exp1':
models = nn.ModuleList([
Model(config, pretrained_word_embedding).to(device)
for _ in range(config.ensemble_factor)
])
elif model_name == 'Exp2':
model = Model(config).to(device)
else:
model = Model(config, pretrained_word_embedding).to(device)
if model_name != 'Exp1':
print(model)
else:
print(models[0])
dataset = BaseDataset('data/train/behaviors_parsed.tsv',
'data/train/news_parsed.tsv', 'data/train/roberta')
print(f"Load training dataset with size {len(dataset)}.")
dataloader = iter(
DataLoader(dataset,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
drop_last=True,
pin_memory=True))
if model_name != 'Exp1':
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=config.learning_rate)
else:
criterion = nn.NLLLoss()
optimizers = [
torch.optim.Adam(model.parameters(), lr=config.learning_rate)
for model in models
]
start_time = time.time()
loss_full = []
exhaustion_count = 0
step = 0
early_stopping = EarlyStopping()
checkpoint_dir = os.path.join('./checkpoint', model_name)
Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
checkpoint_path = latest_checkpoint(checkpoint_dir)
if checkpoint_path is not None:
print(f"Load saved parameters in {checkpoint_path}")
checkpoint = torch.load(checkpoint_path)
early_stopping(checkpoint['early_stop_value'])
step = checkpoint['step']
if model_name != 'Exp1':
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.train()
else:
for model in models:
model.load_state_dict(checkpoint['model_state_dict'])
model.train()
for optimizer in optimizers:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for i in tqdm(range(
1,
config.num_epochs * len(dataset) // config.batch_size + 1),
desc="Training"):
try:
minibatch = next(dataloader)
except StopIteration:
exhaustion_count += 1
tqdm.write(
f"Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."
)
dataloader = iter(
DataLoader(dataset,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
drop_last=True,
pin_memory=True))
minibatch = next(dataloader)
step += 1
if model_name == 'LSTUR':
y_pred = model(minibatch["user"], minibatch["clicked_news_length"],
minibatch["candidate_news"],
minibatch["clicked_news"])
elif model_name == 'HiFiArk':
y_pred, regularizer_loss = model(minibatch["candidate_news"],
minibatch["clicked_news"])
elif model_name == 'TANR':
y_pred, topic_classification_loss = model(
minibatch["candidate_news"], minibatch["clicked_news"])
elif model_name == 'Exp1':
y_preds = [
model(minibatch["candidate_news"], minibatch["clicked_news"])
for model in models
]
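            # ensemble: average per-model softmax outputs, then log() so NLLLoss receives log-probabilities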
y_pred_averaged = torch.stack(
[F.softmax(y_pred, dim=1) for y_pred in y_preds],
dim=-1).mean(dim=-1)
y_pred = torch.log(y_pred_averaged)
else:
y_pred = model(minibatch["candidate_news"],
minibatch["clicked_news"])
y = torch.zeros(len(y_pred)).long().to(device)
loss = criterion(y_pred, y)
if model_name == 'HiFiArk':
if i % 10 == 0:
writer.add_scalar('Train/BaseLoss', loss.item(), step)
writer.add_scalar('Train/RegularizerLoss',
regularizer_loss.item(), step)
writer.add_scalar('Train/RegularizerBaseRatio',
regularizer_loss.item() / loss.item(), step)
loss += config.regularizer_loss_weight * regularizer_loss
elif model_name == 'TANR':
if i % 10 == 0:
writer.add_scalar('Train/BaseLoss', loss.item(), step)
writer.add_scalar('Train/TopicClassificationLoss',
topic_classification_loss.item(), step)
writer.add_scalar(
'Train/TopicBaseRatio',
topic_classification_loss.item() / loss.item(), step)
loss += config.topic_classification_loss_weight * topic_classification_loss
loss_full.append(loss.item())
if model_name != 'Exp1':
optimizer.zero_grad()
else:
for optimizer in optimizers:
optimizer.zero_grad()
loss.backward()
if model_name != 'Exp1':
optimizer.step()
else:
for optimizer in optimizers:
optimizer.step()
if i % 10 == 0:
writer.add_scalar('Train/Loss', loss.item(), step)
if i % config.num_batches_show_loss == 0:
tqdm.write(
f"Time {time_since(start_time)}, batches {i}, current loss {loss.item():.4f}, average loss: {np.mean(loss_full):.4f}, latest average loss: {np.mean(loss_full[-256:]):.4f}"
)
if i % config.num_batches_validate == 0:
(model if model_name != 'Exp1' else models[0]).eval()
val_auc, val_mrr, val_ndcg5, val_ndcg10 = evaluate(
model if model_name != 'Exp1' else models[0], './data/val',
200000)
(model if model_name != 'Exp1' else models[0]).train()
writer.add_scalar('Validation/AUC', val_auc, step)
writer.add_scalar('Validation/MRR', val_mrr, step)
writer.add_scalar('Validation/nDCG@5', val_ndcg5, step)
writer.add_scalar('Validation/nDCG@10', val_ndcg10, step)
tqdm.write(
f"Time {time_since(start_time)}, batches {i}, validation AUC: {val_auc:.4f}, validation MRR: {val_mrr:.4f}, validation nDCG@5: {val_ndcg5:.4f}, validation nDCG@10: {val_ndcg10:.4f}, "
)
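            # AUC is higher-is-better, so its negative is fed to the loss-based early stopper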
early_stop, get_better = early_stopping(-val_auc)
if early_stop:
tqdm.write('Early stop.')
break
elif get_better:
try:
torch.save(
{
'model_state_dict': (model if model_name != 'Exp1'
else models[0]).state_dict(),
'optimizer_state_dict':
(optimizer if model_name != 'Exp1' else
optimizers[0]).state_dict(),
'step':
step,
'early_stop_value':
-val_auc
}, f"./checkpoint/{model_name}/ckpt-{step}.pth")
except OSError as error:
print(f"OS error: {error}")
def time_since(since):
"""
Format elapsed time string.
"""
now = time.time()
elapsed_time = now - since
return time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if __name__ == '__main__':
print('Using device:', device)
print(f'Training model {model_name}')
train()
|
[
"evaluate.evaluate"
] |
[((3468, 3570), 'dataset.BaseDataset', 'BaseDataset', (['"""data/train/behaviors_parsed.tsv"""', '"""data/train/news_parsed.tsv"""', '"""data/train/roberta"""'], {}), "('data/train/behaviors_parsed.tsv', 'data/train/news_parsed.tsv',\n 'data/train/roberta')\n", (3479, 3570), False, 'from dataset import BaseDataset\n'), ((4302, 4313), 'time.time', 'time.time', ([], {}), '()\n', (4311, 4313), False, 'import time\n'), ((4430, 4470), 'os.path.join', 'os.path.join', (['"""./checkpoint"""', 'model_name'], {}), "('./checkpoint', model_name)\n", (4442, 4470), False, 'import os\n'), ((10815, 10826), 'time.time', 'time.time', ([], {}), '()\n', (10824, 10826), False, 'import time\n'), ((396, 442), 'importlib.import_module', 'importlib.import_module', (['f"""model.{model_name}"""'], {}), "(f'model.{model_name}')\n", (419, 442), False, 'import importlib\n'), ((477, 510), 'importlib.import_module', 'importlib.import_module', (['"""config"""'], {}), "('config')\n", (500, 510), False, 'import importlib\n'), ((645, 670), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (668, 670), False, 'import torch\n'), ((1487, 1512), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1501, 1512), False, 'import os\n'), ((2036, 2064), 'os.path.exists', 'os.path.exists', (['"""checkpoint"""'], {}), "('checkpoint')\n", (2050, 2064), False, 'import os\n'), ((2074, 2099), 'os.makedirs', 'os.makedirs', (['"""checkpoint"""'], {}), "('checkpoint')\n", (2085, 2099), False, 'import os\n'), ((3688, 3821), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'num_workers': 'config.num_workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(dataset, batch_size=config.batch_size, shuffle=True, num_workers\n =config.num_workers, drop_last=True, pin_memory=True)\n', (3698, 3821), False, 'from torch.utils.data import DataLoader\n'), ((3962, 3983), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3981, 3983), True, 'import torch.nn as nn\n'), ((4133, 4145), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (4143, 4145), True, 'import torch.nn as nn\n'), ((4706, 4733), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4716, 4733), False, 'import torch\n'), ((10895, 10920), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (10906, 10920), False, 'import time\n'), ((1623, 1644), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1633, 1644), False, 'import os\n'), ((4475, 4495), 'pathlib.Path', 'Path', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (4479, 4495), False, 'from pathlib import Path\n'), ((9138, 9214), 'evaluate.evaluate', 'evaluate', (["(model if model_name != 'Exp1' else models[0])", '"""./data/val"""', '(200000)'], {}), "(model if model_name != 'Exp1' else models[0], './data/val', 200000)\n", (9146, 9214), False, 'from evaluate import evaluate\n'), ((5567, 5682), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."""'], {}), "(\n f'Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset.'\n )\n", (5577, 5682), False, 'from tqdm import tqdm\n'), ((9923, 9948), 'tqdm.tqdm.write', 'tqdm.write', (['"""Early stop."""'], {}), "('Early stop.')\n", (9933, 9948), False, 'from tqdm import tqdm\n'), ((2176, 2229), 'numpy.load', 'np.load', (['"""./data/train/pretrained_word_embedding.npy"""'], {}), "('./data/train/pretrained_word_embedding.npy')\n", (2183, 2229), True, 'import numpy as np\n'), ((5750, 5883), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'num_workers': 'config.num_workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(dataset, batch_size=config.batch_size, shuffle=True, num_workers\n =config.num_workers, drop_last=True, pin_memory=True)\n', (5760, 5883), False, 'from torch.utils.data import DataLoader\n'), ((2428, 2483), 'numpy.load', 'np.load', (['"""./data/train/pretrained_entity_embedding.npy"""'], {}), "('./data/train/pretrained_entity_embedding.npy')\n", (2435, 2483), True, 'import numpy as np\n'), ((2686, 2742), 'numpy.load', 'np.load', (['"""./data/train/pretrained_context_embedding.npy"""'], {}), "('./data/train/pretrained_context_embedding.npy')\n", (2693, 2742), True, 'import numpy as np\n'), ((6999, 7025), 'torch.log', 'torch.log', (['y_pred_averaged'], {}), '(y_pred_averaged)\n', (7008, 7025), False, 'import torch\n'), ((8875, 8893), 'numpy.mean', 'np.mean', (['loss_full'], {}), '(loss_full)\n', (8882, 8893), True, 'import numpy as np\n'), ((8922, 8947), 'numpy.mean', 'np.mean', (['loss_full[-256:]'], {}), '(loss_full[-256:])\n', (8929, 8947), True, 'import numpy as np\n'), ((1895, 1918), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1916, 1918), False, 'import datetime\n'), ((6892, 6916), 'torch.nn.functional.softmax', 'F.softmax', (['y_pred'], {'dim': '(1)'}), '(y_pred, dim=1)\n', (6901, 6916), True, 'import torch.nn.functional as F\n')]
|
import os
import sys
import random
import torch
import math
import time
import argparse
import collections
import numpy as np
from torch import nn, optim
import torch.utils.data as data
import torch.nn.utils.rnn as rnn_utils
from itertools import chain
from data_process import Corpus, MyDataset, pretrain_corpus_construction
from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate
def pretrain_epoch(model, train_data, loss_weights, optimizer, epoch, config, out_text):
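    """Run one pretraining epoch over the auxiliary objectives (SR / AR / upattern); returns (avg_loss, out_text)."""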
start = time.time()
model.train()
print('Pretrain Epoch: %d start!' % epoch)
out_text += ('Pretrain Epoch: %d start!\n' % epoch)
avg_loss = 0.0
train_loader = data.DataLoader(train_data, collate_fn=train_data.my_collate, batch_size=config.batch_size, num_workers=0, shuffle=True)
pretrain_types = config.pretrain_type.split('+')
for batch_idx, batch in enumerate(train_loader):
convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens, labels = batch[0], batch[1], batch[2], batch[3], batch[4], batch[5], batch[6]
if torch.cuda.is_available() and config.use_gpu: # run in GPU
convs = convs.cuda()
users = users.cuda()
labels = [label.cuda() for label in labels]
predictions, _ = model(convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens)
for i, pt in enumerate(pretrain_types):
if pt == 'SR':
cur_loss = config.SR_tradeoff * weighted_binary_cross_entropy(predictions[i], labels[i], loss_weights[0])
elif pt == 'AR':
cur_loss = config.AR_tradeoff * nn.MSELoss()(predictions[i], labels[i])
elif pt == 'upattern':
cur_loss = config.upattern_tradeoff * weighted_binary_cross_entropy(predictions[i], labels[i], loss_weights[-1])
if i == 0:
loss = cur_loss
else:
loss += cur_loss
avg_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss /= len(train_data)
end = time.time()
print('Pretrain Epoch: %d done! Train avg_loss: %g! Using time: %.2f minutes!' % (epoch, avg_loss, (end - start) / 60))
out_text += ('Pretrain Epoch: %d done! Train avg_loss: %g! Using time: %.2f minutes!\n' % (epoch, avg_loss, (end - start) / 60))
return avg_loss, out_text
def train_epoch(model, train_data, valid_loader, loss_weights, optimizer, epoch, config, out_text, valid_out):
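    """Run one fine-tuning epoch on the main task, adding the auxiliary losses when config.multi_task is set; returns (avg_loss, out_text, valid_out)."""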
start = time.time()
model.train()
print('Train Epoch: %d start!' % epoch)
out_text += ('Train Epoch: %d start!\n' % epoch)
avg_loss = 0.0
pretrain_types = config.multitask_type.split('+')
train_loader = data.DataLoader(train_data, collate_fn=train_data.my_collate, batch_size=config.batch_size, num_workers=0, shuffle=True)
for batch_idx, batch in enumerate(train_loader):
if config.multi_task:
convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens, labels, pre_labels = batch[0], batch[1], batch[2], batch[3], batch[4], batch[5], batch[6], batch[7]
else:
convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens, labels = batch[0], batch[1], batch[2], batch[3], batch[4], batch[5], batch[6]
if torch.cuda.is_available() and config.use_gpu: # run in GPU
convs = convs.cuda()
users = users.cuda()
labels = labels.cuda()
if config.multi_task:
pre_labels = [pre_label.cuda() for pre_label in pre_labels]
predictions, _ = model(convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens)
if config.multi_task:
loss = weighted_binary_cross_entropy(predictions[0], labels, loss_weights[0])
for i, pt in enumerate(pretrain_types):
if pt == 'SR':
loss += config.multitask_tradeoff * config.SR_tradeoff * weighted_binary_cross_entropy(predictions[1][i], pre_labels[i], loss_weights[1][0])
elif pt == 'AR':
loss += config.multitask_tradeoff * config.AR_tradeoff * nn.MSELoss()(predictions[1][i], pre_labels[i])
elif pt == 'upattern':
loss += config.multitask_tradeoff * config.upattern_tradeoff * weighted_binary_cross_entropy(predictions[1][i], pre_labels[i], loss_weights[1][-1])
else:
loss = weighted_binary_cross_entropy(predictions, labels, loss_weights)
avg_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx != 0 and config.valid_during_epoch != -1 and batch_idx % config.valid_during_epoch == 0:
_, valid_f1, valid_loss, _ = valid_evaluate(model, valid_loader, config)
model.train()
valid_out += ('%g\t%g\n' % (valid_f1, valid_loss))
print('Valid during epoch: %g\t%g' % (valid_f1, valid_loss))
avg_loss /= len(train_data)
end = time.time()
print('Train Epoch: %d done! Train avg_loss: %g! Using time: %.2f minutes!' % (epoch, avg_loss, (end - start) / 60))
out_text += ('Train Epoch: %d done! Train avg_loss: %g! Using time: %.2f minutes!\n' % (epoch, avg_loss, (end - start) / 60))
return avg_loss, out_text, valid_out
def train(corp, model, config):
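    """Full pipeline: optional pretraining on auxiliary tasks, fine-tuning with early stopping, then test-set evaluation."""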
pretrain_optimizer = optim.Adam(model.parameters(), lr=config.pre_lr, weight_decay=config.l2_weight)
train_optimizer = optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.l2_weight)
train_data = corp.train_data
corp.test_corpus(config.valid_file, mode='VALID')
valid_data = MyDataset(corp, config, 'VALID')
out_text = ""
# First step: Pretrain the model
if config.pretrain_path == "N" or config.pretrain_type == "N" or config.modelname == "LSTMBIA":
pass
elif config.pretrain_path is None:
model.mode = 'pretrain'
train_data.pretrain = True
valid_data.pretrain = True
valid_loader = data.DataLoader(valid_data, collate_fn=valid_data.my_collate, batch_size=config.batch_size, num_workers=0)
if "SR" in config.pretrain_type and "upattern" in config.pretrain_type:
config.pretrain_loss_weights = [torch.Tensor([1, train_data.pretrain_weight_sr]), torch.Tensor([1, train_data.pretrain_weight_up])]
elif "SR" in config.pretrain_type:
config.pretrain_loss_weights = [torch.Tensor([1, train_data.pretrain_weight_sr])]
elif "upattern" in config.pretrain_type:
config.pretrain_loss_weights = [torch.Tensor([1, train_data.pretrain_weight_up])]
else:
config.pretrain_loss_weights = None
if torch.cuda.is_available() and config.use_gpu and config.pretrain_loss_weights is not None: # run in GPU
if "SR" in config.pretrain_type and "upattern" in config.pretrain_type:
config.pretrain_loss_weights = [config.pretrain_loss_weights[0].cuda(), config.pretrain_loss_weights[1].cuda()]
else:
config.pretrain_loss_weights = [config.pretrain_loss_weights[0].cuda()]
best_state = None
best_valid_f1 = -1.0
best_valid_loss = 999999.99
no_improve = 0
for epoch in range(config.max_epoch):
_, out_text = pretrain_epoch(model, train_data, config.pretrain_loss_weights, pretrain_optimizer, epoch, config, out_text)
valid_auc, valid_f1, valid_loss, _ = valid_evaluate(model, valid_loader, config)
if best_valid_f1 < valid_f1 or best_valid_loss > valid_loss:
no_improve = 0
best_state = model.state_dict()
if best_valid_f1 < valid_f1:
best_valid_f1 = valid_f1
print('New Best F1 Valid Result!!! Valid F1: %g, Valid Loss: %g' % (valid_f1, valid_loss))
out_text += ('New Best F1 Valid Result!!! Valid F1: %g, Valid Loss: %g\n' % (valid_f1, valid_loss))
if best_valid_loss > valid_loss:
best_valid_loss = valid_loss
print('New Best Loss Valid Result!!! Valid F1: %g, Valid Loss: %g' % (valid_f1, valid_loss))
out_text += ('New Best Loss Valid Result!!! Valid F1: %g, Valid Loss: %g\n' % (valid_f1, valid_loss))
else:
no_improve += 1
print('No improve! Current Valid F1: %g, Best Valid F1: %g; Current Valid Loss: %g, Best Valid Loss: %g' % (valid_f1, best_valid_f1, valid_loss, best_valid_loss))
out_text += ('No improve! Current Valid F1: %g, Best Valid F1: %g; Current Valid Loss: %g, Best Valid Loss: %g\n' % (valid_f1, best_valid_f1, valid_loss, best_valid_loss))
if no_improve == 2:
break
model.load_state_dict(best_state)
else:
model.load_state_dict(torch.load(config.pretrain_path))
# Second step: Fine-tune the model
model.mode = 'train'
valid_out = "F1\tLoss\n"
train_data.pretrain = False
valid_data.pretrain = False
valid_loader = data.DataLoader(valid_data, collate_fn=valid_data.my_collate, batch_size=config.batch_size, num_workers=0)
if config.pretrain_type != "IDF_Att" and config.pretrain_type != "TPIDF_Att":
best_state_f1 = None
best_state_loss = None
best_valid_thr_f1 = 0.0
best_valid_thr_loss = 0.0
best_valid_f1 = -1.0
best_valid_loss = 999999.99
no_improve = 0
for epoch in range(config.max_epoch):
if config.modelname == 'LSTMBIA':
model.mode = 'train'
if config.multi_task:
if "SR" in config.multitask_type and "upattern" in config.multitask_type:
config.pretrain_loss_weights = [torch.Tensor([1, train_data.pretrain_weight_sr]),
torch.Tensor([1, train_data.pretrain_weight_up])]
elif "SR" in config.multitask_type:
config.pretrain_loss_weights = [torch.Tensor([1, train_data.pretrain_weight_sr])]
elif "upattern" in config.multitask_type:
config.pretrain_loss_weights = [torch.Tensor([1, train_data.pretrain_weight_up])]
else:
config.pretrain_loss_weights = None
if torch.cuda.is_available() and config.use_gpu and config.pretrain_loss_weights is not None: # run in GPU
if "SR" in config.multitask_type and "upattern" in config.multitask_type:
config.pretrain_loss_weights = [config.pretrain_loss_weights[0].cuda(),
config.pretrain_loss_weights[1].cuda()]
else:
config.pretrain_loss_weights = [config.pretrain_loss_weights[0].cuda()]
_, out_text, valid_out = train_epoch(model, train_data, valid_loader, (config.loss_weights, config.pretrain_loss_weights), train_optimizer, epoch, config, out_text, valid_out)
else:
_, out_text, valid_out = train_epoch(model, train_data, valid_loader, config.loss_weights, train_optimizer, epoch, config, out_text, valid_out)
if config.modelname == 'LSTMBIA':
model.mode = 'test'
valid_auc, valid_f1, valid_loss, valid_thr = valid_evaluate(model, valid_loader, config)
if best_valid_f1 < valid_f1 or best_valid_loss > valid_loss:
no_improve = 0
if best_valid_f1 < valid_f1:
best_state_f1 = model.state_dict()
best_valid_thr_f1 = valid_thr
best_valid_f1 = valid_f1
print('New Best F1 Valid Result!!! Valid F1: %g, Valid Loss: %g' % (valid_f1, valid_loss))
out_text += ('New Best F1 Valid Result!!! Valid F1: %g, Valid Loss: %g\n' % (valid_f1, valid_loss))
if best_valid_loss > valid_loss:
best_state_loss = model.state_dict()
best_valid_thr_loss = valid_thr
best_valid_loss = valid_loss
print('New Best Loss Valid Result!!! Valid F1: %g, Valid Loss: %g' % (valid_f1, valid_loss))
out_text += ('New Best Loss Valid Result!!! Valid F1: %g, Valid Loss: %g\n' % (valid_f1, valid_loss))
else:
no_improve += 1
print('No improve! Current Valid F1: %g, Best Valid F1: %g; Current Valid Loss: %g, Best Valid Loss: %g' % (valid_f1, best_valid_f1, valid_loss, best_valid_loss))
out_text += ('No improve! Current Valid F1: %g, Best Valid F1: %g; Current Valid Loss: %g, Best Valid Loss: %g\n' % (valid_f1, best_valid_f1, valid_loss, best_valid_loss))
if no_improve == 5:
break
# Final step: Evaluate the model
corp.test_corpus(config.test_file, mode='TEST')
test_data = MyDataset(corp, config, 'TEST')
test_loader = data.DataLoader(test_data, collate_fn=test_data.my_collate, batch_size=config.batch_size, num_workers=0)
if config.modelname == 'LSTMBIA':
model.mode = 'test'
model.load_state_dict(best_state_f1)
res_f1 = test_evaluate(model, test_loader, config, best_valid_thr_f1)
print('Result in test set(F1 Valid): AUC %g, F1 Score %g, Precision %g, Recall %g, Accuracy %g' % (res_f1[0], res_f1[1], res_f1[2], res_f1[3], res_f1[4]))
out_text += ('Result in test set(F1 Valid): AUC %g, F1 Score %g, Precision %g, Recall %g, Accuracy %g\n' % (res_f1[0], res_f1[1], res_f1[2], res_f1[3], res_f1[4]))
model.load_state_dict(best_state_loss)
res_loss = test_evaluate(model, test_loader, config, best_valid_thr_loss)
print('Result in test set(Loss Valid): AUC %g, F1 Score %g, Precision %g, Recall %g, Accuracy %g' % (res_loss[0], res_loss[1], res_loss[2], res_loss[3], res_loss[4]))
out_text += ('Result in test set(Loss Valid): AUC %g, F1 Score %g, Precision %g, Recall %g, Accuracy %g\n' % (res_loss[0], res_loss[1], res_loss[2], res_loss[3], res_loss[4]))
if res_f1[1] >= res_loss[1]:
torch.save(best_state_f1, config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_f1.model' % (res_f1[0], res_f1[1], best_valid_f1, config.lr, epoch, config.random_seed, config.train_weight))
with open(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_f1.res' % (res_f1[0], res_f1[1], best_valid_f1, config.lr, epoch, config.random_seed, config.train_weight), 'w') as f:
f.write('AUC\tF1-Score\tPrecision\tRecall\tAccuracy\n')
f.write('%g\t%g\t%g\t%g\t%g\n\n' % (res_f1[0], res_f1[1], res_f1[2], res_f1[3], res_f1[4]))
f.write('Threshold: %g\n' % best_valid_thr_f1)
f.write('\n\nParameters:\n')
for key in config.__dict__:
f.write('%s : %s\n' % (key, config.__dict__[key]))
with open(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_f1.out' % (res_f1[0], res_f1[1], best_valid_f1, config.lr, epoch, config.random_seed, config.train_weight), 'w') as f:
f.write(out_text)
if config.valid_during_epoch != -1:
with open(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_f1-valid.out' % (res_f1[0], res_f1[1], best_valid_f1, config.lr, epoch, config.random_seed, config.train_weight), 'w') as f:
f.write(valid_out)
else:
torch.save(best_state_loss, config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_loss.model' % (res_loss[0], res_loss[1], best_valid_loss, config.lr, epoch, config.random_seed, config.train_weight))
with open(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_loss.res' % (res_loss[0], res_loss[1], best_valid_loss, config.lr, epoch, config.random_seed, config.train_weight), 'w') as f:
f.write('AUC\tF1-Score\tPrecision\tRecall\tAccuracy\n')
f.write('%g\t%g\t%g\t%g\t%g\n\n' % (res_loss[0], res_loss[1], res_loss[2], res_loss[3], res_loss[4]))
f.write('Threshold: %g\n' % best_valid_thr_loss)
f.write('\n\nParameters:\n')
for key in config.__dict__:
f.write('%s : %s\n' % (key, config.__dict__[key]))
with open(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_loss.out' % (res_loss[0], res_loss[1], best_valid_loss, config.lr, epoch, config.random_seed, config.train_weight), 'w') as f:
f.write(out_text)
if config.valid_during_epoch != -1:
with open(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_loss-valid.out' % (res_loss[0], res_loss[1], best_valid_loss, config.lr, epoch, config.random_seed, config.train_weight), 'w') as f:
f.write(valid_out)
|
[
"evaluate.test_evaluate",
"evaluate.valid_evaluate",
"evaluate.weighted_binary_cross_entropy"
] |
[((511, 522), 'time.time', 'time.time', ([], {}), '()\n', (520, 522), False, 'import time\n'), ((682, 807), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'collate_fn': 'train_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)', 'shuffle': '(True)'}), '(train_data, collate_fn=train_data.my_collate, batch_size=\n config.batch_size, num_workers=0, shuffle=True)\n', (697, 807), True, 'import torch.utils.data as data\n'), ((2092, 2103), 'time.time', 'time.time', ([], {}), '()\n', (2101, 2103), False, 'import time\n'), ((2516, 2527), 'time.time', 'time.time', ([], {}), '()\n', (2525, 2527), False, 'import time\n'), ((2735, 2860), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'collate_fn': 'train_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)', 'shuffle': '(True)'}), '(train_data, collate_fn=train_data.my_collate, batch_size=\n config.batch_size, num_workers=0, shuffle=True)\n', (2750, 2860), True, 'import torch.utils.data as data\n'), ((5021, 5032), 'time.time', 'time.time', ([], {}), '()\n', (5030, 5032), False, 'import time\n'), ((5669, 5701), 'data_process.MyDataset', 'MyDataset', (['corp', 'config', '"""VALID"""'], {}), "(corp, config, 'VALID')\n", (5678, 5701), False, 'from data_process import Corpus, MyDataset, pretrain_corpus_construction\n'), ((9102, 9213), 'torch.utils.data.DataLoader', 'data.DataLoader', (['valid_data'], {'collate_fn': 'valid_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)'}), '(valid_data, collate_fn=valid_data.my_collate, batch_size=\n config.batch_size, num_workers=0)\n', (9117, 9213), True, 'import torch.utils.data as data\n'), ((12996, 13027), 'data_process.MyDataset', 'MyDataset', (['corp', 'config', '"""TEST"""'], {}), "(corp, config, 'TEST')\n", (13005, 13027), False, 'from data_process import Corpus, MyDataset, pretrain_corpus_construction\n'), ((13046, 13155), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_data'], {'collate_fn': 'test_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)'}), '(test_data, collate_fn=test_data.my_collate, batch_size=\n config.batch_size, num_workers=0)\n', (13061, 13155), True, 'import torch.utils.data as data\n'), ((13271, 13331), 'evaluate.test_evaluate', 'test_evaluate', (['model', 'test_loader', 'config', 'best_valid_thr_f1'], {}), '(model, test_loader, config, best_valid_thr_f1)\n', (13284, 13331), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((13717, 13779), 'evaluate.test_evaluate', 'test_evaluate', (['model', 'test_loader', 'config', 'best_valid_thr_loss'], {}), '(model, test_loader, config, best_valid_thr_loss)\n', (13730, 13779), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((14172, 14358), 'torch.save', 'torch.save', (['best_state_f1', "(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_f1.model' % (res_f1[0], res_f1[1\n ], best_valid_f1, config.lr, epoch, config.random_seed, config.\n train_weight))"], {}), "(best_state_f1, config.path + \n '%.4f_%.4f_%.4f_%g_%d_%d_%g_f1.model' % (res_f1[0], res_f1[1],\n best_valid_f1, config.lr, epoch, config.random_seed, config.train_weight))\n", (14182, 14358), False, 'import torch\n'), ((15403, 15604), 'torch.save', 'torch.save', (['best_state_loss', "(config.path + '%.4f_%.4f_%.4f_%g_%d_%d_%g_loss.model' % (res_loss[0],\n res_loss[1], best_valid_loss, config.lr, epoch, config.random_seed,\n config.train_weight))"], {}), "(best_state_loss, config.path + \n '%.4f_%.4f_%.4f_%g_%d_%d_%g_loss.model' % (res_loss[0], res_loss[1],\n best_valid_loss, config.lr, epoch, config.random_seed, config.train_weight)\n )\n", (15413, 15604), False, 'import torch\n'), ((1074, 1099), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1097, 1099), False, 'import torch\n'), ((3302, 3327), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3325, 3327), False, 'import torch\n'), ((3721, 3791), 'evaluate.weighted_binary_cross_entropy', 'weighted_binary_cross_entropy', (['predictions[0]', 'labels', 'loss_weights[0]'], {}), '(predictions[0], labels, loss_weights[0])\n', (3750, 3791), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((4446, 4510), 'evaluate.weighted_binary_cross_entropy', 'weighted_binary_cross_entropy', (['predictions', 'labels', 'loss_weights'], {}), '(predictions, labels, loss_weights)\n', (4475, 4510), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((4773, 4816), 'evaluate.valid_evaluate', 'valid_evaluate', (['model', 'valid_loader', 'config'], {}), '(model, valid_loader, config)\n', (4787, 4816), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((6035, 6146), 'torch.utils.data.DataLoader', 'data.DataLoader', (['valid_data'], {'collate_fn': 'valid_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)'}), '(valid_data, collate_fn=valid_data.my_collate, batch_size=\n config.batch_size, num_workers=0)\n', (6050, 6146), True, 'import torch.utils.data as data\n'), ((11395, 11438), 'evaluate.valid_evaluate', 'valid_evaluate', (['model', 'valid_loader', 'config'], {}), '(model, valid_loader, config)\n', (11409, 11438), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((6719, 6744), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6742, 6744), False, 'import torch\n'), ((7486, 7529), 'evaluate.valid_evaluate', 'valid_evaluate', (['model', 'valid_loader', 'config'], {}), '(model, valid_loader, config)\n', (7500, 7529), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((8891, 8923), 'torch.load', 'torch.load', (['config.pretrain_path'], {}), '(config.pretrain_path)\n', (8901, 8923), False, 'import torch\n'), ((1478, 1551), 'evaluate.weighted_binary_cross_entropy', 'weighted_binary_cross_entropy', (['predictions[i]', 'labels[i]', 'loss_weights[0]'], {}), '(predictions[i], labels[i], loss_weights[0])\n', (1507, 1551), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((6266, 6314), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_sr]'], {}), '([1, train_data.pretrain_weight_sr])\n', (6278, 6314), False, 'import torch\n'), ((6316, 6364), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_up]'], {}), '([1, train_data.pretrain_weight_up])\n', (6328, 6364), False, 'import torch\n'), ((10373, 10398), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10396, 10398), False, 'import torch\n'), ((3952, 4039), 'evaluate.weighted_binary_cross_entropy', 'weighted_binary_cross_entropy', (['predictions[1][i]', 'pre_labels[i]', 'loss_weights[1][0]'], {}), '(predictions[1][i], pre_labels[i],\n loss_weights[1][0])\n', (3981, 4039), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((6453, 6501), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_sr]'], {}), '([1, train_data.pretrain_weight_sr])\n', (6465, 6501), False, 'import torch\n'), ((9810, 9858), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_sr]'], {}), '([1, train_data.pretrain_weight_sr])\n', (9822, 9858), False, 'import torch\n'), ((9912, 9960), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_up]'], {}), '([1, train_data.pretrain_weight_up])\n', (9924, 9960), False, 'import torch\n'), ((1629, 1641), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1639, 1641), False, 'from torch import nn, optim\n'), ((1758, 1832), 'evaluate.weighted_binary_cross_entropy', 'weighted_binary_cross_entropy', (['predictions[i]', 'labels[i]', 'loss_weights[-1]'], {}), '(predictions[i], labels[i], loss_weights[-1])\n', (1787, 1832), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((6596, 6644), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_up]'], {}), '([1, train_data.pretrain_weight_up])\n', (6608, 6644), False, 'import torch\n'), ((10066, 10114), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_sr]'], {}), '([1, train_data.pretrain_weight_sr])\n', (10078, 10114), False, 'import torch\n'), ((4146, 4158), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4156, 4158), False, 'from torch import nn, optim\n'), ((4315, 4403), 'evaluate.weighted_binary_cross_entropy', 'weighted_binary_cross_entropy', (['predictions[1][i]', 'pre_labels[i]', 'loss_weights[1][-1]'], {}), '(predictions[1][i], pre_labels[i],\n loss_weights[1][-1])\n', (4344, 4403), False, 'from evaluate import weighted_binary_cross_entropy, valid_evaluate, test_evaluate\n'), ((10226, 10274), 'torch.Tensor', 'torch.Tensor', (['[1, train_data.pretrain_weight_up]'], {}), '([1, train_data.pretrain_weight_up])\n', (10238, 10274), False, 'import torch\n')]
|
from io import StringIO
import pandas as pd
from evaluate.classification import AlignmentAssessment
from evaluate.classifier import RecallClassifier
from evaluate.reporter import (
Reporter,
RecallReporter,
PrecisionReporter
)
from tests.common import (
create_classifier_with_two_entries,
create_correct_primary_sam_record,
create_incorrect_supplementary_sam_record,
)
from unittest.mock import Mock, patch
class TestReporter:
def test_generateReport_noClassifierReturnsEmpty(self):
sample = "sample"
classifier = RecallClassifier(name=sample)
reporter = RecallReporter(classifiers=[classifier])
actual = reporter._generate_report()
expected = pd.DataFrame(
[],
columns=[
"sample",
"query_probe_header",
"ref_probe_header",
"classification",
],
)
assert actual.equals(expected)
def test_generateReport_twoClassificationsReturnsDataframeWithTwoEntries(self):
classifier = create_classifier_with_two_entries(RecallClassifier)
sample = "sample"
classifier.name = sample
reporter = RecallReporter(classifiers=[classifier])
actual = reporter._generate_report()
expected_data = []
for assessment, record in [
(AlignmentAssessment.PRIMARY_CORRECT, create_correct_primary_sam_record()),
(
AlignmentAssessment.SUPPLEMENTARY_INCORRECT,
create_incorrect_supplementary_sam_record(),
),
]:
expected_data.append(
[sample, record.query_name, record.reference_name, assessment]
)
expected = pd.DataFrame(
expected_data,
columns=[
"sample",
"query_probe_header",
"ref_probe_header",
"classification",
],
)
assert actual.equals(expected)
def test_generateReport_twoClassificationsReturnsDataframeWithTwoEntriesWithAdditionalInfoToQueryAndRefHeaders(self):
classifier = create_classifier_with_two_entries(RecallClassifier)
sample = "sample"
classifier.name = sample
reporter = RecallReporter(classifiers=[classifier])
actual = reporter._generate_report(fixed_info_to_add_to_query_probe_header="info_query",
fixed_info_to_add_to_ref_probe_header="info_ref")
expected_data = []
for assessment, record in [
(AlignmentAssessment.PRIMARY_CORRECT, create_correct_primary_sam_record()),
(
AlignmentAssessment.SUPPLEMENTARY_INCORRECT,
create_incorrect_supplementary_sam_record(),
),
]:
expected_data.append(
[sample, record.query_name+"info_query", record.reference_name+"info_ref", assessment]
)
expected = pd.DataFrame(
expected_data,
columns=[
"sample",
"query_probe_header",
"ref_probe_header",
"classification",
],
)
assert actual.equals(expected)
def test_save_emptyReporterReturnsHeadersOnly(self):
delim = "\t"
reporter = RecallReporter(classifiers=[RecallClassifier()], delim=delim)
fh = StringIO(newline="")
report = reporter._generate_report()
reporter.save_report(report, fh)
fh.seek(0)
actual = fh.read()
expected = delim.join(reporter.columns) + "\n"
assert actual == expected
def test_save_reporterWithTwoClassificationsWritesHeadersAndTwoRows(self):
primary_correct_record = create_correct_primary_sam_record()
suppl_incorrect_record = create_incorrect_supplementary_sam_record()
delim = "\t"
classifier = create_classifier_with_two_entries(RecallClassifier)
sample = "sample"
classifier.name = sample
reporter = RecallReporter(classifiers=[classifier], delim=delim)
fh = StringIO(newline="")
report = reporter._generate_report()
reporter.save_report(report, fh)
fh.seek(0)
actual = fh.read()
expected_data = []
for assessment, record in [
(AlignmentAssessment.PRIMARY_CORRECT, primary_correct_record),
(AlignmentAssessment.SUPPLEMENTARY_INCORRECT, suppl_incorrect_record),
]:
expected_data.append(
[sample, record.query_name, record.reference_name, assessment]
)
expected = StringIO(newline="")
pd.DataFrame(
expected_data,
columns=[
"sample",
"query_probe_header",
"ref_probe_header",
"classification",
],
).to_csv(expected, sep=delim, header=True, index=False)
expected.seek(0)
expected = expected.read()
assert actual == expected
def test_save_reporterWithTwoClassificationsWritesHeadersAndTwoRowsWithCommaDelim(
self
):
primary_correct_record = create_correct_primary_sam_record()
suppl_incorrect_record = create_incorrect_supplementary_sam_record()
delim = ","
classifier = create_classifier_with_two_entries(RecallClassifier)
sample = "sample"
classifier.name = sample
reporter = RecallReporter(classifiers=[classifier], delim=delim)
fh = StringIO(newline="")
report = reporter._generate_report()
reporter.save_report(report, fh)
fh.seek(0)
actual = fh.read()
expected_data = []
for assessment, record in [
(AlignmentAssessment.PRIMARY_CORRECT, primary_correct_record),
(AlignmentAssessment.SUPPLEMENTARY_INCORRECT, suppl_incorrect_record),
]:
expected_data.append(
[sample, record.query_name, record.reference_name, assessment]
)
expected = StringIO(newline="")
pd.DataFrame(
expected_data,
columns=[
"sample",
"query_probe_header",
"ref_probe_header",
"classification",
],
).to_csv(expected, sep=delim, header=True, index=False)
expected.seek(0)
expected = expected.read()
assert actual == expected
def test_save_reporterWithTwoClassifiersWritesTwoSamplesWithTwoRows(self):
primary_correct_record = create_correct_primary_sam_record()
suppl_incorrect_record = create_incorrect_supplementary_sam_record()
delim = ","
classifier1 = create_classifier_with_two_entries(RecallClassifier)
sample = "sample"
classifier1.name = sample
classifier2 = create_classifier_with_two_entries(RecallClassifier)
sample2 = "sample2"
classifier2.name = sample2
reporter = RecallReporter(classifiers=[classifier1, classifier2], delim=delim)
fh = StringIO(newline="")
report = reporter._generate_report()
reporter.save_report(report, fh)
fh.seek(0)
actual = fh.read()
expected_data = []
for s in [sample, sample2]:
for assessment, record in [
(AlignmentAssessment.PRIMARY_CORRECT, primary_correct_record),
(AlignmentAssessment.SUPPLEMENTARY_INCORRECT, suppl_incorrect_record),
]:
expected_data.append(
[s, record.query_name, record.reference_name, assessment]
)
expected = StringIO(newline="")
pd.DataFrame(
expected_data,
columns=[
"sample",
"query_probe_header",
"ref_probe_header",
"classification",
],
).to_csv(expected, sep=delim, header=True, index=False)
expected.seek(0)
expected = expected.read()
assert actual == expected
class TestPrecisionReporter:
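    # Patching via Reporter._generate_report.__name__ (instead of a hard-coded
    # string) keeps the mock target in sync if the method is ever renamed.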
@patch.object(Reporter, Reporter._generate_report.__name__)
def test___generate_report(self, _generate_report_mock):
precision_reporter = PrecisionReporter(None)
precision_reporter.generate_report()
_generate_report_mock.assert_called_once_with()
class TestRecallReporter:
@patch.object(Reporter, Reporter._generate_report.__name__)
def test___generate_report(self, _generate_report_mock):
recall_reporter = RecallReporter(None)
recall_reporter.generate_report(10)
_generate_report_mock.assert_called_once_with(fixed_info_to_add_to_ref_probe_header="GT_CONF=10;")
|
[
"evaluate.classifier.RecallClassifier",
"evaluate.reporter.PrecisionReporter",
"evaluate.reporter.RecallReporter"
] |
[((8157, 8215), 'unittest.mock.patch.object', 'patch.object', (['Reporter', 'Reporter._generate_report.__name__'], {}), '(Reporter, Reporter._generate_report.__name__)\n', (8169, 8215), False, 'from unittest.mock import Mock, patch\n'), ((8463, 8521), 'unittest.mock.patch.object', 'patch.object', (['Reporter', 'Reporter._generate_report.__name__'], {}), '(Reporter, Reporter._generate_report.__name__)\n', (8475, 8521), False, 'from unittest.mock import Mock, patch\n'), ((563, 592), 'evaluate.classifier.RecallClassifier', 'RecallClassifier', ([], {'name': 'sample'}), '(name=sample)\n', (579, 592), False, 'from evaluate.classifier import RecallClassifier\n'), ((612, 652), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier]'}), '(classifiers=[classifier])\n', (626, 652), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((718, 818), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['sample', 'query_probe_header', 'ref_probe_header', 'classification']"}), "([], columns=['sample', 'query_probe_header',\n 'ref_probe_header', 'classification'])\n", (730, 818), True, 'import pandas as pd\n'), ((1075, 1127), 'tests.common.create_classifier_with_two_entries', 'create_classifier_with_two_entries', (['RecallClassifier'], {}), '(RecallClassifier)\n', (1109, 1127), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((1206, 1246), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier]'}), '(classifiers=[classifier])\n', (1220, 1246), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((1752, 1863), 'pandas.DataFrame', 'pd.DataFrame', (['expected_data'], {'columns': "['sample', 'query_probe_header', 'ref_probe_header', 'classification']"}), "(expected_data, columns=['sample', 'query_probe_header',\n 'ref_probe_header', 'classification'])\n", (1764, 1863), True, 'import pandas as pd\n'), ((2158, 2210), 'tests.common.create_classifier_with_two_entries', 'create_classifier_with_two_entries', (['RecallClassifier'], {}), '(RecallClassifier)\n', (2192, 2210), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((2289, 2329), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier]'}), '(classifiers=[classifier])\n', (2303, 2329), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((3004, 3115), 'pandas.DataFrame', 'pd.DataFrame', (['expected_data'], {'columns': "['sample', 'query_probe_header', 'ref_probe_header', 'classification']"}), "(expected_data, columns=['sample', 'query_probe_header',\n 'ref_probe_header', 'classification'])\n", (3016, 3115), True, 'import pandas as pd\n'), ((3439, 3459), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (3447, 3459), False, 'from io import StringIO\n'), ((3796, 3831), 'tests.common.create_correct_primary_sam_record', 'create_correct_primary_sam_record', ([], {}), '()\n', (3829, 3831), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((3865, 3908), 'tests.common.create_incorrect_supplementary_sam_record', 'create_incorrect_supplementary_sam_record', ([], {}), '()\n', (3906, 3908), False, 'from tests.common import 
create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((3951, 4003), 'tests.common.create_classifier_with_two_entries', 'create_classifier_with_two_entries', (['RecallClassifier'], {}), '(RecallClassifier)\n', (3985, 4003), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((4083, 4136), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier]', 'delim': 'delim'}), '(classifiers=[classifier], delim=delim)\n', (4097, 4136), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((4151, 4171), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (4159, 4171), False, 'from io import StringIO\n'), ((4683, 4703), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (4691, 4703), False, 'from io import StringIO\n'), ((5224, 5259), 'tests.common.create_correct_primary_sam_record', 'create_correct_primary_sam_record', ([], {}), '()\n', (5257, 5259), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((5293, 5336), 'tests.common.create_incorrect_supplementary_sam_record', 'create_incorrect_supplementary_sam_record', ([], {}), '()\n', (5334, 5336), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((5378, 5430), 'tests.common.create_classifier_with_two_entries', 'create_classifier_with_two_entries', (['RecallClassifier'], {}), '(RecallClassifier)\n', (5412, 5430), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((5509, 5562), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier]', 'delim': 'delim'}), '(classifiers=[classifier], delim=delim)\n', (5523, 5562), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((5577, 5597), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (5585, 5597), False, 'from io import StringIO\n'), ((6109, 6129), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (6117, 6129), False, 'from io import StringIO\n'), ((6622, 6657), 'tests.common.create_correct_primary_sam_record', 'create_correct_primary_sam_record', ([], {}), '()\n', (6655, 6657), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((6691, 6734), 'tests.common.create_incorrect_supplementary_sam_record', 'create_incorrect_supplementary_sam_record', ([], {}), '()\n', (6732, 6734), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((6777, 6829), 'tests.common.create_classifier_with_two_entries', 'create_classifier_with_two_entries', (['RecallClassifier'], {}), '(RecallClassifier)\n', (6811, 6829), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((6912, 6964), 'tests.common.create_classifier_with_two_entries', 'create_classifier_with_two_entries', (['RecallClassifier'], {}), '(RecallClassifier)\n', (6946, 6964), False, 'from tests.common 
import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((7048, 7115), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier1, classifier2]', 'delim': 'delim'}), '(classifiers=[classifier1, classifier2], delim=delim)\n', (7062, 7115), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((7130, 7150), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (7138, 7150), False, 'from io import StringIO\n'), ((7721, 7741), 'io.StringIO', 'StringIO', ([], {'newline': '""""""'}), "(newline='')\n", (7729, 7741), False, 'from io import StringIO\n'), ((8306, 8329), 'evaluate.reporter.PrecisionReporter', 'PrecisionReporter', (['None'], {}), '(None)\n', (8323, 8329), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((8609, 8629), 'evaluate.reporter.RecallReporter', 'RecallReporter', (['None'], {}), '(None)\n', (8623, 8629), False, 'from evaluate.reporter import Reporter, RecallReporter, PrecisionReporter\n'), ((1406, 1441), 'tests.common.create_correct_primary_sam_record', 'create_correct_primary_sam_record', ([], {}), '()\n', (1439, 1441), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((1535, 1578), 'tests.common.create_incorrect_supplementary_sam_record', 'create_incorrect_supplementary_sam_record', ([], {}), '()\n', (1576, 1578), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((2634, 2669), 'tests.common.create_correct_primary_sam_record', 'create_correct_primary_sam_record', ([], {}), '()\n', (2667, 2669), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((2763, 2806), 'tests.common.create_incorrect_supplementary_sam_record', 'create_incorrect_supplementary_sam_record', ([], {}), '()\n', (2804, 2806), False, 'from tests.common import create_classifier_with_two_entries, create_correct_primary_sam_record, create_incorrect_supplementary_sam_record\n'), ((4712, 4823), 'pandas.DataFrame', 'pd.DataFrame', (['expected_data'], {'columns': "['sample', 'query_probe_header', 'ref_probe_header', 'classification']"}), "(expected_data, columns=['sample', 'query_probe_header',\n 'ref_probe_header', 'classification'])\n", (4724, 4823), True, 'import pandas as pd\n'), ((6138, 6249), 'pandas.DataFrame', 'pd.DataFrame', (['expected_data'], {'columns': "['sample', 'query_probe_header', 'ref_probe_header', 'classification']"}), "(expected_data, columns=['sample', 'query_probe_header',\n 'ref_probe_header', 'classification'])\n", (6150, 6249), True, 'import pandas as pd\n'), ((7750, 7861), 'pandas.DataFrame', 'pd.DataFrame', (['expected_data'], {'columns': "['sample', 'query_probe_header', 'ref_probe_header', 'classification']"}), "(expected_data, columns=['sample', 'query_probe_header',\n 'ref_probe_header', 'classification'])\n", (7762, 7861), True, 'import pandas as pd\n'), ((3392, 3410), 'evaluate.classifier.RecallClassifier', 'RecallClassifier', ([], {}), '()\n', (3408, 3410), False, 'from evaluate.classifier import RecallClassifier\n')]
|
import chess
from evaluate import evaluate
# Best root move found by the most recent completed search.
bestMove = None
def negamax(board: chess.Board, depth: int, alpha: int, beta: int):
    """Alpha-beta negamax: return the position score and record the best root move in bestMove."""
    global bestMove
    if depth == 0:
        return evaluate(board)
    oldAlpha = alpha
    tempBestMove = None
    best_score = -10000  # stands in for -infinity
    for move in board.legal_moves:
        board.push(move)
        # Recurse with the side to move flipped and the alpha-beta window negated and swapped.
        score = -negamax(board, depth - 1, -beta, -alpha)
        print(f"\n[ {depth} ]", "WHITE" if not board.turn else "BLACK", move, "=>", evaluate(board))
        print("---------------")
        print(board)
        print("---------------")
        board.pop()
        if score > best_score:
            best_score = score
            tempBestMove = move
        if best_score > alpha:
            alpha = best_score
        if best_score >= beta:
            break  # beta cutoff: the opponent will not allow this line
    if alpha != oldAlpha:
        # alpha improved at this node, so tempBestMove is the move that raised it
        bestMove = tempBestMove
    return alpha
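# Minimal driver sketch (not in the original file): search the starting
# position to a small depth and read the chosen root move from the global.
if __name__ == "__main__":
    board = chess.Board()
    score = negamax(board, 2, -10000, 10000)
    print("best move:", bestMove, "score:", score)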
|
[
"evaluate.evaluate"
] |
[((581, 596), 'evaluate.evaluate', 'evaluate', (['board'], {}), '(board)\n', (589, 596), False, 'from evaluate import evaluate\n'), ((2490, 2505), 'evaluate.evaluate', 'evaluate', (['board'], {}), '(board)\n', (2498, 2505), False, 'from evaluate import evaluate\n')]
|
#!/usr/bin/env python
import torch.utils.data
import numpy as np
import random
import pickle
import matplotlib.pyplot as plt
from BERMUDA import training, testing
from pre_processing import pre_processing, read_cluster_similarity
from evaluate import evaluate_scores
from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx
# Set random seed
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
# IMPORTANT PARAMETER
similarity_thr = 0.90 # S_thr in the paper; choose between 0.85 and 0.9
# nn parameter
code_dim = 20
batch_size = 50 # batch size for each cluster
num_epochs = 2000
base_lr = 1e-3
lr_step = 200 # step decay of learning rates
momentum = 0.9
l2_decay = 5e-5
gamma = 1 # regularization between reconstruction and transfer learning
log_interval = 1
# CUDA
device_id = 0 # ID of GPU to use
cuda = torch.cuda.is_available()
if cuda:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
pre_process_paras = {'take_log': True, 'standardization': True, 'scaling': True}
nn_paras = {'code_dim': code_dim, 'batch_size': batch_size, 'num_epochs': num_epochs,
'base_lr': base_lr, 'lr_step': lr_step,
'momentum': momentum, 'l2_decay': l2_decay, 'gamma': gamma,
'cuda': cuda, 'log_interval': log_interval}
plt.ioff()
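# Non-interactive mode: the plotting helpers below write .png files instead of opening windows.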
if __name__ == '__main__':
data_folder = 'pancreas/'
dataset_file_list = ['muraro_seurat.csv', 'baron_seurat.csv']
cluster_similarity_file = data_folder + 'pancreas_metaneighbor.csv'
code_save_file = data_folder + 'code_list.pkl'
dataset_file_list = [data_folder+f for f in dataset_file_list]
# read data
dataset_list = pre_processing(dataset_file_list, pre_process_paras)
cluster_pairs = read_cluster_similarity(cluster_similarity_file, similarity_thr)
nn_paras['num_inputs'] = len(dataset_list[0]['gene_sym'])
# training
model, loss_total_list, loss_reconstruct_list, loss_transfer_list = training(dataset_list, cluster_pairs, nn_paras)
plot_loss(loss_total_list, loss_reconstruct_list, loss_transfer_list, data_folder+'loss.png')
# extract codes
code_list = testing(model, dataset_list, nn_paras)
with open(code_save_file,'wb') as f:
pickle.dump(code_list, f)
# combine datasets in dataset_list
    pre_process_paras = {'take_log': True, 'standardization': False, 'scaling': False} # log TPM for uncorrected data
dataset_list = pre_processing(dataset_file_list, pre_process_paras)
cell_list = []
data_list = []
cluster_list = []
for dataset in dataset_list:
data_list.append(dataset['gene_exp'])
cell_list.append(dataset['cell_labels'])
cluster_list.append(dataset['cluster_labels'])
cell_labels = np.concatenate(cell_list)
dataset_labels = gen_dataset_idx(data_list)
cluster_labels = np.concatenate(cluster_list)
# calculate UMAP
with open(code_save_file,'rb') as f:
code_list = pickle.load(f)
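    # Each entry of code_list/data_list appears to be (features, cells) per batch;
    # concatenating on axis=1 stacks cells, and the transpose yields the
    # (cells, features) layout that cal_UMAP expects (inferred from usage).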
code = np.concatenate(code_list, axis=1).transpose()
data = np.concatenate(data_list, axis=1).transpose()
umap_code = cal_UMAP(code)
umap_uncorrected = cal_UMAP(data)
# plot results
cell_type_dict = {1:'alpha', 2:'beta', 3:'delta', 4:'acinar', 5:'ductal', 6:'endo', 7:'gamma', 8:'epsilon'}
dataset_dict = {1: 'Muraro', 2:'Baron'}
plot_labels(umap_code, cell_labels, cell_type_dict, ['UMAP_1', 'UMAP_2'], data_folder+'ae_cell_type.png')
plot_labels(umap_uncorrected, cell_labels, cell_type_dict, ['UMAP_1', 'UMAP_2'], data_folder+'uncorrected_cell_type.png')
plot_labels(umap_code, dataset_labels, dataset_dict, ['UMAP_1', 'UMAP_2'], data_folder + 'ae_dataset.png')
plot_labels(umap_uncorrected, dataset_labels, dataset_dict, ['UMAP_1', 'UMAP_2'], data_folder + 'uncorrected_dataset.png')
# evaluate using proposed metrics
num_datasets = len(dataset_file_list)
print('ae')
div_score, ent_score, sil_score = evaluate_scores(umap_code, code, cell_labels, dataset_labels, num_datasets, 20, 20, 'cosine')
print('divergence_score: {:.3f}, entropy_score: {:.3f}, silhouette_score: {:.3f}'.format(div_score, ent_score, sil_score))
print('uncorrected')
div_score, ent_score, sil_score = evaluate_scores(umap_uncorrected, data, cell_labels, dataset_labels, num_datasets, 20, 20, 'cosine')
print('divergence_score: {:.3f}, entropy_score: {:.3f}, silhouette_score: {:.3f}'.format(div_score, ent_score, sil_score))
|
[
"evaluate.evaluate_scores"
] |
[((376, 393), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (387, 393), False, 'import random\n'), ((394, 414), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (408, 414), True, 'import numpy as np\n'), ((1424, 1434), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1432, 1434), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1842), 'pre_processing.pre_processing', 'pre_processing', (['dataset_file_list', 'pre_process_paras'], {}), '(dataset_file_list, pre_process_paras)\n', (1804, 1842), False, 'from pre_processing import pre_processing, read_cluster_similarity\n'), ((1863, 1927), 'pre_processing.read_cluster_similarity', 'read_cluster_similarity', (['cluster_similarity_file', 'similarity_thr'], {}), '(cluster_similarity_file, similarity_thr)\n', (1886, 1927), False, 'from pre_processing import pre_processing, read_cluster_similarity\n'), ((2078, 2125), 'BERMUDA.training', 'training', (['dataset_list', 'cluster_pairs', 'nn_paras'], {}), '(dataset_list, cluster_pairs, nn_paras)\n', (2086, 2125), False, 'from BERMUDA import training, testing\n'), ((2130, 2230), 'helper.plot_loss', 'plot_loss', (['loss_total_list', 'loss_reconstruct_list', 'loss_transfer_list', "(data_folder + 'loss.png')"], {}), "(loss_total_list, loss_reconstruct_list, loss_transfer_list, \n data_folder + 'loss.png')\n", (2139, 2230), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((2260, 2298), 'BERMUDA.testing', 'testing', (['model', 'dataset_list', 'nn_paras'], {}), '(model, dataset_list, nn_paras)\n', (2267, 2298), False, 'from BERMUDA import training, testing\n'), ((2552, 2604), 'pre_processing.pre_processing', 'pre_processing', (['dataset_file_list', 'pre_process_paras'], {}), '(dataset_file_list, pre_process_paras)\n', (2566, 2604), False, 'from pre_processing import pre_processing, read_cluster_similarity\n'), ((2866, 2891), 'numpy.concatenate', 'np.concatenate', (['cell_list'], {}), '(cell_list)\n', (2880, 2891), True, 'import numpy as np\n'), ((2913, 2939), 'helper.gen_dataset_idx', 'gen_dataset_idx', (['data_list'], {}), '(data_list)\n', (2928, 2939), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((2961, 2989), 'numpy.concatenate', 'np.concatenate', (['cluster_list'], {}), '(cluster_list)\n', (2975, 2989), True, 'import numpy as np\n'), ((3218, 3232), 'helper.cal_UMAP', 'cal_UMAP', (['code'], {}), '(code)\n', (3226, 3232), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((3256, 3270), 'helper.cal_UMAP', 'cal_UMAP', (['data'], {}), '(data)\n', (3264, 3270), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((3451, 3563), 'helper.plot_labels', 'plot_labels', (['umap_code', 'cell_labels', 'cell_type_dict', "['UMAP_1', 'UMAP_2']", "(data_folder + 'ae_cell_type.png')"], {}), "(umap_code, cell_labels, cell_type_dict, ['UMAP_1', 'UMAP_2'], \n data_folder + 'ae_cell_type.png')\n", (3462, 3563), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((3561, 3688), 'helper.plot_labels', 'plot_labels', (['umap_uncorrected', 'cell_labels', 'cell_type_dict', "['UMAP_1', 'UMAP_2']", "(data_folder + 'uncorrected_cell_type.png')"], {}), "(umap_uncorrected, cell_labels, cell_type_dict, ['UMAP_1',\n 'UMAP_2'], data_folder + 'uncorrected_cell_type.png')\n", (3572, 3688), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((3687, 
3798), 'helper.plot_labels', 'plot_labels', (['umap_code', 'dataset_labels', 'dataset_dict', "['UMAP_1', 'UMAP_2']", "(data_folder + 'ae_dataset.png')"], {}), "(umap_code, dataset_labels, dataset_dict, ['UMAP_1', 'UMAP_2'], \n data_folder + 'ae_dataset.png')\n", (3698, 3798), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((3798, 3924), 'helper.plot_labels', 'plot_labels', (['umap_uncorrected', 'dataset_labels', 'dataset_dict', "['UMAP_1', 'UMAP_2']", "(data_folder + 'uncorrected_dataset.png')"], {}), "(umap_uncorrected, dataset_labels, dataset_dict, ['UMAP_1',\n 'UMAP_2'], data_folder + 'uncorrected_dataset.png')\n", (3809, 3924), False, 'from helper import cal_UMAP, plot_labels, plot_expr, plot_loss, gen_dataset_idx\n'), ((4056, 4153), 'evaluate.evaluate_scores', 'evaluate_scores', (['umap_code', 'code', 'cell_labels', 'dataset_labels', 'num_datasets', '(20)', '(20)', '"""cosine"""'], {}), "(umap_code, code, cell_labels, dataset_labels, num_datasets,\n 20, 20, 'cosine')\n", (4071, 4153), False, 'from evaluate import evaluate_scores\n'), ((4340, 4444), 'evaluate.evaluate_scores', 'evaluate_scores', (['umap_uncorrected', 'data', 'cell_labels', 'dataset_labels', 'num_datasets', '(20)', '(20)', '"""cosine"""'], {}), "(umap_uncorrected, data, cell_labels, dataset_labels,\n num_datasets, 20, 20, 'cosine')\n", (4355, 4444), False, 'from evaluate import evaluate_scores\n'), ((2348, 2373), 'pickle.dump', 'pickle.dump', (['code_list', 'f'], {}), '(code_list, f)\n', (2359, 2373), False, 'import pickle\n'), ((3073, 3087), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3084, 3087), False, 'import pickle\n'), ((3099, 3132), 'numpy.concatenate', 'np.concatenate', (['code_list'], {'axis': '(1)'}), '(code_list, axis=1)\n', (3113, 3132), True, 'import numpy as np\n'), ((3156, 3189), 'numpy.concatenate', 'np.concatenate', (['data_list'], {'axis': '(1)'}), '(data_list, axis=1)\n', (3170, 3189), True, 'import numpy as np\n')]
|
import os
from datetime import datetime
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils.rnn import pad_sequence
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluate import evaluate
from onsets_and_frames import *
from onsets_and_frames.dataset import MAESTRO_scaled
def train(logdir, device, iterations, resume_iteration, checkpoint_interval, train_on, batch_size, sequence_length,
model_complexity, learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out,
clip_gradient_norm, validation_length, validation_interval):
os.makedirs(logdir, exist_ok=True)
writer = SummaryWriter(logdir)
train_groups, validation_groups = ['train'], ['validation']
if leave_one_out is not None:
all_years = {'2004', '2006', '2008', '2009', '2011', '2013', '2014', '2015', '2017'}
train_groups = list(all_years - {str(leave_one_out)})
validation_groups = [str(leave_one_out)]
if train_on == 'MAESTRO_small':
dataset = MAESTRO(path='data/MAESTRO_small', groups=train_groups, sequence_length=sequence_length)
validation_dataset = MAESTRO(path='data/MAESTRO_small', groups=validation_groups, sequence_length=sequence_length)
elif train_on == 'MAESTRO_scaled':
dataset = MAESTRO_scaled(groups=train_groups, sequence_length=sequence_length)
validation_dataset = MAESTRO_scaled(groups=validation_groups, sequence_length=sequence_length)
elif train_on == 'MAESTRO':
dataset = MAESTRO(groups=train_groups, sequence_length=sequence_length)
validation_dataset = MAESTRO(groups=validation_groups, sequence_length=sequence_length)
else:
dataset = MAPS(groups=['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2'], sequence_length=sequence_length)
validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=validation_length)
if train_on == 'MAESTRO_scaled':
loader = DataLoader(dataset, batch_size, shuffle=True, collate_fn=collate_scaled_audio, drop_last=True)
else:
loader = DataLoader(dataset, batch_size, shuffle=True, drop_last=True)
if resume_iteration is None:
model = OnsetsAndFrames(N_MELS, MAX_MIDI - MIN_MIDI + 1, model_complexity).to(device)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
resume_iteration = 0
else:
model_path = os.path.join(logdir, f'model-{resume_iteration}.pt')
model = torch.load(model_path)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
optimizer.load_state_dict(torch.load(os.path.join(logdir, 'last-optimizer-state.pt')))
summary(model)
scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
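    # scheduler.step() is called once per batch below, so
    # learning_rate_decay_steps counts iterations, not epochs.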
loop = tqdm(range(resume_iteration + 1, iterations + 1))
for i, batch in zip(loop, cycle(loader)):
if train_on == 'MAESTRO_scaled':
predictions, losses = model.run_on_scaled_batch(batch)
else:
predictions, losses = model.run_on_batch(batch)
loss = sum(losses.values())
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
if clip_gradient_norm:
clip_grad_norm_(model.parameters(), clip_gradient_norm)
for key, value in {'loss': loss, **losses}.items():
writer.add_scalar(key, value.item(), global_step=i)
if i % validation_interval == 0:
model.eval()
with torch.no_grad():
for key, value in evaluate(validation_dataset, model, dataset=train_on).items():
writer.add_scalar('validation/' + key.replace(' ', '_'), np.mean(value), global_step=i)
model.train()
if i % checkpoint_interval == 0:
torch.save(model, os.path.join(logdir, f'model-{i}.pt'))
torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
# Custom collate_fn that zero-pads the variable-length scaled audio in a batch
def collate_scaled_audio(batch):
    # batch: list of per-item dicts (length = batch size)
    # stacked_batch: one dict with the items stacked/padded along the batch dim
stacked_batch = {}
stacked_batch['audio_scaled'] = []
stacked_batch['scaled_index'] = []
stacked_batch['num_onsets'] = []
stacked_batch['audio'] = []
stacked_batch['onset'] = []
stacked_batch['offset'] = []
stacked_batch['frame'] = []
stacked_batch['velocity'] = []
    # Collect each item's scaled audio and count its spectrogram frames
    data_batch = []
num_scaled_frames = torch.zeros((len(batch),))
for i in range(len(batch)):
data = batch[i]
data_batch.append(data['audio_scaled'])
stacked_batch['scaled_index'].append(data['scaled_index'])
stacked_batch['num_onsets'].append(data['num_onsets'])
stacked_batch['audio'].append(data['audio'])
stacked_batch['onset'].append(data['onset'])
stacked_batch['offset'].append(data['offset'])
stacked_batch['frame'].append(data['frame'])
stacked_batch['velocity'].append(data['velocity'])
num_scaled_frames[i] = data['audio_scaled'].shape[0] // HOP_LENGTH
# Zero pad all data
padded_data_batch = pad_sequence(data_batch, batch_first=True)
padded_length = padded_data_batch.shape[-1] // HOP_LENGTH
# Stack data into one dict
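    # padded_frames[i] = number of zero frames appended to item i, presumably
    # used by run_on_scaled_batch to mask the padding when computing losses.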
stacked_batch['padded_frames'] = torch.full_like(num_scaled_frames, padded_length) - num_scaled_frames
stacked_batch['audio_scaled'] = padded_data_batch
stacked_batch['scaled_index'] = torch.stack(stacked_batch['scaled_index'])
stacked_batch['num_onsets'] = torch.stack(stacked_batch['num_onsets'])
stacked_batch['audio'] = torch.stack(stacked_batch['audio'])
stacked_batch['onset'] = torch.stack(stacked_batch['onset'])
stacked_batch['offset'] = torch.stack(stacked_batch['offset'])
stacked_batch['frame'] = torch.stack(stacked_batch['frame'])
stacked_batch['velocity'] = torch.stack(stacked_batch['velocity'])
# Return padded data
return stacked_batch
if __name__ == '__main__':
# logdir = 'runs/transcriber-' + datetime.now().strftime('%y%m%d-%H%M%S')
# logdir = 'runs/maestro_pretrain_100000'
logdir = 'runs/standardize_test'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
iterations = 10000
resume_iteration = None
checkpoint_interval = 1000
train_on = 'MAESTRO_scaled'
batch_size = 8
sequence_length = 200 * HOP_LENGTH
model_complexity = 48
learning_rate = 0.0006
learning_rate_decay_steps = 10000
learning_rate_decay_rate = 0.98
leave_one_out = None
clip_gradient_norm = 3
validation_length = sequence_length
validation_interval = 500
train(logdir, device, iterations, resume_iteration, checkpoint_interval, train_on, batch_size, sequence_length,
model_complexity, learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out,
clip_gradient_norm, validation_length, validation_interval)
|
[
"evaluate.evaluate"
] |
[((726, 760), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (737, 760), False, 'import os\n'), ((774, 795), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['logdir'], {}), '(logdir)\n', (787, 795), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2859, 2950), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': 'learning_rate_decay_steps', 'gamma': 'learning_rate_decay_rate'}), '(optimizer, step_size=learning_rate_decay_steps, gamma=\n learning_rate_decay_rate)\n', (2865, 2950), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((5427, 5469), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['data_batch'], {'batch_first': '(True)'}), '(data_batch, batch_first=True)\n', (5439, 5469), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((2120, 2219), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)', 'collate_fn': 'collate_scaled_audio', 'drop_last': '(True)'}), '(dataset, batch_size, shuffle=True, collate_fn=\n collate_scaled_audio, drop_last=True)\n', (2130, 2219), False, 'from torch.utils.data import DataLoader\n'), ((2242, 2303), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset, batch_size, shuffle=True, drop_last=True)\n', (2252, 2303), False, 'from torch.utils.data import DataLoader\n'), ((2564, 2616), 'os.path.join', 'os.path.join', (['logdir', 'f"""model-{resume_iteration}.pt"""'], {}), "(logdir, f'model-{resume_iteration}.pt')\n", (2576, 2616), False, 'import os\n'), ((1424, 1492), 'onsets_and_frames.dataset.MAESTRO_scaled', 'MAESTRO_scaled', ([], {'groups': 'train_groups', 'sequence_length': 'sequence_length'}), '(groups=train_groups, sequence_length=sequence_length)\n', (1438, 1492), False, 'from onsets_and_frames.dataset import MAESTRO_scaled\n'), ((1522, 1595), 'onsets_and_frames.dataset.MAESTRO_scaled', 'MAESTRO_scaled', ([], {'groups': 'validation_groups', 'sequence_length': 'sequence_length'}), '(groups=validation_groups, sequence_length=sequence_length)\n', (1536, 1595), False, 'from onsets_and_frames.dataset import MAESTRO_scaled\n'), ((2773, 2820), 'os.path.join', 'os.path.join', (['logdir', '"""last-optimizer-state.pt"""'], {}), "(logdir, 'last-optimizer-state.pt')\n", (2785, 2820), False, 'import os\n'), ((4006, 4043), 'os.path.join', 'os.path.join', (['logdir', 'f"""model-{i}.pt"""'], {}), "(logdir, f'model-{i}.pt')\n", (4018, 4043), False, 'import os\n'), ((4092, 4139), 'os.path.join', 'os.path.join', (['logdir', '"""last-optimizer-state.pt"""'], {}), "(logdir, 'last-optimizer-state.pt')\n", (4104, 4139), False, 'import os\n'), ((3737, 3790), 'evaluate.evaluate', 'evaluate', (['validation_dataset', 'model'], {'dataset': 'train_on'}), '(validation_dataset, model, dataset=train_on)\n', (3745, 3790), False, 'from evaluate import evaluate\n'), ((3877, 3891), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (3884, 3891), True, 'import numpy as np\n')]
|
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from transformers import AutoConfig, AutoTokenizer
from modeling import BertForSentimentClassification, AlbertForSentimentClassification, DistilBertForSentimentClassification
from dataset import SSTDataset
from evaluate import evaluate
from arguments import args
def train(model, criterion, optimizer, train_loader, val_loader, args):
best_acc = 0
for epoch in trange(args.num_eps, desc="Epoch"):
model.train()
for i, (input_ids, attention_mask, labels) in enumerate(tqdm(iterable=train_loader, desc="Training")):
optimizer.zero_grad()
input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
logits = model(input_ids=input_ids, attention_mask=attention_mask)
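            # BCEWithLogitsLoss expects raw logits and float targets: squeeze the
            # (batch, 1) output to (batch,) and cast the integer labels to float.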
loss = criterion(input=logits.squeeze(-1), target=labels.float())
loss.backward()
optimizer.step()
val_acc, val_loss = evaluate(model=model, criterion=criterion, dataloader=val_loader, device=device)
print("Epoch {} complete! Validation Accuracy : {}, Validation Loss : {}".format(epoch, val_acc, val_loss))
if val_acc > best_acc:
print("Best validation accuracy improved from {} to {}, saving model...".format(best_acc, val_acc))
best_acc = val_acc
model.save_pretrained(save_directory=f'models/{args.output_dir}/')
config.save_pretrained(save_directory=f'models/{args.output_dir}/')
tokenizer.save_pretrained(save_directory=f'models/{args.output_dir}/')
if __name__ == "__main__":
if args.model_name_or_path is None:
args.model_name_or_path = 'bert-base-uncased'
#Configuration for the desired transformer model
config = AutoConfig.from_pretrained(args.model_name_or_path)
#Tokenizer for the desired transformer model
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
#Create the model with the desired transformer model
if config.model_type == 'bert':
model = BertForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
elif config.model_type == 'albert':
model = AlbertForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
elif config.model_type == 'distilbert':
model = DistilBertForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
else:
raise ValueError('This transformer model is not supported yet.')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
#Takes as the input the logits of the positive class and computes the binary cross-entropy
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(params=model.parameters(), lr=args.lr)
train_set = SSTDataset(filename='data/train.tsv', maxlen=args.maxlen_train, tokenizer=tokenizer)
val_set = SSTDataset(filename='data/dev.tsv', maxlen=args.maxlen_val, tokenizer=tokenizer)
train_loader = DataLoader(dataset=train_set, batch_size=args.batch_size, num_workers=args.num_threads)
val_loader = DataLoader(dataset=val_set, batch_size=args.batch_size, num_workers=args.num_threads)
train(model=model, criterion=criterion, optimizer=optimizer, train_loader=train_loader, val_loader=val_loader, args=args)
|
[
"evaluate.evaluate"
] |
[((508, 542), 'tqdm.trange', 'trange', (['args.num_eps'], {'desc': '"""Epoch"""'}), "(args.num_eps, desc='Epoch')\n", (514, 542), False, 'from tqdm import tqdm, trange\n'), ((1729, 1780), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (1755, 1780), False, 'from transformers import AutoConfig, AutoTokenizer\n'), ((1842, 1896), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (1871, 1896), False, 'from transformers import AutoConfig, AutoTokenizer\n'), ((2646, 2668), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2666, 2668), True, 'import torch.nn as nn\n'), ((2747, 2836), 'dataset.SSTDataset', 'SSTDataset', ([], {'filename': '"""data/train.tsv"""', 'maxlen': 'args.maxlen_train', 'tokenizer': 'tokenizer'}), "(filename='data/train.tsv', maxlen=args.maxlen_train, tokenizer=\n tokenizer)\n", (2757, 2836), False, 'from dataset import SSTDataset\n'), ((2843, 2928), 'dataset.SSTDataset', 'SSTDataset', ([], {'filename': '"""data/dev.tsv"""', 'maxlen': 'args.maxlen_val', 'tokenizer': 'tokenizer'}), "(filename='data/dev.tsv', maxlen=args.maxlen_val, tokenizer=tokenizer\n )\n", (2853, 2928), False, 'from dataset import SSTDataset\n'), ((2941, 3033), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_threads'}), '(dataset=train_set, batch_size=args.batch_size, num_workers=args.\n num_threads)\n', (2951, 3033), False, 'from torch.utils.data import DataLoader\n'), ((3043, 3133), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_set', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_threads'}), '(dataset=val_set, batch_size=args.batch_size, num_workers=args.\n num_threads)\n', (3053, 3133), False, 'from torch.utils.data import DataLoader\n'), ((998, 1083), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'criterion': 'criterion', 'dataloader': 'val_loader', 'device': 'device'}), '(model=model, criterion=criterion, dataloader=val_loader, device=device\n )\n', (1006, 1083), False, 'from evaluate import evaluate\n'), ((1996, 2086), 'modeling.BertForSentimentClassification.from_pretrained', 'BertForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.model_name_or_path,\n config=config)\n', (2042, 2086), False, 'from modeling import BertForSentimentClassification, AlbertForSentimentClassification, DistilBertForSentimentClassification\n'), ((618, 662), 'tqdm.tqdm', 'tqdm', ([], {'iterable': 'train_loader', 'desc': '"""Training"""'}), "(iterable=train_loader, desc='Training')\n", (622, 662), False, 'from tqdm import tqdm, trange\n'), ((2130, 2222), 'modeling.AlbertForSentimentClassification.from_pretrained', 'AlbertForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.model_name_or_path,\n config=config)\n', (2178, 2222), False, 'from modeling import BertForSentimentClassification, AlbertForSentimentClassification, DistilBertForSentimentClassification\n'), ((2475, 2500), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2498, 2500), False, 'import torch\n'), ((2270, 2367), 'modeling.DistilBertForSentimentClassification.from_pretrained', 'DistilBertForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.\n 
model_name_or_path, config=config)\n', (2322, 2367), False, 'from modeling import BertForSentimentClassification, AlbertForSentimentClassification, DistilBertForSentimentClassification\n')]
|
import argparse
import torch
import json, ast
#from matplotlib import pyplot as plt
from osc_server import FlowServer
from osc_utils import generate_dataset
from utils.data import load_dataset
from evaluate import evaluate_dimensions, evaluate_dataset
from torch.utils.data import DataLoader
import numpy as np
import os
from os.path import expanduser
# Debug mode
__DEBUG__ = False
parser = argparse.ArgumentParser()
parser.add_argument('--in_port', type=int, default=1232)
parser.add_argument('--out_port', type=int, default=1233)
parser.add_argument('--ip', type=str, default="127.0.0.1")
# Model arguments
parser.add_argument('--model_path', type=str, default="results/")
parser.add_argument('--model_name', type=str, default="vae_mel_mse_32_cnn_mlp_1.model")
# Data arguments
parser.add_argument('--path', type=str, default='data', help='')
parser.add_argument('--dataset', type=str, default='64par', help='')
parser.add_argument('--train_type', type=str, default='fixed', help='')
parser.add_argument('--data', type=str, default='mel', help='')
parser.add_argument('--projection', type=str, default='pca', help='')
parser.add_argument('--batch_size', type=int, default=128, help='')
parser.add_argument('--nbworkers', type=int, default=0, help='')
parser.add_argument('--reanalyze', type=int, default=0, help='')
parser.add_argument('--device', type=str, default='cpu', help='')
args = parser.parse_args()
#%%
"""
###################
Load model
###################
"""
model = None
args.model = args.model_path + args.dataset + '/' + args.model_name
if args.model:
model = torch.load(args.model, map_location="cpu")
model = model.eval()
"""
###################
Load dataset
###################
"""
print('[Loading dataset for ' + args.data + ']')
if (args.train_type == 'random'):
train_loader, valid_loader, test_loader, args = load_dataset(args)
else:
ref_split = args.path + '/reference_split_' + args.dataset + "_" + args.data + '.th'
print('[About to load]')
data = torch.load(ref_split)
train_loader, valid_loader, test_loader = data[0], data[1], data[2]
print('[Changing refs in reference]')
home = expanduser("~")
for t in [train_loader, valid_loader, test_loader]:
t.dataset.datadir = home + '/Datasets/diva_dataset/' + args.dataset
t.dataset.trans_datasets[args.data].datadir = home + '/Datasets/diva_dataset/' + args.dataset
torch.save([train_loader, valid_loader, test_loader], ref_split)
# Remove the shuffling from dataset
train_loader = DataLoader(train_loader.dataset, batch_size=64, shuffle=False, num_workers=2)
valid_loader = DataLoader(valid_loader.dataset, batch_size=64, shuffle=False, num_workers=2)
test_loader = DataLoader(test_loader.dataset, batch_size=64, shuffle=False, num_workers=2)
#%% Combine sets
audioset = [train_loader, valid_loader, test_loader]
# Handle DIVA parameters
with open("synth/diva_params.txt") as f:
diva_midi_desc = ast.literal_eval(f.read())
rev_idx = {diva_midi_desc[key]: key for key in diva_midi_desc}
# Retrieve dataset parameters
with open("synth/param_default_32.json") as f:
params_default = json.load(f)
param_dict = params_default
param_names = test_loader.dataset.final_params
print('[Reference set on which model was trained]')
print(param_names)
#%%
"""
###################
Perform model pre-analysis
###################
"""
# Target file of analysis
analysis_file = args.model.replace('.model', '.analysis')
# Target dataset
dataset_file = args.model.replace('.model', '.dataset')
if (len(args.projection) > 0):
analysis_file += '.' + args.projection
dataset_file += '.' + args.projection
# Create analysis files
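# The analysis is cached next to the model as <model>.analysis[.<projection>].npy;
# run with --reanalyze 1 to force recomputation.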
if (not os.path.exists(analysis_file + '.npy') or args.reanalyze):
# Perform dataset evaluation
final_z, final_meta, pca, z_vars, z_means = evaluate_dataset(model, [train_loader, valid_loader, test_loader], args)
# Perform dimension evaluation
d_idx, d_vars, d_params, d_desc, desc_max = evaluate_dimensions(model, pca, args)
# Save information
model_analysis = {
'd_idx':d_idx,
'd_vars':d_vars,
'd_params':d_params,
'd_desc':d_desc,
'desc_max':desc_max,
'final_z':final_z,
'final_meta':final_meta,
'pca':pca,
'z_vars':z_vars,
'z_means':z_means
}
# Generate offline presets dataset
model_analysis = generate_dataset(dataset_file + '.txt', [train_loader, valid_loader, test_loader], model_analysis)
# Keep path to the model dataset
model_analysis['dataset_path'] = dataset_file + '.txt'
# Save the whole analysis
np.save(analysis_file, model_analysis)
else:
model_analysis = np.load(analysis_file + '.npy', allow_pickle=True).item()
#%%
"""
###################
Create server
###################
"""
server = FlowServer(args.in_port, args.out_port, model=model, dataset=audioset, data=args.data, param_names=param_names, param_dict=param_dict, analysis=model_analysis, debug=__DEBUG__, args=args)
#%%
if (__DEBUG__):
# Test pitch analysis
print('[Debug mode : Testing server on given functions]')
else:
print('[Running server on ports in : %d - out : %d]'%(args.in_port, args.out_port))
server.run()
|
[
"evaluate.evaluate_dimensions",
"evaluate.evaluate_dataset"
] |
[((394, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (417, 419), False, 'import argparse\n'), ((2635, 2712), 'torch.utils.data.DataLoader', 'DataLoader', (['train_loader.dataset'], {'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(train_loader.dataset, batch_size=64, shuffle=False, num_workers=2)\n', (2645, 2712), False, 'from torch.utils.data import DataLoader\n'), ((2728, 2805), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_loader.dataset'], {'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(valid_loader.dataset, batch_size=64, shuffle=False, num_workers=2)\n', (2738, 2805), False, 'from torch.utils.data import DataLoader\n'), ((2820, 2896), 'torch.utils.data.DataLoader', 'DataLoader', (['test_loader.dataset'], {'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(test_loader.dataset, batch_size=64, shuffle=False, num_workers=2)\n', (2830, 2896), False, 'from torch.utils.data import DataLoader\n'), ((4916, 5113), 'osc_server.FlowServer', 'FlowServer', (['args.in_port', 'args.out_port'], {'model': 'model', 'dataset': 'audioset', 'data': 'args.data', 'param_names': 'param_names', 'param_dict': 'param_dict', 'analysis': 'model_analysis', 'debug': '__DEBUG__', 'args': 'args'}), '(args.in_port, args.out_port, model=model, dataset=audioset, data\n =args.data, param_names=param_names, param_dict=param_dict, analysis=\n model_analysis, debug=__DEBUG__, args=args)\n', (4926, 5113), False, 'from osc_server import FlowServer\n'), ((1698, 1740), 'torch.load', 'torch.load', (['args.model'], {'map_location': '"""cpu"""'}), "(args.model, map_location='cpu')\n", (1708, 1740), False, 'import torch\n'), ((1964, 1982), 'utils.data.load_dataset', 'load_dataset', (['args'], {}), '(args)\n', (1976, 1982), False, 'from utils.data import load_dataset\n'), ((2118, 2139), 'torch.load', 'torch.load', (['ref_split'], {}), '(ref_split)\n', (2128, 2139), False, 'import torch\n'), ((2265, 2280), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (2275, 2280), False, 'from os.path import expanduser\n'), ((2519, 2583), 'torch.save', 'torch.save', (['[train_loader, valid_loader, test_loader]', 'ref_split'], {}), '([train_loader, valid_loader, test_loader], ref_split)\n', (2529, 2583), False, 'import torch\n'), ((3252, 3264), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3261, 3264), False, 'import json, ast\n'), ((3937, 4009), 'evaluate.evaluate_dataset', 'evaluate_dataset', (['model', '[train_loader, valid_loader, test_loader]', 'args'], {}), '(model, [train_loader, valid_loader, test_loader], args)\n', (3953, 4009), False, 'from evaluate import evaluate_dimensions, evaluate_dataset\n'), ((4098, 4135), 'evaluate.evaluate_dimensions', 'evaluate_dimensions', (['model', 'pca', 'args'], {}), '(model, pca, args)\n', (4117, 4135), False, 'from evaluate import evaluate_dimensions, evaluate_dataset\n'), ((4487, 4589), 'osc_utils.generate_dataset', 'generate_dataset', (["(dataset_file + '.txt')", '[train_loader, valid_loader, test_loader]', 'model_analysis'], {}), "(dataset_file + '.txt', [train_loader, valid_loader,\n test_loader], model_analysis)\n", (4503, 4589), False, 'from osc_utils import generate_dataset\n'), ((4716, 4754), 'numpy.save', 'np.save', (['analysis_file', 'model_analysis'], {}), '(analysis_file, model_analysis)\n', (4723, 4754), True, 'import numpy as np\n'), ((3797, 3835), 'os.path.exists', 'os.path.exists', (["(analysis_file + '.npy')"], {}), "(analysis_file + '.npy')\n", (3811, 3835), False, 
'import os\n'), ((4782, 4832), 'numpy.load', 'np.load', (["(analysis_file + '.npy')"], {'allow_pickle': '(True)'}), "(analysis_file + '.npy', allow_pickle=True)\n", (4789, 4832), True, 'import numpy as np\n')]
|
import random
import numpy as np
import os
import logging
import torch
from utilities import get_device, current_utc_time
import pandas as pd
from imp import reload
from data_loader import get_loader, prepare_dataset
from transformers import AdamW, get_linear_schedule_with_warmup
from models import get_model
from trainer import train_model
from evaluate import evaluate_model
import pickle
from datetime import datetime
from sklearn.model_selection import train_test_split
reload(logging)
# Parameters
model_name = "BERT"
seed = 57
epochs = 15
batch_size = 8
learning_rate = 2e-4
epsilon = 1e-8
golden_3 = pd.read_excel("./data/P3-Golden.xlsx")
SAVE_MODEL = True
output_dir = "./models/"
# Set up log file
current_time = current_utc_time()
logging.basicConfig(
filename=f"{os.getcwd()}/bert-p3.log",
filemode="a",
format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
datefmt="%H:%M:%S",
level=logging.INFO,
)
device = get_device()
# Set the seed value all over the place to make this reproducible.
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Separate evaluation dataset
train, test = train_test_split(golden_3, test_size=0.2)
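# This 80/20 split is reproducible because np.random.seed(seed) ran above;
# passing random_state=seed would make that explicit.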
# Prepare dataset
input_ids, attention_masks, labels = prepare_dataset(train)
val_input_ids, val_attention_masks, val_labels = prepare_dataset(test)
val_dataloader = get_loader(
val_input_ids, val_attention_masks, val_labels, batch_size=batch_size, loader_type="VALIDATE"
)
logging.info(f"Number of train samples: {len(input_ids)}")
logging.info(f"Number of validation samples: {len(val_input_ids)}")
# Measure the total training time for the whole run.
start_time = datetime.now()
# ========================================
# Training
# ========================================
# Prepare dataloader
train_dataloader = get_loader(input_ids, attention_masks, labels, batch_size=batch_size)
# model
model = get_model(model_name, num_labels = 5).to(device)
# Optimizer
optimizer = AdamW(
model.parameters(),
lr=learning_rate, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps=epsilon, # args.adam_epsilon - default is 1e-8.
)
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0, # Default value in run_glue.py
num_training_steps=total_steps,
)
model, stats = train_model(
model, train_dataloader, val_dataloader, optimizer, scheduler, seed=seed, epochs=epochs
)
# ========================================
# Evaluation
# ========================================
train_time = (datetime.now() - start_time).total_seconds()
eval_time_start = datetime.now()
eval_report = evaluate_model(model, val_dataloader)
eval_time = (datetime.now() - eval_time_start).total_seconds()
training_stats = {
"train_size": len(labels),
"val_size": len(val_labels),
"training_stats": stats,
"evaluation_report": eval_report,
"train_time": train_time,
"eval_time": eval_time,
}
logging.info(f"Training Stats: \n {training_stats}")
print(f"Evaluation Report: \n {eval_report}")
# Save report
with open("bert-p1.pkl", "wb") as f:
pickle.dump(training_stats, f)
if SAVE_MODEL:
# Create output directory if needed
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print(f"Saving model to {output_dir}")
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
|
[
"evaluate.evaluate_model"
] |
[((476, 491), 'imp.reload', 'reload', (['logging'], {}), '(logging)\n', (482, 491), False, 'from imp import reload\n'), ((610, 648), 'pandas.read_excel', 'pd.read_excel', (['"""./data/P3-Golden.xlsx"""'], {}), "('./data/P3-Golden.xlsx')\n", (623, 648), True, 'import pandas as pd\n'), ((726, 744), 'utilities.current_utc_time', 'current_utc_time', ([], {}), '()\n', (742, 744), False, 'from utilities import get_device, current_utc_time\n'), ((958, 970), 'utilities.get_device', 'get_device', ([], {}), '()\n', (968, 970), False, 'from utilities import get_device, current_utc_time\n'), ((1038, 1055), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1049, 1055), False, 'import random\n'), ((1056, 1076), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1070, 1076), True, 'import numpy as np\n'), ((1077, 1100), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1094, 1100), False, 'import torch\n'), ((1101, 1133), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1127, 1133), False, 'import torch\n'), ((1179, 1220), 'sklearn.model_selection.train_test_split', 'train_test_split', (['golden_3'], {'test_size': '(0.2)'}), '(golden_3, test_size=0.2)\n', (1195, 1220), False, 'from sklearn.model_selection import train_test_split\n'), ((1277, 1299), 'data_loader.prepare_dataset', 'prepare_dataset', (['train'], {}), '(train)\n', (1292, 1299), False, 'from data_loader import get_loader, prepare_dataset\n'), ((1349, 1370), 'data_loader.prepare_dataset', 'prepare_dataset', (['test'], {}), '(test)\n', (1364, 1370), False, 'from data_loader import get_loader, prepare_dataset\n'), ((1389, 1499), 'data_loader.get_loader', 'get_loader', (['val_input_ids', 'val_attention_masks', 'val_labels'], {'batch_size': 'batch_size', 'loader_type': '"""VALIDATE"""'}), "(val_input_ids, val_attention_masks, val_labels, batch_size=\n batch_size, loader_type='VALIDATE')\n", (1399, 1499), False, 'from data_loader import get_loader, prepare_dataset\n'), ((1696, 1710), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1708, 1710), False, 'from datetime import datetime\n'), ((1864, 1933), 'data_loader.get_loader', 'get_loader', (['input_ids', 'attention_masks', 'labels'], {'batch_size': 'batch_size'}), '(input_ids, attention_masks, labels, batch_size=batch_size)\n', (1874, 1933), False, 'from data_loader import get_loader, prepare_dataset\n'), ((2446, 2544), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (2477, 2544), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((2604, 2708), 'trainer.train_model', 'train_model', (['model', 'train_dataloader', 'val_dataloader', 'optimizer', 'scheduler'], {'seed': 'seed', 'epochs': 'epochs'}), '(model, train_dataloader, val_dataloader, optimizer, scheduler,\n seed=seed, epochs=epochs)\n', (2615, 2708), False, 'from trainer import train_model\n'), ((2902, 2916), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2914, 2916), False, 'from datetime import datetime\n'), ((2931, 2968), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'val_dataloader'], {}), '(model, val_dataloader)\n', (2945, 2968), False, 'from evaluate import evaluate_model\n'), ((3244, 3299), 'logging.info', 'logging.info', (['f"""Training Stats: \n {training_stats}"""'], {}), '(f"""Training Stats: \n {training_stats}""")\n', (3256, 3299), False, 'import logging\n'), ((3399, 3429), 'pickle.dump', 'pickle.dump', (['training_stats', 'f'], {}), '(training_stats, f)\n', (3410, 3429), False, 'import pickle\n'), ((1951, 1986), 'models.get_model', 'get_model', (['model_name'], {'num_labels': '(5)'}), '(model_name, num_labels=5)\n', (1960, 1986), False, 'from models import get_model\n'), ((3497, 3523), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3511, 3523), False, 'import os\n'), ((3533, 3556), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3544, 3556), False, 'import os\n'), ((2839, 2853), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2851, 2853), False, 'from datetime import datetime\n'), ((2982, 2996), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2994, 2996), False, 'from datetime import datetime\n'), ((782, 793), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (791, 793), False, 'import os\n')]
|
import pickle
import gzip
from sparse_gp import SparseGP
import scipy.stats as sps
import numpy as np
import sys
import os
sys.path.append('%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args
sys.path.append('%s/../prog_eval' % os.path.dirname(os.path.realpath(__file__)))
from att_model_proxy import AttProgProxy, batch_decode
sys.path.append('%s/../prog_data' % os.path.dirname(os.path.realpath(__file__)))
from bo_target import BOTarget
import evaluate
parser = evaluate.get_parser(cmd_args.grammar_file)
gold_prog_list = []
with open('%s/../prog_data/gold_prog.txt' % os.path.dirname(os.path.realpath(__file__))) as f:
for row in f:
gold_prog_list.append(row.strip())
def is_prog_valid(prog):
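    # A program is valid if it parses and, when probed at v0 = 0.12345, either
    # evaluates to a value or fails with nothing worse than a runtime error.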
tokens = evaluate.tokenize(prog)
tree = evaluate.parse(parser, tokens)
if tree is not None:
x = 0.12345
y, msg = evaluate.eval_at(tree, v0_val=x)
if y is not None or (y is None and msg.startswith('runtime error:')):
return True
return False
def decode_from_latent_space(latent_points, model):
decode_attempts = 25
raw_logits = model.pred_raw_logits(latent_points.astype(np.float32))
decoded_programs = batch_decode(raw_logits, True, decode_attempts)
    # We see which decode attempts yield valid programs (the "rdkit"/"smile"
    # names below are left over from the molecule version of this code)
rdkit_molecules = []
for i in range(decode_attempts):
rdkit_molecules.append([])
for j in range(latent_points.shape[ 0 ]):
smile = decoded_programs[ j ][ i ]
if not is_prog_valid(smile):
rdkit_molecules[ i ].append(None)
else:
rdkit_molecules[ i ].append(smile)
import collections
rdkit_molecules = np.array(rdkit_molecules)
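    # Majority vote: for each latent point, keep the most frequent valid decoding
    # across the decode attempts (None when no attempt produced a valid program).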
final_smiles = []
for i in range(latent_points.shape[ 0 ]):
aux = collections.Counter(rdkit_molecules[ ~np.equal(rdkit_molecules[ :, i ], None) , i ])
if len(aux) > 0:
            smile = aux.most_common(1)[0][0]  # Counter.most_common replaces the Python 2 dict-indexing idiom
else:
smile = None
final_smiles.append(smile)
return final_smiles
# We define the functions used to load and save objects
def save_object(obj, filename):
"""
Function that saves an object to a file using pickle
"""
result = pickle.dumps(obj)
    with gzip.GzipFile(filename, 'wb') as dest:
        dest.write(result)
def load_object(filename):
"""
Function that loads an object from a file using pickle
"""
    with gzip.GzipFile(filename, 'rb') as source:
        result = source.read()
    ret = pickle.loads(result)
return ret
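# Illustrative round trip for the two helpers above (hypothetical path):
#   save_object(scores, '/tmp/scores.dat')
#   scores_back = load_object('/tmp/scores.dat')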
import argparse
cmd_opt = argparse.ArgumentParser(description='Argparser for encoding')
cmd_opt.add_argument('-seed', type=int, help='random seed')
cmd_opt.add_argument('-min_len', type=int, help='min # of statements')
cmd_opt.add_argument('-max_len', type=int, help='max # of statements')
cmd_opt.add_argument('-y_norm', type=int, help='normalize target?')
cmd_opt.add_argument('-prog_idx', type=int, help='index of gold program')
cmd_opt.add_argument('-phase', type=str, help='train / test')
cmd_opt.add_argument('-prefix', type=str, help='data prefix')
cmd_opt.add_argument('-data_dir', type=str, help='data folder')
cmd_opt.add_argument('-feature_dump', type=str, help='feature numpy dump')
cmd_opt.add_argument('-gp_lr', type=float, help='learning rate of gaussian process')
args, _ = cmd_opt.parse_known_args()
if __name__ == '__main__':
print(cmd_args)
print(args)
model = AttProgProxy()
np.random.seed(args.seed)
fmt = args.feature_dump.split('.')[-1]
if fmt == 'npy':
X = np.load(args.feature_dump)
elif fmt == 'txt':
X = np.loadtxt(args.feature_dump)
else:
print('unknown feature dump format ' + fmt)
raise NotImplementedError
gold_prog = gold_prog_list[args.prog_idx]
y = []
for l in range(args.min_len, args.max_len + 1):
if args.phase == 'train':
fname = '%s/%s-number-50000-nbstat-%d.txt.target_for_[%s].txt' % (args.data_dir, args.prefix, l, gold_prog)
else:
fname = '%s/%s-number-50000-nbstat-%d.test.txt.target_for_[%s].txt' % (args.data_dir, args.prefix, l, gold_prog)
cur_scores = np.loadtxt(fname)
y.append(np.reshape(cur_scores, [-1, 1]))
y = np.vstack(y)
if args.y_norm:
y_mean = np.mean(y)
y_std = np.std(y)
y = (y - y_mean) / y_std
# y /= np.max(y)
assert X.shape[0] == y.shape[0]
n = X.shape[ 0 ]
permutation = np.random.choice(n, n, replace = False)
    n_train = int(np.round(0.99 * n))  # use the builtin int; np.int was removed in NumPy 1.24
    X_train = X[permutation, :][:n_train, :]
    X_test = X[permutation, :][n_train:, :]
    y_train = y[permutation][:n_train]
    y_test = y[permutation][n_train:]
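    # The sparse GP is fit on the 99% split; the held-out 1% tracks predictive quality in each BO round.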
bo_target = BOTarget(parser, gold_prog=gold_prog)
for iteration in range(5):
print(iteration)
np.random.seed(args.seed * iteration)
M = 500
sgp = SparseGP(X_train, 0 * X_train, y_train, M)
sgp.train_via_ADAM(X_train, 0 * X_train, y_train, X_test, X_test * 0, \
y_test, minibatch_size = 10 * M, max_iterations = cmd_args.num_epochs, learning_rate = args.gp_lr)
next_inputs = sgp.batched_greedy_ei(50, np.min(X_train, 0), np.max(X_train, 0))
valid_eq_final = decode_from_latent_space(next_inputs, model)
new_features = next_inputs
save_object(valid_eq_final, "%s/valid_eq-prog-%d-y-%d-seed-%d-iter-%d.dat" % (cmd_args.save_dir, args.prog_idx, args.y_norm, args.seed, iteration))
scores = []
for i in range(len(valid_eq_final)):
if valid_eq_final[ i ] is not None:
score = bo_target(valid_eq_final[i])
else:
score = np.log(1+BOTarget.WORST)
scores.append(score)
print(i)
print(valid_eq_final)
print(scores)
save_object(scores, "%s/scores-prog-%d-y-%d-seed-%d-iter-%d.dat" % (cmd_args.save_dir, args.prog_idx, args.y_norm, args.seed, iteration))
if args.y_norm:
scores = (np.array(scores) - y_mean) / y_std
# scores = np.array(scores) / np.max(y)
if len(new_features) > 0:
X_train = np.concatenate([ X_train, new_features ], 0)
y_train = np.concatenate([ y_train, np.array(scores)[ :, None ] ], 0)
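            # The augmented (X_train, y_train) feed the next BO round's sparse-GP fit.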
|
[
"evaluate.eval_at",
"evaluate.get_parser",
"evaluate.parse",
"evaluate.tokenize"
] |
[((516, 558), 'evaluate.get_parser', 'evaluate.get_parser', (['cmd_args.grammar_file'], {}), '(cmd_args.grammar_file)\n', (535, 558), False, 'import evaluate\n'), ((2674, 2735), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argparser for encoding"""'}), "(description='Argparser for encoding')\n", (2697, 2735), False, 'import argparse\n'), ((775, 798), 'evaluate.tokenize', 'evaluate.tokenize', (['prog'], {}), '(prog)\n', (792, 798), False, 'import evaluate\n'), ((810, 840), 'evaluate.parse', 'evaluate.parse', (['parser', 'tokens'], {}), '(parser, tokens)\n', (824, 840), False, 'import evaluate\n'), ((1233, 1280), 'att_model_proxy.batch_decode', 'batch_decode', (['raw_logits', '(True)', 'decode_attempts'], {}), '(raw_logits, True, decode_attempts)\n', (1245, 1280), False, 'from att_model_proxy import AttProgProxy, batch_decode\n'), ((1737, 1762), 'numpy.array', 'np.array', (['rdkit_molecules'], {}), '(rdkit_molecules)\n', (1745, 1762), True, 'import numpy as np\n'), ((2298, 2315), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (2310, 2315), False, 'import pickle\n'), ((2589, 2609), 'pickle.loads', 'pickle.loads', (['result'], {}), '(result)\n', (2601, 2609), False, 'import pickle\n'), ((3542, 3556), 'att_model_proxy.AttProgProxy', 'AttProgProxy', ([], {}), '()\n', (3554, 3556), False, 'from att_model_proxy import AttProgProxy, batch_decode\n'), ((3562, 3587), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3576, 3587), True, 'import numpy as np\n'), ((4367, 4379), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (4376, 4379), True, 'import numpy as np\n'), ((4589, 4626), 'numpy.random.choice', 'np.random.choice', (['n', 'n'], {'replace': '(False)'}), '(n, n, replace=False)\n', (4605, 4626), True, 'import numpy as np\n'), ((4914, 4951), 'bo_target.BOTarget', 'BOTarget', (['parser'], {'gold_prog': 'gold_prog'}), '(parser, gold_prog=gold_prog)\n', (4922, 4951), False, 'from bo_target import BOTarget\n'), ((904, 936), 'evaluate.eval_at', 'evaluate.eval_at', (['tree'], {'v0_val': 'x'}), '(tree, v0_val=x)\n', (920, 936), False, 'import evaluate\n'), ((2325, 2354), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (2338, 2354), False, 'import gzip\n'), ((2515, 2544), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (2528, 2544), False, 'import gzip\n'), ((3665, 3691), 'numpy.load', 'np.load', (['args.feature_dump'], {}), '(args.feature_dump)\n', (3672, 3691), True, 'import numpy as np\n'), ((4278, 4295), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (4288, 4295), True, 'import numpy as np\n'), ((4422, 4432), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4429, 4432), True, 'import numpy as np\n'), ((4449, 4458), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (4455, 4458), True, 'import numpy as np\n'), ((5017, 5054), 'numpy.random.seed', 'np.random.seed', (['(args.seed * iteration)'], {}), '(args.seed * iteration)\n', (5031, 5054), True, 'import numpy as np\n'), ((5086, 5128), 'sparse_gp.SparseGP', 'SparseGP', (['X_train', '(0 * X_train)', 'y_train', 'M'], {}), '(X_train, 0 * X_train, y_train, M)\n', (5094, 5128), False, 'from sparse_gp import SparseGP\n'), ((181, 207), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (197, 207), False, 'import os\n'), ((293, 319), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (309, 319), False, 'import os\n'), ((430, 456), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (446, 456), False, 'import os\n'), ((3727, 3756), 'numpy.loadtxt', 'np.loadtxt', (['args.feature_dump'], {}), '(args.feature_dump)\n', (3737, 3756), True, 'import numpy as np\n'), ((4313, 4344), 'numpy.reshape', 'np.reshape', (['cur_scores', '[-1, 1]'], {}), '(cur_scores, [-1, 1])\n', (4323, 4344), True, 'import numpy as np\n'), ((5369, 5387), 'numpy.min', 'np.min', (['X_train', '(0)'], {}), '(X_train, 0)\n', (5375, 5387), True, 'import numpy as np\n'), ((5389, 5407), 'numpy.max', 'np.max', (['X_train', '(0)'], {}), '(X_train, 0)\n', (5395, 5407), True, 'import numpy as np\n'), ((6363, 6405), 'numpy.concatenate', 'np.concatenate', (['[X_train, new_features]', '(0)'], {}), '([X_train, new_features], 0)\n', (6377, 6405), True, 'import numpy as np\n'), ((640, 666), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (656, 666), False, 'import os\n'), ((4813, 4831), 'numpy.round', 'np.round', (['(0.99 * n)'], {}), '(0.99 * n)\n', (4821, 4831), True, 'import numpy as np\n'), ((4873, 4891), 'numpy.round', 'np.round', (['(0.99 * n)'], {}), '(0.99 * n)\n', (4881, 4891), True, 'import numpy as np\n'), ((5898, 5924), 'numpy.log', 'np.log', (['(1 + BOTarget.WORST)'], {}), '(1 + BOTarget.WORST)\n', (5904, 5924), True, 'import numpy as np\n'), ((4676, 4694), 'numpy.round', 'np.round', (['(0.99 * n)'], {}), '(0.99 * n)\n', (4684, 4694), True, 'import numpy as np\n'), ((4742, 4760), 'numpy.round', 'np.round', (['(0.99 * n)'], {}), '(0.99 * n)\n', (4750, 4760), True, 'import numpy as np\n'), ((6224, 6240), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (6232, 6240), True, 'import numpy as np\n'), ((1885, 1922), 'numpy.equal', 'np.equal', (['rdkit_molecules[:, i]', 'None'], {}), '(rdkit_molecules[:, i], None)\n', (1893, 1922), True, 'import numpy as np\n'), ((6456, 6472), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (6464, 6472), True, 'import numpy as np\n')]
|
import argparse
import os
import math
import json
from datetime import datetime
from models import models
from db import db, Result
from uuid import uuid4, UUID
from keras import backend as K
import numpy as np
import evaluate
from data_gen import data
from config import config
def test_model(model, train, validation, test, label_form):
loss, accuracy = model.evaluate_generator(validation, steps=math.ceil(len(validation)/config.BATCH_SIZE))
train_loss, train_accuracy = model.evaluate_generator(train, steps=math.ceil(len(train)/config.BATCH_SIZE))
test_loss, test_accuracy = model.evaluate_generator(test, steps=math.ceil(len(test)/config.BATCH_SIZE))
train.reset()
validation.reset()
test.reset()
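    # model.evaluate_generator has advanced the generators; resetting keeps the
    # get_results/get_labels passes below aligned with the first batch.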
results = evaluate.get_results(model, validation)
labels = list(evaluate.get_labels(validation))
test_results = evaluate.get_results(model, test)
test_labels = list(evaluate.get_labels(test))
if label_form == "outcome_3":
probabilities = list(results)
test_probabilities = list(test_results)
else:
probabilities = list(evaluate.transform_binary_probabilities(results))
test_probabilities = list(evaluate.transform_binary_probabilities(test_results))
train.reset()
validation.reset()
test.reset()
return {
"train_accuracy": float(train_accuracy),
"train_loss": float(train_loss),
"accuracy": float(accuracy),
"loss": float(loss),
"test_accuracy": float(test_accuracy),
"test_loss": float(test_loss),
"probabilities": probabilities,
"labels": labels,
"test_probabilities": test_probabilities,
"test_labels":test_labels,
}
def characterize_data(data):
unique, counts = np.unique(data.classes, return_counts=True)
index_to_count = dict(zip(unique, counts))
characterization = { c: index_to_count[data.class_indices[c]] for c in data.class_indices }
return characterization
def run(model, description, input_form, label_form="outcome", split_id=None, loaded_data=None, hyperparameters=dict()):
model_instance = evaluate.load(model)
if loaded_data is None:
train, validation, test = data(split_id, input_form=input_form, label_form=label_form)
else:
train, validation, test = loaded_data
train.reset()
validation.reset()
test.reset()
train_data_stats = characterize_data(train)
validation_data_stats = characterize_data(validation)
test_data_stats = characterize_data(test)
results = test_model(model_instance, train, validation, test, label_form)
result = Result(
"v2",
        str(uuid4()),
str(split_id),
train_data_stats,
validation_data_stats,
test_data_stats,
description,
input_form,
label=label_form,
hyperparameters=hyperparameters,
history="-",
**results
)
db.session.add(result)
db.session.commit()
run("output/models/fc2dc27b-6954-4f82-80ca-0973d9437eb3-v2.h5",
description = "B0_clr",
input_form = "t2",
label_form = "outcome_pos",
split_id = UUID("84a64c17-fe3e-440c-aaaf-e1bd5b02576f"))
|
[
"evaluate.transform_binary_probabilities",
"evaluate.get_results",
"evaluate.get_labels",
"evaluate.load"
] |
[((749, 788), 'evaluate.get_results', 'evaluate.get_results', (['model', 'validation'], {}), '(model, validation)\n', (769, 788), False, 'import evaluate\n'), ((860, 893), 'evaluate.get_results', 'evaluate.get_results', (['model', 'test'], {}), '(model, test)\n', (880, 893), False, 'import evaluate\n'), ((1770, 1813), 'numpy.unique', 'np.unique', (['data.classes'], {'return_counts': '(True)'}), '(data.classes, return_counts=True)\n', (1779, 1813), True, 'import numpy as np\n'), ((2128, 2148), 'evaluate.load', 'evaluate.load', (['model'], {}), '(model)\n', (2141, 2148), False, 'import evaluate\n'), ((2955, 2977), 'db.db.session.add', 'db.session.add', (['result'], {}), '(result)\n', (2969, 2977), False, 'from db import db, Result\n'), ((2982, 3001), 'db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2999, 3001), False, 'from db import db, Result\n'), ((807, 838), 'evaluate.get_labels', 'evaluate.get_labels', (['validation'], {}), '(validation)\n', (826, 838), False, 'import evaluate\n'), ((917, 942), 'evaluate.get_labels', 'evaluate.get_labels', (['test'], {}), '(test)\n', (936, 942), False, 'import evaluate\n'), ((2212, 2272), 'data_gen.data', 'data', (['split_id'], {'input_form': 'input_form', 'label_form': 'label_form'}), '(split_id, input_form=input_form, label_form=label_form)\n', (2216, 2272), False, 'from data_gen import data\n'), ((3167, 3211), 'uuid.UUID', 'UUID', (['"""84a64c17-fe3e-440c-aaaf-e1bd5b02576f"""'], {}), "('84a64c17-fe3e-440c-aaaf-e1bd5b02576f')\n", (3171, 3211), False, 'from uuid import uuid4, UUID\n'), ((1108, 1156), 'evaluate.transform_binary_probabilities', 'evaluate.transform_binary_probabilities', (['results'], {}), '(results)\n', (1147, 1156), False, 'import evaluate\n'), ((1192, 1245), 'evaluate.transform_binary_probabilities', 'evaluate.transform_binary_probabilities', (['test_results'], {}), '(test_results)\n', (1231, 1245), False, 'import evaluate\n'), ((2678, 2685), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2683, 2685), False, 'from uuid import uuid4, UUID\n')]
|
from evaluate import evaluate_model
from models import *
from train import train_model
from visualize import visualize_model_performance
'''
Driver script for part 2 of the assignment.
'''
num_epochs = 50
model = starter_model()
default_model_path = train_model(model, str(model.name) + "_p2_default_weights", num_epochs)
reduced_background_path = train_model(model, str(model.name) + "_p2_reduced_background_weights", num_epochs,
[.01, 0, 1])
balanced_path = train_model(model, str(model.name) + "_p2_balanced_weights", num_epochs, [1, 0, 1])
evaluate_model(default_model_path)
evaluate_model(reduced_background_path, [.01, 0, 1])
evaluate_model(balanced_path, [1, 0, 1])
visualize_model_performance(default_model_path)
visualize_model_performance(reduced_background_path, [.01, 0, 1])
visualize_model_performance(balanced_path, [1, 0, 1])
|
[
"evaluate.evaluate_model"
] |
[((590, 624), 'evaluate.evaluate_model', 'evaluate_model', (['default_model_path'], {}), '(default_model_path)\n', (604, 624), False, 'from evaluate import evaluate_model\n'), ((625, 678), 'evaluate.evaluate_model', 'evaluate_model', (['reduced_background_path', '[0.01, 0, 1]'], {}), '(reduced_background_path, [0.01, 0, 1])\n', (639, 678), False, 'from evaluate import evaluate_model\n'), ((678, 718), 'evaluate.evaluate_model', 'evaluate_model', (['balanced_path', '[1, 0, 1]'], {}), '(balanced_path, [1, 0, 1])\n', (692, 718), False, 'from evaluate import evaluate_model\n'), ((720, 767), 'visualize.visualize_model_performance', 'visualize_model_performance', (['default_model_path'], {}), '(default_model_path)\n', (747, 767), False, 'from visualize import visualize_model_performance\n'), ((768, 834), 'visualize.visualize_model_performance', 'visualize_model_performance', (['reduced_background_path', '[0.01, 0, 1]'], {}), '(reduced_background_path, [0.01, 0, 1])\n', (795, 834), False, 'from visualize import visualize_model_performance\n'), ((834, 887), 'visualize.visualize_model_performance', 'visualize_model_performance', (['balanced_path', '[1, 0, 1]'], {}), '(balanced_path, [1, 0, 1])\n', (861, 887), False, 'from visualize import visualize_model_performance\n')]
|
import json
import logging
import os
import sys
import tensorflow as tf
import evaluate
import model
import preprocessing
import train
import utils
logger = logging.getLogger('eval_from_ckpt')
logger.setLevel(logging.INFO)
str_1 = "_1294"
str_2 = "_2588"
str_3 = "_3882"
str_4 = ""
_CKPT_TO_EPOCH_BATCH_414 = {
1: -1,
2: str(0) + str_1,
3: str(0) + str_2,
4: str(0) + str_3,
5: str(0) + str_4,
6: str(1) + str_1,
7: str(1) + str_2,
8: str(1) + str_3,
9: str(1) + str_4,
10: str(2) + str_1,
11: str(2) + str_2,
12: str(2) + str_3,
13: str(2) + str_4,
14: str(3) + str_1,
15: str(3) + str_2,
16: str(3) + str_3,
17: str(3) + str_4,
18: str(4) + str_1,
19: str(4) + str_2,
20: str(4) + str_3,
21: str(4) + str_4,
22: str(5) + str_1,
23: str(5) + str_2,
24: str(5) + str_3,
25: str(5) + str_4,
26: str(6) + str_1,
27: str(6) + str_2,
28: str(6) + str_3,
29: str(6) + str_4,
}
_EPOCH_BATCH_414_CKPT = {v: k for k, v in _CKPT_TO_EPOCH_BATCH_414.items()}
def map_ckpts_to_partial_epoch(ckpt_num, map_type="414k"):
if map_type == "414k":
epoch_map = _CKPT_TO_EPOCH_BATCH_414
else:
raise ValueError(f"{map_type} not implemented yet.")
logger.info(f"Mapping {ckpt_num} to {epoch_map[ckpt_num]}")
return epoch_map[ckpt_num]
def map_partial_epoch_to_ckpts(epoch_num=4, batch_num="", map_type="414k"):
if map_type == "414k":
epoch_map = _EPOCH_BATCH_414_CKPT
else:
raise ValueError(f"{map_type} not implemented yet.")
logger.info(f"Mapping epoch {epoch_num}, batch {batch_num} to {epoch_map[str(epoch_num) + batch_num]}")
return epoch_map[str(epoch_num) + batch_num]
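# Example with the 414k map above: checkpoint 2 was saved at epoch 0, batch 1294,
# so map_ckpts_to_partial_epoch(2) == '0_1294' and map_partial_epoch_to_ckpts(0, '_1294') == 2.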
def evaluate_from_ckpts(num_training_captions=30000,
vocab_size=5000,
experiment_name="test",
num_repeats=20,
save_eval_dataset=False):
image_path, annotation_file_path = utils.get_data_path()
logger.info('Downloaded the dataset.')
train_captions, img_name_vector = utils.get_caption_image_names(annotation_file_path, image_path)
logger.info('Extracted the caption and image names.')
train_captions, img_name_vector = utils.get_top_k(train_captions, img_name_vector, num_training_captions)
assert len(train_captions) == len(img_name_vector) == num_training_captions
logger.info(f'Selected {num_training_captions} examples.')
image_features_extract_model = model.create_im_feat_extract_model()
preprocessing.preprocess_images(img_name_vector, image_features_extract_model)
logger.info('Preprocessed the images.')
caption_vector, tokenizer, train_seqs = preprocessing.preprocess_text(train_captions, vocab_size)
max_length = utils.calc_max_length(train_seqs)
logger.info('Preprocessed the text.')
(dataset_train, dataset_val,
num_steps_train, num_steps_val) = preprocessing.create_dataset(img_name_vector,
caption_vector,
train_seqs,
test_size=0.2,
batch_size=64,
buffer_size=1000)
logger.info('Created the dataset.')
# Create data folders
model_logdir = os.path.join("models", experiment_name)
results_logdir = os.path.join("results_eval_only", experiment_name)
eval_loss_fig_name = os.path.join(results_logdir, 'eval_loss_plot.png')
eval_loss_partial_fig_name = os.path.join(results_logdir, 'eval_loss_plot_partial_eval.png')
results_eval_logdir = os.path.join(results_logdir, 'eval_results')
    # We only evaluate the example directory right now; to evaluate more, add the corresponding directories here.
results_eval_adverts_logdir = os.path.join(results_eval_logdir, 'adverts')
results_eval_example_logdir = os.path.join(results_eval_adverts_logdir, 'example')
results_advert_dirs = [results_eval_example_logdir]
source_images_adverts_dirs = ["data/eval_data/example"]
if not os.path.exists(model_logdir):
os.makedirs(model_logdir)
logger.info(f'Created the model logging folder {model_logdir}.')
else:
        logger.warning('Model directory already exists, continue?')
user_input = input('Y to continue. Any other key to exit: ')
if user_input.lower() != 'y':
sys.exit(0)
if not os.path.exists(results_logdir):
        os.makedirs(results_eval_example_logdir)  # creates results_logdir and the nested eval dirs in one call (the original referenced an undefined necklace dir)
logger.info(f'Created the results folders with root {results_logdir}.')
else:
        logger.warning('Results directory already exists, continue?')
user_input = input('Y to continue. Any other key to exit: ')
if user_input.lower() != 'y':
sys.exit(0)
if save_eval_dataset:
dataset_path = os.path.join(model_logdir, 'eval_dataset')
tf.data.experimental.save(dataset_val, dataset_path)
# ******** Build models ********
# We add one more to the vocab to account for '<pad>' token
vocab_size += 1
encoder, decoder, tokenizer, ckpt_manager, ckpt = utils.build_model(model_logdir, vocab_size)
# Eval on a random image to ensure all model elements are built.
random_image = tf.keras.applications.inception_v3.preprocess_input(tf.random.uniform(shape=(299, 299, 3)))
evaluate.evaluate(random_image, encoder, decoder, image_features_extract_model, tokenizer, max_length)
    # We re-run the eval to check the loss is comparable to the original model.
    # train.eval below needs an optimizer and loss object that this script never
    # builds; the standard captioning-tutorial choices are assumed here.
    optimizer = tf.keras.optimizers.Adam()
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    eval_loss_plot = []
# Loop through all the checkpoints and evaluate from ckpts
for current_ckpt in ckpt_manager.checkpoints:
ckpt.restore(current_ckpt)
logger.info(f'Restored checkpoint {current_ckpt}')
ckpt_num = int(current_ckpt.split('-')[-1])
epoch = map_ckpts_to_partial_epoch(ckpt_num)
logger.info(f'Evaluating model at epoch {epoch}')
eval_loss = train.eval(epoch, dataset_val, num_steps_val, encoder, decoder, tokenizer, optimizer, loss_object)
eval_loss_plot.append(eval_loss)
utils.save_loss_plot(eval_loss_plot, eval_loss_fig_name, 'eval')
logger.info(f'Eval loss: {eval_loss_plot}')
for image_set, results_ad_logdir in zip(source_images_adverts_dirs, results_advert_dirs):
evaluate.eval_adverts(encoder, decoder, tokenizer, image_features_extract_model, results_ad_logdir, epoch, max_length=max_length, adverts_images=image_set, num_repeats=num_repeats)
|
[
"evaluate.evaluate",
"evaluate.eval_adverts"
] |
[((148, 183), 'logging.getLogger', 'logging.getLogger', (['"""eval_from_ckpt"""'], {}), "('eval_from_ckpt')\n", (165, 183), False, 'import logging\n'), ((2008, 2029), 'utils.get_data_path', 'utils.get_data_path', ([], {}), '()\n', (2027, 2029), False, 'import utils\n'), ((2112, 2175), 'utils.get_caption_image_names', 'utils.get_caption_image_names', (['annotation_file_path', 'image_path'], {}), '(annotation_file_path, image_path)\n', (2141, 2175), False, 'import utils\n'), ((2273, 2344), 'utils.get_top_k', 'utils.get_top_k', (['train_captions', 'img_name_vector', 'num_training_captions'], {}), '(train_captions, img_name_vector, num_training_captions)\n', (2288, 2344), False, 'import utils\n'), ((2524, 2560), 'model.create_im_feat_extract_model', 'model.create_im_feat_extract_model', ([], {}), '()\n', (2558, 2560), False, 'import model\n'), ((2565, 2643), 'preprocessing.preprocess_images', 'preprocessing.preprocess_images', (['img_name_vector', 'image_features_extract_model'], {}), '(img_name_vector, image_features_extract_model)\n', (2596, 2643), False, 'import preprocessing\n'), ((2733, 2790), 'preprocessing.preprocess_text', 'preprocessing.preprocess_text', (['train_captions', 'vocab_size'], {}), '(train_captions, vocab_size)\n', (2762, 2790), False, 'import preprocessing\n'), ((2808, 2841), 'utils.calc_max_length', 'utils.calc_max_length', (['train_seqs'], {}), '(train_seqs)\n', (2829, 2841), False, 'import utils\n'), ((2957, 3082), 'preprocessing.create_dataset', 'preprocessing.create_dataset', (['img_name_vector', 'caption_vector', 'train_seqs'], {'test_size': '(0.2)', 'batch_size': '(64)', 'buffer_size': '(1000)'}), '(img_name_vector, caption_vector, train_seqs,\n test_size=0.2, batch_size=64, buffer_size=1000)\n', (2985, 3082), False, 'import preprocessing\n'), ((3505, 3544), 'os.path.join', 'os.path.join', (['"""models"""', 'experiment_name'], {}), "('models', experiment_name)\n", (3517, 3544), False, 'import os\n'), ((3566, 3616), 'os.path.join', 'os.path.join', (['"""results_eval_only"""', 'experiment_name'], {}), "('results_eval_only', experiment_name)\n", (3578, 3616), False, 'import os\n'), ((3642, 3692), 'os.path.join', 'os.path.join', (['results_logdir', '"""eval_loss_plot.png"""'], {}), "(results_logdir, 'eval_loss_plot.png')\n", (3654, 3692), False, 'import os\n'), ((3726, 3789), 'os.path.join', 'os.path.join', (['results_logdir', '"""eval_loss_plot_partial_eval.png"""'], {}), "(results_logdir, 'eval_loss_plot_partial_eval.png')\n", (3738, 3789), False, 'import os\n'), ((3816, 3860), 'os.path.join', 'os.path.join', (['results_logdir', '"""eval_results"""'], {}), "(results_logdir, 'eval_results')\n", (3828, 3860), False, 'import os\n'), ((4001, 4045), 'os.path.join', 'os.path.join', (['results_eval_logdir', '"""adverts"""'], {}), "(results_eval_logdir, 'adverts')\n", (4013, 4045), False, 'import os\n'), ((4080, 4132), 'os.path.join', 'os.path.join', (['results_eval_adverts_logdir', '"""example"""'], {}), "(results_eval_adverts_logdir, 'example')\n", (4092, 4132), False, 'import os\n'), ((5324, 5367), 'utils.build_model', 'utils.build_model', (['model_logdir', 'vocab_size'], {}), '(model_logdir, vocab_size)\n', (5341, 5367), False, 'import utils\n'), ((5553, 5659), 'evaluate.evaluate', 'evaluate.evaluate', (['random_image', 'encoder', 'decoder', 'image_features_extract_model', 'tokenizer', 'max_length'], {}), '(random_image, encoder, decoder,\n image_features_extract_model, tokenizer, max_length)\n', (5570, 5659), False, 'import evaluate\n'), ((4261, 4289), 'os.path.exists', 'os.path.exists', (['model_logdir'], {}), '(model_logdir)\n', (4275, 4289), False, 'import os\n'), ((4299, 4324), 'os.makedirs', 'os.makedirs', (['model_logdir'], {}), '(model_logdir)\n', (4310, 4324), False, 'import os\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['results_logdir'], {}), '(results_logdir)\n', (4634, 4650), False, 'import os\n'), ((4660, 4701), 'os.makedirs', 'os.makedirs', (['results_eval_necklace_logdir'], {}), '(results_eval_necklace_logdir)\n', (4671, 4701), False, 'import os\n'), ((5044, 5086), 'os.path.join', 'os.path.join', (['model_logdir', '"""eval_dataset"""'], {}), "(model_logdir, 'eval_dataset')\n", (5056, 5086), False, 'import os\n'), ((5095, 5147), 'tensorflow.data.experimental.save', 'tf.data.experimental.save', (['dataset_val', 'dataset_path'], {}), '(dataset_val, dataset_path)\n', (5120, 5147), True, 'import tensorflow as tf\n'), ((5509, 5547), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(299, 299, 3)'}), '(shape=(299, 299, 3))\n', (5526, 5547), True, 'import tensorflow as tf\n'), ((6151, 6253), 'train.eval', 'train.eval', (['epoch', 'dataset_val', 'num_steps_val', 'encoder', 'decoder', 'tokenizer', 'optimizer', 'loss_object'], {}), '(epoch, dataset_val, num_steps_val, encoder, decoder, tokenizer,\n optimizer, loss_object)\n', (6161, 6253), False, 'import train\n'), ((6299, 6363), 'utils.save_loss_plot', 'utils.save_loss_plot', (['eval_loss_plot', 'eval_loss_fig_name', '"""eval"""'], {}), "(eval_loss_plot, eval_loss_fig_name, 'eval')\n", (6319, 6363), False, 'import utils\n'), ((6527, 6716), 'evaluate.eval_adverts', 'evaluate.eval_adverts', (['encoder', 'decoder', 'tokenizer', 'image_features_extract_model', 'results_ad_logdir', 'epoch'], {'max_length': 'max_length', 'adverts_images': 'image_set', 'num_repeats': 'num_repeats'}), '(encoder, decoder, tokenizer,\n image_features_extract_model, results_ad_logdir, epoch, max_length=\n max_length, adverts_images=image_set, num_repeats=num_repeats)\n', (6548, 6716), False, 'import evaluate\n')]
|
import os
import tensorflow as tf
from train import train
from evaluate import evaluate
from data_structure import load_data
flags = tf.app.flags
flags.DEFINE_string('gpu', '0', 'visible gpu')
flags.DEFINE_string('mode', 'train', 'set train or eval')
flags.DEFINE_string('datadir', 'data', 'directory of input data')
flags.DEFINE_string('dataname', 'sports.pkl', 'name of data')
flags.DEFINE_string('modeldir', 'model', 'directory of model')
flags.DEFINE_string('modelname', 'sports', 'name of model')
flags.DEFINE_bool('discourserank', True, 'flag of discourserank')
flags.DEFINE_float('damp', 0.9, 'damping factor of discourserank')
flags.DEFINE_integer('epochs', 10, 'epochs')
flags.DEFINE_integer('batch_size', 8, 'batch size')
flags.DEFINE_integer('log_period', 500, 'valid period')
flags.DEFINE_string('opt', 'Adagrad', 'optimizer')
flags.DEFINE_float('lr', 0.1, 'lr')
flags.DEFINE_float('norm', 1e-4, 'norm')
flags.DEFINE_float('grad_clip', 10.0, 'grad_clip')
flags.DEFINE_float('keep_prob', 0.95, 'keep_prob')
flags.DEFINE_integer('beam_width', 10, 'beam_width')
flags.DEFINE_float('length_penalty_weight', 0.0, 'length_penalty_weight')
flags.DEFINE_integer('dim_hidden', 256, 'dim_output')
flags.DEFINE_integer('dim_str', 128, 'dim_output')
flags.DEFINE_integer('dim_sent', 384, 'dim_sent')
# for evaluation
flags.DEFINE_string('refdir', 'ref', 'refdir')
flags.DEFINE_string('outdir', 'out', 'outdir')
# special tokens
PAD = '<pad>' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNK = '<unk>' # This has a vocab id, which is used to represent out-of-vocabulary words
BOS = '<p>' # This has a vocab id, which is used at the beginning of every decoder input sequence
EOS = '</p>' # This has a vocab id, which is used at the end of untruncated target sequences
def main(_):
config = flags.FLAGS
print(str(config.flag_values_dict()))
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu
print('loading data...')
train_batches, dev_batches, test_batches, embedding_matrix, vocab, word_to_id = load_data(config)
flags.DEFINE_integer('PAD_IDX', word_to_id[PAD], 'PAD_IDX')
flags.DEFINE_integer('UNK_IDX', word_to_id[UNK], 'UNK_IDX')
flags.DEFINE_integer('BOS_IDX', word_to_id[BOS], 'BOS_IDX')
flags.DEFINE_integer('EOS_IDX', word_to_id[EOS], 'EOS_IDX')
n_embed, d_embed = embedding_matrix.shape
flags.DEFINE_integer('n_embed', n_embed, 'n_embed')
flags.DEFINE_integer('d_embed', d_embed, 'd_embed')
maximum_iterations = max([max([d._max_sent_len(None) for d in batch]) for ct, batch in dev_batches])
flags.DEFINE_integer('maximum_iterations', maximum_iterations, 'maximum_iterations')
if config.mode == 'train':
train(config, train_batches, dev_batches, test_batches, embedding_matrix, vocab)
elif config.mode == 'eval':
evaluate(config, test_batches, vocab)
if __name__ == "__main__":
tf.app.run()
|
[
"evaluate.evaluate"
] |
[((2083, 2100), 'data_structure.load_data', 'load_data', (['config'], {}), '(config)\n', (2092, 2100), False, 'from data_structure import load_data\n'), ((2959, 2971), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2969, 2971), True, 'import tensorflow as tf\n'), ((2768, 2853), 'train.train', 'train', (['config', 'train_batches', 'dev_batches', 'test_batches', 'embedding_matrix', 'vocab'], {}), '(config, train_batches, dev_batches, test_batches, embedding_matrix, vocab\n )\n', (2773, 2853), False, 'from train import train\n'), ((2889, 2926), 'evaluate.evaluate', 'evaluate', (['config', 'test_batches', 'vocab'], {}), '(config, test_batches, vocab)\n', (2897, 2926), False, 'from evaluate import evaluate\n')]
|
"""
This evaluates how the number of preceding POS tags affects the evaluation results.
"""
from config import base
import evaluate as e
config = base.get_config()
config['test_filepath'] = 'resources/test/teddev/data-with-doc.csv'
n_tags = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for n_tag in n_tags:
print("Running {}".format(n_tag))
config['n_tags'] = n_tag
config['no_embeddings'] = 2 * (config['window_size'][0] + config['window_size'][1]) + 1 + n_tag * 2
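    # Each additional preceding tag contributes two embedding slots (the n_tag * 2 term); the remainder counts the context windows plus the focus token.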
predictions = e.evaluate(config)
test_data = e.load_data(config['test_filepath'])
e.output(predictions, test_data, config['classes'],
'results/base.dev.n_tags.{}.txt'.format(n_tag))
print("Saving {}".format(n_tag))
|
[
"evaluate.evaluate",
"evaluate.load_data"
] |
[((148, 165), 'config.base.get_config', 'base.get_config', ([], {}), '()\n', (163, 165), False, 'from config import base\n'), ((490, 508), 'evaluate.evaluate', 'e.evaluate', (['config'], {}), '(config)\n', (500, 508), True, 'import evaluate as e\n'), ((525, 561), 'evaluate.load_data', 'e.load_data', (["config['test_filepath']"], {}), "(config['test_filepath'])\n", (536, 561), True, 'import evaluate as e\n')]
|
from evaluate import evalb
from trees import load_trees
def test(tree_path='data/22.auto.clean', evalb_path='EVALB'):
dev_trees = load_trees(tree_path)
score = evalb(evalb_path, dev_trees, dev_trees)
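    # The dev trees are scored against themselves, so EVALB should report a
    # perfect bracketing score; this serves as a sanity check of the wrapper.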
spec = locals()
spec.pop('dev_trees')
for key, val in spec.items():
print(key, val)
test()
|
[
"evaluate.evalb"
] |
[((132, 153), 'trees.load_trees', 'load_trees', (['tree_path'], {}), '(tree_path)\n', (142, 153), False, 'from trees import load_trees\n'), ((163, 202), 'evaluate.evalb', 'evalb', (['evalb_path', 'dev_trees', 'dev_trees'], {}), '(evalb_path, dev_trees, dev_trees)\n', (168, 202), False, 'from evaluate import evalb\n')]
|
import ants  # required: ants.* is used throughout this file
from sitkImageIO.itkdatawriter import sitk_write_lab,sitk_write_image
import numpy as np
import SimpleITK as sitk
import os
from dirutil.helper import mkdir_if_not_exist
from dirutil.helper import sort_glob
from preprocessor.tools import rescale_one_dir
from evaluate.metric import calculate_binary_hd,calculate_binary_dice,print_mean_and_std
from dirutil.helper import get_name_wo_suffix
from excelutil.output2excel import outpu2excel
from learn2reg.sampler import MMSampler
# def registration_all_label_and_img(args,atlas_imgs, atlas_labs, target_imgs, target_labs):
#
# res=[]
# for target_img,target_lab in zip(target_imgs,target_labs):
# for atlas_img,atlas_lab in zip(atlas_imgs,atlas_labs):
# print("working:")
# print(atlas_img,atlas_lab,target_img,target_lab)
# reg(args,target_img,target_lab,atlas_img,atlas_lab)
'''
Rescale the data into the [0, 255] range.
'''
def rescale(args):
atlas_imgs=sort_glob(args.dataset_dir+"/train_atlas/rez/img/*.nii.gz")
target_imgs=sort_glob(args.dataset_dir+"/validate_target/rez/img/*.nii.gz")
target_imgs=target_imgs+sort_glob(args.dataset_dir+"/train_target/rez/img/*.nii.gz")
rescale_one_dir(atlas_imgs)
rescale_one_dir(target_imgs)
from learn2reg.sampler import Sampler
class AntReg():
def __init__(self,args):
self.args=args
self.train_sampler = Sampler(self.args, 'train')
self.validate_sampler = Sampler(self.args, 'validate')
def run_reg(self):
all_ds=[]
all_hd=[]
for target_img, target_lab in zip(self.validate_sampler.img_fix, self.validate_sampler.lab_fix):
for atlas_img, atlas_lab in zip(self.validate_sampler.img_mv, self.validate_sampler.lab_mv):
print("working:")
print(atlas_img, atlas_lab, target_img, target_lab)
ds,hd= self.reg_one_pair(target_img, target_lab, atlas_img, atlas_lab)
all_ds.append(ds)
all_hd.append(hd)
print("ds %f hd %f"%(ds,hd))
print_mean_and_std(all_ds)
print_mean_and_std(all_hd)
outpu2excel(self.args.res_excel,self.args.MOLD_ID+"_DS",all_ds)
outpu2excel(self.args.res_excel,self.args.MOLD_ID+"_HD",all_hd)
def reg_one_pair(self, fix_img_path, fix_label_path, move_img_path, move_label_path):
type = self.args.type
        # Read the data as ants.core.ants_image.ANTsImage objects
fix_img = ants.image_read(fix_img_path)
fix_label = ants.image_read(fix_label_path)
move_img = ants.image_read(move_img_path)
move_label = ants.image_read(move_label_path)
g1 = ants.iMath_grad(fix_img)
g2 = ants.iMath_grad(move_img)
demonsMetric = ['demons', g1, g2, 1, 1]
ccMetric = ['CC', fix_img, move_img, 2, 4]
metrics = list()
metrics.append(demonsMetric)
        # Registration
# outs = ants.registration(fix_img,move_img,type_of_transforme = 'Affine')
# outs = ants.registration( fix_img, move_img, 'ElasticSyN', multivariate_extras = metrics )
# outs = ants.registration( fix_img, move_img, type,syn_metric='demons' )
# outs = ants.registration( fix_img, move_img, type,verbose=True)
fix_mask=fix_img>fix_img.mean()
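        # Rough foreground mask: voxels brighter than the mean restrict where the registration metric is evaluated.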
outs = ants.registration(fixed=fix_img, moving=move_img, type_of_transform=type,mask=fix_mask, reg_iterations=(20, 20, 40))
        # Fetch the registered data and save it
# ants.image_write(outs['warpedmovout'] ,'./warp_image.nii.gz')
print(outs)
if len(outs['fwdtransforms']) != 2:
# return [0]
print("invalid output")
        # Get the move-to-fix transform and apply it to move_label with nearest-neighbor interpolation; this carries the label onto the registered moving image
warp_label = ants.apply_transforms(fix_img, move_label, transformlist=outs['fwdtransforms'],interpolator='nearestNeighbor')
warp_img= ants.apply_transforms(fix_img, move_img, transformlist=outs['fwdtransforms'],interpolator='nearestNeighbor')
out_dir = self.args.sample_dir + "/target_"+get_name_wo_suffix(fix_img_path)
mkdir_if_not_exist(out_dir)
p_warp_mv_label = out_dir + "/" + os.path.basename(move_label_path)
ants.image_write(warp_label, p_warp_mv_label)
p_warp_mv_img= out_dir + "/" + os.path.basename(move_img_path)
ants.image_write(warp_img, p_warp_mv_img)
p_fix_label = out_dir + "/" + os.path.basename(fix_label_path)
ants.image_write(fix_label, p_fix_label)
p_fix_img= out_dir + "/" + os.path.basename(fix_img_path)
ants.image_write(fix_img, p_fix_img)
fix_label=sitk.ReadImage(p_fix_label)
fix_label_array=np.where(sitk.GetArrayFromImage(fix_label)==self.args.component,1,0)
sitk_write_lab(fix_label_array,fix_label,out_dir,get_name_wo_suffix(p_fix_label))
warp_mv_label=sitk.ReadImage(p_warp_mv_label)
warp_mv_label_array=np.where(sitk.GetArrayFromImage(warp_mv_label)==self.args.component,1,0)
sitk_write_lab(warp_mv_label_array,warp_mv_label,out_dir,get_name_wo_suffix(p_warp_mv_label))
ds=calculate_binary_dice(fix_label_array,warp_mv_label_array)
hd=calculate_binary_hd(fix_label_array,warp_mv_label_array,spacing=fix_label.GetSpacing())
return ds,hd
def reg_one_pairV2(self, fix_img_path, fix_label_path, move_img_path, move_label_path):
def command_iteration(method):
print("{0:3} = {1:10.5f} : {2}".format(method.GetOptimizerIteration(),
method.GetMetricValue(),
method.GetOptimizerPosition()))
def command_multi_iteration(method):
print("--------- Resolution Changing ---------")
fixed = sitk.ReadImage(fix_img_path, sitk.sitkFloat32)
fixed = sitk.Normalize(fixed)
fixed = sitk.DiscreteGaussian(fixed, 2.0)
fixed_lab = sitk.ReadImage(fix_label_path, sitk.sitkUInt16)
moving = sitk.ReadImage(move_img_path, sitk.sitkFloat32)
moving = sitk.Normalize(moving)
moving = sitk.DiscreteGaussian(moving, 2.0)
moving_lab = sitk.ReadImage(move_label_path, sitk.sitkFloat32)
transformDomainMeshSize = [10] * moving.GetDimension()
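        # A B-spline mesh of size 10 along each dimension parameterizes the deformable transform.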
tx = sitk.BSplineTransformInitializer(fixed,
transformDomainMeshSize)
print("Initial Parameters:")
print(tx.GetParameters())
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation(50)
R.SetOptimizerAsGradientDescentLineSearch(5.0, 100,
convergenceMinimumValue=1e-4,
convergenceWindowSize=5)
R.SetOptimizerScalesFromPhysicalShift()
R.SetInitialTransform(tx)
R.SetInterpolator(sitk.sitkLinear)
R.SetShrinkFactorsPerLevel([6, 2, 1])
R.SetSmoothingSigmasPerLevel([6, 2, 1])
R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))
R.AddCommand(sitk.sitkMultiResolutionIterationEvent,
lambda: command_multi_iteration(R))
outTx = R.Execute(fixed, moving)
print("-------")
print(outTx)
print("Optimizer stop condition: {0}"
.format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))
# sitk.WriteTransform(outTx,'../outputs/tmp.nii.gz' )
if True:
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(outTx)
warp_mv_img= resampler.Execute(moving)
warp_mv_label= resampler.Execute(moving_lab)
warp_mv_label= sitk.Cast(warp_mv_label, sitk.sitkUInt16)
# cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
# sitk.Show(cimg, "ImageRegistration4 Composition")
# out_dir = self.args.sample_dir + "/target_"+get_name_wo_suffix(fix_img_path)
out_dir = "../outputs/tmp/"
mkdir_if_not_exist(out_dir)
sitk_write_image(warp_mv_img,fixed,out_dir,get_name_wo_suffix(move_img_path))
sitk_write_image(fixed,fixed,out_dir,get_name_wo_suffix(fix_img_path))
fix_label_array=np.where(sitk.GetArrayFromImage(fixed_lab)==self.args.component,1,0)
sitk_write_lab(fix_label_array,fixed_lab,out_dir,get_name_wo_suffix(fix_label_path))
warp_mv_label_array=np.where(sitk.GetArrayFromImage(warp_mv_label)==self.args.component,1,0)
sitk_write_lab(warp_mv_label_array,warp_mv_label,out_dir,get_name_wo_suffix(move_label_path))
ds=calculate_binary_dice(fix_label_array,warp_mv_label_array)
hd=calculate_binary_hd(fix_label_array,warp_mv_label_array,spacing=fixed_lab.GetSpacing())
return ds,hd
def validate(self):
dirs=sort_glob(self.args.sample_dir+"/*")
DS=[]
HD=[]
for d in dirs:
target_file=sort_glob(d+"/*%s*label*"%self.args.Ttarget)
atlas_file=sort_glob(d+"/*%s*label*"%self.args.Tatlas)
fix_label=sitk.ReadImage(target_file[0])
fix_array=sitk.GetArrayFromImage(fix_label)
for itr in atlas_file:
mv_img=sitk.ReadImage(itr)
mv_array=sitk.GetArrayFromImage(mv_img)
ds=calculate_binary_dice(fix_array,mv_array)
hd=calculate_binary_hd(fix_array,mv_array,spacing=fix_label.GetSpacing())
print(ds)
DS.append(ds)
HD.append(hd)
outpu2excel(self.args.res_excel, self.args.MOLD_ID + "_DS", DS)
outpu2excel(self.args.res_excel, self.args.MOLD_ID + "_HD", HD)
class AntRegV2():
def __init__(self,args):
self.args=args
self.train_sampler = Sampler(self.args, 'train')
self.validate_sampler = MMSampler(self.args, 'validate')
def run_reg(self):
all_ds=[]
all_hd=[]
for target_img, target_lab in zip(self.validate_sampler.img_fix, self.validate_sampler.lab_fix):
for atlas_img, atlas_lab in zip(self.validate_sampler.img_mv1, self.validate_sampler.lab_mv1):
print("working:")
print(atlas_img, atlas_lab, target_img, target_lab)
ds,hd= self.reg_one_pair(target_img, target_lab, atlas_img, atlas_lab)
all_ds.append(ds)
all_hd.append(hd)
print("ds %f hd %f"%(ds,hd))
print_mean_and_std(all_ds)
print_mean_and_std(all_hd)
outpu2excel(self.args.res_excel,self.args.MOLD_ID+"_DS",all_ds)
outpu2excel(self.args.res_excel,self.args.MOLD_ID+"_HD",all_hd)
def reg_one_pair(self, fix_img_path, fix_label_path, move_img_path, move_label_path):
type = self.args.type
        # Read the data as ants.core.ants_image.ANTsImage objects
fix_img = ants.image_read(fix_img_path)
fix_label = ants.image_read(fix_label_path)
move_img = ants.image_read(move_img_path)
move_label = ants.image_read(move_label_path)
g1 = ants.iMath_grad(fix_img)
g2 = ants.iMath_grad(move_img)
demonsMetric = ['demons', g1, g2, 1, 1]
ccMetric = ['CC', fix_img, move_img, 2, 4]
metrics = list()
metrics.append(demonsMetric)
        # Registration
# outs = ants.registration(fix_img,move_img,type_of_transforme = 'Affine')
# outs = ants.registration( fix_img, move_img, 'ElasticSyN', multivariate_extras = metrics )
# outs = ants.registration( fix_img, move_img, type,syn_metric='demons' )
# outs = ants.registration( fix_img, move_img, type,verbose=True)
outs = ants.registration(fixed=fix_img, moving=move_img, type_of_transform=type, reg_iterations=(20, 20, 40))
        # Fetch the registered data and save it
# ants.image_write(outs['warpedmovout'] ,'./warp_image.nii.gz')
print(outs)
if len(outs['fwdtransforms']) != 2:
# return [0]
print("invalid output")
        # Get the move-to-fix transform and apply it to move_label with nearest-neighbor interpolation; this carries the label onto the registered moving image
warp_label = ants.apply_transforms(fix_img, move_label, transformlist=outs['fwdtransforms'],interpolator='nearestNeighbor')
warp_img= ants.apply_transforms(fix_img, move_img, transformlist=outs['fwdtransforms'],interpolator='nearestNeighbor')
out_dir = self.args.sample_dir + "/target_"+get_name_wo_suffix(fix_img_path)
mkdir_if_not_exist(out_dir)
p_warp_mv_label = out_dir + "/" + os.path.basename(move_label_path)
ants.image_write(warp_label, p_warp_mv_label)
p_warp_mv_img= out_dir + "/" + os.path.basename(move_img_path)
ants.image_write(warp_img, p_warp_mv_img)
p_fix_label = out_dir + "/" + os.path.basename(fix_label_path)
ants.image_write(fix_label, p_fix_label)
p_fix_img= out_dir + "/" + os.path.basename(fix_img_path)
ants.image_write(fix_img, p_fix_img)
fix_label=sitk.ReadImage(p_fix_label)
fix_label_array=np.where(sitk.GetArrayFromImage(fix_label)==self.args.component,1,0)
sitk_write_lab(fix_label_array,fix_label,out_dir,get_name_wo_suffix(p_fix_label))
warp_mv_label=sitk.ReadImage(p_warp_mv_label)
warp_mv_label_array=np.where(sitk.GetArrayFromImage(warp_mv_label)==self.args.component,1,0)
sitk_write_lab(warp_mv_label_array,warp_mv_label,out_dir,get_name_wo_suffix(p_warp_mv_label))
ds=calculate_binary_dice(fix_label_array,warp_mv_label_array)
hd=calculate_binary_hd(fix_label_array,warp_mv_label_array,spacing=fix_label.GetSpacing())
return ds,hd
def reg_one_pairV2(self, fix_img_path, fix_label_path, move_img_path, move_label_path):
def command_iteration(method):
print("{0:3} = {1:10.5f} : {2}".format(method.GetOptimizerIteration(),
method.GetMetricValue(),
method.GetOptimizerPosition()))
def command_multi_iteration(method):
print("--------- Resolution Changing ---------")
fixed = sitk.ReadImage(fix_img_path, sitk.sitkFloat32)
fixed = sitk.Normalize(fixed)
fixed = sitk.DiscreteGaussian(fixed, 2.0)
fixed_lab = sitk.ReadImage(fix_label_path, sitk.sitkUInt16)
moving = sitk.ReadImage(move_img_path, sitk.sitkFloat32)
moving = sitk.Normalize(moving)
moving = sitk.DiscreteGaussian(moving, 2.0)
moving_lab = sitk.ReadImage(move_label_path, sitk.sitkFloat32)
transformDomainMeshSize = [10] * moving.GetDimension()
tx = sitk.BSplineTransformInitializer(fixed,
transformDomainMeshSize)
print("Initial Parameters:")
print(tx.GetParameters())
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation(50)
R.SetOptimizerAsGradientDescentLineSearch(5.0, 100,
convergenceMinimumValue=1e-4,
convergenceWindowSize=5)
R.SetOptimizerScalesFromPhysicalShift()
R.SetInitialTransform(tx)
R.SetInterpolator(sitk.sitkLinear)
R.SetShrinkFactorsPerLevel([6, 2, 1])
R.SetSmoothingSigmasPerLevel([6, 2, 1])
R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))
R.AddCommand(sitk.sitkMultiResolutionIterationEvent,
lambda: command_multi_iteration(R))
outTx = R.Execute(fixed, moving)
print("-------")
print(outTx)
print("Optimizer stop condition: {0}"
.format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))
# sitk.WriteTransform(outTx,'../outputs/tmp.nii.gz' )
if True:
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(outTx)
warp_mv_img= resampler.Execute(moving)
warp_mv_label= resampler.Execute(moving_lab)
warp_mv_label= sitk.Cast(warp_mv_label, sitk.sitkUInt16)
# cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
# sitk.Show(cimg, "ImageRegistration4 Composition")
# out_dir = self.args.sample_dir + "/target_"+get_name_wo_suffix(fix_img_path)
out_dir = "../outputs/tmp/"
mkdir_if_not_exist(out_dir)
sitk_write_image(warp_mv_img,fixed,out_dir,get_name_wo_suffix(move_img_path))
sitk_write_image(fixed,fixed,out_dir,get_name_wo_suffix(fix_img_path))
fix_label_array=np.where(sitk.GetArrayFromImage(fixed_lab)==self.args.component,1,0)
sitk_write_lab(fix_label_array,fixed_lab,out_dir,get_name_wo_suffix(fix_label_path))
warp_mv_label_array=np.where(sitk.GetArrayFromImage(warp_mv_label)==self.args.component,1,0)
sitk_write_lab(warp_mv_label_array,warp_mv_label,out_dir,get_name_wo_suffix(move_label_path))
ds=calculate_binary_dice(fix_label_array,warp_mv_label_array)
hd=calculate_binary_hd(fix_label_array,warp_mv_label_array,spacing=fixed_lab.GetSpacing())
return ds,hd
def validate(self):
dirs=sort_glob(self.args.sample_dir+"/*")
DS=[]
HD=[]
for d in dirs:
target_file=sort_glob(d+"/*%s*label*"%self.args.Ttarget)
atlas_file=sort_glob(d+"/*%s*label*"%self.args.Tatlas)
fix_label=sitk.ReadImage(target_file[0])
fix_array=sitk.GetArrayFromImage(fix_label)
for itr in atlas_file:
mv_img=sitk.ReadImage(itr)
mv_array=sitk.GetArrayFromImage(mv_img)
ds=calculate_binary_dice(fix_array,mv_array)
hd=calculate_binary_hd(fix_array,mv_array,spacing=fix_label.GetSpacing())
print(ds)
DS.append(ds)
HD.append(hd)
outpu2excel(self.args.res_excel, self.args.MOLD_ID + "_DS", DS)
outpu2excel(self.args.res_excel, self.args.MOLD_ID + "_HD", HD)
# import ants
from dirutil.helper import mkdir_if_not_exist
class AntRegV3():
def __init__(self,args):
self.args=args
self.train_sampler = Sampler(self.args, 'train')
self.validate_sampler = MMSampler(self.args, 'validate')
def run_reg(self,dir):
fix_imgs=sort_glob(dir+"/*fixe_img*")
rescale_one_dir(fix_imgs)
fix_labs = sort_glob(dir + "/*fixe_lab*")
mv_imgs = sort_glob(dir + "/*input_mv*img*")
rescale_one_dir(mv_imgs)
mv_labs = sort_glob(dir + "/*input_mv*lab*")
all_ds=[]
all_hd=[]
for atlas_img, atlas_lab in zip(mv_imgs,mv_labs):
for target_img, target_lab in zip(fix_imgs,fix_labs):
print("working:")
print(atlas_img, atlas_lab, target_img, target_lab)
mkdir_if_not_exist(dir+"/%s"%self.args.type)
ds,hd= self.reg_one_pair(target_img, target_lab, atlas_img, atlas_lab,dir+"/%s"%self.args.type)
all_ds.append(ds)
all_hd.append(hd)
print("ds %f hd %f"%(ds,hd))
print_mean_and_std(all_ds)
print_mean_and_std(all_hd)
outpu2excel(self.args.res_excel,self.args.MOLD_ID+"_DS",all_ds)
outpu2excel(self.args.res_excel,self.args.MOLD_ID+"_HD",all_hd)
def reg_one_pair(self, fix_img_path, fix_label_path, move_img_path, move_label_path,out_dir):
type = self.args.type
# Read the data; format: ants.core.ants_image.ANTsImage
fix_img = ants.image_read(fix_img_path)
fix_label = ants.image_read(fix_label_path)
move_img = ants.image_read(move_img_path)
move_label = ants.image_read(move_label_path)
g1 = ants.iMath_grad(fix_img)
g2 = ants.iMath_grad(move_img)
demonsMetric = ['demons', g1, g2, 1, 1]
ccMetric = ['CC', fix_img, move_img, 2, 4]
metrics = list()
metrics.append(demonsMetric)
# Registration
# outs = ants.registration(fix_img,move_img,type_of_transforme = 'Affine')
# outs = ants.registration( fix_img, move_img, 'ElasticSyN', multivariate_extras = metrics )
# outs = ants.registration( fix_img, move_img, type,syn_metric='demons' )
# outs = ants.registration( fix_img, move_img, type,verbose=True)
outs = ants.registration(fixed=fix_img, moving=move_img, type_of_transform=type, reg_iterations=(20, 20, 40))
# Retrieve the registered data and save it
# ants.image_write(outs['warpedmovout'] ,'./warp_image.nii.gz')
print(outs)
if len(outs['fwdtransforms']) != 2:
# return [0]
print("invalid output: expected 2 forward transforms, got %d" % len(outs['fwdtransforms']))
# Get the move-to-fix transform and apply it to move_label with nearest-neighbor interpolation, so the label is mapped onto the registered moving image as well
warp_label = ants.apply_transforms(fix_img, move_label, transformlist=outs['fwdtransforms'],interpolator='nearestNeighbor')
warp_img= ants.apply_transforms(fix_img, move_img, transformlist=outs['fwdtransforms'],interpolator='nearestNeighbor')
mkdir_if_not_exist(out_dir)
p_warp_mv_label = out_dir + "/" + os.path.basename(move_label_path)
ants.image_write(warp_label, p_warp_mv_label)
p_warp_mv_img= out_dir + "/" + os.path.basename(move_img_path)
ants.image_write(warp_img, p_warp_mv_img)
p_fix_label = out_dir + "/" + os.path.basename(fix_label_path)
ants.image_write(fix_label, p_fix_label)
p_fix_img= out_dir + "/" + os.path.basename(fix_img_path)
ants.image_write(fix_img, p_fix_img)
fix_label=sitk.ReadImage(p_fix_label)
fix_label_array=np.where(sitk.GetArrayFromImage(fix_label)==self.args.component,1,0)
sitk_write_lab(fix_label_array,fix_label,out_dir,get_name_wo_suffix(p_fix_label))
warp_mv_label=sitk.ReadImage(p_warp_mv_label)
warp_mv_label_array=np.where(sitk.GetArrayFromImage(warp_mv_label)==self.args.component,1,0)
sitk_write_lab(warp_mv_label_array,warp_mv_label,out_dir,get_name_wo_suffix(p_warp_mv_label))
ds=calculate_binary_dice(fix_label_array,warp_mv_label_array)
hd=calculate_binary_hd(fix_label_array,warp_mv_label_array,spacing=fix_label.GetSpacing())
return ds,hd
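# The commented-out calls above hint at multivariate registration; a hedged
# variant passing the demons metric built earlier (ANTsPy accepts a
# multivariate_extras argument for SyN-family transforms):
#   outs = ants.registration(fixed=fix_img, moving=move_img,
#                            type_of_transform='SyNOnly',
#                            multivariate_extras=metrics)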
def reg_one_pairV2(self, fix_img_path, fix_label_path, move_img_path, move_label_path):
def command_iteration(method):
print("{0:3} = {1:10.5f} : {2}".format(method.GetOptimizerIteration(),
method.GetMetricValue(),
method.GetOptimizerPosition()))
def command_multi_iteration(method):
print("--------- Resolution Changing ---------")
fixed = sitk.ReadImage(fix_img_path, sitk.sitkFloat32)
fixed = sitk.Normalize(fixed)
fixed = sitk.DiscreteGaussian(fixed, 2.0)
fixed_lab = sitk.ReadImage(fix_label_path, sitk.sitkUInt16)
moving = sitk.ReadImage(move_img_path, sitk.sitkFloat32)
moving = sitk.Normalize(moving)
moving = sitk.DiscreteGaussian(moving, 2.0)
moving_lab = sitk.ReadImage(move_label_path, sitk.sitkFloat32)
transformDomainMeshSize = [10] * moving.GetDimension()
tx = sitk.BSplineTransformInitializer(fixed,
transformDomainMeshSize)
print("Initial Parameters:")
print(tx.GetParameters())
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation(50)
R.SetOptimizerAsGradientDescentLineSearch(5.0, 100,
convergenceMinimumValue=1e-4,
convergenceWindowSize=5)
R.SetOptimizerScalesFromPhysicalShift()
R.SetInitialTransform(tx)
R.SetInterpolator(sitk.sitkLinear)
R.SetShrinkFactorsPerLevel([6, 2, 1])
R.SetSmoothingSigmasPerLevel([6, 2, 1])
R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))
R.AddCommand(sitk.sitkMultiResolutionIterationEvent,
lambda: command_multi_iteration(R))
outTx = R.Execute(fixed, moving)
print("-------")
print(outTx)
print("Optimizer stop condition: {0}"
.format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))
# sitk.WriteTransform(outTx,'../outputs/tmp.nii.gz' )
if True:
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(outTx)
warp_mv_img= resampler.Execute(moving)
warp_mv_label= resampler.Execute(moving_lab)
warp_mv_label= sitk.Cast(warp_mv_label, sitk.sitkUInt16)
# cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
# sitk.Show(cimg, "ImageRegistration4 Composition")
# out_dir = self.args.sample_dir + "/target_"+get_name_wo_suffix(fix_img_path)
out_dir = "../outputs/tmp/"
mkdir_if_not_exist(out_dir)
sitk_write_image(warp_mv_img,fixed,out_dir,get_name_wo_suffix(move_img_path))
sitk_write_image(fixed,fixed,out_dir,get_name_wo_suffix(fix_img_path))
fix_label_array=np.where(sitk.GetArrayFromImage(fixed_lab)==self.args.component,1,0)
sitk_write_lab(fix_label_array,fixed_lab,out_dir,get_name_wo_suffix(fix_label_path))
warp_mv_label_array=np.where(sitk.GetArrayFromImage(warp_mv_label)==self.args.component,1,0)
sitk_write_lab(warp_mv_label_array,warp_mv_label,out_dir,get_name_wo_suffix(move_label_path))
ds=calculate_binary_dice(fix_label_array,warp_mv_label_array)
hd=calculate_binary_hd(fix_label_array,warp_mv_label_array,spacing=fixed_lab.GetSpacing())
return ds,hd
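# To persist the fitted B-spline transform itself (the commented-out
# WriteTransform call inside the method targets a .nii.gz path, but transforms
# are usually written as .tfm):
#   sitk.WriteTransform(outTx, out_dir + '/bspline_tx.tfm')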
def validate(self):
dirs=sort_glob(self.args.sample_dir+"/*")
DS=[]
HD=[]
for d in dirs:
target_file=sort_glob(d+"/*%s*label*"%self.args.Ttarget)
atlas_file=sort_glob(d+"/*%s*label*"%self.args.Tatlas)
fix_label=sitk.ReadImage(target_file[0])
fix_array=sitk.GetArrayFromImage(fix_label)
for itr in atlas_file:
mv_img=sitk.ReadImage(itr)
mv_array=sitk.GetArrayFromImage(mv_img)
ds=calculate_binary_dice(fix_array,mv_array)
hd=calculate_binary_hd(fix_array,mv_array,spacing=fix_label.GetSpacing())
print(ds)
DS.append(ds)
HD.append(hd)
outpu2excel(self.args.res_excel, self.args.MOLD_ID + "_DS", DS)
outpu2excel(self.args.res_excel, self.args.MOLD_ID + "_HD", HD)
|
[
"evaluate.metric.calculate_binary_dice",
"evaluate.metric.print_mean_and_std"
] |
[((986, 1047), 'dirutil.helper.sort_glob', 'sort_glob', (["(args.dataset_dir + '/train_atlas/rez/img/*.nii.gz')"], {}), "(args.dataset_dir + '/train_atlas/rez/img/*.nii.gz')\n", (995, 1047), False, 'from dirutil.helper import sort_glob\n'), ((1062, 1127), 'dirutil.helper.sort_glob', 'sort_glob', (["(args.dataset_dir + '/validate_target/rez/img/*.nii.gz')"], {}), "(args.dataset_dir + '/validate_target/rez/img/*.nii.gz')\n", (1071, 1127), False, 'from dirutil.helper import sort_glob\n'), ((1219, 1246), 'preprocessor.tools.rescale_one_dir', 'rescale_one_dir', (['atlas_imgs'], {}), '(atlas_imgs)\n', (1234, 1246), False, 'from preprocessor.tools import rescale_one_dir\n'), ((1251, 1279), 'preprocessor.tools.rescale_one_dir', 'rescale_one_dir', (['target_imgs'], {}), '(target_imgs)\n', (1266, 1279), False, 'from preprocessor.tools import rescale_one_dir\n'), ((1154, 1216), 'dirutil.helper.sort_glob', 'sort_glob', (["(args.dataset_dir + '/train_target/rez/img/*.nii.gz')"], {}), "(args.dataset_dir + '/train_target/rez/img/*.nii.gz')\n", (1163, 1216), False, 'from dirutil.helper import sort_glob\n'), ((1415, 1442), 'learn2reg.sampler.Sampler', 'Sampler', (['self.args', '"""train"""'], {}), "(self.args, 'train')\n", (1422, 1442), False, 'from learn2reg.sampler import Sampler\n'), ((1475, 1505), 'learn2reg.sampler.Sampler', 'Sampler', (['self.args', '"""validate"""'], {}), "(self.args, 'validate')\n", (1482, 1505), False, 'from learn2reg.sampler import Sampler\n'), ((2087, 2113), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['all_ds'], {}), '(all_ds)\n', (2105, 2113), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((2122, 2148), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['all_hd'], {}), '(all_hd)\n', (2140, 2148), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((2157, 2224), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_DS')", 'all_ds'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_DS', all_ds)\n", (2168, 2224), False, 'from excelutil.output2excel import outpu2excel\n'), ((2229, 2296), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_HD')", 'all_hd'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_HD', all_hd)\n", (2240, 2296), False, 'from excelutil.output2excel import outpu2excel\n'), ((4095, 4122), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (['out_dir'], {}), '(out_dir)\n', (4113, 4122), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((4629, 4656), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_fix_label'], {}), '(p_fix_label)\n', (4643, 4656), True, 'import SimpleITK as sitk\n'), ((4863, 4894), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_warp_mv_label'], {}), '(p_warp_mv_label)\n', (4877, 4894), True, 'import SimpleITK as sitk\n'), ((5110, 5169), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_label_array', 'warp_mv_label_array'], {}), '(fix_label_array, warp_mv_label_array)\n', (5131, 5169), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((5787, 5833), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fix_img_path', 'sitk.sitkFloat32'], {}), '(fix_img_path, sitk.sitkFloat32)\n', (5801, 5833), True, 'import SimpleITK as sitk\n'), ((5850, 5871), 'SimpleITK.Normalize', 'sitk.Normalize', (['fixed'], {}), '(fixed)\n', (5864, 5871), 
True, 'import SimpleITK as sitk\n'), ((5888, 5921), 'SimpleITK.DiscreteGaussian', 'sitk.DiscreteGaussian', (['fixed', '(2.0)'], {}), '(fixed, 2.0)\n', (5909, 5921), True, 'import SimpleITK as sitk\n'), ((5943, 5990), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fix_label_path', 'sitk.sitkUInt16'], {}), '(fix_label_path, sitk.sitkUInt16)\n', (5957, 5990), True, 'import SimpleITK as sitk\n'), ((6009, 6056), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['move_img_path', 'sitk.sitkFloat32'], {}), '(move_img_path, sitk.sitkFloat32)\n', (6023, 6056), True, 'import SimpleITK as sitk\n'), ((6074, 6096), 'SimpleITK.Normalize', 'sitk.Normalize', (['moving'], {}), '(moving)\n', (6088, 6096), True, 'import SimpleITK as sitk\n'), ((6114, 6148), 'SimpleITK.DiscreteGaussian', 'sitk.DiscreteGaussian', (['moving', '(2.0)'], {}), '(moving, 2.0)\n', (6135, 6148), True, 'import SimpleITK as sitk\n'), ((6171, 6220), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['move_label_path', 'sitk.sitkFloat32'], {}), '(move_label_path, sitk.sitkFloat32)\n', (6185, 6220), True, 'import SimpleITK as sitk\n'), ((6298, 6362), 'SimpleITK.BSplineTransformInitializer', 'sitk.BSplineTransformInitializer', (['fixed', 'transformDomainMeshSize'], {}), '(fixed, transformDomainMeshSize)\n', (6330, 6362), True, 'import SimpleITK as sitk\n'), ((6494, 6524), 'SimpleITK.ImageRegistrationMethod', 'sitk.ImageRegistrationMethod', ([], {}), '()\n', (6522, 6524), True, 'import SimpleITK as sitk\n'), ((8306, 8333), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (['out_dir'], {}), '(out_dir)\n', (8324, 8333), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((8903, 8962), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_label_array', 'warp_mv_label_array'], {}), '(fix_label_array, warp_mv_label_array)\n', (8924, 8962), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((9121, 9159), 'dirutil.helper.sort_glob', 'sort_glob', (["(self.args.sample_dir + '/*')"], {}), "(self.args.sample_dir + '/*')\n", (9130, 9159), False, 'from dirutil.helper import sort_glob\n'), ((9833, 9896), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_DS')", 'DS'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_DS', DS)\n", (9844, 9896), False, 'from excelutil.output2excel import outpu2excel\n'), ((9905, 9968), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_HD')", 'HD'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_HD', HD)\n", (9916, 9968), False, 'from excelutil.output2excel import outpu2excel\n'), ((10070, 10097), 'learn2reg.sampler.Sampler', 'Sampler', (['self.args', '"""train"""'], {}), "(self.args, 'train')\n", (10077, 10097), False, 'from learn2reg.sampler import Sampler\n'), ((10130, 10162), 'learn2reg.sampler.MMSampler', 'MMSampler', (['self.args', '"""validate"""'], {}), "(self.args, 'validate')\n", (10139, 10162), False, 'from learn2reg.sampler import MMSampler\n'), ((10746, 10772), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['all_ds'], {}), '(all_ds)\n', (10764, 10772), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((10781, 10807), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['all_hd'], {}), '(all_hd)\n', (10799, 10807), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((10816, 10883), 
'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_DS')", 'all_ds'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_DS', all_ds)\n", (10827, 10883), False, 'from excelutil.output2excel import outpu2excel\n'), ((10888, 10955), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_HD')", 'all_hd'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_HD', all_hd)\n", (10899, 10955), False, 'from excelutil.output2excel import outpu2excel\n'), ((12700, 12727), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (['out_dir'], {}), '(out_dir)\n', (12718, 12727), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((13232, 13259), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_fix_label'], {}), '(p_fix_label)\n', (13246, 13259), True, 'import SimpleITK as sitk\n'), ((13466, 13497), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_warp_mv_label'], {}), '(p_warp_mv_label)\n', (13480, 13497), True, 'import SimpleITK as sitk\n'), ((13713, 13772), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_label_array', 'warp_mv_label_array'], {}), '(fix_label_array, warp_mv_label_array)\n', (13734, 13772), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((14390, 14436), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fix_img_path', 'sitk.sitkFloat32'], {}), '(fix_img_path, sitk.sitkFloat32)\n', (14404, 14436), True, 'import SimpleITK as sitk\n'), ((14453, 14474), 'SimpleITK.Normalize', 'sitk.Normalize', (['fixed'], {}), '(fixed)\n', (14467, 14474), True, 'import SimpleITK as sitk\n'), ((14491, 14524), 'SimpleITK.DiscreteGaussian', 'sitk.DiscreteGaussian', (['fixed', '(2.0)'], {}), '(fixed, 2.0)\n', (14512, 14524), True, 'import SimpleITK as sitk\n'), ((14546, 14593), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fix_label_path', 'sitk.sitkUInt16'], {}), '(fix_label_path, sitk.sitkUInt16)\n', (14560, 14593), True, 'import SimpleITK as sitk\n'), ((14612, 14659), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['move_img_path', 'sitk.sitkFloat32'], {}), '(move_img_path, sitk.sitkFloat32)\n', (14626, 14659), True, 'import SimpleITK as sitk\n'), ((14677, 14699), 'SimpleITK.Normalize', 'sitk.Normalize', (['moving'], {}), '(moving)\n', (14691, 14699), True, 'import SimpleITK as sitk\n'), ((14717, 14751), 'SimpleITK.DiscreteGaussian', 'sitk.DiscreteGaussian', (['moving', '(2.0)'], {}), '(moving, 2.0)\n', (14738, 14751), True, 'import SimpleITK as sitk\n'), ((14774, 14823), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['move_label_path', 'sitk.sitkFloat32'], {}), '(move_label_path, sitk.sitkFloat32)\n', (14788, 14823), True, 'import SimpleITK as sitk\n'), ((14901, 14965), 'SimpleITK.BSplineTransformInitializer', 'sitk.BSplineTransformInitializer', (['fixed', 'transformDomainMeshSize'], {}), '(fixed, transformDomainMeshSize)\n', (14933, 14965), True, 'import SimpleITK as sitk\n'), ((15097, 15127), 'SimpleITK.ImageRegistrationMethod', 'sitk.ImageRegistrationMethod', ([], {}), '()\n', (15125, 15127), True, 'import SimpleITK as sitk\n'), ((16909, 16936), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (['out_dir'], {}), '(out_dir)\n', (16927, 16936), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((17506, 17565), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_label_array', 'warp_mv_label_array'], {}), '(fix_label_array, warp_mv_label_array)\n', (17527, 17565), False, 'from evaluate.metric import 
calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((17724, 17762), 'dirutil.helper.sort_glob', 'sort_glob', (["(self.args.sample_dir + '/*')"], {}), "(self.args.sample_dir + '/*')\n", (17733, 17762), False, 'from dirutil.helper import sort_glob\n'), ((18436, 18499), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_DS')", 'DS'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_DS', DS)\n", (18447, 18499), False, 'from excelutil.output2excel import outpu2excel\n'), ((18508, 18571), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_HD')", 'HD'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_HD', HD)\n", (18519, 18571), False, 'from excelutil.output2excel import outpu2excel\n'), ((18731, 18758), 'learn2reg.sampler.Sampler', 'Sampler', (['self.args', '"""train"""'], {}), "(self.args, 'train')\n", (18738, 18758), False, 'from learn2reg.sampler import Sampler\n'), ((18791, 18823), 'learn2reg.sampler.MMSampler', 'MMSampler', (['self.args', '"""validate"""'], {}), "(self.args, 'validate')\n", (18800, 18823), False, 'from learn2reg.sampler import MMSampler\n'), ((18870, 18900), 'dirutil.helper.sort_glob', 'sort_glob', (["(dir + '/*fixe_img*')"], {}), "(dir + '/*fixe_img*')\n", (18879, 18900), False, 'from dirutil.helper import sort_glob\n'), ((18907, 18932), 'preprocessor.tools.rescale_one_dir', 'rescale_one_dir', (['fix_imgs'], {}), '(fix_imgs)\n', (18922, 18932), False, 'from preprocessor.tools import rescale_one_dir\n'), ((18952, 18982), 'dirutil.helper.sort_glob', 'sort_glob', (["(dir + '/*fixe_lab*')"], {}), "(dir + '/*fixe_lab*')\n", (18961, 18982), False, 'from dirutil.helper import sort_glob\n'), ((19001, 19035), 'dirutil.helper.sort_glob', 'sort_glob', (["(dir + '/*input_mv*img*')"], {}), "(dir + '/*input_mv*img*')\n", (19010, 19035), False, 'from dirutil.helper import sort_glob\n'), ((19044, 19068), 'preprocessor.tools.rescale_one_dir', 'rescale_one_dir', (['mv_imgs'], {}), '(mv_imgs)\n', (19059, 19068), False, 'from preprocessor.tools import rescale_one_dir\n'), ((19087, 19121), 'dirutil.helper.sort_glob', 'sort_glob', (["(dir + '/*input_mv*lab*')"], {}), "(dir + '/*input_mv*lab*')\n", (19096, 19121), False, 'from dirutil.helper import sort_glob\n'), ((19682, 19708), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['all_ds'], {}), '(all_ds)\n', (19700, 19708), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((19717, 19743), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['all_hd'], {}), '(all_hd)\n', (19735, 19743), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((19752, 19819), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_DS')", 'all_ds'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_DS', all_ds)\n", (19763, 19819), False, 'from excelutil.output2excel import outpu2excel\n'), ((19824, 19891), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_HD')", 'all_hd'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_HD', all_hd)\n", (19835, 19891), False, 'from excelutil.output2excel import outpu2excel\n'), ((21560, 21587), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (['out_dir'], {}), '(out_dir)\n', (21578, 21587), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((22092, 22119), 
'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_fix_label'], {}), '(p_fix_label)\n', (22106, 22119), True, 'import SimpleITK as sitk\n'), ((22326, 22357), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_warp_mv_label'], {}), '(p_warp_mv_label)\n', (22340, 22357), True, 'import SimpleITK as sitk\n'), ((22573, 22632), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_label_array', 'warp_mv_label_array'], {}), '(fix_label_array, warp_mv_label_array)\n', (22594, 22632), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((23250, 23296), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fix_img_path', 'sitk.sitkFloat32'], {}), '(fix_img_path, sitk.sitkFloat32)\n', (23264, 23296), True, 'import SimpleITK as sitk\n'), ((23313, 23334), 'SimpleITK.Normalize', 'sitk.Normalize', (['fixed'], {}), '(fixed)\n', (23327, 23334), True, 'import SimpleITK as sitk\n'), ((23351, 23384), 'SimpleITK.DiscreteGaussian', 'sitk.DiscreteGaussian', (['fixed', '(2.0)'], {}), '(fixed, 2.0)\n', (23372, 23384), True, 'import SimpleITK as sitk\n'), ((23406, 23453), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fix_label_path', 'sitk.sitkUInt16'], {}), '(fix_label_path, sitk.sitkUInt16)\n', (23420, 23453), True, 'import SimpleITK as sitk\n'), ((23472, 23519), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['move_img_path', 'sitk.sitkFloat32'], {}), '(move_img_path, sitk.sitkFloat32)\n', (23486, 23519), True, 'import SimpleITK as sitk\n'), ((23537, 23559), 'SimpleITK.Normalize', 'sitk.Normalize', (['moving'], {}), '(moving)\n', (23551, 23559), True, 'import SimpleITK as sitk\n'), ((23577, 23611), 'SimpleITK.DiscreteGaussian', 'sitk.DiscreteGaussian', (['moving', '(2.0)'], {}), '(moving, 2.0)\n', (23598, 23611), True, 'import SimpleITK as sitk\n'), ((23634, 23683), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['move_label_path', 'sitk.sitkFloat32'], {}), '(move_label_path, sitk.sitkFloat32)\n', (23648, 23683), True, 'import SimpleITK as sitk\n'), ((23761, 23825), 'SimpleITK.BSplineTransformInitializer', 'sitk.BSplineTransformInitializer', (['fixed', 'transformDomainMeshSize'], {}), '(fixed, transformDomainMeshSize)\n', (23793, 23825), True, 'import SimpleITK as sitk\n'), ((23957, 23987), 'SimpleITK.ImageRegistrationMethod', 'sitk.ImageRegistrationMethod', ([], {}), '()\n', (23985, 23987), True, 'import SimpleITK as sitk\n'), ((25769, 25796), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (['out_dir'], {}), '(out_dir)\n', (25787, 25796), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((26366, 26425), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_label_array', 'warp_mv_label_array'], {}), '(fix_label_array, warp_mv_label_array)\n', (26387, 26425), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((26584, 26622), 'dirutil.helper.sort_glob', 'sort_glob', (["(self.args.sample_dir + '/*')"], {}), "(self.args.sample_dir + '/*')\n", (26593, 26622), False, 'from dirutil.helper import sort_glob\n'), ((27296, 27359), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_DS')", 'DS'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_DS', DS)\n", (27307, 27359), False, 'from excelutil.output2excel import outpu2excel\n'), ((27368, 27431), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MOLD_ID + '_HD')", 'HD'], {}), "(self.args.res_excel, self.args.MOLD_ID + '_HD', HD)\n", (27379, 27431), 
False, 'from excelutil.output2excel import outpu2excel\n'), ((4054, 4086), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_img_path'], {}), '(fix_img_path)\n', (4072, 4086), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((4166, 4199), 'os.path.basename', 'os.path.basename', (['move_label_path'], {}), '(move_label_path)\n', (4182, 4199), False, 'import os\n'), ((4293, 4324), 'os.path.basename', 'os.path.basename', (['move_img_path'], {}), '(move_img_path)\n', (4309, 4324), False, 'import os\n'), ((4414, 4446), 'os.path.basename', 'os.path.basename', (['fix_label_path'], {}), '(fix_label_path)\n', (4430, 4446), False, 'import os\n'), ((4531, 4561), 'os.path.basename', 'os.path.basename', (['fix_img_path'], {}), '(fix_img_path)\n', (4547, 4561), False, 'import os\n'), ((4807, 4838), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_fix_label'], {}), '(p_fix_label)\n', (4825, 4838), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((5061, 5096), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_warp_mv_label'], {}), '(p_warp_mv_label)\n', (5079, 5096), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((7638, 7664), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (7662, 7664), True, 'import SimpleITK as sitk\n'), ((7991, 8032), 'SimpleITK.Cast', 'sitk.Cast', (['warp_mv_label', 'sitk.sitkUInt16'], {}), '(warp_mv_label, sitk.sitkUInt16)\n', (8000, 8032), True, 'import SimpleITK as sitk\n'), ((8386, 8419), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['move_img_path'], {}), '(move_img_path)\n', (8404, 8419), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((8466, 8498), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_img_path'], {}), '(fix_img_path)\n', (8484, 8498), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((8651, 8685), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_label_path'], {}), '(fix_label_path)\n', (8669, 8685), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((8854, 8889), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['move_label_path'], {}), '(move_label_path)\n', (8872, 8889), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((9233, 9281), 'dirutil.helper.sort_glob', 'sort_glob', (["(d + '/*%s*label*' % self.args.Ttarget)"], {}), "(d + '/*%s*label*' % self.args.Ttarget)\n", (9242, 9281), False, 'from dirutil.helper import sort_glob\n'), ((9301, 9348), 'dirutil.helper.sort_glob', 'sort_glob', (["(d + '/*%s*label*' % self.args.Tatlas)"], {}), "(d + '/*%s*label*' % self.args.Tatlas)\n", (9310, 9348), False, 'from dirutil.helper import sort_glob\n'), ((9367, 9397), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['target_file[0]'], {}), '(target_file[0])\n', (9381, 9397), True, 'import SimpleITK as sitk\n'), ((9420, 9453), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fix_label'], {}), '(fix_label)\n', (9442, 9453), True, 'import SimpleITK as sitk\n'), ((12659, 12691), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_img_path'], {}), '(fix_img_path)\n', (12677, 12691), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((12771, 12804), 'os.path.basename', 'os.path.basename', (['move_label_path'], {}), '(move_label_path)\n', (12787, 12804), False, 'import os\n'), ((12898, 12929), 'os.path.basename', 'os.path.basename', (['move_img_path'], {}), '(move_img_path)\n', (12914, 12929), False, 'import os\n'), ((13019, 13051), 
'os.path.basename', 'os.path.basename', (['fix_label_path'], {}), '(fix_label_path)\n', (13035, 13051), False, 'import os\n'), ((13136, 13166), 'os.path.basename', 'os.path.basename', (['fix_img_path'], {}), '(fix_img_path)\n', (13152, 13166), False, 'import os\n'), ((13410, 13441), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_fix_label'], {}), '(p_fix_label)\n', (13428, 13441), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((13664, 13699), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_warp_mv_label'], {}), '(p_warp_mv_label)\n', (13682, 13699), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((16241, 16267), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (16265, 16267), True, 'import SimpleITK as sitk\n'), ((16594, 16635), 'SimpleITK.Cast', 'sitk.Cast', (['warp_mv_label', 'sitk.sitkUInt16'], {}), '(warp_mv_label, sitk.sitkUInt16)\n', (16603, 16635), True, 'import SimpleITK as sitk\n'), ((16989, 17022), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['move_img_path'], {}), '(move_img_path)\n', (17007, 17022), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((17069, 17101), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_img_path'], {}), '(fix_img_path)\n', (17087, 17101), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((17254, 17288), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_label_path'], {}), '(fix_label_path)\n', (17272, 17288), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((17457, 17492), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['move_label_path'], {}), '(move_label_path)\n', (17475, 17492), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((17836, 17884), 'dirutil.helper.sort_glob', 'sort_glob', (["(d + '/*%s*label*' % self.args.Ttarget)"], {}), "(d + '/*%s*label*' % self.args.Ttarget)\n", (17845, 17884), False, 'from dirutil.helper import sort_glob\n'), ((17904, 17951), 'dirutil.helper.sort_glob', 'sort_glob', (["(d + '/*%s*label*' % self.args.Tatlas)"], {}), "(d + '/*%s*label*' % self.args.Tatlas)\n", (17913, 17951), False, 'from dirutil.helper import sort_glob\n'), ((17970, 18000), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['target_file[0]'], {}), '(target_file[0])\n', (17984, 18000), True, 'import SimpleITK as sitk\n'), ((18023, 18056), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fix_label'], {}), '(fix_label)\n', (18045, 18056), True, 'import SimpleITK as sitk\n'), ((21631, 21664), 'os.path.basename', 'os.path.basename', (['move_label_path'], {}), '(move_label_path)\n', (21647, 21664), False, 'import os\n'), ((21758, 21789), 'os.path.basename', 'os.path.basename', (['move_img_path'], {}), '(move_img_path)\n', (21774, 21789), False, 'import os\n'), ((21879, 21911), 'os.path.basename', 'os.path.basename', (['fix_label_path'], {}), '(fix_label_path)\n', (21895, 21911), False, 'import os\n'), ((21996, 22026), 'os.path.basename', 'os.path.basename', (['fix_img_path'], {}), '(fix_img_path)\n', (22012, 22026), False, 'import os\n'), ((22270, 22301), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_fix_label'], {}), '(p_fix_label)\n', (22288, 22301), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((22524, 22559), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_warp_mv_label'], {}), '(p_warp_mv_label)\n', (22542, 22559), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((25101, 25127), 
'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (25125, 25127), True, 'import SimpleITK as sitk\n'), ((25454, 25495), 'SimpleITK.Cast', 'sitk.Cast', (['warp_mv_label', 'sitk.sitkUInt16'], {}), '(warp_mv_label, sitk.sitkUInt16)\n', (25463, 25495), True, 'import SimpleITK as sitk\n'), ((25849, 25882), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['move_img_path'], {}), '(move_img_path)\n', (25867, 25882), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((25929, 25961), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_img_path'], {}), '(fix_img_path)\n', (25947, 25961), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((26114, 26148), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['fix_label_path'], {}), '(fix_label_path)\n', (26132, 26148), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((26317, 26352), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['move_label_path'], {}), '(move_label_path)\n', (26335, 26352), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((26696, 26744), 'dirutil.helper.sort_glob', 'sort_glob', (["(d + '/*%s*label*' % self.args.Ttarget)"], {}), "(d + '/*%s*label*' % self.args.Ttarget)\n", (26705, 26744), False, 'from dirutil.helper import sort_glob\n'), ((26764, 26811), 'dirutil.helper.sort_glob', 'sort_glob', (["(d + '/*%s*label*' % self.args.Tatlas)"], {}), "(d + '/*%s*label*' % self.args.Tatlas)\n", (26773, 26811), False, 'from dirutil.helper import sort_glob\n'), ((26830, 26860), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['target_file[0]'], {}), '(target_file[0])\n', (26844, 26860), True, 'import SimpleITK as sitk\n'), ((26883, 26916), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fix_label'], {}), '(fix_label)\n', (26905, 26916), True, 'import SimpleITK as sitk\n'), ((4690, 4723), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fix_label'], {}), '(fix_label)\n', (4712, 4723), True, 'import SimpleITK as sitk\n'), ((4932, 4969), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['warp_mv_label'], {}), '(warp_mv_label)\n', (4954, 4969), True, 'import SimpleITK as sitk\n'), ((8534, 8567), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fixed_lab'], {}), '(fixed_lab)\n', (8556, 8567), True, 'import SimpleITK as sitk\n'), ((8725, 8762), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['warp_mv_label'], {}), '(warp_mv_label)\n', (8747, 8762), True, 'import SimpleITK as sitk\n'), ((9512, 9531), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['itr'], {}), '(itr)\n', (9526, 9531), True, 'import SimpleITK as sitk\n'), ((9557, 9587), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['mv_img'], {}), '(mv_img)\n', (9579, 9587), True, 'import SimpleITK as sitk\n'), ((9607, 9649), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_array', 'mv_array'], {}), '(fix_array, mv_array)\n', (9628, 9649), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((13293, 13326), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fix_label'], {}), '(fix_label)\n', (13315, 13326), True, 'import SimpleITK as sitk\n'), ((13535, 13572), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['warp_mv_label'], {}), '(warp_mv_label)\n', (13557, 13572), True, 'import SimpleITK as sitk\n'), ((17137, 17170), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fixed_lab'], {}), '(fixed_lab)\n', (17159, 17170), True, 
'import SimpleITK as sitk\n'), ((17328, 17365), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['warp_mv_label'], {}), '(warp_mv_label)\n', (17350, 17365), True, 'import SimpleITK as sitk\n'), ((18115, 18134), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['itr'], {}), '(itr)\n', (18129, 18134), True, 'import SimpleITK as sitk\n'), ((18160, 18190), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['mv_img'], {}), '(mv_img)\n', (18182, 18190), True, 'import SimpleITK as sitk\n'), ((18210, 18252), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_array', 'mv_array'], {}), '(fix_array, mv_array)\n', (18231, 18252), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n'), ((19403, 19451), 'dirutil.helper.mkdir_if_not_exist', 'mkdir_if_not_exist', (["(dir + '/%s' % self.args.type)"], {}), "(dir + '/%s' % self.args.type)\n", (19421, 19451), False, 'from dirutil.helper import mkdir_if_not_exist\n'), ((22153, 22186), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fix_label'], {}), '(fix_label)\n', (22175, 22186), True, 'import SimpleITK as sitk\n'), ((22395, 22432), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['warp_mv_label'], {}), '(warp_mv_label)\n', (22417, 22432), True, 'import SimpleITK as sitk\n'), ((25997, 26030), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['fixed_lab'], {}), '(fixed_lab)\n', (26019, 26030), True, 'import SimpleITK as sitk\n'), ((26188, 26225), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['warp_mv_label'], {}), '(warp_mv_label)\n', (26210, 26225), True, 'import SimpleITK as sitk\n'), ((26975, 26994), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['itr'], {}), '(itr)\n', (26989, 26994), True, 'import SimpleITK as sitk\n'), ((27020, 27050), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['mv_img'], {}), '(mv_img)\n', (27042, 27050), True, 'import SimpleITK as sitk\n'), ((27070, 27112), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['fix_array', 'mv_array'], {}), '(fix_array, mv_array)\n', (27091, 27112), False, 'from evaluate.metric import calculate_binary_hd, calculate_binary_dice, print_mean_and_std\n')]
|