python_code | repo_name | file_path
---|---|---|
import logging
import os
import json
import time
from copy import deepcopy
from collections import defaultdict
import torch
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
from stratification.utils.utils import NumpyEncoder, get_unique_str, keys_to_strings, merge_dicts
from stratification.utils.logger import init_logger
from stratification.utils.visualization import visualize_clusters_by_group
from stratification.classification.datasets import *
from stratification.classification.models import *
from stratification.cluster.models.cluster import *
from stratification.cluster.models.reduction import *
from stratification.classification.george_classification import GEORGEClassification
from stratification.cluster.george_reduce import GEORGEReducer
from stratification.cluster.george_cluster import GEORGECluster
from stratification.cluster.utils import get_k_from_model
class GEORGEHarness:
"""Training harness for the GEORGE algorithm.
Note:
Users can execute all (using `run`) or some (using `classify` and `cluster`)
parts of the algorithm. The harness is meant to facilitate interactions with
GEORGEClassification and GEORGECluster– formatting outputs and saving artifacts.
Args:
exp_dir(str): the directory in which to store all experiment artifacts,
including the metrics, cluster assignments, and visualizations.
use_cuda(bool, optional): a flag describing whether or not to train on
the GPU.
"""
def __init__(self, config, use_cuda=False, log_format='full'):
if config['classification_config']['eval_only'] or config['classification_config'][
'save_act_only']:
self.exp_dir = config['activations_dir']
else:
self.exp_dir = os.path.join(config['exp_dir'], 'run_' + get_unique_str())
os.makedirs(self.exp_dir, exist_ok=True)
self.log_format = log_format
self.logger = init_logger('harness', self.exp_dir, log_format=log_format)
self.use_cuda = use_cuda
def save_full_config(self, config):
self._save_config(self.exp_dir, config, msg='Saving full config')
def run_george(self, config, dataloaders, model, cluster_model, reduction_model,
activation_layer=None):
"""Runs all three stages of the GEORGE pipeline: ERM, cluster, DRO.
Args:
config(dict): contains nested classification_config and
cluster_config dictionaries.
dataloaders(Dict[str, DataLoader]): a dictionary mapping a data split
to its given DataLoader (note that all data splits in DATA_SPLITS
must be specified). More information can be found in
classification.datasets.
model(nn.Module): a PyTorch model.
cluster_model(Any): a clustering model. Must implement `fit` and `predict`
methods. For more details, see cluster.george_cluster.
reduction_model(Any): a dimensionality reduction model. Must implement
`fit` and `transform`. For more details, see cluster.george_cluster.
Returns:
outputs(List[str]):
contains the paths to the artifacts for each phase of the algorithm.
"""
self.save_full_config(config)
outputs = []
# (1) Train initial representation
self.logger.basic_info('Training initial representation step (1/3)...')
state_dict = deepcopy(model.state_dict())
erm_dir = self.classify(config['classification_config'], model, dataloaders, 'erm')
outputs.append(erm_dir)
self.logger.basic_info('Running reduction step (2/3)...')
        reduction_dir = self.reduce(config['reduction_config'], reduction_model,
                                    inputs_path=os.path.join(erm_dir, 'outputs.pt'))
outputs.append(reduction_dir)
# (2) cluster
self.logger.basic_info('Running cluster step (2/3)...')
        cluster_dir = self.cluster(config['cluster_config'], cluster_model,
                                   inputs_path=os.path.join(reduction_dir, 'outputs.pt'))
outputs.append(cluster_dir)
# (3) DRO
self.logger.basic_info(f'Running DRO step (3/3)...')
if config['classification_config']['reset_model_state']:
model.load_state_dict(state_dict)
self.logger.basic_info('Model state reset')
dro_dir = self.classify(config['classification_config'], model, dataloaders, 'george',
clusters_path=os.path.join(cluster_dir, 'clusters.pt'))
outputs.append(dro_dir)
return outputs
    def classify(self, classification_config, model, dataloaders, mode, clusters_path=None):
"""Runs the initial representation learning stage of the GEORGE pipeline.
Note:
This function handles much of the pre- and post-processing needed to transition
from stage to stage (i.e. modifying datasets with subclass labels and formatting
the outputs in a manner that is compatible with GEORGEHarness.cluster).
For more direct interaction with the classification procedure, see the
GEORGEClassification class in classification.george_classification.
Args:
classification_config(dict): Contains args for the criterion, optimizer,
scheduler, metrics. Optional nested `{mode}_config` dictionaries can
add and/or replace arguments in classification config.
model(nn.Module): A PyTorch model.
dataloaders(Dict[str, DataLoader]): a dictionary mapping a data split
to its given DataLoader (note that all data splits in DATA_SPLITS
must be specified). More information can be found in
classification.datasets.
mode(str): The type of optimization to run. `erm` trains with vanilla
Cross Entropy Loss. 'george' trains DRO using given cluster labels.
`random_gdro` trains DRO with random cluster labels. `superclass_gdro`
trains DRO using the given superclass labels. Implementation of DRO
from Sagawa et al. (2020).
clusters_path(str, optional): The path leading to clusters.pt file
produced by GEORGEHarness.cluster. Only needed if mode == 'george'.
Returns:
save_dir(str): The subdirectory within `exp_dir` that contains model
checkpoints, best model outputs, and best model metrics.
"""
# overwrite args in classification_config with mode specific params
        if mode == 'erm':
            mode_config = classification_config['erm_config']
        else:
            mode_config = classification_config['gdro_config']
classification_config = merge_dicts(classification_config, mode_config)
if classification_config['eval_only'] or classification_config['save_act_only']:
save_dir = self.exp_dir
else:
save_dir = os.path.join(self.exp_dir, f'{mode}_{get_unique_str()}')
self._save_config(save_dir, classification_config)
robust = self._get_robust_status(mode)
# (1) train
trainer = GEORGEClassification(
classification_config, save_dir=save_dir, use_cuda=self.use_cuda,
log_format=self.log_format,
has_estimated_subclasses=mode not in ['erm', 'true_subclass_gdro'])
if not (classification_config['eval_only'] or classification_config['save_act_only']
or classification_config['bit_pretrained']):
trainer.train(model, dataloaders['train'], dataloaders['val'], robust=robust)
# (2) evaluate
split_to_outputs = {}
split_to_metrics = {}
for split, dataloader in dataloaders.items():
if split == 'train':
continue
key = 'train' if split == 'train_clean' else split
if classification_config['eval_only'] and key != 'test':
continue
self.logger.basic_info(f'Evaluating on {key} split...')
metrics, outputs = trainer.evaluate(
model, dataloaders, split, robust=robust, save_activations=True,
bit_pretrained=classification_config['bit_pretrained'],
adv_metrics=classification_config['eval_only'],
ban_reweight=classification_config['ban_reweight'])
split_to_metrics[key] = metrics
split_to_outputs[key] = outputs
# (3) save everything
if not classification_config['eval_only']:
self._save_json(os.path.join(save_dir, 'metrics.json'), split_to_metrics)
self._save_torch(os.path.join(save_dir, 'outputs.pt'), split_to_outputs)
return save_dir
def reduce(self, reduction_config, reduction_model, inputs_path):
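        """Runs the dimensionality reduction stage of the GEORGE pipeline.
        Args:
            reduction_config(dict): contains args for the reduction step.
            reduction_model(Any): a dimensionality reduction model. Must implement
                `fit` and `transform`. For more details, see cluster.george_cluster.
            inputs_path(str): path to the outputs.pt file produced by
                GEORGEHarness.classify.
        Returns:
            save_dir(str): subdirectory within `exp_dir` that contains the reduced
                activations for each split.
        """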
save_dir = os.path.join(os.path.dirname(inputs_path), f'reduce_{get_unique_str()}')
self._save_config(save_dir, reduction_config, msg='Saving reduction step config')
inputs = torch.load(inputs_path)
assert len(set(inputs.keys()) & {'train', 'val', 'test'}) == 3, \
'Must have ["train", "val", "test"] splits.'
for split, split_inputs in inputs.items():
assert len(set(split_inputs.keys()) & {'superclass', 'activations'}) == 2, \
f'{split} split of loaded inputs must have ["superclass", "activations"] keys'
# apply dimensionality reduction (if specified) to the data
reducer = GEORGEReducer(reduction_config, save_dir=save_dir, log_format=self.log_format)
group_to_models, train_means = reducer.train(reduction_model, inputs)
split_to_outputs = {}
for split, split_inputs in inputs.items():
outputs = reducer.evaluate(group_to_models, inputs[split], train_means)
split_to_outputs[split] = (outputs, inputs[split]['superclass'])
# save reduced data
self._save_torch(os.path.join(save_dir, 'outputs.pt'), split_to_outputs)
return save_dir
def cluster(self, cluster_config, cluster_model, inputs_path):
"""
Runs clustering stage of the GEORGE pipeline.
Note:
The `inputs_path` parameter must describe a pickle-serialized dictionary
that has the following schema:
{
'train': {
'metrics': Dict[str, Any],
'activations': np.ndarray of shape (N, D),
'superclass': np.ndarray of shape (N, ),
'subclass': np.ndarray of shape (N, ),
'true_subclass': np.ndarray of shape (N, ),
'targets': np.ndarray of shape (N, ),
'probs': np.ndarray of shape (N, ),
'preds': np.ndarray of shape (N, ),
'losses': np.ndarray of shape (N, ),
},
'val': {...},
'test': {...}
}
Future work is to further modularize the cluster code to mitigate
dependencies on this object. For best results, train classifiers
using GEORGEHarness.classify.
Args:
cluster_config(dict): contains args for the clustering step.
cluster_model(Any): a clustering model. Must implement `fit` and `predict`
methods. For more details, see cluster.george_cluster.
            inputs_path(str): path to the outputs.pt file produced by
                GEORGEHarness.reduce.
        Returns:
            save_dir(str): subdirectory within `exp_dir` that contains the cluster
                assignments, other cluster output, and cluster metrics.
"""
save_dir = os.path.join(os.path.dirname(inputs_path), f'cluster_{get_unique_str()}')
self._save_config(save_dir, cluster_config, msg='Saving cluster step config')
inputs = torch.load(inputs_path)
assert len(set(inputs.keys()) & {'train', 'val', 'test'}) == 3, \
'Must have ["train", "val", "test"] splits.'
for split, split_inputs in inputs.items():
for group, group_data in split_inputs[0].items():
assert len(set(group_data.keys()) & {'activations', 'losses'}) == 2, \
f'{split} split of loaded inputs must have ["activations", "losses"] keys' \
' for each superclass'
# (1) train
c_trainer = GEORGECluster(cluster_config, save_dir=save_dir, log_format=self.log_format)
group_to_models = c_trainer.train(cluster_model, inputs)
# (2) evaluate
split_to_metrics = {}
split_to_outputs = {}
for split, split_inputs in inputs.items():
metrics, outputs = c_trainer.evaluate(group_to_models, inputs[split])
split_to_metrics[split] = metrics
split_to_outputs[split] = outputs
# (3) save everything
self._save_json(os.path.join(save_dir, 'metrics.json'), split_to_metrics)
self._save_torch(os.path.join(save_dir, 'outputs.pt'), split_to_outputs)
# save assignments only
split_to_assignments = {k: v['assignments'] for k, v in split_to_outputs.items()}
self._save_torch(os.path.join(save_dir, 'clusters.pt'), split_to_assignments)
group_to_k = {
group: get_k_from_model(cluster_model)
for group, cluster_model in enumerate(group_to_models)
}
self._save_cluster_visualizations(save_dir, inputs, group_to_k, split_to_outputs, c_trainer)
return save_dir
def _get_robust_status(self, mode):
"""Identifies if the given `mode` calls for DRO"""
if mode in {'george', 'random_gdro', 'superclass_gdro', 'true_subclass_gdro'}:
return True
elif mode == 'erm':
return False
        raise ValueError(f'mode {mode} not valid. Use one of the following:\n' +
                         '["george", "random_gdro", "superclass_gdro", "true_subclass_gdro", ' +
                         '"erm"]')
def get_dataloaders(self, config, mode='erm', transforms=None, subclass_labels=None):
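        """Initializes train, validation, and test dataloaders for the configured dataset.
        Args:
            config(dict): the full experiment config; `config['dataset']` selects the
                dataset class and `config['classification_config']` supplies the batch
                size, number of workers, and dataset-specific options.
            mode(str, optional): one of 'erm', 'george', 'random_gdro',
                'superclass_gdro', or 'true_subclass_gdro'; determines which subclass
                labels are attached to the datasets.
            transforms(optional): unused; transforms are constructed by the dataset
                classes themselves.
            subclass_labels(str, optional): a path to a `.pt` file of cluster
                assignments (required when mode == 'george') or a label keyword.
                Defaults to 'superclass' labels when not given.
        Returns:
            dataloaders(Dict[str, DataLoader]): a dictionary mapping each split in
                DATA_SPLITS to its DataLoader.
        """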
dataset_name = config['dataset']
seed = config['seed']
config = config['classification_config']
if mode == 'george':
assert ('.pt' in subclass_labels) # file path to subclass labels specified
elif mode != 'erm':
assert (subclass_labels is None)
            # remove the '_gdro' suffix to get the subclass label keyword (e.g. 'superclass')
            subclass_labels = mode[:-len('_gdro')] if mode.endswith('_gdro') else mode
if subclass_labels is None:
# subclass labels default to superclass labels if none are given
subclass_labels = 'superclass'
if '.pt' in subclass_labels: # file path specified
subclass_labels = torch.load(subclass_labels)
else: # keyword specified
kw = subclass_labels
subclass_labels = defaultdict(lambda: kw)
        if mode == 'erm':
            mode_config = config['erm_config']
        else:
            mode_config = config['gdro_config']
config = merge_dicts(config, mode_config)
dataset_name = dataset_name.lower()
d = {
'celeba': CelebADataset,
'isic': ISICDataset,
'mnist': MNISTDataset,
'waterbirds': WaterbirdsDataset
}
dataset_class = d[dataset_name]
batch_size = config['batch_size']
dataloaders = {}
for split in DATA_SPLITS:
key = 'train' if 'train' in split else split
split_subclass_labels = subclass_labels[key]
shared_dl_args = {'batch_size': batch_size, 'num_workers': config['workers']}
if split == 'train':
dataset = dataset_class(root='./data', split=split, download=True, augment=True,
**config['dataset_config'])
dataset.add_subclass_labels(split_subclass_labels, seed=seed)
if config.get('uniform_group_sampling', False):
sampler, group_weights = self._get_uniform_group_sampler(dataset)
self.logger.info(
f'Resampling training data with subclass weights:\n{group_weights}')
dataloaders[split] = DataLoader(dataset, **shared_dl_args, shuffle=False,
sampler=sampler)
else:
dataloaders[split] = DataLoader(dataset, **shared_dl_args, shuffle=True)
else:
# Evaluation dataloaders (including for the training set) are "clean" - no data augmentation or shuffling
dataset = dataset_class(root='./data', split=key, **config['dataset_config'])
dataset.add_subclass_labels(split_subclass_labels, seed=seed)
dataloaders[split] = DataLoader(dataset, **shared_dl_args, shuffle=False)
self.logger.info(f'{split} split:')
# log class counts for each label type
for label_type, labels in dataset.Y_dict.items():
self.logger.info(f'{label_type.capitalize()} counts: {np.bincount(labels)}')
return dataloaders
def _get_uniform_group_sampler(self, dataset):
group_counts, group_labels = dataset.get_class_counts('subclass'), dataset.get_labels(
'subclass')
group_weights = np.array([len(dataset) / c if c != 0 else 0 for c in group_counts])
group_weights /= np.sum(group_weights)
weights = group_weights[np.array(group_labels)]
sampler = WeightedRandomSampler(weights, num_samples=len(dataset), replacement=True)
return sampler, group_weights
def get_nn_model(self, config, num_classes, mode='erm'):
cl_config = config['classification_config']
        if mode == 'erm':
            mode_config = cl_config['erm_config']
        else:
            mode_config = cl_config['gdro_config']
cl_config = merge_dicts(cl_config, mode_config)
if cl_config['bit_pretrained']:
model_cls = BiTResNet
else:
models = {'lenet4': LeNet4, 'resnet50': PyTorchResNet, 'shallow_cnn': ShallowCNN}
try:
model_cls = models[cl_config['model']]
except KeyError:
raise ValueError('Unsupported model architecture')
model = model_cls(num_classes=num_classes)
if self.use_cuda:
model = torch.nn.DataParallel(model).cuda()
self.logger.info('Model:')
self.logger.info(str(model))
return model
def get_reduction_model(self, config, nn_model=None):
red_config = config['reduction_config']
models = {
'none': NoOpReducer,
'pca': PCAReducer,
'umap': UMAPReducer,
'hardness': HardnessAugmentedReducer
}
if red_config['model'] != 'hardness':
reduction_cls = models[red_config['model']]
reduction_model = reduction_cls(random_state=config['seed'],
n_components=red_config['components'])
else:
assert (nn_model is not None)
base_reduction_model = UMAPReducer(random_state=config['seed'],
n_components=red_config['components'])
reduction_model = HardnessAugmentedReducer(nn_model, base_reduction_model)
return reduction_model
def get_cluster_model(self, config):
cluster_config = config['cluster_config']
kwargs = {
'cluster_method': cluster_config['model'],
'max_k': cluster_config['k'],
'seed': config['seed'],
'sil_cuda': cluster_config['sil_cuda'],
'search': cluster_config['search_k']
}
if cluster_config['overcluster']:
cluster_model = OverclusterModel(**kwargs, oc_fac=cluster_config['overcluster_factor'])
else:
cluster_model = AutoKMixtureModel(**kwargs)
return cluster_model
def _save_cluster_visualizations(self, save_dir, inputs, group_to_k, split_to_outputs, trainer):
"""Generates and saves cluster visualizations."""
for split, outputs in split_to_outputs.items():
visualization_dir = os.path.join(save_dir, 'visualizations', split)
os.makedirs(visualization_dir, exist_ok=True)
visualize_clusters_by_group(outputs['activations'],
cluster_assignments=outputs['assignments'],
group_assignments=inputs[split][1],
true_subclass_labels=outputs['true_subclass'],
group_to_k=group_to_k, save_dir=visualization_dir)
def _save_config(self, save_dir, config, msg=None):
"""Helper function to save `config` in `save_dir`."""
os.makedirs(save_dir, exist_ok=True)
self._save_json(os.path.join(save_dir, 'config.json'), config)
if msg is not None:
self.logger.info(msg)
self.logger.basic_info(f'Config saved in: {save_dir}')
self.logger.info(f'Config:\n{json.dumps(config, indent=4)}')
def _save_json(self, save_path, data):
"""Saves JSON type objects."""
with open(save_path, 'w') as f:
json.dump(keys_to_strings(data), f, indent=4, cls=NumpyEncoder)
def _save_torch(self, save_path, data):
"""Saves arbitrary data with the torch serializer."""
torch.save(data, save_path)
| hidden-stratification-master | stratification/harness.py |
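For orientation, the nested config that `GEORGEHarness` reads looks roughly like the sketch below. The key names mirror the accesses in `harness.py` above; the values are illustrative assumptions rather than the project's shipped defaults (utils.schema, referenced in GEORGEClassification, holds the authoritative schema).

# Illustrative config sketch; values are assumptions, key names come from harness.py.
example_config = {
    'exp_dir': 'checkpoints/demo',
    'dataset': 'waterbirds',
    'seed': 0,
    'classification_config': {
        'eval_only': False, 'save_act_only': False, 'bit_pretrained': False,
        'ban_reweight': False, 'reset_model_state': True,
        'model': 'resnet50', 'batch_size': 64, 'workers': 4, 'dataset_config': {},
        'erm_config': {},   # mode-specific overrides merged in for mode == 'erm'
        'gdro_config': {},  # mode-specific overrides merged in for the GDRO modes
    },
    'reduction_config': {'model': 'umap', 'components': 2},
    'cluster_config': {'model': 'gmm', 'k': 10, 'search_k': True,
                       'sil_cuda': False, 'overcluster': False},
}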
import os
import torch
from stratification.harness import GEORGEHarness
from stratification.utils.utils import set_seed, init_cuda
from stratification.utils.parse_args import get_config
from stratification.cluster.models.cluster import GaussianMixture
from stratification.cluster.models.reduction import UMAPReducer
def main():
config = get_config()
use_cuda = config['use_cuda'] and torch.cuda.is_available()
set_seed(config['seed'], use_cuda) # set seeds for reproducibility
init_cuda(config['deterministic'], config['allow_multigpu'])
torch.multiprocessing.set_sharing_strategy('file_system')
harness = GEORGEHarness(config, use_cuda=use_cuda)
harness.save_full_config(config)
dataloaders = harness.get_dataloaders(config, mode='erm')
num_classes = dataloaders['train'].dataset.get_num_classes('superclass')
model = harness.get_nn_model(config, num_classes=num_classes, mode='erm')
print('Model architecture:')
print(model)
# Train a model with ERM
erm_dir = harness.classify(config['classification_config'], model, dataloaders, 'erm')
# Cluster the activations of the model
reduction_model = UMAPReducer(random_state=12345, n_components=2, n_neighbors=10, min_dist=0)
reduction_dir = harness.reduce(config['reduction_config'], reduction_model,
inputs_path=os.path.join(erm_dir, 'outputs.pt'))
cluster_model = GaussianMixture(covariance_type='full', n_components=5, n_init=3)
cluster_dir = harness.cluster(config['cluster_config'], cluster_model,
inputs_path=os.path.join(reduction_dir, 'outputs.pt'))
set_seed(config['seed'], use_cuda) # reset random state
dataloaders = harness.get_dataloaders(config, mode='george',
subclass_labels=os.path.join(cluster_dir, 'clusters.pt'))
model = harness.get_nn_model(config, num_classes=num_classes, mode='george')
# Train the final (GEORGE) model
george_dir = harness.classify(config['classification_config'], model, dataloaders,
mode='george')
if __name__ == '__main__':
main()
| hidden-stratification-master | stratification/demo.py |
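The `clusters.pt` file saved by `GEORGEHarness.cluster` and passed to `get_dataloaders(mode='george', subclass_labels=...)` above maps each split to its per-example cluster assignments. A minimal sketch of inspecting it; the path is illustrative.

import torch

clusters = torch.load('checkpoints/demo/cluster_xyz/clusters.pt')  # hypothetical path
for split, assignments in clusters.items():
    # one estimated subclass label per example in the split
    print(split, len(assignments))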
import os
import logging
from functools import partial
import numpy as np
import sklearn.metrics
import torch
import torch.optim as optimizers
import torch.optim.lr_scheduler as schedulers
import torch.nn.functional as F
from progress.bar import IncrementalBar as ProgressBar
from stratification.classification.utils import AverageMeter, compute_accuracy, compute_roc_auc
from stratification.classification.datasets import LABEL_TYPES, GEORGEDataset
from stratification.classification.losses import init_criterion
from stratification.utils.logger import init_logger, init_epoch_logger
from stratification.utils.utils import format_timedelta, move_to_device, get_learning_rate, concatenate_iterable
PROGRESS_BAR_SUFFIX = '({batch}/{size}) Time {total:} | ETA {eta:} | ' \
'Loss: {loss:.3f} | R Loss: {subclass_rob_loss:.3f} | ' \
'Acc: {acc:.3f} | RW acc: {acc_rw:.3f} | R acc: {subclass_rob_acc:.3f} | ' \
'RW R acc: {subclass_rob_acc_rw:.3f} | TR acc: {true_subclass_rob_acc:.3f}'
prog_metric_names = [
'loss', 'subclass_rob_loss', 'acc', 'acc_rw', 'subclass_rob_acc', 'subclass_rob_acc_rw',
'true_subclass_rob_acc'
]
def init_optimizer(optimizer_config, model):
"""Initializes the optimizer."""
optimizer_class = getattr(optimizers, optimizer_config['class_name'])
return optimizer_class(model.parameters(), **optimizer_config['class_args'])
def init_scheduler(scheduler_config, optimizer):
"""Initializes the learning rate scheduler."""
scheduler_class = getattr(schedulers, scheduler_config['class_name'])
return scheduler_class(optimizer, **scheduler_config['class_args'])
def load_state_dicts(load_path, model, optimizer, scheduler, logger):
"""Loads state from a given load path."""
state = {
'epoch': 0,
'best_score': np.nan,
'best_val_acc': -1,
'best_val_acc_rw': -1,
'best_val_subclass_rob_acc': -1,
'best_val_subclass_rob_acc_rw': -1,
'best_val_true_subclass_rob_acc': -1,
'best_val_alt_subclass_rob_acc': -1,
'best_val_auroc': -1,
'best_val_subclass_rob_auroc': -1,
'best_val_true_subclass_rob_auroc': -1,
'best_val_alt_subclass_rob_auroc': -1,
}
    if load_path is not None:
logger.info(f'Loading state_dict from {load_path}...')
checkpoint = torch.load(os.path.join(load_path))
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
return state
def register_save_activations_hook(model, model_activation_layer, activations_list):
"""Registers a forward pass hook that saves activations.
Args:
model(nn.Module): A PyTorch model.
model_activation_layer(str): The name of the module in the network that
produces the activations of interest.
activations_list(List[torch.Tensor]) The list in which we should store the
model activations.
"""
def save_activations(model, inp, out):
activations_list.append(out.view(out.size(0), -1))
for name, m in model.named_modules():
if name == model_activation_layer or \
(isinstance(model, torch.nn.DataParallel) and \
name.replace('module.', '') == model_activation_layer):
return m.register_forward_hook(save_activations)
return None
class GEORGEClassification:
"""Executes the classification stage of the GEORGE algorithm.
Args:
classification_config(dict): Contains the parameters required to execute this step.
See utils.schema for type information and examples.
save_dir(str, optional): Directory at which to save logging information.
If None, logging information is not saved. Default is None.
use_cuda(bool, optional): If True, enables GPU usage. Default is False.
"""
def __init__(self, classification_config, save_dir=None, use_cuda=False, log_format='full',
has_estimated_subclasses=False):
self.config = classification_config
self.save_dir = save_dir
if self.save_dir:
self.logger = init_logger('harness.classification', self.save_dir,
log_format=log_format)
self.epoch_logger = init_epoch_logger(self.save_dir)
self.logger.info(f'Saving checkpoints to {self.save_dir}...')
else:
# initialize logger without FileHandler
self.logger = logging.getLogger()
self.use_cuda = use_cuda
if self.use_cuda:
self.device = torch.cuda.current_device()
# uninitialized unless self.train or self.evaluate is called
self.criterion = None
# uninitialized unless self.train is called
self.optimizer = None
self.scheduler = None
self.has_estimated_subclasses = has_estimated_subclasses
self.compute_auroc = 'auroc' in self.config['checkpoint_metric']
def train(self, model, train_dataloader, val_dataloader, robust=False):
"""Trains the given model.
Note:
Artifacts are only saved if self.save_dir is initialized. Additionally,
this function assumes that the "step" unit of the scheduler is epoch-based.
The model is modified in-place, but the model is also returned to match the
GEORGECluster API.
Args:
model(nn.Module): A PyTorch model.
train_dataloader(DataLoader): The training dataloader. The dataset within must
subclass GEORGEDataset.
val_dataloader(DataLoader): The validation dataloader. The dataset within must
subclass GEORGEDataset.
robust(bool, optional): Whether or not to apply robust optimization. Affects
criterion initialization.
Returns:
model(nn.Module): The best model found during training.
"""
if self.criterion is None:
self.criterion = init_criterion(self.config['criterion_config'], robust,
train_dataloader.dataset, self.use_cuda)
self.optimizer = init_optimizer(self.config['optimizer_config'], model)
self.scheduler = init_scheduler(self.config['scheduler_config'], self.optimizer)
# in order to resume model training, load_path must be set explicitly
load_path = self.config.get('load_path', None)
self.state = load_state_dicts(load_path, model, self.optimizer, self.scheduler, self.logger)
num_epochs = self.config['num_epochs']
checkpoint_metric = self.config['checkpoint_metric']
use_cuda = next(model.parameters()).is_cuda
train_props = np.bincount(np.array(train_dataloader.dataset.Y_dict['true_subclass'])) / len(
train_dataloader.dataset)
val_props = np.bincount(np.array(val_dataloader.dataset.Y_dict['true_subclass'])) / len(
val_dataloader.dataset)
reweight = torch.tensor(train_props / val_props)
if use_cuda: reweight = reweight.cuda()
self.logger.basic_info('Starting training.')
for epoch in range(num_epochs):
self.state['epoch'] = epoch
self.scheduler.last_epoch = epoch - 1
self.scheduler.step(*([self.state[f'best_score']] if type(self.scheduler) ==
schedulers.ReduceLROnPlateau else []))
cur_lr = get_learning_rate(self.optimizer)
self.logger.basic_info(f'\nEpoch: [{epoch + 1} | {num_epochs}] LR: {cur_lr:.2E}')
self.logger.basic_info('Training:')
train_metrics, _ = self._run_epoch(model, train_dataloader, optimize=True,
save_activations=False)
self.logger.basic_info('Validation:')
val_metrics, _ = self._run_epoch(model, val_dataloader, optimize=False,
save_activations=False, reweight=reweight)
metrics = {
**{f'train_{k}': v
for k, v in train_metrics.items()},
**{f'val_{k}': v
for k, v in val_metrics.items()}
}
self._checkpoint(model, metrics, checkpoint_metric, epoch)
self.epoch_logger.append({'learning_rate': cur_lr, **metrics})
if use_cuda: torch.cuda.empty_cache()
best_model_path = os.path.join(self.save_dir, 'best_model.pt')
if os.path.exists(best_model_path):
self.logger.basic_info('\nTraining complete. Loading best model.')
checkpoint = torch.load(best_model_path)
model.load_state_dict(checkpoint['state_dict'])
else:
self.logger.basic_info('Training complete. No best model found.')
return model
def evaluate(self, model, dataloaders, split, robust=False, save_activations=False,
bit_pretrained=False, adv_metrics=False, ban_reweight=False):
"""Evaluates the model.
Note:
The latter item in the returned tuple is what is necessary to run
GEORGECluster.train and GEORGECluster.evaluate.
Args:
model(nn.Module): A PyTorch model.
            dataloaders(Dict[str, DataLoader]): a dictionary mapping each data split
                to its DataLoader. The dataset within must subclass GEORGEDataset.
            split(str): the name of the split to evaluate on.
robust(bool, optional): Whether or not to apply robust optimization. Affects
criterion initialization.
save_activations(bool, optional): If True, saves the activations in
`outputs`. Default is False.
            bit_pretrained(bool, optional): If True, the model is assumed to be a
                pretrained BiT model and performance metrics are not computed.
Returns:
metrics(Dict[str, Any]) A dictionary object that stores the metrics defined
in self.config['metric_types'].
outputs(Dict[str, Any]) A dictionary object that stores artifacts necessary
for model analysis, including labels, activations, and predictions.
"""
dataloader = dataloaders[split]
# use criterion from training if trained; else, load a new one
if self.criterion is None:
self.criterion = init_criterion(self.config['criterion_config'], robust,
dataloader.dataset, self.use_cuda)
train_props = np.bincount(np.array(
dataloaders['train'].dataset.Y_dict['true_subclass'])) / len(
dataloaders['train'].dataset)
split_props = np.bincount(np.array(dataloader.dataset.Y_dict['true_subclass'])) / len(
dataloader.dataset)
use_cuda = next(model.parameters()).is_cuda
reweight = None if ban_reweight else torch.tensor(train_props / split_props)
if use_cuda and reweight is not None: reweight = reweight.cuda()
metrics, outputs = self._run_epoch(model, dataloader, optimize=False,
save_activations=save_activations, reweight=reweight,
bit_pretrained=bit_pretrained, adv_metrics=adv_metrics)
return metrics, outputs
def _run_epoch(self, model, dataloader, optimize=False, save_activations=False, reweight=None,
bit_pretrained=False, adv_metrics=False):
"""Runs the model on a given dataloader.
Note:
The latter item in the returned tuple is what is necessary to run
GEORGECluster.train and GEORGECluster.evaluate.
Args:
model(nn.Module): A PyTorch model.
dataloader(DataLoader): The dataloader. The dataset within must
subclass GEORGEDataset.
optimize(bool, optional): If True, the model is trained on self.criterion.
save_activations(bool, optional): If True, saves the activations in
`outputs`. Default is False.
            bit_pretrained(bool, optional): If True, the model is assumed to be a
                pretrained BiT model and performance metrics are not computed.
Returns:
metrics(Dict[str, Any]) A dictionary object that stores the metrics defined
in self.config['metric_types'].
outputs(Dict[str, Any]) A dictionary object that stores artifacts necessary
for model analysis, including labels, activations, and predictions.
"""
dataset = dataloader.dataset
self._check_dataset(dataset)
type_to_num_classes = {
label_type: dataset.get_num_classes(label_type)
for label_type in LABEL_TYPES if label_type in dataset.Y_dict.keys()
}
outputs = {
'metrics': None,
'activations': [],
'superclass': [],
'subclass': [],
'true_subclass': [],
'alt_subclass': [],
'targets': [],
'probs': [],
'preds': [],
'losses': [],
'reweight': [],
}
activations_handle = self._init_activations_hook(model, outputs['activations'])
if optimize:
progress_prefix = 'Training'
model.train()
else:
progress_prefix = 'Evaluation'
model.eval()
per_class_meters = self._init_per_class_meters(type_to_num_classes)
metric_meters = {k: AverageMeter() for k in ['loss', 'acc', 'loss_rw', 'acc_rw']}
progress = self.config['show_progress']
if progress:
bar = ProgressBar(progress_prefix, max=len(dataloader), width=50)
for batch_idx, (inputs, targets) in enumerate(dataloader):
batch_size = len(inputs)
if self.use_cuda:
inputs, targets = move_to_device([inputs, targets], device=self.device)
type_to_labels = {}
for label_type in type_to_num_classes.keys():
type_to_labels[label_type] = targets[label_type]
outputs[label_type].append(targets[label_type])
if optimize and not bit_pretrained:
logits = model(inputs)
loss_targets = targets['superclass']
co = self.criterion(logits, loss_targets, targets['subclass'])
loss, (losses, corrects), _ = co
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
else:
with torch.no_grad():
logits = model(inputs)
loss_targets = targets['superclass']
if bit_pretrained:
if progress:
bar.suffix = PROGRESS_BAR_SUFFIX.format(
batch=batch_idx + 1, size=len(dataloader),
total=format_timedelta(bar.elapsed_td),
eta=format_timedelta(bar.eta_td),
**{k: 0
for k in prog_metric_names})
bar.next()
continue
co = self.criterion(logits, loss_targets, targets['subclass'])
loss, (losses, corrects), _ = co
if not save_activations:
outputs['activations'].pop() # delete activations
reweight_vec = None if reweight is None else reweight[targets['true_subclass']]
metrics = self._compute_progress_metrics(losses, corrects, type_to_labels,
type_to_num_classes, per_class_meters,
reweight=reweight_vec)
acc, preds = compute_accuracy(logits.data, loss_targets.data, return_preds=True)
outputs['probs'].append(F.softmax(logits, dim=1).detach().cpu()[:, 1])
outputs['preds'].append(preds)
outputs['losses'].append(losses.detach().cpu())
outputs['targets'].append(loss_targets.detach().cpu())
if reweight_vec is not None:
outputs['reweight'].append(reweight_vec.cpu())
self._update_metrics(metric_meters, acc, loss, losses, corrects, batch_size,
reweight_vec)
PROGRESS_BAR_STR = PROGRESS_BAR_SUFFIX
if self.compute_auroc:
sub_map = dataloader.dataset.get_class_map('subclass')
assert (set(sub_map.keys()) == {0, 1}) # must be a binary problem
targets_cat, probs_cat = torch.cat(outputs['targets']), torch.cat(outputs['probs'])
auroc = compute_roc_auc(targets_cat, probs_cat)
metrics['auroc'] = auroc
has_alt_subclass = 'alt_subclass' in dataloader.dataset.Y_dict
for key in ['subclass', 'true_subclass'] + ['alt_subclass'] * has_alt_subclass:
sub_map = dataloader.dataset.get_class_map(key)
neg_subclasses = sub_map[0]
pos_subclasses = sub_map[1]
if len(neg_subclasses) == len(pos_subclasses) == 1:
# only one subclass in each superclass
rob_auroc = auroc
else:
subclass_labels = torch.cat(outputs[key])
paired_aurocs = []
for neg_subclass in neg_subclasses:
for pos_subclass in pos_subclasses:
inds = ((subclass_labels == neg_subclass) |
(subclass_labels == pos_subclass)).cpu()
subset_pair_auroc = compute_roc_auc(targets_cat[inds],
probs_cat[inds])
paired_aurocs.append(subset_pair_auroc)
rob_auroc = min(paired_aurocs)
metrics[f'{key}_rob_auroc'] = rob_auroc
                if not has_alt_subclass: metrics['alt_subclass_rob_auroc'] = auroc
PROGRESS_BAR_STR += ' | AUROC: {auroc:.4f} | R AUROC: {subclass_rob_auroc:.4f} | ' \
'TR AUROC: {true_subclass_rob_auroc:.4f} | AR AUROC: {alt_subclass_rob_auroc:.4f}'
if progress:
bar.suffix = PROGRESS_BAR_STR.format(
batch=batch_idx + 1, size=len(dataloader),
total=format_timedelta(bar.elapsed_td), eta=format_timedelta(bar.eta_td), **{
**metrics,
**{k: v.avg
for k, v in metric_meters.items()}
})
bar.next()
if progress:
bar.finish()
if activations_handle:
activations_handle.remove()
for k, v in outputs.items():
if type(v) == list and len(v) > 0:
outputs[k] = concatenate_iterable(v)
if bit_pretrained:
return outputs['metrics'], outputs
outputs['metrics'] = metrics
outputs['metrics'].update({k: float(v.avg) for k, v in metric_meters.items()})
outputs['metrics'].update(self._compute_aggregate_metrics(outputs))
self._print_output_metrics(outputs)
if adv_metrics:
scaa = np.mean(
[ga.avg * 100 for ga in np.array(per_class_meters[f'per_true_subclass_accs'])])
self.logger.info(
f'All accs: {[ga.avg * 100 for ga in np.array(per_class_meters[f"per_true_subclass_accs"])]}'
)
self.logger.info(f'SCAA: {scaa:.3f}')
ap = sklearn.metrics.average_precision_score(
outputs['targets'], outputs['probs'],
sample_weight=outputs['reweight'] if reweight_vec is not None else None)
self.logger.info(f'MaP: {ap:.4f}')
return outputs['metrics'], outputs
def _update_metrics(self, metric_meters, acc, loss, losses, corrects, batch_size, reweight_vec):
"""Helper function to update metric meters given network outputs."""
metric_meters['loss'].update(loss, batch_size)
metric_meters['acc'].update(acc[0], batch_size)
adj_losses, adj_counts = self.criterion.compute_group_avg(losses,
torch.zeros_like(corrects),
num_groups=1,
reweight=reweight_vec)
adj_accs, _ = self.criterion.compute_group_avg(corrects, torch.zeros_like(corrects),
num_groups=1, reweight=reweight_vec)
adj_loss = adj_losses[0].item()
adj_acc = adj_accs[0].item() * 100
adj_count = adj_counts[0].item()
metric_meters['loss_rw'].update(adj_loss, adj_count)
metric_meters['acc_rw'].update(adj_acc, adj_count)
def _print_output_metrics(self, outputs):
self.logger.info(outputs['metrics'])
output_strs = [
f'Loss: {outputs["metrics"]["loss"]:.3f}, '
f'Acc.: {outputs["metrics"]["acc"]:.2f}%, '
]
if self.logger.type == 'full':
output_strs += [
f'Rw. loss: {outputs["metrics"]["loss_rw"]:.3f}, '
f'Rw. acc: {outputs["metrics"]["acc_rw"]:.2f}%, ',
]
output_strs += [
f'True rob. loss: {outputs["metrics"]["true_subclass_rob_loss"]:.3f}, '
f'True rob. acc: {outputs["metrics"]["true_subclass_rob_acc"]:.2f}%'
]
if self.has_estimated_subclasses:
# "Robust accuracy" is accuracy on the estimated subclasses. If there are none (i.e., we either have
# no estimate of the subclass labels, or we know the true subclasses), then it is inapplicable.
est_metrics_str = \
(f'Est. rob. loss: {outputs["metrics"]["subclass_rob_loss"]:.3f}, '
f'Est. rob. acc: {outputs["metrics"]["subclass_rob_acc"]:.2f}%, ')
if self.logger.type == 'full':
est_metrics_str += \
(f'Rw. rob. loss: {outputs["metrics"]["subclass_rob_loss_rw"]:.3f}, '
f'Rw. rob. acc: {outputs["metrics"]["subclass_rob_acc_rw"]:.2f}%, ')
output_strs.insert(1, est_metrics_str)
self.logger.basic_info(''.join(output_strs))
def _check_dataset(self, dataset):
"""Checks the validity of the dataset."""
assert isinstance(dataset, GEORGEDataset), 'Dataset must subclass GEORGEDataset.'
assert 'subclass' in dataset.Y_dict.keys()
def _init_activations_hook(self, model, activations_list):
"""Initializes the forward hook to save model activations."""
if isinstance(model, torch.nn.DataParallel):
activation_layer = model.module.activation_layer_name
else:
activation_layer = model.activation_layer_name
activations_handle = register_save_activations_hook(model, activation_layer,
activations_list)
if activation_layer is not None:
assert activations_handle is not None, \
f'No hook registered for activation_layer={activation_layer}'
return activations_handle
def _init_per_class_meters(self, type_to_num_classes):
"""Initializes per_class_meters for loss and accuracy.
Args:
type_to_num_classes(Dict[str, int]): Dictionary object that maps the
label_type (e.g. superclass, subclass, true_subclass) to the number
of classes for that label_type.
Returns:
per_class_meters(Dict[str, List[AverageMeter]]): A dictionary of
per_class_meters, where each per_class_meter is a list of AverageMeter
objects, one for each class. There is a per_class_meter for each
label_type, and for each metric_type (e.g. losses, accs). The
AverageMeter objects are used to track metrics on individual groups.
"""
per_class_meters = {}
for label_type, num_classes in type_to_num_classes.items():
for metric_type in ['losses', 'accs']:
for rw in ['', '_reweighted']:
per_class_meter_name = f'per_{label_type}_{metric_type}{rw}'
per_class_meter = [AverageMeter() for i in range(num_classes)]
per_class_meters[per_class_meter_name] = per_class_meter
return per_class_meters
def _compute_progress_metrics(self, sample_losses, corrects, type_to_labels,
type_to_num_classes, per_class_meters, reweight=None):
"""Extracts metrics from each of the per_class_meters.
Args:
sample_losses(np.ndarray of shape (N, )): The loss computed for
each sample.
corrects(np.ndarray of shape(N, )): Whether or not the model produced
a correct prediction for each sample.
type_to_labels(Dict[str, Union[np.ndarray, torch.Tensor, Sequence]]):
Dictionary object mapping the label_type (e.g. superclass, subclass,
true_subclass) to the labels themselves.
            type_to_num_classes(Dict[str, int]): Dictionary object that maps the
                label_type to the number of classes for that label_type.
per_class_meters(Dict[str, List[AverageMeter]]): A dictionary of
per_class_meters, where a per_class_meter is a list of AverageMeter
objects, one for each class. There is a per_class_meter for each
label_type, and for each metric_type (e.g. losses, accs).
Returns:
metrics(Dict[str, Any]): A dictionary object that describes model
performance based on information in each of the per_class_meters.
"""
batch_stats = {}
for label_type, labels in type_to_labels.items():
num_classes = type_to_num_classes[label_type]
losses, counts = self.criterion.compute_group_avg(sample_losses, labels,
num_groups=num_classes)
accs, _ = self.criterion.compute_group_avg(corrects, labels, num_groups=num_classes)
losses_rw, counts_rw = self.criterion.compute_group_avg(sample_losses, labels,
num_groups=num_classes,
reweight=reweight)
accs_rw, _ = self.criterion.compute_group_avg(corrects, labels, num_groups=num_classes,
reweight=reweight)
batch_stats[label_type] = {
'losses': losses,
'losses_rw': losses_rw,
'counts': counts,
'counts_rw': counts_rw,
'accs': accs,
'accs_rw': accs_rw
}
metrics = {}
for label_type, stats in batch_stats.items():
losses, counts, accs, losses_rw, counts_rw, accs_rw = \
stats['losses'], stats['counts'], stats['accs'], stats['losses_rw'], stats['counts_rw'], stats['accs_rw']
loss_meters = per_class_meters[f'per_{label_type}_losses']
loss_meters_rw = per_class_meters[f'per_{label_type}_losses_reweighted']
acc_meters = per_class_meters[f'per_{label_type}_accs']
acc_meters_rw = per_class_meters[f'per_{label_type}_accs_reweighted']
num_classes = type_to_num_classes[label_type]
for i in range(num_classes):
loss_meters[i].update(losses[i], counts[i])
acc_meters[i].update(accs[i], counts[i])
loss_meters_rw[i].update(losses_rw[i], counts_rw[i])
acc_meters_rw[i].update(accs_rw[i], counts_rw[i])
active = np.array([i for i, m in enumerate(acc_meters) if m.count])
if len(active) > 0:
rob_loss = max([gl.avg for gl in np.array(loss_meters)[active]])
rob_acc = min([ga.avg * 100 for ga in np.array(acc_meters)[active]])
rob_loss_rw = max([gl.avg for gl in np.array(loss_meters_rw)[active]])
rob_acc_rw = min([ga.avg * 100 for ga in np.array(acc_meters_rw)[active]])
else:
rob_loss = 0.
rob_acc = 0.
rob_loss_rw = 0.
rob_acc_rw = 0.
metrics[f'{label_type}_rob_loss'] = rob_loss
metrics[f'{label_type}_rob_acc'] = rob_acc
metrics[f'{label_type}_rob_loss_rw'] = rob_loss_rw
metrics[f'{label_type}_rob_acc_rw'] = rob_acc_rw
if 'true_subclass_rob_acc' not in metrics.keys():
metrics['true_subclass_rob_acc'] = -1
return metrics
def _compute_aggregate_metrics(self, outputs, evaluate=False):
"""Extracts metrics from the outputs object."""
return {}
def _checkpoint(self, model, metrics, checkpoint_metric, epoch):
"""Saves the model.
Args:
model(nn.Module): A PyTorch model.
metrics(Dict[str, Any]): A dictionary object containing
model performance metrics.
checkpoint_metric(str): The checkpoint metric associated with the model.
epoch(int): The current epoch.
"""
if checkpoint_metric not in metrics.keys():
raise KeyError(f'{checkpoint_metric} not in metrics {metrics.keys()}')
if np.isnan(self.state['best_score']):
self.state['best_score'] = metrics[checkpoint_metric]
is_best = True
else:
if 'loss' in checkpoint_metric:
is_best = self.state['best_score'] > metrics[checkpoint_metric]
self.state['best_score'] = min(self.state['best_score'], metrics[checkpoint_metric])
else:
is_best = self.state['best_score'] < metrics[checkpoint_metric]
self.state['best_score'] = max(self.state['best_score'], metrics[checkpoint_metric])
data = {
'epoch': epoch,
'state_dict': model.state_dict(),
'best_score': self.state['best_score'],
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
**metrics
}
if is_best:
self._save(data, f'best_model.pt')
if self.compute_auroc:
save_metrics = [
'val_auroc', 'val_subclass_rob_auroc', 'val_true_subclass_rob_auroc',
'val_alt_subclass_rob_auroc'
]
else:
save_metrics = [
'val_acc', 'val_acc_rw', 'val_subclass_rob_acc', 'val_subclass_rob_acc_rw',
'val_true_subclass_rob_acc'
]
for metric in save_metrics:
if metrics[metric] > self.state['best_' + metric]:
self.state['best_' + metric] = metrics[metric]
self._save(data, f'best_{metric}_model.pt')
if self.config['save_every'] > 0 and epoch % self.config['save_every'] == 0:
self._save(data, f'checkpoint_epoch_{epoch}.pt')
def _save(self, data, filename):
"""If self.save_dir is not None, saves `data`."""
if self.save_dir is not None:
filepath = os.path.join(self.save_dir, filename)
self.logger.info(f'Saving checkpoint to {filepath}...')
torch.save(data, filepath)
else:
self.logger.info('save_dir not initialized. Skipping save step.')
| hidden-stratification-master | stratification/classification/george_classification.py |
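The activation saving in `_init_activations_hook` above uses PyTorch's standard `register_forward_hook` mechanism. A self-contained sketch of the same pattern, with a toy model standing in for the real architectures:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))
activations = []

def save_activations(module, inputs, output):
    # flatten to (batch, features), mirroring out.view(out.size(0), -1) above
    activations.append(output.reshape(output.size(0), -1).detach())

handle = model[1].register_forward_hook(save_activations)  # hook the ReLU layer
with torch.no_grad():
    model(torch.randn(4, 8))
handle.remove()  # remove the hook once activations are collected
print(activations[0].shape)  # torch.Size([4, 16])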
| hidden-stratification-master | stratification/classification/__init__.py |
import torch
from sklearn.metrics import roc_auc_score
class AverageMeter:
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val, self.avg, self.sum, self.count = [0] * 4
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.data.cpu().item()
if isinstance(n, torch.Tensor):
n = n.data.cpu().item()
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count else 0
def __str__(self):
return f'sum: {self.sum}, count: {self.count}, avg: {self.avg}'
def compute_accuracy(output, target, topk=(1, ), return_preds=False):
"""Computes the precision@k for the specified values of k"""
topk_orig = topk
topk = [k for k in topk if k <= output.size(1)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
label_preds = pred[:, 0]
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100. / batch_size))
res.extend(100 for k in topk_orig if k > output.size(1))
if return_preds:
return res, label_preds
return res
def compute_roc_auc(targets, probs):
"""'Safe' AUROC computation"""
if isinstance(targets, torch.Tensor):
targets = targets.numpy()
if isinstance(probs, torch.Tensor):
probs = probs.numpy()
try:
auroc = roc_auc_score(targets, probs)
except ValueError:
auroc = -1
return auroc
| hidden-stratification-master | stratification/classification/utils.py |
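A short usage sketch for the helpers above; the numbers are made up, and the import assumes the package layout shown in the file path:

import torch
from stratification.classification.utils import AverageMeter, compute_roc_auc

meter = AverageMeter()
meter.update(0.75, n=32)  # e.g. batch accuracy 0.75 averaged over 32 samples
meter.update(0.50, n=16)
print(round(meter.avg, 3))  # 0.667: count-weighted running mean

targets = torch.tensor([0, 0, 1, 1])
probs = torch.tensor([0.10, 0.40, 0.35, 0.80])
print(compute_roc_auc(targets, probs))  # 0.75; returns -1 if only one class is present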
import logging
import torch
import numpy as np
class LossComputer:
def __init__(self, criterion, is_robust, n_groups, group_counts, robust_step_size, stable=True,
size_adjustments=None, auroc_version=False, class_map=None, use_cuda=True):
self.criterion = criterion
self.is_robust = is_robust
self.auroc_version = auroc_version
self.n_groups = n_groups
if auroc_version:
assert (class_map is not None)
self.n_gdro_groups = len(class_map[0]) * len(class_map[1])
self.class_map = class_map
else:
self.n_gdro_groups = n_groups
self.group_range = torch.arange(self.n_groups).unsqueeze(1).long()
if use_cuda: self.group_range = self.group_range.cuda()
if self.is_robust:
self.robust_step_size = robust_step_size
logging.info(f'Using robust loss with inner step size {self.robust_step_size}')
self.stable = stable
self.group_counts = group_counts.to(self.group_range.device)
if size_adjustments is not None:
self.do_adj = True
if auroc_version:
self.adj = torch.tensor(size_adjustments[0]).float().to(self.group_range.device)
self.loss_adjustment = self.adj / torch.sqrt(self.group_counts[:-1])
else:
self.adj = torch.tensor(size_adjustments).float().to(self.group_range.device)
self.loss_adjustment = self.adj / torch.sqrt(self.group_counts)
else:
self.adj = torch.zeros(self.n_gdro_groups).float().to(self.group_range.device)
self.do_adj = False
self.loss_adjustment = self.adj
logging.info(
f'Per-group loss adjustments: {np.round(self.loss_adjustment.tolist(), 2)}')
# The following quantities are maintained/updated throughout training
if self.stable:
logging.info('Using numerically stabilized DRO algorithm')
self.adv_probs_logits = torch.zeros(self.n_gdro_groups).to(self.group_range.device)
else: # for debugging purposes
                logging.warning('Using original DRO algorithm')
self.adv_probs = torch.ones(self.n_gdro_groups).to(
self.group_range.device) / self.n_gdro_groups
else:
logging.info('Using ERM')
def loss(self, yhat, y, group_idx=None, is_training=False):
# compute per-sample and per-group losses
per_sample_losses = self.criterion(yhat, y)
batch_size = y.shape[0]
group_losses, group_counts = self.compute_group_avg(per_sample_losses, group_idx)
corrects = (torch.argmax(yhat, 1) == y).float()
group_accs, group_counts = self.compute_group_avg(corrects, group_idx)
# compute overall loss
if self.is_robust:
if self.auroc_version:
neg_subclasses, pos_subclasses = self.class_map[0], self.class_map[1]
pair_losses = []
for neg_subclass in neg_subclasses:
neg_count = group_counts[neg_subclass]
neg_sbc_loss = group_losses[neg_subclass] * neg_count
for pos_subclass in pos_subclasses:
pos_count = group_counts[pos_subclass]
pos_sbc_loss = group_losses[pos_subclass] * pos_count
tot_count = neg_count + pos_count
tot_count = tot_count + (tot_count == 0).float()
pair_loss = (neg_sbc_loss + pos_sbc_loss) / tot_count
pair_losses.append(pair_loss)
loss, _ = self.compute_robust_loss(torch.cat([l.view(1) for l in pair_losses]))
else:
loss, _ = self.compute_robust_loss(group_losses)
else:
loss = per_sample_losses.mean()
return loss, (per_sample_losses, corrects), (group_losses, group_accs, group_counts)
def compute_robust_loss(self, group_loss):
if torch.is_grad_enabled(): # update adv_probs if in training mode
adjusted_loss = group_loss
if self.do_adj:
adjusted_loss += self.loss_adjustment
logit_step = self.robust_step_size * adjusted_loss.data
if self.stable:
self.adv_probs_logits = self.adv_probs_logits + logit_step
else:
self.adv_probs = self.adv_probs * torch.exp(logit_step)
self.adv_probs = self.adv_probs / self.adv_probs.sum()
if self.stable:
adv_probs = torch.softmax(self.adv_probs_logits, dim=-1)
else:
adv_probs = self.adv_probs
robust_loss = group_loss @ adv_probs
return robust_loss, adv_probs
def compute_group_avg(self, losses, group_idx, num_groups=None, reweight=None):
# compute observed counts and mean loss for each group
if num_groups is None:
group_range = self.group_range
else:
group_range = torch.arange(num_groups).unsqueeze(1).long().to(group_idx.device)
if reweight is not None:
group_loss, group_count = [], []
reweighted = losses * reweight
for i in range(num_groups):
inds = group_idx == i
group_losses = reweighted[inds]
group_denom = torch.sum(reweight[inds])
group_loss.append(
torch.sum(group_losses) / (group_denom + (group_denom == 0).float()))
group_count.append(group_denom)
group_loss, group_count = torch.tensor(group_loss), torch.tensor(group_count)
else:
group_map = (group_idx == group_range).float()
group_count = group_map.sum(1)
group_denom = group_count + (group_count == 0).float() # avoid nans
group_loss = (group_map @ losses.view(-1)) / group_denom
return group_loss, group_count
def __call__(self, yhat, y, group_idx):
return self.loss(yhat, y, group_idx)
def init_criterion(criterion_config, robust, trainset, use_cuda):
num_subclasses = trainset.get_num_classes('subclass')
subclass_counts = trainset.get_class_counts('subclass')
criterion = torch.nn.CrossEntropyLoss(reduction='none')
if robust:
size_adjustments = [criterion_config.get('size_adjustment', 0)] * num_subclasses
else:
size_adjustments = None
criterion = LossComputer(criterion, robust, num_subclasses, subclass_counts,
criterion_config['robust_lr'], stable=criterion_config['stable_dro'],
size_adjustments=size_adjustments,
auroc_version=criterion_config['auroc_gdro'],
class_map=trainset.get_class_map('subclass'), use_cuda=use_cuda)
return criterion
| hidden-stratification-master | stratification/classification/losses/loss_computer.py |
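`compute_robust_loss` above implements the group DRO exponentiated-gradient update (Sagawa et al., 2020); the `stable` branch accumulates logits and applies a softmax, which agrees with the multiplicative-weights branch after normalization. A minimal numeric sketch of one update step, with made-up group losses:

import torch

group_loss = torch.tensor([0.2, 1.5, 0.7])  # made-up per-group losses
step_size = 0.01

# stable variant: accumulate logits, softmax when weighting
logits = torch.zeros(3) + step_size * group_loss
weights_stable = torch.softmax(logits, dim=-1)

# original variant: multiplicative weights, renormalized
w = torch.ones(3) / 3 * torch.exp(step_size * group_loss)
w = w / w.sum()

print(torch.allclose(weights_stable, w))   # True: the two updates agree
robust_loss = group_loss @ weights_stable  # upweights the worst group over time
print(float(robust_loss))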
from .loss_computer import init_criterion
| hidden-stratification-master | stratification/classification/losses/__init__.py |
import os
import logging
import random
from collections import defaultdict, Counter
import itertools
from PIL import Image
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
from stratification.classification.datasets.base import GEORGEDataset
class ISICDataset(GEORGEDataset):
"""ISIC Dataset
"""
_channels = 3
_resolution = 224
_normalization_stats = {'mean': (0.71826, 0.56291, 0.52548), 'std': (0.16318, 0.14502, 0.17271)}
superclass_names = ['benign', 'malignant']
def __init__(self, root, split, transform=None, download=False, ontology='patch',
augment=False):
assert (transform is None)
transform = get_transform_ISIC(augment=augment)
super().__init__('isic', root, split, transform=transform, download=download,
ontology=ontology)
@property
def processed_folder(self):
return os.path.join(self.root, 'isic')
def _check_exists(self):
"""Checks whether or not the isic labels CSV has been initialized."""
return os.path.isfile(os.path.join(self.processed_folder, 'labels.csv')) and \
os.path.isdir(os.path.join(self.processed_folder, 'images'))
def _download(self):
"""Raises an error if the raw dataset has not yet been downloaded."""
raise ValueError('Run `isic_download.py` in order to download the ISIC dataset.')
def _load_samples(self):
"""Loads the ISIC dataset from the processed_folder"""
if self.ontology == 'patch':
self.true_subclass_names = ['benign/no_patch', 'benign/patch', 'malignant']
elif self.ontology == 'histopathology':
self.true_subclass_names = ['benign/no_hist', 'benign/hist', 'malignant']
else:
raise ValueError(f'Ontology {self.ontology} not supported.')
self.images_path = os.path.join(self.processed_folder, 'images')
labels_path = os.path.join(self.processed_folder, 'labels.csv')
self.df = pd.read_csv(labels_path)
# throw out unknowns
self.df = self.df.loc[self.df.benign_malignant.isin({'benign', 'malignant'})]
# split dataset
split_df = self.df[self.df['fold'] == self.split]
logging.info(f'Using {len(split_df)} of {len(self.df)} images in split "{self.split}"')
self.df = split_df
self.df = self.df.set_index('Image Index')
superclass_labels, true_subclass_labels, alt_subclass_labels = [], [], []
for idx in range(len(self.df)):
sup_label, sub_label = self._get_labels_from_id(idx, ontology=self.ontology)
superclass_labels.append(sup_label)
true_subclass_labels.append(sub_label)
_, alt_sub_label = self._get_labels_from_id(
idx, ontology=({'patch', 'histopathology'} - {self.ontology}).pop())
alt_subclass_labels.append(alt_sub_label)
X = self.df.index.values
Y_dict = {
'superclass': torch.tensor(superclass_labels),
'true_subclass': torch.tensor(true_subclass_labels),
'alt_subclass': torch.tensor(alt_subclass_labels),
}
return X, Y_dict
def _get_labels_from_id(self, idx, ontology):
"""Gets superclass and subclass label for a given example id,
based on the ontology."""
suplabel = self.df['Diagnosis'].iloc[idx].astype(int)
assert (suplabel in (0, 1))
if ontology == 'patch':
sublabel_raw = self.df['patch'].iloc[idx].astype(int)
if suplabel == 0: # benign
sublabel = sublabel_raw
else:
sublabel = 2
elif ontology == 'histopathology':
sublabel_raw = self.df['diagnosis_confirm_type'].iloc[idx]
if suplabel == 0:
sublabel = int(sublabel_raw == 'histopathology')
else:
sublabel = 2
return suplabel, sublabel
def __getitem__(self, idx):
"""
Args:
idx (int): Index
Returns:
tuple: (x: torch.Tensor, y: dict) where X is a tensor representing an image
and y is a dictionary of possible labels.
"""
# load the original image
image_path = os.path.join(self.processed_folder, 'images', self.X[idx])
image = Image.open(image_path)
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
x = image
y_dict = {name: label[idx] for name, label in self.Y_dict.items()}
return x, y_dict
def get_transform_ISIC(augment=False):
test_transform_list = [
transforms.Resize(ISICDataset._resolution),
transforms.CenterCrop(ISICDataset._resolution),
transforms.ToTensor(),
transforms.Normalize(mean=ISICDataset._normalization_stats['mean'],
std=ISICDataset._normalization_stats['std'])
]
if not augment:
return transforms.Compose(test_transform_list)
train_transform_list = [transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip()] + test_transform_list
return transforms.Compose(train_transform_list)
| hidden-stratification-master | stratification/classification/datasets/isic.py |
from .base import GEORGEDataset, DATA_SPLITS, LABEL_TYPES
from .celebA import CelebADataset
from .isic import ISICDataset
from .waterbirds import WaterbirdsDataset
from .mnist import MNISTDataset
| hidden-stratification-master | stratification/classification/datasets/__init__.py |
import os
import torch
import pandas as pd
from PIL import Image
import logging
import numpy as np
import torchvision.transforms as transforms
from .base import GEORGEDataset
class CelebADataset(GEORGEDataset):
"""
CelebA dataset (already cropped and centered).
Note: idx and filenames are off by one.
Adapted from https://github.com/kohpangwei/group_DRO/blob/master/data/celebA_dataset.py
"""
superclass_names = ['No Blond Hair', 'Blond Hair']
true_subclass_names = [
'Blond_Hair = 0, Male = 0', 'Blond_Hair = 0, Male = 1', 'Blond_Hair = 1, Male = 0',
'Blond_Hair = 1, Male = 1'
]
_channels = 3
_resolution = 224
_normalization_stats = {'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}
def __init__(self, root, split, transform=None, download=False, ontology='default',
augment=False):
assert (transform is None)
transform = get_transform_celebA()
super().__init__('celebA', root, split, transform=transform, download=download,
ontology=ontology)
def _download(self):
"""Raises an error if the raw dataset has not yet been downloaded."""
raise ValueError('Follow the README instructions to download the dataset.')
def _check_exists(self):
"""Checks whether or not the waterbirds labels CSV has been initialized."""
return os.path.isdir(os.path.join(self.root, 'celebA', 'img_align_celeba')) and \
os.path.isfile(os.path.join(self.root, 'celebA', 'list_attr_celeba.csv'))
def _load_samples(self):
self.target_name = 'Blond_Hair'
self.confounder_names = ['Male']
attrs_df = pd.read_csv(os.path.join(self.root, 'celebA', 'list_attr_celeba.csv'),
delim_whitespace=True)
# Split out filenames and attribute names
self.data_dir = os.path.join(self.root, 'celebA', 'img_align_celeba')
filename_array = attrs_df['image_id'].values
attrs_df = attrs_df.drop(labels='image_id', axis='columns')
attr_names = attrs_df.columns.copy()
# Then cast attributes to numpy array and set them to 0 and 1
# (originally, they're -1 and 1)
attrs_df = attrs_df.values
attrs_df[attrs_df == -1] = 0
def attr_idx(attr_name):
return attr_names.get_loc(attr_name)
# Get the y values
target_idx = attr_idx(self.target_name)
y_array = attrs_df[:, target_idx]
self._num_supclasses = np.amax(y_array).item() + 1
# Map the confounder attributes to a number 0,...,2^|confounder_idx|-1
self.confounder_idx = [attr_idx(a) for a in self.confounder_names]
confounders = attrs_df[:, self.confounder_idx]
confounder_array = confounders @ np.power(2, np.arange(len(self.confounder_idx)))
# Map to groups
self._num_subclasses = self._num_supclasses * pow(2, len(self.confounder_idx))
group_array = (y_array * (self._num_subclasses / 2) + confounder_array).astype('int')
# Read in train/val/test splits
split_df = pd.read_csv(os.path.join(self.root, 'celebA', 'list_eval_partition.csv'),
delim_whitespace=True)
split_array = split_df['partition'].values
split_dict = {'train': 0, 'val': 1, 'test': 2}
split_indices = split_array == split_dict[self.split]
X = filename_array[split_indices]
Y_dict = {
'superclass': torch.tensor(y_array[split_indices]),
'true_subclass': torch.tensor(group_array[split_indices])
}
return X, Y_dict
def __getitem__(self, idx):
"""
Args:
idx (int): Index
Returns:
            tuple: (x: torch.Tensor, y_dict: dict) where x is a tensor representing an image
                and y_dict is a dictionary of possible labels.
"""
img_filename = os.path.join(self.data_dir, self.X[idx])
image = Image.open(img_filename)
if self.transform is not None:
image = self.transform(image)
x = image
y_dict = {k: v[idx] for k, v in self.Y_dict.items()}
return x, y_dict
def get_transform_celebA():
orig_w = 178
orig_h = 218
orig_min_dim = min(orig_w, orig_h)
target_resolution = (224, 224)
transform = transforms.Compose([
transforms.CenterCrop(orig_min_dim),
transforms.Resize(target_resolution),
transforms.ToTensor(),
transforms.Normalize(**CelebADataset._normalization_stats),
])
return transform
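# Minimal sketch of the group (subclass) index computed in _load_samples: with one
# binary confounder (Male) it reduces to 2 * Blond_Hair + Male, which matches the
# ordering of true_subclass_names above. The loop below is a toy check.
if __name__ == '__main__':
    for blond in (0, 1):
        for male in (0, 1):
            group = blond * 2 + male
            print(f'Blond_Hair={blond}, Male={male} -> true_subclass={group} '
                  f'({CelebADataset.true_subclass_names[group]})')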
| hidden-stratification-master | stratification/classification/datasets/celebA.py |
import argparse
import collections
import os
import random
import shutil
import pandas as pd
import requests
from PIL import Image
from tqdm import tqdm
from stratification.utils.utils import flatten_dict
def main():
parser = argparse.ArgumentParser(description='Downloads the ISIC dataset')
parser.add_argument(
'--root', default='./data/isic',
help='Directory in which to place the `raw` and `processed` ISIC subdirectories.')
    parser.add_argument(
        '--max_samples', type=int, default=25000,
        help='The maximum number of ISIC images to download. '
        'At time of writing there are ~23000 images in the database.')
    # options for the training/validation/test split
    parser.add_argument(
        '--preset_split_path', default=None, help='If not None, generates a dataset using the '
        'split json file in the provided path')
    parser.add_argument('--seed', type=int, default=1,
                        help='The random seed used when splitting the data.')
    parser.add_argument(
        '--val_proportion', type=float, default=0.1,
        help='The proportion of the overall dataset to allocate '
        'to the validation partition of the dataset.')
    parser.add_argument(
        '--test_proportion', type=float, default=0.1,
        help='The proportion of the overall dataset to allocate '
        'to the test partition of the dataset.')
args = parser.parse_args()
root = args.root
max_samples = args.max_samples
preset_split_path = args.preset_split_path
seed = args.seed
val_proportion = args.val_proportion
test_proportion = args.test_proportion
print(f"Downloading data into {root} for up to {max_samples} samples...")
print(f"Downloading metadata into {os.path.join(root, 'raw', 'metadata.csv')}...")
download_isic_metadata(root, max_samples)
print(f"Downloading images into {os.path.join(root, 'raw', 'images')}...")
download_isic_images(root)
print(
f"Preprocessing metadata (adding columns, removing uncertain diagnoses) and saving into {os.path.join(root, 'processed', 'labels.csv')}..."
)
preprocess_isic_metadata(root, preset_split_path, seed=seed, val_proportion=val_proportion,
test_proportion=test_proportion)
print(
f"Preprocessing images (transforming to 3-channel RGB, resizing to 224x224) and saving into {os.path.join(root, 'raw', 'images')}..."
)
preprocess_isic_images(root)
def download_isic_metadata(root, max_samples):
"""Downloads the metadata CSV from the ISIC website."""
raw_dir = os.path.join(root, 'raw')
os.makedirs(raw_dir, exist_ok=True)
r = requests.get(f'https://isic-archive.com/api/v1/image?limit={max_samples}'
f'&sort=name&sortdir=1&detail=false')
image_ids = r.json()
image_ids = [image_id['_id'] for image_id in image_ids]
entries = []
for image_id in tqdm(image_ids):
r = requests.get(f'https://isic-archive.com/api/v1/image/{image_id}')
entry = flatten_dict(r.json(), sep='.')
entries.append(entry)
metadata_df = pd.DataFrame(entries)
metadata_df = metadata_df.set_index('_id')
metadata_df.to_csv(os.path.join(raw_dir, 'metadata.csv'))
return metadata_df
def download_isic_images(root):
"""Given the metadata CSV, downloads the ISIC images."""
metadata_path = os.path.join(root, 'raw', 'metadata.csv')
if not os.path.isfile(metadata_path):
raise FileNotFoundError('metadata.csv not downloaded. '
                                'Run `download_isic_metadata` before this function.')
metadata_df = pd.read_csv(metadata_path)
metadata_df = metadata_df.set_index('_id')
raw_dir = os.path.join(root, 'raw', 'images')
os.makedirs(raw_dir, exist_ok=True)
image_ids = list(metadata_df.index)
for image_id in tqdm(image_ids):
r = requests.get(f'https://isic-archive.com/api/v1/image/{image_id}/download', stream=True)
r.raise_for_status()
image_path = os.path.join(raw_dir, f'{image_id}.jpg')
with open(image_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
del r
def preprocess_isic_metadata(root, preset_split_path, seed=1, val_proportion=0.1,
test_proportion=0.1):
"""Preprocesses the raw ISIC metadata."""
raw_dir = os.path.join(root, 'raw')
processed_dir = os.path.join(root, 'processed')
os.makedirs(processed_dir, exist_ok=True)
metadata_path = os.path.join(raw_dir, 'metadata.csv')
if not os.path.isfile(metadata_path):
raise FileNotFoundError('metadata.csv not found while preprocessing ISIC dataset. '
'Run `download_isic_metadata` and `download_isic_images` before '
'calling `preprocess_isic_metadata`.')
metadata_df = pd.read_csv(metadata_path)
metadata_df = metadata_df.set_index('_id')
labels_df = _remove_uncertain_diagnoses(metadata_df)
labels_df = _add_split_column(labels_df, preset_split_path, seed, val_proportion,
test_proportion)
labels_df = _add_patch_column(labels_df)
labels_df.to_csv(os.path.join(processed_dir, 'labels.csv'))
def preprocess_isic_images(root):
"""Preprocesses the images."""
raw_dir = os.path.join(root, 'raw')
if not os.path.isdir(os.path.join(raw_dir, 'images')):
raise FileNotFoundError('Raw ISIC images not found. Run `download_isic_images` before '
'calling `preprocess_isic_images`.')
processed_dir = os.path.join(root, 'processed')
labels_df = pd.read_csv(os.path.join(processed_dir, 'labels.csv'))
labels_df = labels_df.set_index('_id')
image_ids = labels_df.index.tolist()
os.makedirs(os.path.join(processed_dir, 'images'), exist_ok=True)
for image_id in tqdm(image_ids):
out_path = os.path.join(processed_dir, 'images', f'{image_id}.jpg')
if os.path.isfile(out_path):
continue
image = Image.open(os.path.join(raw_dir, 'images', f'{image_id}.jpg'))
image = image.resize((224, 224))
if image.mode in ("RGBA", "P"):
image = image.convert("RGB")
image.save(out_path)
def _remove_uncertain_diagnoses(metadata_df):
    """Removes samples lacking a confirmed 'benign' or 'malignant' diagnosis."""
    labels_df = metadata_df.loc[metadata_df['meta.clinical.benign_malignant'].isin(
        {'benign', 'malignant'})].copy()  # throw out unknowns; copy to avoid mutating a view
print(
f"Using {len(labels_df)} out of {len(metadata_df)} total samples with confirmed 'benign' or 'malignant' diagnoses..."
)
malignant_mask = labels_df['meta.clinical.benign_malignant'] == 'malignant'
labels_df['is_malignant'] = None
labels_df.loc[malignant_mask, 'is_malignant'] = 1
labels_df.loc[~malignant_mask, 'is_malignant'] = 0
assert not any(is_malignant is None for is_malignant in labels_df['is_malignant'])
return labels_df
def _add_split_column(labels_df, preset_split_path, seed, val_proportion, test_proportion):
"""Adds a split column to the input DataFrame."""
idxs = labels_df.index.tolist()
if preset_split_path is not None:
train_idxs, val_idxs, test_idxs = _get_preset_train_val_test_split(
labels_df, preset_split_path)
else:
train_idxs, val_idxs, test_idxs = _get_random_train_val_test_split(
idxs, seed, val_proportion, test_proportion)
# add to labels_df
labels_df['split'] = None
labels_df.loc[train_idxs, 'split'] = 'train'
labels_df.loc[val_idxs, 'split'] = 'val'
labels_df.loc[test_idxs, 'split'] = 'test'
assert not any(split is None for split in labels_df['split'])
return labels_df
def _add_patch_column(labels_df):
"""Adds a patch column to the input DataFrame."""
patch_mask = labels_df['dataset.name'] == 'SONIC'
# add to labels_df
labels_df['patch'] = None
labels_df.loc[patch_mask, 'patch'] = 1
labels_df.loc[~patch_mask, 'patch'] = 0
assert not any(patch is None for patch in labels_df['patch'])
return labels_df
def _get_preset_train_val_test_split(labels_df, preset_split_path):
"""Returns a tuple with indices for preset train, val, test, splits."""
raise NotImplementedError
def _get_random_train_val_test_split(idxs, seed, val_proportion, test_proportion):
"""Returns a tuple with indices for random train, val, test splits."""
n = len(idxs)
# ensures reproducibility
random.seed(seed)
shuffled_idxs = random.sample(idxs, n)
train_proportion = 1.0 - val_proportion - test_proportion
train_n = int(train_proportion * n)
train_idxs = shuffled_idxs[:train_n]
val_n = int(val_proportion * n)
val_idxs = shuffled_idxs[train_n:(train_n + val_n)]
test_idxs = shuffled_idxs[(train_n + val_n):]
return (train_idxs, val_idxs, test_idxs)
if __name__ == '__main__':
main()
| hidden-stratification-master | stratification/classification/datasets/isic_download.py |
import itertools
import os
import logging
from collections import Counter
from PIL import Image
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
from stratification.classification.datasets.base import GEORGEDataset
class WaterbirdsDataset(GEORGEDataset):
"""Waterbirds Dataset
"""
_channels = 3
_resolution = 224
_normalization_stats = {'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}
# used to determine subclasses (index here used for querying sample class)
_df_attr_keys = ['y', 'place']
split_dict = {'train': 0, 'val': 1, 'test': 2}
def __init__(self, root, split, transform=None, download=False, ontology='default',
augment=False):
assert (transform is None)
transform = get_transform_cub()
super().__init__('waterbirds', root, split, transform=transform, download=download,
ontology=ontology)
@property
def processed_folder(self):
return os.path.join(self.root, 'waterbirds')
def _check_exists(self):
"""Checks whether or not the waterbirds labels CSV has been initialized."""
return os.path.isfile(os.path.join(self.processed_folder, 'metadata.csv'))
def _download(self):
"""Raises an error if the raw dataset has not yet been downloaded."""
raise ValueError('Follow the README instructions to download the dataset.')
def _load_samples(self):
"""Loads the Waterbirds dataset"""
self.df = pd.read_csv(os.path.join(self.processed_folder, 'metadata.csv'))
# initialize the subclasses (before split because should be invariant across splits)
class_attrs_to_class_idx = self._get_class_attrs_to_class_idx()
# split dataset
split_df = self.df[self.df['split'] == self.split_dict[self.split]]
logging.info(f'Using {len(split_df)} of {len(self.df)} images in split "{self.split}"')
self.df = split_df
# gets the data
image_names, original_labels = self._get_data(class_attrs_to_class_idx)
# reverse dict to easily lookup class_attrs from label
class_idx_to_class_attrs = {i: k for k, i in class_attrs_to_class_idx.items()}
logging.info('Sample attributes:')
logging.info(self._df_attr_keys)
logging.info('--')
logging.info(f'Original label counts ({self.split} split):')
logging.info('\n'.join([
f'idx: {class_idx},\t count: {class_count},\t '
f'attrs:{class_idx_to_class_attrs[class_idx]}'
for class_idx, class_count in sorted(Counter(original_labels).items())
]))
logging.info('--')
superclass_labels, self.superclass_names = self._get_superclass_labels_from_id(
original_labels, class_idx_to_class_attrs)
true_subclass_labels, self.true_subclass_names = self._get_true_subclass_labels_from_id(
original_labels, class_idx_to_class_attrs)
X = image_names
Y_dict = {
'superclass': torch.from_numpy(superclass_labels),
'true_subclass': torch.from_numpy(true_subclass_labels),
'original': torch.from_numpy(original_labels)
}
return X, Y_dict
def _get_data(self, class_attrs_to_class_idx):
"""
Iterates through the DataFrame to extract the image name and label.
The subclass labels are automatically assigned based on the row's attributes.
"""
image_names = []
labels = []
for idx, row in self.df.iterrows():
image_name = row['img_filename']
image_names.append(image_name)
row_attrs = []
for df_attr_key in self._df_attr_keys:
row_attr = row[df_attr_key]
row_attrs.append(row_attr)
label = class_attrs_to_class_idx[tuple(row_attrs)]
labels.append(label)
image_names = np.array(image_names)
labels = np.array(labels)
return image_names, labels
def _get_class_attrs_to_class_idx(self):
"""Uses self._df_attr_keys to identify all possible subclasses.
Subclass labels (class_idx) are mapped to a tuple of sample attributes (class_attrs).
"""
df_attr_uniques = []
for i, df_attr_key in enumerate(self._df_attr_keys):
uniques = sorted(self.df[df_attr_key].unique())
df_attr_uniques.append(uniques)
class_attrs_to_class_idx = {k: i for i, k in enumerate(itertools.product(*df_attr_uniques))}
return class_attrs_to_class_idx
def _get_superclass_labels_from_id(self, original_labels, class_idx_to_class_attrs):
"""Superclass labels are determined from the original_labels by the given ontology.
        The default ontology maps the 'y' attribute directly to the superclass
        (0 = LANDBIRD, 1 = WATERBIRD)."""
superclass_labels = []
if self.ontology == 'default':
y_attr_idx = self._df_attr_keys.index('y')
for label in original_labels:
class_attrs = class_idx_to_class_attrs[label]
if class_attrs[y_attr_idx] == 0:
superclass_label = 0
elif class_attrs[y_attr_idx] == 1:
superclass_label = 1
else:
raise ValueError(
f'Unrecognized class attributes {class_attrs} for label {label}')
superclass_labels.append(superclass_label)
superclass_names = ['LANDBIRD', 'WATERBIRD']
else:
raise ValueError(f'superclass id {self.ontology} not recognized.')
return np.array(superclass_labels), superclass_names
def _get_true_subclass_labels_from_id(self, original_labels, class_idx_to_class_attrs):
"""True subclass labels are determined from the original_labels by the given ontology"""
true_subclass_labels = []
if self.ontology == 'default':
y_attr_idx = self._df_attr_keys.index('y')
place_attr_idx = self._df_attr_keys.index('place')
for label in original_labels:
class_attrs = class_idx_to_class_attrs[label]
if class_attrs[y_attr_idx] == 0 and class_attrs[place_attr_idx] == 0:
true_subclass_label = 0
elif class_attrs[y_attr_idx] == 0 and class_attrs[place_attr_idx] == 1:
true_subclass_label = 1
elif class_attrs[y_attr_idx] == 1 and class_attrs[place_attr_idx] == 0:
true_subclass_label = 2
elif class_attrs[y_attr_idx] == 1 and class_attrs[place_attr_idx] == 1:
true_subclass_label = 3
else:
raise ValueError(
f'Unrecognized class attributes {class_attrs} for label {label}')
true_subclass_labels.append(true_subclass_label)
true_subclass_names = [
'LANDBIRD on land', 'LANDBIRD on water', 'WATERBIRD on land', 'WATERBIRD on water'
]
else:
raise ValueError(f'subclass id {self.ontology} not recognized.')
return np.array(true_subclass_labels), true_subclass_names
def __getitem__(self, idx):
"""
Args:
idx (int): Index
Returns:
            tuple: (x: torch.Tensor, y: dict) where x is a tensor representing an image
and y is a dictionary of possible labels.
"""
image_path = os.path.join(self.data_dir, self.X[idx])
image = Image.open(image_path)
if self.transform is not None:
image = self.transform(image)
x = image
y_dict = {name: label[idx] for name, label in self.Y_dict.items()}
return x, y_dict
def get_transform_cub():
target_resolution = (224, 224)
transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(target_resolution),
transforms.ToTensor(),
transforms.Normalize(**WaterbirdsDataset._normalization_stats),
])
return transform
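# Minimal sketch of the 'default' ontology used in _get_true_subclass_labels_from_id:
# the (y, place) attribute pair maps to subclass index 2 * y + place, matching the
# subclass names listed in that method. The loop below is a toy check.
if __name__ == '__main__':
    names = ['LANDBIRD on land', 'LANDBIRD on water', 'WATERBIRD on land', 'WATERBIRD on water']
    for y in (0, 1):
        for place in (0, 1):
            sub = 2 * y + place
            print(f'y={y}, place={place} -> true_subclass={sub} ({names[sub]})')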
| hidden-stratification-master | stratification/classification/datasets/waterbirds.py |
import logging
import os
import torch
from torch.utils.data import Dataset
import numpy as np
import random
DATA_SPLITS = ['train', 'train_clean', 'val', 'test']
LABEL_TYPES = ['superclass', 'subclass', 'true_subclass', 'alt_subclass']
class GEORGEDataset(Dataset):
"""
Lightweight class that enforces design pattern used within the training
loop. Essential components:
split (str) must be in {'train', 'train_clean', 'val', 'test'}.
- 'train' datasets are for model training. Data augmentation commonly applied.
- 'train_clean' datasets are for model evaluation on the train set.
       Unshuffled and with no data augmentation. Used for the clustering step.
- 'val' datasets are for model evaluation during training.
- 'test' datasets are for model evaluation after training.
X (any) input to model. Passed through directly.
Y_dict (dict) targets used for computing loss and metrics.
- the 'superclass' key will be used for data quality loss computation
- the 'subclass' key will be used to compute metrics, as well as for DRO loss
- the 'true_subclass' key will be used to compute metrics, if available
"""
def __init__(self, name, root, split, transform=None, download=False, ontology='default'):
self.name = name
self.root = root
self.data_dir = os.path.join(self.root, name)
self.split = split
self.transform = transform
assert self.split in DATA_SPLITS
if not self._check_exists():
if download:
self._download()
else:
raise ValueError(f'{self.name} dataset not found.')
self.ontology = ontology
logging.info(f'Loading {self.split} split of {self.name}')
self.X, self.Y_dict = self._load_samples()
assert 'superclass' in self.Y_dict.keys(), \
"Y_dict['superclass'] must be populated with superclass (target) labels."
if 'true_subclass' in self.Y_dict.keys():
logging.info('True subclass available.')
self.true_subclass_available = True
else:
logging.info('True subclass unavailable.')
self.true_subclass_available = False
assert (self.true_subclass_available)
sup_to_true_sub_map = build_sup_to_sub_map(self.Y_dict['superclass'],
self.Y_dict['true_subclass'])
self._class_maps = {'true_subclass': sup_to_true_sub_map}
self._subclass_labels_added = False
def _check_exists(self):
"""
Checks if the dataset has been initialized.
"""
raise NotImplementedError
def _download(self):
"""
Downloads the dataset if it could not be found
"""
raise NotImplementedError
def _load_samples(self):
"""
Loads the X tensor and the Y_dict dictionary for training.
"""
raise NotImplementedError
def __len__(self):
"""
Returns the length of the dataset by returning the length of
one of the label lists.
"""
return len(next(iter(self.Y_dict.values())))
    def __getitem__(self, idx):
"""
Must be overridden.
"""
raise NotImplementedError
def add_labels(self, key, values):
"""
Adds a key-value pair to the labels dictionary.
"""
assert len(values) == len(self)
if key in self.Y_dict.keys():
logging.info(f'{key} in Y_dict already exists and will be overwritten.')
if isinstance(values, torch.Tensor):
values_tensor = values.clone().detach()
else:
values_tensor = torch.tensor(values)
self.Y_dict[key] = values_tensor
def add_subclass_labels(self, subclass_labels, seed=0):
"""
Adds subclass_labels. If subclass_labels is a string, it must be in
{'superclass', 'true_subclass', 'random'}. Else, subclass_labels is a
list of labels, and thus is added directly to Y_dict.
"""
if type(subclass_labels) == str:
# use a set of labels in Y_dict (commonly 'superclass' or 'true_subclass')
if subclass_labels in self.Y_dict.keys():
self.add_labels('subclass', self.Y_dict[subclass_labels])
# use a set of random labels mimicking class proportions of 'true_subclass'
elif subclass_labels == 'random':
self.add_labels(
'subclass',
generate_random_labels(self.Y_dict['superclass'], self.Y_dict['true_subclass'],
seed=seed))
else:
raise ValueError(f'subclass_labels string {subclass_labels} not recognized.')
elif subclass_labels is not None:
self.add_labels('subclass', subclass_labels)
else:
raise ValueError(f'subclass_labels object {subclass_labels} not recognized.')
self._subclass_labels_added = True
def get_num_classes(self, key):
return torch.max(self.Y_dict[key]).item() + 1
def get_labels(self, key):
return self.Y_dict[key]
def get_class_counts(self, key):
class_map = (self.get_labels(key) == torch.arange(
self.get_num_classes(key)).unsqueeze(1).long())
return class_map.sum(1).float()
def get_class_map(self, key):
if key in self._class_maps:
return self._class_maps[key]
else:
assert (self._subclass_labels_added)
sup_to_sub_map = build_sup_to_sub_map(self.Y_dict['superclass'], self.Y_dict[key])
self._class_maps[key] = sup_to_sub_map
return sup_to_sub_map
def build_sup_to_sub_map(superclass_labels, subclass_labels):
class_map = {}
superclass_set = sorted(set(np.array(superclass_labels)))
for superclass in superclass_set:
class_map[superclass] = sorted(
np.unique(np.array(subclass_labels[superclass_labels == superclass])))
return class_map
def generate_random_labels(superclass_labels, subclass_labels, proportions=None, seed=0):
"""
Build random mock subclass labels for each superclass, with the given proportions.
If proportions is None, uses the proportions of the given subclass labels.
"""
prev_state = random.getstate()
random.seed(seed)
data_mod_seed = random.randint(0, 2**32)
random.seed(data_mod_seed)
superclass_labels, subclass_labels = np.array(superclass_labels), np.array(subclass_labels)
random_labels = -np.ones_like(superclass_labels)
superclass_set = sorted(set(superclass_labels))
if proportions is None:
proportions = []
sup_to_sub_map = build_sup_to_sub_map(superclass_labels, subclass_labels)
for superclass in superclass_set:
proportions.append([])
for subclass in sup_to_sub_map[superclass]:
superclass_indices = superclass_labels == superclass
# Calculate the proportion of examples of this superclass that are of this subclass
proportions[superclass].append(
np.mean(subclass_labels[superclass_indices] == subclass))
for superclass in superclass_set:
superclass_indices = superclass_labels == superclass
num_subclass_examples = np.sum(superclass_indices)
subclass_proportions = proportions[superclass]
cumulative_prop = np.cumsum(subclass_proportions)
        cumulative_prop = np.round(cumulative_prop * num_subclass_examples).astype(int)
cumulative_prop = np.concatenate(([0], cumulative_prop))
assert (cumulative_prop[-1] == num_subclass_examples)
mock_sub = -np.ones(num_subclass_examples)
for i in range(len(cumulative_prop) - 1):
percentile_lower, percentile_upper = cumulative_prop[i], cumulative_prop[i + 1]
mock_sub[percentile_lower:percentile_upper] = i
assert (np.all(mock_sub >= 0))
mock_sub = mock_sub + np.amax(random_labels) + 1 # adjust for previous superclasses
random.shuffle(mock_sub)
random_labels[superclass_indices] = mock_sub
assert (np.all(random_labels >= 0))
random.setstate(prev_state)
return torch.tensor(random_labels)
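# Minimal sketch of the label utilities above on toy tensors; the label values are
# arbitrary examples chosen only for demonstration.
if __name__ == '__main__':
    sup = torch.tensor([0, 0, 0, 1, 1, 1])
    sub = torch.tensor([0, 0, 1, 2, 2, 3])
    # maps each superclass to the sorted list of its subclasses, e.g. {0: [0, 1], 1: [2, 3]}
    print(build_sup_to_sub_map(sup, sub))
    # mock subclass labels drawn per superclass with roughly the original proportions;
    # superclass 0 receives labels in {0, 1}, superclass 1 receives labels in {2, 3}
    print(generate_random_labels(sup, sub, seed=0))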
| hidden-stratification-master | stratification/classification/datasets/base.py |
import os
import logging
import codecs
import random
from collections import defaultdict
from PIL import Image
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
from torchvision.datasets.utils import download_and_extract_archive
from .base import GEORGEDataset
class MNISTDataset(GEORGEDataset):
"""MNIST Dataset, possibly with undersampling.
NOTE: creates validation set when downloaded for the first time.
This is a deviation from the traditional MNIST dataset setup.
See <https://pytorch.org/docs/stable/_modules/torchvision/datasets/mnist.html>.
"""
resources = [('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'f68b3c2dcbeaaa9fbdd348bbdeb94873'),
('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'd53e105ee54ea40749a09fcbcd1e9432'),
('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'9fb629c4189551a2d022fa330f9573f3'),
('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
'ec29112dd5afa0611ce80d1b7f02629c')]
true_subclass_names = [
'0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', '5 - five', '6 - six',
'7 - seven', '8 - eight', '9 - nine'
]
_channels = 1
_resolution = 28
_normalization_stats = {'mean': (0.1307, ), 'std': (0.3081, )}
_pil_mode = "L"
def __init__(self, root, split, transform=None, resize=True, download=False, subsample_8=False,
ontology='five-comp', augment=False):
assert (transform is None)
transform = get_transform_MNIST(resize=resize, augment=augment)
self.subclass_proportions = {8: 0.05} if ('train' in split and subsample_8) else {}
super().__init__('MNIST', root, split, transform=transform, download=download,
ontology=ontology)
def _load_samples(self):
"""Loads the U-MNIST dataset from the data file created by self._download"""
data_file = f'{self.split}.pt'
logging.info(f'Loading {self.split} split...')
data, original_labels = torch.load(os.path.join(self.processed_folder, data_file))
logging.info('Original label counts:')
logging.info(np.bincount(original_labels))
# subsample some subset of subclasses
if self.subclass_proportions:
logging.info(f'Subsampling subclasses: {self.subclass_proportions}')
data, original_labels = self.subsample_digits(data, original_labels,
self.subclass_proportions)
logging.info('New label counts:')
logging.info(np.bincount(original_labels))
# determine superclass partition of original_labels
if self.ontology == 'five-comp':
superclass_labels = (original_labels > 4).long()
self.superclass_names = ['< 5', '≥ 5']
else:
raise ValueError(f'Ontology {self.ontology} not supported.')
X = data
Y_dict = {'superclass': superclass_labels, 'true_subclass': original_labels.clone()}
return X, Y_dict
def __getitem__(self, idx):
"""
Args:
idx (int): Index
Returns:
            tuple: (x: torch.Tensor, y_dict: dict) where x is a tensor representing an image
                and y_dict is a dictionary of possible labels.
"""
x = self.X[idx]
image = Image.fromarray(x.numpy(), mode=self._pil_mode)
if self.transform is not None:
image = self.transform(image)
x = image
y_dict = {k: v[idx] for k, v in self.Y_dict.items()}
return x, y_dict
def subsample_digits(self, data, labels, subclass_proportions, seed=0):
prev_state = random.getstate()
random.seed(seed)
data_mod_seed = random.randint(0, 2**32)
random.seed(data_mod_seed)
for label, freq in subclass_proportions.items():
logging.info(f'Subsampling {label} fine class, keeping {freq*100} percent...')
inds = [i for i, x in enumerate(labels) if x == label]
inds = set(random.sample(inds, int((1 - freq) * len(inds))))
labels = torch.tensor([lab for i, lab in enumerate(labels) if i not in inds])
data = torch.stack([datum for i, datum in enumerate(data) if i not in inds])
random.setstate(prev_state)
return data, labels
@property
def raw_folder(self):
return os.path.join(self.root, 'MNIST', 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, 'MNIST', 'processed')
def _check_exists(self):
return all(
os.path.exists(os.path.join(self.processed_folder, f'{split}.pt'))
for split in ['train', 'val', 'test'])
def _download(self):
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename,
md5=md5)
# process and save as torch files
logging.info('Processing...')
training_set = (read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte')))
test_set = (read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte')))
with open(os.path.join(self.processed_folder, 'train.pt'), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, 'test.pt'), 'wb') as f:
torch.save(test_set, f)
logging.info('Done downloading!')
self._create_val_split()
def _create_val_split(self, seed=0, val_proportion=0.2):
data, original_labels = torch.load(os.path.join(self.processed_folder, 'train.pt'))
original_labels = original_labels.numpy()
original_label_counts = np.bincount(original_labels)
assert all(i > 0 for i in original_label_counts), \
'set(labels) must consist of consecutive numbers in [0, S]'
        val_quota = np.round(original_label_counts * val_proportion).astype(int)
# reset seed here in case random fns called again (i.e. if get_loaders called twice)
prev_state = random.getstate()
random.seed(seed)
shuffled_idxs = random.sample(range(len(data)), len(data))
random.setstate(prev_state)
train_idxs = []
val_idxs = []
val_counts = defaultdict(int)
# Iterate through shuffled dataset to extract valset idxs
for i in shuffled_idxs:
label = original_labels[i]
if val_counts[label] < val_quota[label]:
val_idxs.append(i)
val_counts[label] += 1
else:
train_idxs.append(i)
train_idxs = sorted(train_idxs)
val_idxs = sorted(val_idxs)
assert len(set(val_idxs) & set(train_idxs)) == 0, \
'valset and trainset must be mutually exclusive'
logging.info(f'Creating training set with class counts:\n' +
f'{np.bincount(original_labels[train_idxs])}')
trainset = (data[train_idxs], torch.tensor(original_labels[train_idxs]))
with open(os.path.join(self.processed_folder, 'train.pt'), 'wb') as f:
torch.save(trainset, f)
logging.info(f'Creating validation set with class counts:\n' +
f'{np.bincount(original_labels[val_idxs])}')
valset = (data[val_idxs], torch.tensor(original_labels[val_idxs]))
with open(os.path.join(self.processed_folder, 'val.pt'), 'wb') as f:
torch.save(valset, f)
logging.info(f'Split complete!')
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert (x.dtype == torch.uint8)
assert (x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert (x.dtype == torch.uint8)
assert (x.ndimension() == 3)
return x
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')
}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1):4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
    if not isinstance(path, str):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def get_transform_MNIST(resize=True, augment=False):
test_transform_list = [
transforms.ToTensor(),
transforms.Normalize(**MNISTDataset._normalization_stats)
]
if resize:
test_transform_list.insert(0, transforms.Resize((32, 32)))
if not augment:
return transforms.Compose(test_transform_list)
train_transform_list = [
transforms.RandomCrop(MNISTDataset._resolution, padding=4),
transforms.RandomHorizontalFlip()
] + test_transform_list
return transforms.Compose(train_transform_list)
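# Minimal sketch of the 'five-comp' ontology used in _load_samples: digits 0-4 map
# to superclass 0 ('< 5') and digits 5-9 map to superclass 1 ('>= 5').
if __name__ == '__main__':
    digits = torch.arange(10)
    print((digits > 4).long())  # tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])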
| hidden-stratification-master | stratification/classification/datasets/mnist.py |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Bottleneck ResNet v2 with GroupNorm and Weight Standardization."""
from collections import OrderedDict # pylint: disable=g-importing-member
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets.utils import download_url
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride) # Original code has it on conv1!!
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if (stride != 1 or cin != cout):
# Projection also with pre-activation according to paper.
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
# Residual branch
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
# Unit's branch
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
super().__init__()
wf = width_factor # shortcut 'cause we'll use it a lot.
self.activation_layer_name = 'head.avg' # activation layer, works with BiT-M-R50x1 at least
# The following will be unreadable if we split lines.
# pylint: disable=line-too-long
self.root = nn.Sequential(
OrderedDict([
('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2, padding=3, bias=False)),
('pad', nn.ConstantPad2d(1, 0)),
('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
# The following is subtly not the same!
# ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
self.body = nn.Sequential(
OrderedDict([
('block1',
nn.Sequential(
OrderedDict(
[('unit01', PreActBottleneck(cin=64 * wf, cout=256 * wf, cmid=64 * wf))] +
[(f'unit{i:02d}',
PreActBottleneck(cin=256 * wf, cout=256 * wf, cmid=64 * wf))
for i in range(2, block_units[0] + 1)], ))),
('block2',
nn.Sequential(
OrderedDict([
('unit01',
PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=128 * wf, stride=2))
] + [(f'unit{i:02d}',
PreActBottleneck(cin=512 * wf, cout=512 * wf, cmid=128 * wf))
for i in range(2, block_units[1] + 1)], ))),
('block3',
nn.Sequential(
OrderedDict([
('unit01',
PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=256 * wf, stride=2))
] + [(f'unit{i:02d}',
PreActBottleneck(cin=1024 * wf, cout=1024 * wf, cmid=256 * wf))
for i in range(2, block_units[2] + 1)], ))),
('block4',
nn.Sequential(
OrderedDict([
('unit01',
PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid=512 * wf, stride=2))
] + [(f'unit{i:02d}',
PreActBottleneck(cin=2048 * wf, cout=2048 * wf, cmid=512 * wf))
for i in range(2, block_units[3] + 1)], ))),
]))
# pylint: enable=line-too-long
self.zero_head = zero_head
self.head = nn.Sequential(
OrderedDict([
('gn', nn.GroupNorm(32, 2048 * wf)),
('relu', nn.ReLU(inplace=True)),
('avg', nn.AdaptiveAvgPool2d(output_size=1)),
('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)),
]))
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1) # We should have no spatial shape left.
return x[..., 0, 0]
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) # pylint: disable=line-too-long
self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel'])) # pylint: disable=line-too-long
self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
KNOWN_MODELS = OrderedDict([
('BiT-M-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
('BiT-M-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
('BiT-M-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
('BiT-M-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
('BiT-M-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
('BiT-M-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
('BiT-S-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)),
('BiT-S-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)),
('BiT-S-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)),
('BiT-S-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)),
('BiT-S-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)),
('BiT-S-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)),
])
def BiTResNet(pretrained=True, **kwargs):
assert (pretrained)
model = KNOWN_MODELS['BiT-M-R50x1'](head_size=1, zero_head=True)
model_path = 'stratification/classification/models/BiT-M-R50x1.npz'
if not os.path.isfile(model_path):
download_url('https://storage.googleapis.com/bit_models/BiT-M-R50x1.npz',
os.path.dirname(model_path))
model.load_from(np.load(model_path))
return model
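# Minimal sketch: constructing a randomly initialized BiT-S-R50x1 backbone with a
# 10-way head via KNOWN_MODELS and running a forward pass. Unlike BiTResNet above,
# no pretrained .npz weights are loaded here.
if __name__ == '__main__':
    model = KNOWN_MODELS['BiT-S-R50x1'](head_size=10, zero_head=True)
    x = torch.randn(1, 3, 224, 224)
    print(model(x).shape)  # torch.Size([1, 10])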
| hidden-stratification-master | stratification/classification/models/bit_pytorch_models.py |
from .lenet import LeNet4
from .shallow_cnn import ShallowCNN
from .pt_resnet import PyTorchResNet
from .bit_pytorch_models import BiTResNet
| hidden-stratification-master | stratification/classification/models/__init__.py |
import torch.nn as nn
from collections import OrderedDict
class LeNet4(nn.Module):
"""
Adapted from https://github.com/activatedgeek/LeNet-5
"""
def __init__(self, **kwargs):
super().__init__()
in_channels = kwargs.get('num_channels', 1)
classes = kwargs.get('num_classes', 10)
self.convnet = nn.Sequential(
OrderedDict([('c1', nn.Conv2d(in_channels, 6, kernel_size=(5, 5))),
('relu1', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
('c3', nn.Conv2d(6, 16, kernel_size=(5, 5))), ('relu3', nn.ReLU()),
('s4', nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
('c5', nn.Conv2d(16, 120, kernel_size=(5, 5))), ('relu5', nn.ReLU())]))
self.activation_layer_name = 'convnet.relu5'
self.fc = nn.Linear(120, classes)
def forward(self, img):
x = self.convnet(img)
x = x.view(x.size(0), -1)
out = self.fc(x)
return out
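# Minimal sketch: a forward pass on a 32x32 single-channel input (the size produced
# by get_transform_MNIST with resize=True), which yields 120 features at relu5
# before the final linear layer.
if __name__ == '__main__':
    import torch
    model = LeNet4(num_channels=1, num_classes=10)
    logits = model(torch.randn(2, 1, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])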
| hidden-stratification-master | stratification/classification/models/lenet.py |
import torch.nn as nn
from collections import OrderedDict
class ShallowCNN(nn.Module):
def __init__(self, **kwargs):
super().__init__()
in_channels = kwargs.get('num_channels', 1)
classes = kwargs.get('num_classes', 10)
self.convnet = nn.Sequential(
OrderedDict([('c1', nn.Conv2d(in_channels, 6, kernel_size=(5, 5))),
('relu1', nn.ReLU()), ('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
('c3', nn.Conv2d(6, 8, kernel_size=(5, 5))), ('relu3', nn.ReLU())]))
# Store the name of the layer whose output we want to use as features
# (typically the last operation before the classification layer)
self.activation_layer_name = 'convnet.relu3'
self.fc = nn.Linear(512, classes)
def forward(self, img, save_act=None):
output = self.convnet(img)
output = output.view(img.size(0), -1)
output = self.fc(output)
return output
def shallow_cnn(**kwargs):
model = ShallowCNN(**kwargs)
return model
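# Minimal sketch, assuming downstream code reads activation_layer_name to hook
# intermediate features: with 28x28 inputs the flattened convnet output has
# 8 * 8 * 8 = 512 entries, matching the fc layer above.
if __name__ == '__main__':
    import torch
    model = ShallowCNN(num_channels=1, num_classes=10)
    feats = {}
    layer = dict(model.named_modules())[model.activation_layer_name]
    layer.register_forward_hook(lambda module, inputs, output: feats.update(out=output))
    logits = model(torch.randn(2, 1, 28, 28))
    print(logits.shape, feats['out'].shape)  # torch.Size([2, 10]) torch.Size([2, 8, 8, 8])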
| hidden-stratification-master | stratification/classification/models/shallow_cnn.py |
import logging
import torch
import torch.nn as nn
try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    from torch.hub import load_state_dict_from_url
__all__ = ['PyTorchResNet']
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation,
groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64,
dilation=1, norm_layer=None):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64,
dilation=1, norm_layer=None):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block=BasicBlock, layers=(2, 2, 2, 2), **kwargs):
super().__init__()
in_channels = kwargs.get('num_channels', 3)
classes = kwargs.get('num_classes', 1000)
self._norm_layer = nn.BatchNorm2d
self.inplanes = 64
self.dilation = 1
self.groups = 1
self.base_width = 64
self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = self._norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.activation_layer_name = 'avgpool'
self.fc = nn.Linear(512 * block.expansion, classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups, self.base_width,
previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes, planes, groups=self.groups, base_width=self.base_width,
dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _load_state_dict(model, model_url, load_classifier=True):
state_dict = load_state_dict_from_url(model_url, progress=False)
model_dict = model.state_dict()
for key in list(state_dict.keys()):
if not load_classifier:
if 'fc' in key:
state_dict[key] = model_dict[key] # keep classifier weights unchanged
model.load_state_dict(state_dict)
def PyTorchResNet(imagenet_pretrained=True, **kwargs):
depth = 50
if depth == 50:
block_config = (3, 4, 6, 3)
block = Bottleneck
else:
raise ValueError('Invalid depth specified')
model = ResNet(block=block, layers=block_config, **kwargs)
if imagenet_pretrained:
logging.info('Loading pretrained model...')
load_success = False
for load_classifier in [True, False]:
for arch in model_urls.keys():
try:
_load_state_dict(model, model_urls[arch], load_classifier=load_classifier)
load_success = True
break
except RuntimeError:
pass
if not load_success:
raise ValueError('No pretrained model found for given configuration!')
return model
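# Minimal sketch: building a randomly initialized ResNet-50 with a binary head and
# running a forward pass. imagenet_pretrained=False skips the weight download.
if __name__ == '__main__':
    model = PyTorchResNet(imagenet_pretrained=False, num_classes=2)
    x = torch.randn(1, 3, 224, 224)
    print(model(x).shape)  # torch.Size([1, 2])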
| hidden-stratification-master | stratification/classification/models/pt_resnet.py |
from copy import deepcopy
import os
from collections import defaultdict
import logging
import torch
import numpy as np
import stratification.cluster.models.reduction as reduction_models
from stratification.utils.logger import init_logger
class GEORGEReducer:
"""Executes the cluster stage of the GEORGE algorithm.
Args:
cluster_config(dict): Contains the parameters required to execute this step.
See utils.schema for type information and examples.
save_dir(str, optional): Directory at which to save logging information.
If None, logging information is not saved. Default is None.
"""
def __init__(self, cluster_config, save_dir=None, log_format='full'):
self.config = cluster_config
self.save_dir = save_dir
if self.save_dir:
self.logger = init_logger('harness.reduction', self.save_dir, log_format=log_format)
else:
self.logger = logging.getLogger()
def preprocess_activations(self, activations, means=None):
"""Preprocesses the activations based on keys in the config.
Args:
activations(np.ndarray of shape (N, D)): D-dimensional vectors for N
samples.
Returns:
activations(np.ndarray of shape (N, D)): transformed activations.
"""
if len(activations.shape) > 2:
activations = activations.reshape(activations.shape[0], -1)
if means is not None:
activations = activations - means
if self.config['normalize']:
# divide activation vectors by their norm (plus epsilon, for numerical stability)
act_norms = np.maximum(np.linalg.norm(activations, axis=-1, keepdims=True), 1e-6)
activations = activations / act_norms
return activations
def train(self, reduction_model, inputs):
"""Fits reduction and cluster models to the data.
'G' reduction and cluster models are instantiated, where G is the number
of groups (i.e. superclasses).
Currently, all resulting reduction and cluster models have the same
hyperparameters for each superclass.
Args:
inputs(Dict[str, Sequence]): a dictionary object containing the model
activations and various metadata. The complete schema is the following:
{
'metrics': Dict[str, Any],
'activations': np.ndarray of shape (N, D),
'superclass': np.ndarray of shape (N, ),
'subclass': np.ndarray of shape (N, ),
'true_subclass': np.ndarray of shape (N, ),
'targets': np.ndarray of shape (N, ),
'probs': np.ndarray of shape (N, ),
'preds': np.ndarray of shape (N, ),
'losses': np.ndarray of shape (N, ),
}
reduction_model(Any): The model used for dimensionality reduction
of the activations.
Returns:
group_to_models(List[reduction_model]): the list of reduction models
fit on each group, where the idx indicates the group.
"""
orig_reduc_model = reduction_model
inputs_tr = inputs['train']
if 'losses' not in inputs_tr or len(inputs_tr['losses']) == 0:
inputs_val = inputs['val']
inputs_test = inputs['test']
inputs_tr['losses'] = np.zeros(len(inputs_tr['activations']),
dtype=inputs_tr['activations'].dtype)
inputs_val['losses'] = np.zeros(len(inputs_val['activations']),
dtype=inputs_val['activations'].dtype)
inputs_test['losses'] = np.zeros(len(inputs_test['activations']),
dtype=inputs_test['activations'].dtype)
if self.config['mean_reduce']:
train_means = inputs_tr['activations'].reshape(inputs_tr['activations'].shape[0],
-1).mean(axis=0, keepdims=True)
else:
train_means = None
group_assignments = inputs_tr['superclass']
group_to_data = self._group(inputs_tr, group_assignments)
groups = np.unique(group_assignments)
group_to_models = []
for group in groups:
group_data = group_to_data[group]
reduction_model = deepcopy(orig_reduc_model)
# reduce
self.logger.basic_info(f'Fitting reduction model on superclass {group}...')
activations = group_data['activations']
if self.config['mean_reduce']:
activations = self.preprocess_activations(activations, train_means)
else:
activations = self.preprocess_activations(activations)
acts_dtype = activations.dtype
reduction_model = reduction_model.fit(activations)
activations = reduction_model.transform(activations)
activations = activations.astype(acts_dtype)
group_to_models.append(reduction_model)
return group_to_models, train_means
def evaluate(self, group_to_models, split_inputs, train_means=None):
"""Reduces each of the inputs.
Args:
            group_to_models(List[reduction_model]):
                the models produced by GEORGEReducer.train. There should be as many
                items in this list as groups in the inputs.
            split_inputs(Dict[str, Sequence]): inputs of the same format as those described in
                GEORGEReducer.train
            train_means(Optional[np.ndarray]): the training means used for mean-centering,
                required when `mean_reduce` is enabled.
        Returns:
            group_to_outputs(Dict[int, Dict[str, Any]]): the outputs, partitioned by group.
                Each group's `activations` entry holds its dimensionality-reduced
                activations; the remaining metadata keys are passed through unchanged.
"""
if self.config['mean_reduce']: assert (train_means is not None)
group_assignments = split_inputs['superclass']
group_to_data = self._group(split_inputs, group_assignments)
groups = np.unique(group_assignments)
        assert len(group_to_models) >= len(groups), \
            'There must be a model for each group in the input data.'
group_to_outputs = {}
for group in groups:
self.logger.info(f'Reducing group {group}...')
group_data = group_to_data[group]
group_outputs = group_data.copy()
del group_outputs['superclass'] # unneeded, as all are superclass "group"
reduction_model = group_to_models[group]
# reduce
activations = group_data['activations']
if self.config['mean_reduce']:
activations = self.preprocess_activations(activations, train_means)
else:
activations = self.preprocess_activations(activations)
acts_dtype = activations.dtype
activations = reduction_model.transform(activations)
activations = activations.astype(acts_dtype)
group_outputs['activations'] = activations
group_to_outputs[group] = group_outputs
return group_to_outputs
def _group(self, data, group_assignments):
"""Partitions the data by group.
Note:
this function assumes that the data is a dictionary of sequences.
By design, any key-value pair that doesn't describe a sequence is
ignored in the final partition.
Args:
data(Dict[str, Sequence]): A dictionary of sequences with the same
length of `group_assignments`.
group_assignments(Sequence[int]): A list of assignments of data,
where `group_assignments[idx]` is the group of data[idx].
Returns:
group_to_data(Dict[int, Dict[str, Sequence]]): the data, partitioned by group.
Note that the grouped data is still in the same order as it
was before partitioning.
"""
groups = np.unique(group_assignments)
group_to_data = defaultdict(dict)
for group in groups:
for k, v in data.items():
if isinstance(v, np.ndarray):
assert len(group_assignments) == len(v), \
f'group_assignments and "{k}" must be the same length'
group_to_data[group][k] = v[group_assignments == group]
return group_to_data
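# Minimal sketch of _group on toy arrays; the config keys passed to GEORGEReducer
# here are a minimal assumption (see utils.schema for the full schema).
if __name__ == '__main__':
    reducer = GEORGEReducer({'normalize': False, 'mean_reduce': False})
    data = {
        'activations': np.arange(8).reshape(4, 2),
        'superclass': np.array([0, 0, 1, 1]),
        'metrics': {'ignored': True},  # non-array entries are dropped from the partition
    }
    grouped = reducer._group(data, data['superclass'])
    print(grouped[0]['activations'])  # rows belonging to superclass 0
    print(grouped[1]['activations'])  # rows belonging to superclass 1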
| hidden-stratification-master | stratification/cluster/george_reduce.py |
 | hidden-stratification-master | stratification/cluster/__init__.py |
from collections import Counter
import numpy as np
from stratification.cluster.fast_sil import silhouette_samples
def get_k_from_model(model):
if hasattr(model, 'n_clusters'):
return model.n_clusters
elif hasattr(model, 'n_components'):
return model.n_components
else:
raise NotImplementedError(f'model {type(model)} K not found.' +
f'model attributes:\n{list(model.__dict__.keys())}')
def get_cluster_mean_loss(sample_losses, assignments):
cluster_losses = {}
C = np.unique(assignments)
for c in C:
cluster_loss = np.mean(sample_losses[assignments == c])
cluster_losses[str(c)] = float(cluster_loss)
return cluster_losses
def get_cluster_composition(superclasses, assignments):
compositions = {}
S = np.unique(superclasses)
C = np.unique(assignments)
for c in C:
superclasses_c = superclasses[assignments == c]
counts = dict(Counter(superclasses_c))
compositions[str(c)] = {str(s): counts.get(s, 0) for s in S}
return compositions
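# --- Illustrative usage sketch (not part of the original repo) ---
# Toy example of the per-cluster metrics computed above, assuming the
# stratification package imports at the top of this file resolve.
if __name__ == "__main__":
    toy_losses = np.array([0.2, 0.9, 0.1, 0.7])
    toy_assignments = np.array([0, 1, 0, 1])
    toy_superclasses = np.array([0, 0, 1, 1])
    print(get_cluster_mean_loss(toy_losses, toy_assignments))
    # -> {'0': 0.15..., '1': 0.8...}
    print(get_cluster_composition(toy_superclasses, toy_assignments))
    # -> {'0': {'0': 1, '1': 1}, '1': {'0': 1, '1': 1}}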
| hidden-stratification-master | stratification/cluster/utils.py |
from copy import deepcopy
import os
from collections import defaultdict
import json
import logging
import torch
import numpy as np
from stratification.cluster.models.cluster import DummyClusterer
from stratification.cluster.utils import get_cluster_mean_loss, get_cluster_composition, get_k_from_model
from stratification.utils.logger import init_logger
class GEORGECluster:
"""Executes the cluster stage of the GEORGE algorithm.
Args:
cluster_config(dict): Contains the parameters required to execute this step.
See utils.schema for type information and examples.
save_dir(str, optional): Directory at which to save logging information.
If None, logging information is not saved. Default is None.
"""
def __init__(self, cluster_config, save_dir=None, log_format='full'):
self.config = cluster_config
self.save_dir = save_dir
if self.save_dir:
self.logger = init_logger('harness.cluster', self.save_dir, log_format=log_format)
else:
self.logger = logging.getLogger()
def compute_metrics(self, inputs, assignments):
"""Computes metrics using the sample data provided in inputs.
Args:
inputs(Dict[str, Sequence]) inputs of the same format as
those described in GEORGECluster.train
assignments(Sequence) the cluster assignments for each input
Returns:
metrics(Dict[str, Union[Dict[str, Any], float]]): the metrics computed.
Can be per-cluster metrics or aggregate metrics.
"""
metrics = {}
for metric_type in self.config['metric_types']:
if metric_type == 'mean_loss':
metric = get_cluster_mean_loss(inputs['losses'], assignments)
elif metric_type == 'composition':
metric = get_cluster_composition(inputs['true_subclass'], assignments)
else:
raise KeyError(f'Unrecognized metric_type {metric_type}')
metrics[metric_type] = metric
return metrics
def train(self, cluster_model, inputs):
"""Fits cluster models to the data of each superclass.
Args:
cluster_model(Any): The model used to produce cluster assignments. Must
implement `fit` and `predict`. Further, the number of clusters the
cluster_model will attempt to fit must be accessible, through either
(1) `n_clusters` or (2) `n_components`. This is due to the
limitations of the sklearn implementations of KMeans and GMMs.
inputs(Dict[str, Sequence]): a dictionary object containing the model
activations and various metadata. The complete schema is the following:
{
'metrics': Dict[str, Any],
'activations': np.ndarray of shape (N, D),
'superclass': np.ndarray of shape (N, ),
'subclass': np.ndarray of shape (N, ),
'true_subclass': np.ndarray of shape (N, ),
'targets': np.ndarray of shape (N, ),
'probs': np.ndarray of shape (N, ),
'preds': np.ndarray of shape (N, ),
'losses': np.ndarray of shape (N, ),
}
Future work is to further modularize the cluster code to mitigate
dependencies on this object. For best results, train classifiers
using GEORGEHarness.classify.
Returns:
            group_to_models(List[type(cluster_model)]): the list of cluster models fit on
                each superclass group, where the index in the list indicates the group.
"""
orig_cluster_model = cluster_model
extra_info = hasattr(cluster_model, 'requires_extra_info')
inputs_tr = inputs['train']
inputs_val = inputs['val']
group_to_models = []
for group, group_data in inputs_tr[0].items():
if group in self.config['superclasses_to_ignore']:
# Keep this superclass in a single "cluster"
self.logger.basic_info(f'Not clustering superclass {group}...')
group_to_models.append(DummyClusterer())
continue
cluster_model = deepcopy(orig_cluster_model)
activations = group_data['activations']
if extra_info:
val_group_data = inputs_val[0][group]
losses = group_data['losses']
val_activations = val_group_data['activations']
kwargs = {'val_activ': val_activations, 'losses': losses}
else:
kwargs = {}
# cluster
self.logger.basic_info(f'Clustering superclass {group}...')
cluster_model = cluster_model.fit(activations, **kwargs)
group_to_models.append(cluster_model)
return group_to_models
def evaluate(self, group_to_models, split_inputs):
"""Returns cluster assignments for each of the inputs.
Args:
            group_to_models(List[cluster_model]):
                the models produced by GEORGECluster.train. There should be as many
                items in this list as groups in the inputs.
            split_inputs(Tuple[Dict[int, Dict[str, Sequence]], Sequence[int]]): a tuple of the
                group-partitioned data (as produced by GEORGEReducer._group) and the
                per-example group assignments.
        Returns:
            group_to_metrics(Dict[str, Any]): metrics, partitioned by group.
            outputs(Dict[str, Any]): the outputs of the model. At time of writing,
                the outputs consist of both the reduced activations and the cluster
                assignments (`activations` and `assignments` keys, respectively).
"""
group_to_data, group_assignments = split_inputs
group_to_metrics = {}
group_to_outputs = {}
cluster_floor = 0
for group, group_data in group_to_data.items():
self.logger.info(f'Evaluating group {group}...')
group_outputs = group_data.copy()
cluster_model = group_to_models[group]
assignments = np.array(cluster_model.predict(group_data['activations']))
group_outputs['assignments'] = cluster_floor + assignments
group_to_outputs[group] = group_outputs
group_to_metrics[group] = self.compute_metrics(group_data, assignments)
# update cluster_floor to ensure disjoint assignments
k = get_k_from_model(cluster_model) # accounts for degenerate cases
cluster_floor = cluster_floor + k
outputs = self._ungroup(group_to_outputs, group_assignments)
return group_to_metrics, outputs
def _ungroup(self, group_to_data, group_assignments):
"""Ungroups data that is partitioned by group.
Args:
            group_to_data(Dict[int, Dict[str, Sequence]]): the group-partitioned
                data, i.e. the object returned by GEORGEReducer._group
group_assignments(Sequence[int]): A list of assignments of data,
where `group_assignments[idx]` is the group of data[idx].
Returns:
data(Dict[str, Sequence]): A dictionary of sequences with the same
length of `group_assignments`.
"""
# keep track of where we are in each group of group_to_data
group_to_ptr = {group: 0 for group in group_to_data.keys()}
data = defaultdict(list)
for group in group_assignments:
group_data = group_to_data[group]
for k, v in group_data.items():
data[k].append(v[group_to_ptr[group]])
group_to_ptr[group] += 1
# format
for k, v in data.items():
data[k] = np.array(v)
return data
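# --- Illustrative usage sketch (not part of the original repo) ---
# How the `cluster_floor` offset in `evaluate` keeps cluster ids disjoint across
# superclasses: each group's ids are shifted by the number of clusters already
# assigned to earlier groups.
if __name__ == "__main__":
    toy_group_assignments = {0: np.array([0, 1, 0]), 1: np.array([0, 0, 1, 2])}
    toy_group_k = {0: 2, 1: 3}
    floor, combined = 0, {}
    for grp, assigns in toy_group_assignments.items():
        combined[grp] = floor + assigns
        floor += toy_group_k[grp]
    print(combined)  # {0: array([0, 1, 0]), 1: array([2, 2, 3, 4])}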
| hidden-stratification-master | stratification/cluster/george_cluster.py |
'''The functions in this file are adapted from scikit-learn
(https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py)
to use CUDA for Silhouette score computation.'''
import functools
import numpy as np
from sklearn.utils import gen_batches, get_chunk_n_rows
from sklearn.metrics.cluster._unsupervised import *
from sklearn.metrics import silhouette_samples as s_sil
import torch
def silhouette_samples(X, labels, verbose=False, cuda=False):
if not cuda:
return s_sil(X, labels)
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
check_number_of_labels(len(le.classes_), n_samples)
reduce_func = functools.partial(_silhouette_reduce, labels=labels, label_freqs=label_freqs)
results = zip(*pairwise_distances_chunked_cuda(X, reduce_func=reduce_func, verbose=verbose))
intra_clust_dists, inter_clust_dists = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
denom = (label_freqs - 1).take(labels, mode='clip')
with np.errstate(divide="ignore", invalid="ignore"):
intra_clust_dists /= denom
sil_samples = inter_clust_dists - intra_clust_dists
with np.errstate(divide="ignore", invalid="ignore"):
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X
Parameters
----------
D_chunk : shape (n_chunk_samples, n_samples)
precomputed distances for a chunk
start : int
first index in chunk
labels : array, shape (n_samples,)
corresponding cluster labels, encoded as {0, ..., n_clusters-1}
label_freqs : array
distribution of cluster labels in ``labels``
"""
# accumulate distances from each sample to each cluster
clust_dists = np.zeros((len(D_chunk), len(label_freqs)), dtype=D_chunk.dtype)
for i in range(len(D_chunk)):
clust_dists[i] += np.bincount(labels, weights=D_chunk[i], minlength=len(label_freqs))
# intra_index selects intra-cluster distances within clust_dists
intra_index = (np.arange(len(D_chunk)), labels[start:start + len(D_chunk)])
# intra_clust_dists are averaged over cluster size outside this function
intra_clust_dists = clust_dists[intra_index]
# of the remaining distances we normalise and extract the minimum
clust_dists[intra_index] = np.inf
clust_dists /= label_freqs
inter_clust_dists = clust_dists.min(axis=1)
return intra_clust_dists, inter_clust_dists
def _check_chunk_size(reduced, chunk_size):
"""Checks chunk is a sequence of expected size or a tuple of same
"""
if reduced is None:
return
is_tuple = isinstance(reduced, tuple)
if not is_tuple:
reduced = (reduced, )
if any(isinstance(r, tuple) or not hasattr(r, '__iter__') for r in reduced):
raise TypeError('reduce_func returned %r. '
'Expected sequence(s) of length %d.' %
(reduced if is_tuple else reduced[0], chunk_size))
if any(len(r) != chunk_size for r in reduced):
actual_size = tuple(len(r) for r in reduced)
raise ValueError('reduce_func returned object of length %s. '
'Expected same length as input: %d.' %
(actual_size if is_tuple else actual_size[0], chunk_size))
def pairwise_distances_chunked_cuda(X, reduce_func=None, verbose=False):
"""Generate a distance matrix chunk by chunk with optional reduction
In cases where not all of a pairwise distance matrix needs to be stored at
once, this is used to calculate pairwise distances in
``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
on each chunk and its return values are concatenated into lists, arrays
or sparse matrices.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or,
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
reduce_func : callable, optional
The function which is applied on each chunk of the distance matrix,
reducing it to needed values. ``reduce_func(D_chunk, start)``
is called repeatedly, where ``D_chunk`` is a contiguous vertical
slice of the pairwise distance matrix, starting at row ``start``.
It should return one of: None; an array, a list, or a sparse matrix
of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning
None is useful for in-place operations, rather than reductions.
If None, pairwise_distances_chunked returns a generator of vertical
chunks of the distance matrix.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
working_memory : int, optional
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Yields
------
D_chunk : array or sparse matrix
A contiguous slice of distance matrix, optionally processed by
``reduce_func``.
Examples
--------
Without reduce_func:
>>> import numpy as np
>>> from sklearn.metrics import pairwise_distances_chunked
>>> X = np.random.RandomState(0).rand(5, 3)
>>> D_chunk = next(pairwise_distances_chunked(X))
>>> D_chunk
array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],
[0.29..., 0. ..., 0.57..., 0.41..., 0.76...],
[0.41..., 0.57..., 0. ..., 0.44..., 0.90...],
[0.19..., 0.41..., 0.44..., 0. ..., 0.51...],
[0.57..., 0.76..., 0.90..., 0.51..., 0. ...]])
Retrieve all neighbors and average distance within radius r:
>>> r = .2
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r) for d in D_chunk]
... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
... return neigh, avg_dist
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
>>> neigh, avg_dist = next(gen)
>>> neigh
[array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
>>> avg_dist
array([0.039..., 0. , 0. , 0.039..., 0. ])
Where r is defined per sample, we need to make use of ``start``:
>>> r = [.2, .4, .4, .3, .1]
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r[i])
... for i, d in enumerate(D_chunk, start)]
... return neigh
>>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
>>> neigh
[array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
Force row-by-row generation by reducing ``working_memory``:
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
... working_memory=0)
>>> next(gen)
[array([0, 3])]
>>> next(gen)
[array([0, 1])]
"""
X = X.astype(np.float32)
n_samples_X = len(X)
Y = X
# We get as many rows as possible within our working_memory budget to
# store len(Y) distances in each row of output.
#
# Note:
# - this will get at least 1 row, even if 1 row of distances will
# exceed working_memory.
# - this does not account for any temporary memory usage while
# calculating distances (e.g. difference of vectors in manhattan
    #   distance).
chunk_n_rows = get_chunk_n_rows(row_bytes=8 * len(Y), max_n_rows=n_samples_X,
working_memory=None)
slices = gen_batches(n_samples_X, chunk_n_rows)
X_full = torch.tensor(X).cuda()
Xnorms = torch.norm(X_full, dim=1, keepdim=True)**2
for sl in slices:
if verbose: print(sl)
if sl.start == 0 and sl.stop == n_samples_X:
X_chunk = X # enable optimised paths for X is Y
else:
X_chunk = X[sl]
pX = torch.tensor(X_chunk).cuda()
d2 = Xnorms[sl] - 2 * torch.matmul(pX, X_full.t()) + Xnorms.t()
d2 = torch.sqrt(torch.nn.functional.relu(d2)).cpu().numpy()
d2.flat[sl.start::len(X) + 1] = 0
D_chunk = d2
if reduce_func is not None:
chunk_size = D_chunk.shape[0]
D_chunk = reduce_func(D_chunk, sl.start)
_check_chunk_size(D_chunk, chunk_size)
yield D_chunk
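# --- Illustrative usage sketch (not part of the original repo) ---
# With cuda=False the wrapper simply defers to sklearn's silhouette_samples;
# cuda=True exercises the chunked GPU path above and needs a CUDA build of torch.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    toy_X = rng.rand(20, 3).astype(np.float32)
    toy_labels = rng.randint(0, 2, size=20)
    print(silhouette_samples(toy_X, toy_labels, cuda=False).shape)  # (20,)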
| hidden-stratification-master | stratification/cluster/fast_sil.py |
from numba.core.errors import NumbaWarning
import numpy as np
from sklearn.decomposition import PCA
from umap import UMAP
import warnings
__all__ = ['HardnessAugmentedReducer', 'NoOpReducer', 'PCAReducer', 'UMAPReducer']
class Reducer:
def __init__(self, **kwargs):
raise NotImplementedError()
def fit(self, X):
raise NotImplementedError()
def transform(self, X):
raise NotImplementedError()
def decrement_components(self):
raise NotImplementedError()
class NoOpReducer(Reducer):
"""
A no-op reduction method. Used when making changes using raw features.
"""
def __init__(self, n_components=1, **kwargs):
self.n_components = n_components
def fit(self, X):
return self
def transform(self, X):
return X
def decrement_components(self):
self.n_components -= 1
class PCAReducer(Reducer):
"""
Simple wrapper for PCA.
"""
def __init__(self, n_components=2, **kwargs):
self.n_components = n_components
self.model = PCA(n_components=n_components)
def fit(self, X):
self.model.fit(X)
return self
def transform(self, X):
return self.model.transform(X)
def decrement_components(self):
self.n_components -= 1
self.model.n_components -= 1
class UMAPReducer(Reducer):
"""
Simple wrapper for UMAP, used for API consistency.
"""
def __init__(self, n_components=2, **kwargs):
self.n_components = n_components
kwargs = {**{'n_neighbors': 10, 'min_dist': 0.}, **kwargs}
self.model = UMAP(n_components=n_components, **kwargs)
def fit(self, X):
with warnings.catch_warnings():
warnings.simplefilter('ignore', NumbaWarning)
self.model.fit(X)
return self
def transform(self, X):
with warnings.catch_warnings():
warnings.simplefilter('ignore', NumbaWarning)
result = self.model.transform(X)
return result
def decrement_components(self):
self.n_components -= 1
self.model.n_components -= 1
class HardnessAugmentedReducer(Reducer):
"""
A reducer that extracts the "hardness" component (i.e. component
orthogonal to the decision boundary, for a binary classification model).
Optionally takes in another reducer, whose components are appended to
this hardness component (possibly with different weights).
"""
def __init__(self, nn_model, base_reducer=None, hc_weight=1):
if base_reducer is not None:
base_reducer.decrement_components()
if base_reducer.n_components == 0:
base_reducer = None
self.base_reducer = base_reducer
self.fc = nn_model.module.fc if hasattr(nn_model, 'module') else nn_model.fc
self.decision_bdy = (self.fc.weight[1] - self.fc.weight[0]).cpu().data.numpy()
self.decision_bdy /= np.linalg.norm(self.decision_bdy)
self.hc_weight = hc_weight
def fit(self, X):
hardness_scores = np.dot(X, self.decision_bdy)
X = X - np.outer(hardness_scores, self.decision_bdy)
if self.base_reducer: self.base_reducer.fit(X)
return self
def transform(self, X):
hardness_scores = np.dot(X, self.decision_bdy)
X1 = hardness_scores
X1 = self.hc_weight * X1.reshape(len(X1), 1)
if self.base_reducer:
X2 = X - np.outer(hardness_scores, self.decision_bdy)
X2 = self.base_reducer.transform(X2)
return np.hstack((X1, X2))
return X1
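# --- Illustrative usage sketch (not part of the original repo) ---
# All reducers above share the same fit/transform interface; a minimal PCA
# example (importing this module also requires umap-learn to be installed):
if __name__ == "__main__":
    toy_X = np.random.RandomState(0).rand(50, 8).astype(np.float32)
    toy_reducer = PCAReducer(n_components=2).fit(toy_X)
    print(toy_reducer.transform(toy_X).shape)  # (50, 2)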
| hidden-stratification-master | stratification/cluster/models/reduction.py |
try:
from libKMCUDA import kmeans_cuda
_LIBKMCUDA_FOUND = True
except ModuleNotFoundError:
_LIBKMCUDA_FOUND = False
from functools import partial
import logging
import numpy as np
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from stratification.cluster.utils import silhouette_samples
__all__ = [
'KMeans', 'GaussianMixture', 'FastKMeans', 'AutoKMixtureModel', 'OverclusterModel',
'DummyClusterer'
]
def get_cluster_sils(data, pred_labels, compute_sil=True, cuda=False):
unique_preds = sorted(np.unique(pred_labels))
SIL_samples = silhouette_samples(data, pred_labels, cuda=cuda) if compute_sil else np.zeros(
len(data))
SILs_by_cluster = {
int(label): float(np.mean(SIL_samples[pred_labels == label]))
for label in unique_preds
}
SIL_global = float(np.mean(SIL_samples))
return SILs_by_cluster, SIL_global
def compute_group_sizes(labels):
result = dict(sorted(zip(*np.unique(labels, return_counts=True))))
return {int(k): int(v) for k, v in result.items()}
class DummyClusterer:
def __init__(self, **kwargs):
self.n_components = 1
def fit(self, X):
return self
def predict(self, X):
return np.zeros(len(X), dtype=np.int32)
class FastKMeans:
def __init__(self, n_clusters, random_state=0, init='k-means++', n_init=10, verbose=False):
self.k = n_clusters
self.init = init
if n_init > 1: logging.warning('n_init unsupported for GPU K-Means')
self.seed = random_state
self.verbose = verbose
self.kmeans_obj = KMeans(n_clusters=n_clusters)
def fit(self, X):
logging.info('Using GPU-accelerated K-Means...')
self.cluster_centers_ = kmeans_cuda(X.astype(np.float32), clusters=self.k, seed=self.seed,
init=self.init)[0].astype(np.float32)
self.kmeans_obj.cluster_centers_ = self.cluster_centers_
if hasattr(self.kmeans_obj, '_check_params'):
self.kmeans_obj._check_params(np.zeros_like(X)) # properly initialize
return self.kmeans_obj
def fit_predict(self, X):
self.fit(X)
return self.predict(X)
def predict(self, X):
return self.kmeans_obj.predict(X.astype(np.float32))
def transform(self, X):
return self.kmeans_obj.transform(X.astype(np.float32))
class AutoKMixtureModel:
def __init__(self, cluster_method, max_k, n_init=3, seed=None, sil_cuda=False, verbose=0,
search=True):
if cluster_method == 'kmeans':
cluster_cls = FastKMeans if (sil_cuda and _LIBKMCUDA_FOUND) else KMeans
k_name = 'n_clusters'
elif cluster_method == 'gmm':
cluster_cls = GaussianMixture
k_name = 'n_components'
else:
raise ValueError('Unsupported clustering method')
self.cluster_cls = cluster_cls
self.k_name = k_name
self.search = search
self.max_k = max_k
self.n_init = n_init
self.seed = seed
self.sil_cuda = sil_cuda
self.verbose = verbose
def gen_inner_cluster_obj(self, k):
# Return a clustering object according to the specified parameters
return self.cluster_cls(**{self.k_name: k}, n_init=self.n_init, random_state=self.seed,
verbose=self.verbose)
def fit(self, activ):
logger = logging.getLogger('harness.cluster')
best_score = -2
k_min = 2 if self.search else self.max_k
search = self.search and k_min != self.max_k
for k in range(k_min, self.max_k + 1):
logger.info(f'Clustering into {k} groups...')
cluster_obj = self.gen_inner_cluster_obj(k)
pred_labels = cluster_obj.fit_predict(activ)
logger.info('Clustering done, computing score...')
cluster_sizes = compute_group_sizes(pred_labels)
if search:
local_sils, global_sil = get_cluster_sils(activ, pred_labels, compute_sil=True,
cuda=self.sil_cuda)
clustering_score = np.mean(list(local_sils.values()))
logger.info(f'k = {k} score: {clustering_score}')
if clustering_score >= best_score:
logger.info(f'Best model found at k = {k} with score {clustering_score:.3f}')
best_score = clustering_score
best_model = cluster_obj
best_k = k
else:
best_score, best_model, best_k = 0, cluster_obj, self.max_k
self.best_k = best_k
self.n_clusters = best_k
self.best_score = best_score
self.cluster_obj = best_model
return self
def predict(self, activ):
return self.cluster_obj.predict(activ)
def fit_predict(self, activ):
self.fit(activ)
return self.predict(activ)
    def predict_proba(self, X):
        return self.cluster_obj.predict_proba(X)
    def score(self, X):
        return self.cluster_obj.score(X)
class OverclusterModel:
def __init__(self, cluster_method, max_k, oc_fac, n_init=3, search=True, sil_threshold=0.,
seed=None, sil_cuda=False, verbose=0, sz_threshold_pct=0.005, sz_threshold_abs=25):
self.base_model = AutoKMixtureModel(cluster_method, max_k, n_init, seed, sil_cuda, verbose,
search)
self.oc_fac = oc_fac
self.sil_threshold = sil_threshold
self.sz_threshold_pct = sz_threshold_pct
self.sz_threshold_abs = sz_threshold_abs
self.requires_extra_info = True
def get_oc_predictions(self, activ, val_activ, orig_preds, val_orig_preds):
# Split each cluster from base_model into sub-clusters, and save each of the
# associated sub-clustering predictors in self.cluster_objs.
# Collate and return the new predictions in oc_preds and val_oc_preds.
self.cluster_objs = []
        oc_preds = np.zeros(len(activ), dtype=int)
        val_oc_preds = np.zeros(len(val_activ), dtype=int)
for i in self.pred_vals:
sub_activ = activ[orig_preds == i]
cluster_obj = self.base_model.gen_inner_cluster_obj(self.oc_fac).fit(sub_activ)
self.cluster_objs.append(cluster_obj)
sub_preds = cluster_obj.predict(sub_activ) + self.oc_fac * i
oc_preds[orig_preds == i] = sub_preds
val_sub_activ = val_activ[val_orig_preds == i]
val_sub_preds = cluster_obj.predict(val_sub_activ) + self.oc_fac * i
val_oc_preds[val_orig_preds == i] = val_sub_preds
return oc_preds, val_oc_preds
def filter_overclusters(self, activ, losses, orig_preds, oc_preds, val_oc_preds):
        # Keep an overcluster if its points have higher SIL than before
# overclustering, AND it has higher average loss than the
# original cluster, AND it contains sufficiently many training and
# validation points.
num_oc = np.amax(oc_preds) + 1
# Compute original per-cluster SIL scores and losses,
# and the SIL scores and losses after overclustering.
orig_sample_sils = silhouette_samples(activ, orig_preds, cuda=self.sil_cuda)
orig_losses = [np.mean(losses[orig_preds == i]) for i in self.pred_vals]
new_sample_sils = silhouette_samples(activ, oc_preds, cuda=self.sil_cuda)
oc_orig_sils = [np.mean(orig_sample_sils[oc_preds == i]) for i in range(num_oc)]
oc_new_sils = [np.mean(new_sample_sils[oc_preds == i]) for i in range(num_oc)]
new_losses = [np.mean(losses[oc_preds == i]) for i in range(num_oc)]
# Count number of points in each cluster after overclustering. Drop tiny clusters as these
# will lead to unreliable optimization.
oc_counts = np.bincount(oc_preds)
# If val clusters are too small, we will get unreliable estimates - so need to threshold these too
val_oc_counts = np.bincount(val_oc_preds)
tr_sz_threshold = max(len(activ) * self.sz_threshold_pct, self.sz_threshold_abs)
val_sz_threshold = self.sz_threshold_abs
# Decide which overclusters to keep
oc_to_keep = []
for i in range(num_oc):
if oc_new_sils[i] > max(oc_orig_sils[i], self.sil_threshold) and \
new_losses[i] >= orig_losses[i // self.oc_fac] and \
oc_counts[i] >= tr_sz_threshold and val_oc_counts[i] >= val_sz_threshold:
oc_to_keep.append(i)
return oc_to_keep
def create_label_map(self, num_orig_preds, oc_to_keep, oc_preds):
# Map raw overclustering outputs to final "cluster labels," accounting for the
# fact that some overclusters are re-merged.
label_map = {}
cur_cluster_ind = -1
oc_to_base_id = {}
for i in range(num_orig_preds):
# For each original cluster, if there were no
# overclusters kept within it, keep the original cluster as-is.
# Otherwise, it needs to be split.
keep_all = True # If we keep all overclusters, we can discard the original cluster
for j in range(self.oc_fac):
index = i * self.oc_fac + j
if index not in oc_to_keep:
keep_all = False
if not keep_all:
cur_cluster_ind += 1
# Updated cluster index corresponding to original cluster
# (points in the original cluster assigned to a non-kept overcluster
# are merged into this cluster)
base_index = cur_cluster_ind
for j in range(self.oc_fac):
index = i * self.oc_fac + j
if index in oc_to_keep:
cur_cluster_ind += 1
oc_index = cur_cluster_ind
else:
assert (not keep_all)
oc_index = base_index
label_map[index] = oc_index
return label_map
def fit(self, activ, val_activ=None, losses=None):
if val_activ is None or losses is None:
raise ValueError('Must provide losses and val set activations')
logger = logging.getLogger('harness.cluster')
logger.info('Fitting base model...')
orig_preds = self.base_model.fit_predict(activ)
self.pred_vals = sorted(np.unique(orig_preds))
num_orig_preds = len(self.pred_vals)
losses = np.array(losses)
oc_fac = self.oc_fac
num_oc = num_orig_preds * oc_fac
val_orig_preds = self.base_model.predict(val_activ)
logger.info('Fitting overclustering model...')
oc_preds, val_oc_preds = self.get_oc_predictions(activ, val_activ, orig_preds,
val_orig_preds)
oc_to_keep = self.filter_overclusters(activ, losses, orig_preds, oc_preds, val_oc_preds)
self.label_map = self.create_label_map(num_orig_preds, oc_to_keep, oc_preds)
        new_preds = np.zeros(len(activ), dtype=int)
for i in range(num_oc):
new_preds[oc_preds == i] = self.label_map[i]
self.n_clusters = max(self.label_map.values()) + 1 # Final number of output predictions
logger.info(f'Final number of clusters: {self.n_clusters}')
return self
def predict(self, activ):
# Get clusters from base model
base_preds = self.base_model.predict(activ)
# Get overclusters
        oc_preds = np.zeros(len(activ), dtype=int)
for i in self.pred_vals:
subfeats = activ[base_preds == i]
subpreds = self.cluster_objs[i].predict(subfeats) + self.oc_fac * i
oc_preds[base_preds == i] = subpreds
# Merge overclusters appropriately and return final predictions
        new_preds = np.zeros(len(activ), dtype=int)
for i in range(len(self.pred_vals) * self.oc_fac):
new_preds[oc_preds == i] = self.label_map[i]
return new_preds
@property
def sil_cuda(self):
return self.base_model.sil_cuda
@property
def n_init(self):
return self.base_model.n_init
@property
def seed(self):
return self.base_model.seed
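# --- Illustrative usage sketch (not part of the original repo) ---
# Fitting AutoKMixtureModel on toy blobs; with search=True it scans k in
# [2, max_k] and keeps the k with the best mean per-cluster silhouette score.
if __name__ == "__main__":
    from sklearn.datasets import make_blobs
    toy_X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
    toy_model = AutoKMixtureModel('kmeans', max_k=4, n_init=1, seed=0).fit(toy_X)
    print(toy_model.best_k, compute_group_sizes(toy_model.predict(toy_X)))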
| hidden-stratification-master | stratification/cluster/models/cluster.py |
import os
import sys
import logging
from collections import defaultdict
from datetime import datetime
import pandas as pd
from .utils import flatten_dict
class EpochCSVLogger:
'''Save training process without relying on fixed column names'''
def __init__(self, fpath, title=None, resume=False):
self.fpath = fpath
self.metrics_dict = {}
if fpath is not None:
if resume:
self.metrics_dict = pd.read_csv(fpath, sep='\t').to_dict()
self.metrics_dict = defaultdict(list, self.metrics_dict)
def append(self, metrics):
self.metrics_dict['timestamp'].append(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
metrics = flatten_dict(metrics)
for k, v in metrics.items():
self.metrics_dict[k].append(f'{v:.6f}')
pd.DataFrame(self.metrics_dict).to_csv(self.fpath, sep='\t', index=False)
def close(self):
pass
class SimpleLogger:
def __init__(self):
self.type = 'simple'
def basic_info(self, message):
print(message)
def info(self, message):
pass
def warning(self, message):
print('WARNING:', message)
class FullLogger:
'''Wrapper class for Python logger'''
def __init__(self, logger):
self.type = 'full'
self.logger = logger
def basic_info(self, message):
self.logger.info(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def init_epoch_logger(save_dir):
epoch_log_path = os.path.join(save_dir, 'epochs.tsv')
epoch_logger = EpochCSVLogger(epoch_log_path)
logging.info(f'Logging epoch output to {epoch_log_path}.')
return epoch_logger
def init_logger(name, save_dir, log_format='full'):
if log_format == 'full':
log_path = os.path.join(save_dir, 'experiment.log')
file_handler = logging.FileHandler(filename=log_path, mode='a')
file_handler.setFormatter(logging.Formatter(fmt='%(asctime)s %(message)s'))
base_logger = logging.getLogger(name)
base_logger.addHandler(file_handler)
base_logger.setLevel(logging.INFO)
logger = FullLogger(base_logger)
logging.info('') # seems to be required to initialize logging properly
logger.info(f'Logging all output to {log_path}')
return logger
else:
return SimpleLogger()
| hidden-stratification-master | stratification/utils/logger.py |
import os
import random
from collections import defaultdict
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.metrics as skl
import pickle
import torch
def visualize_clusters_by_group(activations, cluster_assignments, group_assignments,
true_subclass_labels=None, group_to_k=None, save_dir=None):
"""
group_to_k (optional) allows standardization across splits, otherwise it will just use len(df['cluster'].unique())
"""
data = {
'x1': activations[:, 0],
'x2': activations[:, 1] if activations.shape[1] >= 2 else activations[:, 0],
'cluster': cluster_assignments,
'group': group_assignments
}
if true_subclass_labels is not None:
data['true_subclass'] = true_subclass_labels
df = pd.DataFrame(data)
groups = np.unique(group_assignments)
for group in groups:
group_df = df.loc[df['group'] == group]
for plot_type in ['cluster', 'true_subclass']:
if plot_type not in data:
continue
cluster_types = sorted(group_df[plot_type].unique())
if plot_type == 'true_subclass':
n_colors = len(cluster_types)
elif plot_type == 'cluster':
n_colors = group_to_k[group] if group_to_k != None else len(cluster_types)
g = sns.scatterplot(data=group_df, x='x1', y='x2', hue=plot_type,
hue_order=cluster_types,
palette=sns.color_palette('hls', n_colors=n_colors), alpha=.5)
plot_title = 'Clusters' if plot_type == 'cluster' else 'True subclasses'
plt.title(f'Superclass {group}: {plot_title}')
plt.xlabel('')
plt.ylabel('')
g.get_figure().savefig(os.path.join(save_dir, f'group_{group}_{plot_type}_viz.png'),
dpi=300)
g.get_figure().clf()
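# --- Illustrative usage sketch (not part of the original repo) ---
# Writes one scatter plot per superclass into a temporary directory; assumes a
# matplotlib backend that can render off-screen (e.g. Agg).
if __name__ == "__main__":
    import tempfile
    rng = np.random.RandomState(0)
    toy_acts = rng.rand(40, 2)
    toy_clusters = rng.randint(0, 3, size=40)
    toy_groups = rng.randint(0, 2, size=40)
    visualize_clusters_by_group(toy_acts, toy_clusters, toy_groups,
                                save_dir=tempfile.mkdtemp())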
| hidden-stratification-master | stratification/utils/visualization.py |
import ast
import itertools
import uuid
import datetime
import subprocess
import random
import time
import json
import jsonpickle
from functools import singledispatch
from datetime import datetime, timedelta
from collections.abc import MutableMapping
import numpy as np
import torch
tenmin_td = timedelta(minutes=10)
hour_td = timedelta(hours=1)
def format_timedelta(timedelta):
s = str(timedelta)
if timedelta < tenmin_td:
return s[3:]
if timedelta < hour_td:
return s[2:]
return s
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray, )): #### This is the fix
return obj.tolist()
return json.JSONEncoder.default(self, obj)
class ScientificNotationDecoder(json.JSONDecoder):
"""Decodes floats incorrectly parsed by ActionJsonSchema (e.g. 1e-5)"""
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
for k, v in obj.items():
if type(v) == str:
obj[k] = convert_value(v)
return obj
@singledispatch
def keys_to_strings(ob):
"""
Converts keys in a dictionary object to strings for JSON.
source:
https://stackoverflow.com/questions/47568356/python-convert-all-keys-to-strings
"""
if type(ob) == dict:
return {str(k): keys_to_strings(v) for k, v in ob.items()}
elif type(ob) == list:
return [keys_to_strings(v) for v in ob]
return ob
def convert_value(value):
"""Parse string as python literal if possible and fallback to string."""
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
return value
def get_unique_str():
date = datetime.now().strftime('%Y-%m-%d')
time = datetime.now().strftime('%H-%M-%S')
rand = str(uuid.uuid4())[:8]
return f'{date}_{time}_{rand}'
def set_by_dotted_path(d, path, value):
"""
Change an entry in a nested dict using a dotted path.
Raises exception if path not in d.
Examples
--------
>>> d = {'foo': {'bar': 7}}
>>> set_by_dotted_path(d, 'foo.bar', 10)
>>> d
{'foo': {'bar': 10}}
>>> set_by_dotted_path(d, 'foo.d.baz', 3)
>>> d
{'foo': {'bar': 10, 'd': {'baz': 3}}}
"""
split_path = path.split(".")
split_path_len = len(split_path)
current_option = d
for idx, p in enumerate(split_path):
assert p in current_option, \
f'Path {split_path} does not exist in dictionary.'
if idx != split_path_len - 1:
current_option = current_option[p]
else:
current_option[p] = value
def merge_dicts(a, b):
"""
Returns a dictionary in which b is merged into a.
This is different than the naive approach {**a, **b} because it preserves
all existing values in nested dictionaries that appear in both a and b,
rather than overwriting a's entire nested dictionary with b's.
"""
def merge_dicts_rec(a, b):
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts_rec(a[key], b[key])
else:
a[key] = b[key] # overwrite values in a
else:
a[key] = b[key]
return a
return merge_dicts_rec(dict(a), b)
def get_git_commit_info():
get_commit_hash = "git log | head -n 1 | awk '{print $2}'"
check_unstaged = 'git diff --exit-code'
check_staged = 'git diff --cached --exit-code'
status = 'git status'
cmds = [get_commit_hash, check_unstaged, check_staged, status]
do_checks = [True, False, False, True]
saved_infos = []
for cmd, do_check in zip(cmds, do_checks):
try:
process_result = subprocess.run(cmd, shell=True, check=do_check,
universal_newlines=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
saved_infos.append((process_result.returncode, process_result.stdout.strip()))
err_msg = None
except subprocess.CalledProcessError as e:
err_msg = str(e)
returncode = int(err_msg.split()[-1][:-1])
if err_msg is not None:
return err_msg
commit_hash = saved_infos[0][1]
msg = 'Current commit: ' + commit_hash
if saved_infos[1][0] or saved_infos[2][0]:
msg += '; Uncommitted changes present'
return msg
def set_seed(seed, use_cuda):
if seed is None or seed < 0:
random.seed(time.perf_counter())
seed = random.randint(0, 100000)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if use_cuda:
torch.cuda.manual_seed_all(seed)
def init_cuda(deterministic, allow_multigpu=False):
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.backends.cudnn.deterministic = deterministic
torch.backends.cudnn.benchmark = not deterministic
if torch.cuda.device_count() > 1 and not allow_multigpu:
raise RuntimeError('Multi-GPU training unsupported. Run with CUDA_VISIBLE_DEVICES=X')
return use_cuda
def move_to_device(obj, device):
r"""Given a structure (possibly) containing Tensors on the CPU, move all the Tensors
to the specified GPU (or do nothing, if they should be on the CPU).
Adapted from https://github.com/SenWu/emmental/blob/master/src/emmental/utils/utils.py
device = -1 -> "cpu"
device = 0 -> "cuda:0"
Originally from:
https://github.com/HazyResearch/metal/blob/mmtl_clean/metal/utils.py
Args:
obj(Any): The object to convert.
device(int): The device id, defaults to -1.
Returns:
Any: The converted object.
"""
if device < 0 or not torch.cuda.is_available():
return obj.cpu()
elif isinstance(obj, torch.Tensor):
return obj.cuda(device) # type: ignore
elif isinstance(obj, dict):
return {key: move_to_device(value, device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, device) for item in obj])
else:
return obj
def save_config(config, save_path):
f = open(save_path, 'w')
json_str = json.dumps(json.loads(jsonpickle.encode(config)), indent=4)
f.write(json_str)
f.close()
def load_config(load_path):
f = open(load_path, 'r')
config = jsonpickle.decode(f.read())
f.close()
return config
def flatten_dict(d, parent_key='', sep='_'):
'''
Source: https://stackoverflow.com/questions/6027558/flatten-nested-dictionaries-compressing-keys
'''
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def concatenate_iterable(list_of_iterables):
if isinstance(list_of_iterables[0], torch.Tensor):
return torch.cat([x.detach().cpu() for x in list_of_iterables]).numpy()
elif isinstance(list_of_iterables[0], np.ndarray):
return np.concatenate(list_of_iterables)
elif isinstance(list_of_iterables[0], list):
return np.array(list(itertools.chain(*list_of_iterables)))
def get_learning_rate(optimizer):
for param_group in optimizer.param_groups:
if 'lr' in param_group:
return param_group['lr']
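# --- Illustrative usage sketch (not part of the original repo) ---
# flatten_dict turns nested metric dicts into flat keys for CSV logging, and
# NumpyEncoder lets json.dumps serialize numpy scalars/arrays directly.
if __name__ == "__main__":
    print(flatten_dict({'val': {'acc': 0.9, 'loss': 0.3}, 'epoch': 2}))
    # -> {'val_acc': 0.9, 'val_loss': 0.3, 'epoch': 2}
    print(json.dumps({'eid': np.int64(3), 'probs': np.array([0.1, 0.9])}, cls=NumpyEncoder))
    # -> {"eid": 3, "probs": [0.1, 0.9]}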
| hidden-stratification-master | stratification/utils/utils.py |
import json
import argparse
from jsonargparse import ActionJsonSchema, namespace_to_dict
from .utils import ScientificNotationDecoder, convert_value, set_by_dotted_path
from .schema import schema
def get_config(args_list=None):
"""
"""
# load and validate config file
parser = argparse.ArgumentParser()
parser.add_argument('config', action=ActionJsonSchema(schema=schema))
parser.add_argument('updates', nargs='*')
args = parser.parse_args(args_list)
args = namespace_to_dict(args)
# convert config to json-serializable dict object
config = args['config']
if '__path__' in config:
config = {**config, '__path__': config['__path__'].abs_path}
config = json.loads(json.dumps(config), cls=ScientificNotationDecoder)
# update config in-place with commandline arguments
update_config(config, args['updates'])
return config
def update_config(config, updates):
for update in updates:
path, sep, value = update.partition("=")
if sep == "=":
path = path.strip() # get rid of surrounding whitespace
value = value.strip() # get rid of surrounding whitespace
set_by_dotted_path(config, path, convert_value(value))
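# --- Illustrative usage sketch (not part of the original repo) ---
# Command-line overrides are dotted paths, e.g. `cluster_config.k=5`.
# (Run as `python -m stratification.utils.parse_args` so the relative imports resolve.)
if __name__ == "__main__":
    toy_cfg = {'cluster_config': {'k': 10, 'model': 'gmm'}}
    update_config(toy_cfg, ['cluster_config.k=5', 'cluster_config.model=kmeans'])
    print(toy_cfg)  # {'cluster_config': {'k': 5, 'model': 'kmeans'}}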
| hidden-stratification-master | stratification/utils/parse_args.py |
schema = {
'type':
'object',
'required':
['exp_dir', 'mode', 'dataset', 'classification_config', 'reduction_config', 'cluster_config'],
'properties': {
'seed': {
'type': 'number',
'default': -1
},
'deterministic': {
'type': 'boolean',
'default': True
},
'use_cuda': {
'type': 'boolean',
'default': True
},
'allow_multigpu': {
'type': 'boolean',
'default': False
},
'exp_dir': {
'type': 'string',
'default': 'checkpoints/_debug'
},
'mode': {
'type': 'string',
'default':
'erm' # choices: erm, superclass_gdro, true_subclass_gdro, random_gdro, george
},
'dataset': {
'type': 'string',
'default': 'mnist' # choices: celeba, isic, mnist, waterbirds
},
'activations_dir': {
'type': 'string',
'default': 'NONE'
},
'representation_dir': {
'type': 'string',
'default': 'NONE'
},
'cluster_dir': {
'type': 'string',
'default': 'NONE'
},
'classification_config': {
'type':
'object',
'required': [
'model',
'metric_types',
'checkpoint_metric',
'eval_only',
'num_epochs',
'batch_size',
'criterion_config',
'optimizer_config',
'scheduler_config',
'dataset_config',
],
'properties': {
'model': {
'type': 'string',
'default': 'lenet4'
},
'erm_config': {
'type': 'object',
'default': {}
},
'gdro_config': {
'type': 'object',
'default': {}
},
'metric_types': {
'type': 'array',
'examples': [['acc', 'loss']]
},
'checkpoint_metric': {
'type': 'string',
'examples': ['train_acc', 'train_loss', 'val_acc', 'val_loss']
},
'eval_only': {
'type': 'boolean',
'default': False
},
'eval_mode': {
'type':
'string',
'default':
'best',
'examples': [
'best', 'best_val_acc', 'best_val_subclass_rob_acc', 'best_val_acc_rw',
'best_val_subclass_rob_acc_rw', 'best_val_true_subclass_rob_acc',
'best_val_auroc', 'best_val_subclass_rob_auroc',
'best_val_true_subclass_rob_auroc', 'best_val_alt_subclass_rob_auroc'
],
},
'save_act_only': {
'type': 'boolean',
'default': False
},
'ban_reweight': {
'type': 'boolean',
'default': False
},
'bit_pretrained': {
'type': 'boolean',
'default': False
},
'num_epochs': {
'type': 'number',
'default': 20
},
'workers': {
'type': 'number',
'default': 8
},
'dataset_config': {
'type': 'object',
'properties': {}
},
'criterion_config': {
'type': 'object',
'properties': {
'robust_lr': {
'type': 'number',
'default': 0.01
},
'stable_dro': {
'type': 'boolean',
'default': True
},
'size_adjustment': {
'type': 'number',
'default': 0
},
'auroc_gdro': {
'type': 'boolean',
'default': False
}
}
},
'optimizer_config': {
'type': 'object',
'required': ['class_args', 'class_name'],
'properties': {
'class_args': {
'type': 'object',
'examples': [{
'lr': 2e-3,
'weight_decay': 1e-5
}]
},
'class_name': {
'type': 'string',
'examples': ['Adam']
}
}
},
'scheduler_config': {
'type': 'object',
'required': ['class_args', 'class_name'],
'properties': {
'class_args': {
'type': 'object',
'examples': [{
'milestones': [50, 75]
}]
},
'class_name': {
'type': 'string',
'examples': ['MultiStepLR']
}
}
},
'show_progress': {
'type': 'boolean',
'default': True
},
'reset_model_state': {
'type': 'boolean',
'default': True
},
'save_every': {
'type': 'number',
'default': -1
}
}
},
'reduction_config': {
'type': 'object',
'required': ['model'],
'properties': {
'model': {
'type': 'string',
'default': 'umap' # choices: "none", "pca", "umap", "hardness"
},
'components': {
'type': 'number',
'default': 2
},
'normalize': {
'type': 'boolean',
'default': True
},
'mean_reduce': {
'type': 'boolean',
'default': False
}
}
},
'cluster_config': {
'type': 'object',
'required': ['model', 'metric_types'],
'properties': {
'model': {
'type': 'string',
'default': 'gmm' # choices: "gmm", "kmeans"
},
'metric_types': {
'type': 'array',
'examples': [['mean_loss', 'composition']]
},
'search_k': {
'type': 'boolean',
'default': False
},
'k': {
'type': 'number',
'default': 10
},
'sil_cuda': {
'type': 'boolean',
'default': False
},
'overcluster': {
'type': 'boolean',
'default': False
},
'overcluster_factor': {
'type': 'number',
'default': 5
},
'superclasses_to_ignore': {
'type': 'array',
'items': {
'type': 'number'
},
'default': []
},
}
}
}
}
| hidden-stratification-master | stratification/utils/schema.py |
import warnings
warnings.filterwarnings("ignore") # Suppress warnings from FlyingSquid
import numpy as np
from flyingsquid.label_model import LabelModel
def run_embroid(votes, nn_info, knn=10, thresholds=[[0.5, 0.5]]):
"""
Implements Embroid.
Parameters
----------
votes : ndarray of shape (n_samples, n_sources)
Predictions of LMs. Should be in 1/-1 space.
nn_info: ndarray of shape (n_embeddings, n_samples, d)
Nearest neighbor information for each of the n_embedding spaces.
nn_info[i, t, l] is the index of the lth nearest-neighbor in ith
embedding space for sample t.
knn: int
Number of neighbors to use when computing neighborhood votes.
    thresholds: array-like of shape (n_sources, 2)
        Per-source (negative-class, positive-class) tau thresholds used to convert
        neighborhood vote fractions into neighborhood predictions.
"""
# Check that votes are in 1/-1 space
assert sorted(np.unique(votes)) in [[1], [-1], [-1, 1]], np.unique(votes)
n_samples, n_sources = votes.shape
n_embeddings = len(nn_info)
# compute neighborhood votes for each source
inputs = [votes]
for i in range(n_embeddings):
S = np.zeros((n_samples, n_sources))
for j in range(n_sources):
# Convert prediction of source j to index space (0, 1)
j_prediction = (votes[:, j] + 1) / 2
# Compute fraction of nearest neighbor votes for positive class
neighbor_pos_frac = j_prediction[nn_info[i, :, 1 : 1 + knn]].mean(axis=1)
# Construct neighborhood votes from fractions
shrunk_neighbor_votes = np.zeros(len(neighbor_pos_frac))
idxs = np.where(neighbor_pos_frac >= thresholds[j][1])
shrunk_neighbor_votes[idxs] = 1
idxs = np.where((1 - neighbor_pos_frac) >= thresholds[j][0])
shrunk_neighbor_votes[idxs] = -1
S[:, j] = shrunk_neighbor_votes
inputs.append(S)
# Stack votes and S
mod_votes = np.concatenate(inputs, axis=1)
assert mod_votes.shape[1] == n_sources * (len(inputs))
label_model = LabelModel(n_sources * (len(inputs)))
label_model.fit(mod_votes)
preds = label_model.predict(mod_votes).ravel()
return preds
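# --- Illustrative usage sketch (not part of the original repo) ---
# Toy smoke test: three noisy voters over 200 samples, one embedding space with
# random neighbor lists (column 0 is the point itself), and one (tau-, tau+)
# threshold pair per source. Assumes flyingsquid is installed; with random
# neighbors this only exercises the plumbing, not Embroid's accuracy gains.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n, k = 200, 3
    y = rng.choice([-1, 1], size=n)
    # three voters that agree with the latent label y roughly 80% of the time
    votes = np.stack([np.where(rng.rand(n) < 0.8, y, -y) for _ in range(k)], axis=1)
    nn_info = np.stack([np.column_stack([np.arange(n), rng.randint(0, n, size=(n, 10))])])
    preds = run_embroid(votes, nn_info, knn=5, thresholds=[[0.5, 0.5]] * k)
    print('agreement with latent labels:', (preds == y).mean())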
| embroid-main | embroid.py |
from distutils.util import convert_path
from setuptools import find_packages, setup
main_ns = {}
ver_path = convert_path("bootleg/_version.py")
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
NAME = "bootleg"
DESCRIPTION = "Bootleg NED System"
URL = "https://github.com/HazyResearch/bootleg"
EMAIL = "[email protected]"
AUTHOR = "Laurel Orr"
VERSION = main_ns["__version__"]
REQUIRED = [
"argh>=0.26.2, <1.0.0",
"emmental==0.1.0",
"faiss-cpu>=1.6.8, <1.7.1",
"jsonlines>=2.0.0, <2.4.0",
"marisa_trie>=0.7.7, <0.8",
"mock>=4.0.3, <4.5.0",
"nltk>=3.6.4, <4.0.0",
"notebook>=6.4.1, <7.0.0",
"numba>=0.50.0, <0.55.0",
"numpy>=1.19.0, <=1.20.0",
"pandas>=1.2.3, <1.5.0",
"progressbar>=2.5.0, <2.8.0",
"pydantic>=1.7.1, <1.8.0",
"pyyaml>=5.1, <6.0",
"rich>=10.0.0, <10.20.0",
"scikit_learn>=0.24.0, <0.27.0",
"scipy>=1.6.1, <1.9.0",
"spacy>=3.2.0",
"tagme>=0.1.3, <0.2.0",
"torch>=1.7.0, <1.10.5",
"tqdm>=4.27",
"transformers>=4.0.0, <5.0.0",
"ujson>=4.1.0, <4.2.0",
"wandb>=0.10.0, <0.13.0",
]
EXTRAS = {
"dev": [
"black>=22.3.0",
"docformatter==1.4",
"flake8>=3.9.2",
"isort>=5.9.3",
"nbsphinx==0.8.1",
"pep8_naming==0.12.1",
"pre-commit>=2.14.0",
"pytest==6.2.2",
"python-dotenv==0.15.0",
"recommonmark==0.7.1",
"sphinx-rtd-theme==0.5.1",
],
"embs-gpu": [
"faiss-gpu>=1.7.0, <1.7.2",
],
}
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
packages=find_packages(),
url=URL,
install_requires=REQUIRED,
extras_require=EXTRAS,
)
| bootleg-master | setup.py |
"""Emmental task constants."""
CANDGEN_TASK = "CANDGEN"
BATCH_CANDS_LABEL = "gold_unq_eid_idx"
| bootleg-master | cand_gen/task_config.py |
import logging
import multiprocessing
import os
import re
import shutil
import tempfile
import time
import traceback
import warnings
import numpy as np
import torch
import ujson
from tqdm.auto import tqdm
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.dataset import convert_examples_to_features_and_save, create_examples
from bootleg.layers.alias_to_ent_encoder import AliasEntityTable
from bootleg.symbols.constants import STOP_WORDS
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import data_utils, utils
from bootleg.utils.classes.emmental_data import RangedEmmentalDataset
from bootleg.utils.data_utils import read_in_akas
warnings.filterwarnings(
"ignore",
message="Could not import the lzma module. Your installed Python is incomplete. "
"Attempting to use lzma compression will result in a RuntimeError.",
)
warnings.filterwarnings(
"ignore",
message="FutureWarning: Passing (type, 1) or '1type'*",
)
logger = logging.getLogger(__name__)
# Removes warnings about TOKENIZERS_PARALLELISM
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def get_entity_string(
qid,
constants,
entity_symbols,
qid2alternatenames,
):
"""
    For each entity, generates a string that is fed into a language model to generate an entity embedding.
    Also returns the word positions of all tokens that match the entity title (even if they occur in the
    description).
Args:
qid: QID
constants: Dict of constants
entity_symbols: entity symbols
        qid2alternatenames: Dict of QID to list of alternate names
Returns: entity strings
"""
desc_str = (
"[ent_desc] " + entity_symbols.get_desc(qid) if constants["use_desc"] else ""
)
title_str = entity_symbols.get_title(qid) if entity_symbols.qid_exists(qid) else ""
# To encourage mention similarity, we remove the (<type>) from titles
title_str = re.sub(r"(\(.*\))", r"", title_str).strip()
if constants["use_akas"]:
# Use a type token sep from Bootleg models to allow for easier sharing of tokenizers
alternate_names = " [ent_type] " + " [ent_type] ".join(
qid2alternatenames.get(qid, [])
)
alternate_names = " ".join(
alternate_names.split()[: constants["max_ent_aka_len"]]
)
desc_str = " ".join([alternate_names, desc_str])
ent_str = " ".join([title_str, desc_str])
# Remove double spaces
ent_split = ent_str.split()
ent_str = " ".join(ent_split)
title_spans = []
if len(title_str) > 0:
# Find all occurrences of title words in the ent_str (helps if description has abbreviated name)
# Make sure you don't mask any types or kg relations
title_pieces = set(title_str.split())
to_skip = False
for e_id, ent_w in enumerate(ent_split):
if ent_w == "[ent_type]":
to_skip = True
if ent_w == "[ent_desc]":
to_skip = False
if to_skip:
continue
if ent_w in title_pieces and ent_w not in STOP_WORDS:
title_spans.append(e_id)
return ent_str, title_spans
def build_and_save_entity_inputs_initializer(
constants,
data_config,
save_entity_dataset_name,
X_entity_storage,
qid2alternatenames_file,
tokenizer,
):
global qid2alternatenames_global
qid2alternatenames_global = ujson.load(open(qid2alternatenames_file))
global mmap_entity_file_global
mmap_entity_file_global = np.memmap(
save_entity_dataset_name, dtype=X_entity_storage, mode="r+"
)
global constants_global
constants_global = constants
global tokenizer_global
tokenizer_global = tokenizer
global entitysymbols_global
entitysymbols_global = EntitySymbols.load_from_cache(
load_dir=os.path.join(data_config.entity_dir, data_config.entity_map_dir),
alias_cand_map_dir=data_config.alias_cand_map,
alias_idx_dir=data_config.alias_idx_map,
)
def build_and_save_entity_inputs(
save_entity_dataset_name,
X_entity_storage,
data_config,
dataset_threads,
tokenizer,
entity_symbols,
):
"""Generates data for the entity encoder input.
Args:
save_entity_dataset_name: memmap filename to save the entity data
X_entity_storage: storage type for memmap file
data_config: data config
dataset_threads: number of threads
tokenizer: tokenizer
entity_symbols: entity symbols
Returns:
"""
add_entity_akas = data_config.use_entity_akas
qid2alternatenames = {}
if add_entity_akas:
qid2alternatenames = read_in_akas(entity_symbols)
num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
# IMPORTANT: for distributed writing to memmap files, you must create them in w+
# mode before being opened in r+ mode by workers
memfile = np.memmap(
save_entity_dataset_name,
dtype=X_entity_storage,
mode="w+",
shape=(entity_symbols.num_entities_with_pad_and_nocand,),
order="C",
)
# We'll use the -1 to check that things were written correctly later because at
# the end, there should be no -1
memfile["entity_token_type_ids"][:] = -1
# The memfile corresponds to eids. As eid 0 and -1 are reserved for UNK/PAD
# we need to set the values. These get a single [SEP] for title [SEP] rest of entity
empty_ent = tokenizer(
"[SEP]",
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=data_config.max_ent_len,
)
memfile["entity_input_ids"][0] = empty_ent["input_ids"][:]
memfile["entity_token_type_ids"][0] = empty_ent["token_type_ids"][:]
memfile["entity_attention_mask"][0] = empty_ent["attention_mask"][:]
memfile["entity_to_mask"][0] = [0 for _ in range(len(empty_ent["input_ids"]))]
memfile["entity_input_ids"][-1] = empty_ent["input_ids"][:]
memfile["entity_token_type_ids"][-1] = empty_ent["token_type_ids"][:]
memfile["entity_attention_mask"][-1] = empty_ent["attention_mask"][:]
memfile["entity_to_mask"][-1] = [0 for _ in range(len(empty_ent["input_ids"]))]
constants = {
"train_in_candidates": data_config.train_in_candidates,
"max_ent_len": data_config.max_ent_len,
"max_ent_aka_len": data_config.max_ent_aka_len,
"use_akas": add_entity_akas,
"use_desc": data_config.use_entity_desc,
"print_examples_prep": data_config.print_examples_prep,
}
if num_processes == 1:
input_qids = list(entity_symbols.get_all_qids())
num_qids, overflowed = build_and_save_entity_inputs_single(
input_qids,
constants,
memfile,
qid2alternatenames,
tokenizer,
entity_symbols,
)
else:
qid2alternatenames_file = tempfile.NamedTemporaryFile()
with open(qid2alternatenames_file.name, "w") as out_f:
ujson.dump(qid2alternatenames, out_f)
input_qids = list(entity_symbols.get_all_qids())
chunk_size = int(np.ceil(len(input_qids) / num_processes))
input_chunks = [
input_qids[i : i + chunk_size]
for i in range(0, len(input_qids), chunk_size)
]
log_rank_0_debug(logger, f"Starting pool with {num_processes} processes")
pool = multiprocessing.Pool(
processes=num_processes,
initializer=build_and_save_entity_inputs_initializer,
initargs=[
constants,
data_config,
save_entity_dataset_name,
X_entity_storage,
qid2alternatenames_file.name,
tokenizer,
],
)
cnt = 0
overflowed = 0
for res in tqdm(
pool.imap_unordered(
build_and_save_entity_inputs_hlp, input_chunks, chunksize=1
),
total=len(input_chunks),
desc="Building entity data",
):
c, overfl = res
cnt += c
overflowed += overfl
pool.close()
pool.join()
qid2alternatenames_file.close()
log_rank_0_debug(
logger,
f"{overflowed} out of {len(input_qids)} were overflowed",
)
memfile = np.memmap(save_entity_dataset_name, dtype=X_entity_storage, mode="r")
for i in tqdm(
range(entity_symbols.num_entities_with_pad_and_nocand),
desc="Verifying entity data",
):
assert all(memfile["entity_token_type_ids"][i] != -1), f"Memfile at {i} is -1."
memfile = None
return
def build_and_save_entity_inputs_hlp(input_qids):
return build_and_save_entity_inputs_single(
input_qids,
constants_global,
mmap_entity_file_global,
qid2alternatenames_global,
tokenizer_global,
entitysymbols_global,
)
def build_and_save_entity_inputs_single(
input_qids,
constants,
memfile,
qid2alternatenames,
tokenizer,
entity_symbols,
):
printed = 0
num_overflow = 0
for qid in tqdm(input_qids, desc="Processing entities"):
ent_str, title_spans = get_entity_string(
qid,
constants,
entity_symbols,
qid2alternatenames,
)
inputs = tokenizer(
ent_str.split(),
is_split_into_words=True,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=constants["max_ent_len"],
)
to_mask = [0 for _ in range(len(inputs["input_ids"]))]
for title_sp in title_spans:
title_toks = inputs.word_to_tokens(title_sp)
if title_toks is None:
continue
for i in range(title_toks.start, title_toks.end):
to_mask[i] = 1
# Heuristic function to compute this
if inputs["input_ids"][-1] == tokenizer.sep_token_id:
num_overflow += 1
if printed < 8 and constants["print_examples_prep"]:
print("QID:", qid)
print("TITLE:", entity_symbols.get_title(qid))
print("ENT STR:", ent_str)
print("INPUTS:", inputs)
print("TITLE SPANS:", title_spans)
print("TO MASK:", to_mask)
print(tokenizer.convert_ids_to_tokens(np.array(inputs["input_ids"])))
printed += 1
eid = entity_symbols.get_eid(qid)
for k, value in inputs.items():
memfile[f"entity_{k}"][eid] = value
memfile["entity_to_mask"][eid] = to_mask
memfile.flush()
return len(input_qids), num_overflow
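# --- Illustrative usage sketch (not part of the original repo) ---
# The title masking above relies on HuggingFace fast tokenizers: tokenize a
# pre-split word list, then map a word index back to its token span. Assumes
# `transformers` plus the `bert-base-uncased` tokenizer files are available.
if __name__ == "__main__":
    from transformers import AutoTokenizer
    toy_tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
    toy_words = "Barack Obama [ent_desc] 44th president of the United States".split()
    toy_enc = toy_tok(toy_words, is_split_into_words=True, padding="max_length",
                      truncation=True, max_length=32)
    toy_mask = [0] * len(toy_enc["input_ids"])
    toy_span = toy_enc.word_to_tokens(1)  # word index of "Obama"
    if toy_span is not None:
        for i in range(toy_span.start, toy_span.end):
            toy_mask[i] = 1
    print(toy_tok.convert_ids_to_tokens(toy_enc["input_ids"]), toy_mask)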
class CandGenDataset(RangedEmmentalDataset):
"""CandGen Dataset class to be used in dataloader.
Args:
main_args: input config
name: internal dataset name
dataset: dataset file
use_weak_label: whether to use weakly labeled mentions or not
tokenizer: sentence tokenizer
entity_symbols: entity database class
dataset_threads: number of threads to use
split: data split
is_bert: is the tokenizer a BERT or not
Returns:
"""
def __init__(
self,
main_args,
name,
dataset,
use_weak_label,
tokenizer,
entity_symbols,
dataset_threads,
split="train",
is_bert=True,
):
log_rank_0_info(
logger,
f"Starting to build data for {split} from {dataset}",
)
global_start = time.time()
data_config = main_args.data_config
spawn_method = main_args.run_config.spawn_method
log_rank_0_debug(logger, f"Setting spawn method to be {spawn_method}")
orig_spawn = multiprocessing.get_start_method()
multiprocessing.set_start_method(spawn_method, force=True)
        # Unique identifier is the sentence index, subsentence index (from sentence splitting), and the alias's original position in the sentence's alias list
guid_dtype = np.dtype(
[
("sent_idx", "i8", 1),
("subsent_idx", "i8", 1),
("alias_orig_list_pos", "i8", (1,)),
]
)
max_total_input_len = data_config.max_seq_len
# Storage for saving the data.
self.X_storage, self.Y_storage, self.X_entity_storage = (
[
("guids", guid_dtype, 1),
("sent_idx", "i8", 1),
("subsent_idx", "i8", 1),
("alias_idx", "i8", 1),
(
"input_ids",
"i8",
(max_total_input_len,),
),
(
"token_type_ids",
"i8",
(max_total_input_len,),
),
(
"attention_mask",
"i8",
(max_total_input_len,),
),
(
"word_qid_cnt_mask_score",
"float",
(max_total_input_len,),
),
("alias_orig_list_pos", "i8", 1),
(
"gold_eid",
"i8",
1,
), # What the eid of the gold entity is
(
"for_dump_gold_eid",
"i8",
1,
), # What the eid of the gold entity is for all aliases
(
"for_dump_gold_cand_K_idx_train",
"i8",
1,
), # Which of the K candidates is correct. Only used in dump_pred to stitch sub-sentences together
],
[
(
"gold_cand_K_idx",
"i8",
1,
), # Which of the K candidates is correct.
],
[
("entity_input_ids", "i8", (data_config.max_ent_len)),
("entity_token_type_ids", "i8", (data_config.max_ent_len)),
("entity_attention_mask", "i8", (data_config.max_ent_len)),
("entity_to_mask", "i8", (data_config.max_ent_len)),
],
)
self.split = split
self.tokenizer = tokenizer
# Table to map from alias_idx to entity_cand_eid used in the __get_item__
self.alias2cands_model = AliasEntityTable(
data_config=data_config, entity_symbols=entity_symbols
)
# Total number of entities used in the __get_item__
self.num_entities_with_pad_and_nocand = (
entity_symbols.num_entities_with_pad_and_nocand
)
self.raw_filename = dataset
# Folder for all mmap saved files
save_dataset_folder = data_utils.get_save_data_folder_candgen(
data_config, use_weak_label, self.raw_filename
)
utils.ensure_dir(save_dataset_folder)
# Folder for entity mmap saved files
save_entity_folder = data_utils.get_emb_prep_dir(data_config)
utils.ensure_dir(save_entity_folder)
# Folder for temporary output files
temp_output_folder = os.path.join(
data_config.data_dir,
data_config.data_prep_dir,
f"prep_{split}_dataset_files",
)
utils.ensure_dir(temp_output_folder)
# Input step 1
create_ex_indir = os.path.join(temp_output_folder, "create_examples_input")
utils.ensure_dir(create_ex_indir)
# Input step 2
create_ex_outdir = os.path.join(temp_output_folder, "create_examples_output")
utils.ensure_dir(create_ex_outdir)
# Meta data saved files
meta_file = os.path.join(temp_output_folder, "meta_data.json")
# File for standard training data
self.save_dataset_name = os.path.join(save_dataset_folder, "ned_data.bin")
# File for standard labels
self.save_labels_name = os.path.join(save_dataset_folder, "ned_label.bin")
# File for type labels
self.save_entity_dataset_name = None
# =======================================================================================
# =======================================================================================
# =======================================================================================
# STANDARD DISAMBIGUATION
# =======================================================================================
# =======================================================================================
# =======================================================================================
log_rank_0_debug(
logger,
f"Seeing if {self.save_dataset_name} exists and {self.save_labels_name} exists",
)
if (
data_config.overwrite_preprocessed_data
or (not os.path.exists(self.save_dataset_name))
or (not os.path.exists(self.save_labels_name))
):
st_time = time.time()
log_rank_0_info(
logger,
f"Building dataset from scratch. Saving to {save_dataset_folder}.",
)
create_examples(
dataset,
create_ex_indir,
create_ex_outdir,
meta_file,
data_config,
dataset_threads,
use_weak_label,
split,
is_bert,
tokenizer,
)
try:
convert_examples_to_features_and_save(
meta_file,
guid_dtype,
data_config,
dataset_threads,
use_weak_label,
split,
is_bert,
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
tokenizer,
entity_symbols,
)
log_rank_0_debug(
logger,
f"Finished prepping disambig training data in {time.time() - st_time}",
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error(traceback.format_exc())
logger.error("\n".join(tb.stack.format()))
os.remove(self.save_dataset_name)
os.remove(self.save_labels_name)
shutil.rmtree(save_dataset_folder, ignore_errors=True)
raise
log_rank_0_info(
logger,
f"Loading data from {self.save_dataset_name} and {self.save_labels_name}",
)
X_dict, Y_dict = self.build_data_dicts(
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
)
# =======================================================================================
# =======================================================================================
# =======================================================================================
# ENTITY TOKENS
# =======================================================================================
# =======================================================================================
# =======================================================================================
self.save_entity_dataset_name = os.path.join(
save_entity_folder,
f"entity_data"
f"_aka{int(data_config.use_entity_akas)}"
f"_desc{int(data_config.use_entity_desc)}.bin",
)
log_rank_0_debug(logger, f"Seeing if {self.save_entity_dataset_name} exists")
if data_config.overwrite_preprocessed_data or (
not os.path.exists(self.save_entity_dataset_name)
):
st_time = time.time()
log_rank_0_info(logger, "Building entity data from scatch.")
try:
# Creating/saving data
build_and_save_entity_inputs(
self.save_entity_dataset_name,
self.X_entity_storage,
data_config,
dataset_threads,
tokenizer,
entity_symbols,
)
log_rank_0_debug(
logger, f"Finished prepping data in {time.time() - st_time}"
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error(traceback.format_exc())
logger.error("\n".join(tb.stack.format()))
os.remove(self.save_entity_dataset_name)
raise
X_entity_dict = self.build_data_entity_dicts(
self.save_entity_dataset_name, self.X_entity_storage
)
self.X_entity_dict = X_entity_dict
log_rank_0_debug(logger, "Removing temporary output files")
shutil.rmtree(temp_output_folder, ignore_errors=True)
log_rank_0_info(
logger,
f"Final data initialization time for {split} is {time.time() - global_start}s",
)
# Set spawn back to original/default, which is "fork" or "spawn".
# This is needed for the Meta.config to be correctly passed in the collate_fn.
multiprocessing.set_start_method(orig_spawn, force=True)
super().__init__(name, X_dict=X_dict, Y_dict=Y_dict, uid="guids")
@classmethod
def build_data_dicts(
cls, save_dataset_name, save_labels_name, X_storage, Y_storage
):
"""Returns the X_dict and Y_dict of inputs and labels for the entity
disambiguation task.
Args:
save_dataset_name: memmap file name with inputs
save_labels_name: memmap file name with labels
X_storage: memmap storage for inputs
Y_storage: memmap storage labels
Returns: X_dict of inputs and Y_dict of labels for Emmental datasets
"""
X_dict, Y_dict = (
{
"guids": [],
"sent_idx": [],
"subsent_idx": [],
"alias_idx": [],
"input_ids": [],
"token_type_ids": [],
"attention_mask": [],
"word_qid_cnt_mask_score": [],
"alias_orig_list_pos": [], # list of original position in the alias list this example is (see eval)
"gold_eid": [], # List of gold entity eids
"for_dump_gold_eid": [], # List of gold entity eids
"for_dump_gold_cand_K_idx_train": [], # list of gold indices without subsentence masking (see eval)
},
{
"gold_cand_K_idx": [],
},
)
mmap_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
mmap_label_file = np.memmap(save_labels_name, dtype=Y_storage, mode="r")
X_dict["sent_idx"] = torch.from_numpy(mmap_file["sent_idx"])
X_dict["subsent_idx"] = torch.from_numpy(mmap_file["subsent_idx"])
X_dict["guids"] = mmap_file["guids"] # uid doesn't need to be tensor
X_dict["alias_idx"] = torch.from_numpy(mmap_file["alias_idx"])
X_dict["input_ids"] = torch.from_numpy(mmap_file["input_ids"])
X_dict["token_type_ids"] = torch.from_numpy(mmap_file["token_type_ids"])
X_dict["attention_mask"] = torch.from_numpy(mmap_file["attention_mask"])
X_dict["word_qid_cnt_mask_score"] = torch.from_numpy(
mmap_file["word_qid_cnt_mask_score"]
)
X_dict["alias_orig_list_pos"] = torch.from_numpy(
mmap_file["alias_orig_list_pos"]
)
X_dict["gold_eid"] = torch.from_numpy(mmap_file["gold_eid"])
X_dict["for_dump_gold_eid"] = torch.from_numpy(mmap_file["for_dump_gold_eid"])
X_dict["for_dump_gold_cand_K_idx_train"] = torch.from_numpy(
mmap_file["for_dump_gold_cand_K_idx_train"]
)
Y_dict["gold_cand_K_idx"] = torch.from_numpy(mmap_label_file["gold_cand_K_idx"])
return X_dict, Y_dict
@classmethod
def build_data_entity_dicts(cls, save_dataset_name, X_storage):
"""Returns the X_dict for the entity data.
Args:
save_dataset_name: memmap file name with entity data
X_storage: memmap storage type
        Returns: Dict of entity inputs
"""
X_dict = {
"entity_input_ids": [],
"entity_token_type_ids": [],
"entity_attention_mask": [],
"entity_to_mask": [],
}
mmap_label_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
X_dict["entity_input_ids"] = torch.from_numpy(
mmap_label_file["entity_input_ids"]
)
X_dict["entity_token_type_ids"] = torch.from_numpy(
mmap_label_file["entity_token_type_ids"]
)
X_dict["entity_attention_mask"] = torch.from_numpy(
mmap_label_file["entity_attention_mask"]
)
X_dict["entity_to_mask"] = torch.from_numpy(mmap_label_file["entity_to_mask"])
return X_dict
def __getitem__(self, index):
r"""Get item by index.
Args:
            index (int): The index of the item.
Returns:
Tuple[Dict[str, Any], Dict[str, Tensor]]: Tuple of x_dict and y_dict
"""
index = self.data_range[index]
x_dict = {name: feature[index] for name, feature in self.X_dict.items()}
y_dict = {name: label[index] for name, label in self.Y_dict.items()}
# Get the entity_cand_eid
entity_cand_eid = self.alias2cands_model(x_dict["alias_idx"]).long()
entity_cand_input_ids = []
entity_cand_token_type_ids = []
entity_cand_attention_mask = []
# Get the entity token ids
for eid in entity_cand_eid:
entity_input_ids = self.X_entity_dict["entity_input_ids"][eid]
entity_cand_input_ids.append(entity_input_ids)
entity_cand_token_type_ids.append(
self.X_entity_dict["entity_token_type_ids"][eid]
)
entity_cand_attention_mask.append(
self.X_entity_dict["entity_attention_mask"][eid]
)
# Create M x K x token length
x_dict["entity_cand_input_ids"] = torch.stack(entity_cand_input_ids, dim=0)
x_dict["entity_cand_token_type_ids"] = torch.stack(
entity_cand_token_type_ids, dim=0
)
x_dict["entity_cand_attention_mask"] = torch.stack(
entity_cand_attention_mask, dim=0
)
x_dict["entity_cand_eval_mask"] = entity_cand_eid == -1
# Handles the index errors with -1 indexing into an embedding
x_dict["entity_cand_eid"] = torch.where(
entity_cand_eid >= 0,
entity_cand_eid,
(
torch.ones_like(entity_cand_eid, dtype=torch.long)
* (self.num_entities_with_pad_and_nocand - 1)
),
)
# Add dummy gold_unq_eid_idx for Emmental init - this gets overwritten in the collator in data.py
y_dict["gold_unq_eid_idx"] = y_dict["gold_cand_K_idx"]
return x_dict, y_dict
def __getstate__(self):
state = self.__dict__.copy()
del state["X_dict"]
del state["Y_dict"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.X_dict, self.Y_dict = self.build_data_dicts(
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
)
return state
def __repr__(self):
return (
f"Bootleg Dataset. Data at {self.save_dataset_name}. "
f"Labels at {self.save_labels_name}. "
)
class CandGenEntityDataset(RangedEmmentalDataset):
"""Bootleg Dataset class for generating entity embeddings.
Args:
main_args: input config
name: internal dataset name
dataset: dataset file
tokenizer: sentence tokenizer
entity_symbols: entity database class
dataset_threads: number of threads to use
split: data split
Returns:
"""
def __init__(
self,
main_args,
name,
dataset,
tokenizer,
entity_symbols,
dataset_threads,
split="test",
):
assert split == "test", "Split must be test split for EntityDataset"
log_rank_0_info(
logger,
f"Starting to build data for {split} from {dataset}",
)
global_start = time.time()
data_config = main_args.data_config
spawn_method = main_args.run_config.spawn_method
log_rank_0_debug(logger, f"Setting spawn method to be {spawn_method}")
orig_spawn = multiprocessing.get_start_method()
multiprocessing.set_start_method(spawn_method, force=True)
# Storage for saving the data.
self.X_entity_storage = [
("entity_input_ids", "i8", (data_config.max_ent_len)),
("entity_token_type_ids", "i8", (data_config.max_ent_len)),
("entity_attention_mask", "i8", (data_config.max_ent_len)),
("entity_to_mask", "i8", (data_config.max_ent_len)),
]
self.split = split
self.tokenizer = tokenizer
# Table to map from alias_idx to entity_cand_eid used in the __get_item__
self.alias2cands_model = AliasEntityTable(
data_config=data_config, entity_symbols=entity_symbols
)
# Total number of entities used in the __get_item__
self.num_entities_with_pad_and_nocand = (
entity_symbols.num_entities_with_pad_and_nocand
)
# Folder for entity mmap saved files
save_entity_folder = data_utils.get_emb_prep_dir(data_config)
utils.ensure_dir(save_entity_folder)
# =======================================================================================
# =======================================================================================
# =======================================================================================
# ENTITY TOKENS
# =======================================================================================
# =======================================================================================
# =======================================================================================
self.save_entity_dataset_name = os.path.join(
save_entity_folder,
f"entity_data"
f"_aka{int(data_config.use_entity_akas)}"
f"_desc{int(data_config.use_entity_desc)}.bin",
)
log_rank_0_debug(logger, f"Seeing if {self.save_entity_dataset_name} exists")
if data_config.overwrite_preprocessed_data or (
not os.path.exists(self.save_entity_dataset_name)
):
st_time = time.time()
log_rank_0_info(logger, "Building entity data from scatch.")
try:
# Creating/saving data
build_and_save_entity_inputs(
self.save_entity_dataset_name,
self.X_entity_storage,
data_config,
dataset_threads,
tokenizer,
entity_symbols,
)
log_rank_0_debug(
logger, f"Finished prepping data in {time.time() - st_time}"
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error(traceback.format_exc())
logger.error("\n".join(tb.stack.format()))
os.remove(self.save_entity_dataset_name)
raise
X_entity_dict = self.build_data_entity_dicts(
self.save_entity_dataset_name, self.X_entity_storage
)
# Add the unique identified of EID (the embeddings are already in this order)
X_entity_dict["guids"] = torch.arange(len(X_entity_dict["entity_input_ids"]))
log_rank_0_info(
logger,
f"Final data initialization time for {split} is {time.time() - global_start}s",
)
# Set spawn back to original/default, which is "fork" or "spawn".
# This is needed for the Meta.config to be correctly passed in the collate_fn.
multiprocessing.set_start_method(orig_spawn, force=True)
super().__init__(name, X_dict=X_entity_dict, uid="guids")
@classmethod
def build_data_entity_dicts(cls, save_dataset_name, X_storage):
"""Returns the X_dict for the entity data.
Args:
save_dataset_name: memmap file name with entity data
X_storage: memmap storage type
        Returns: Dict of entity inputs
"""
X_dict = {
"entity_input_ids": [],
"entity_token_type_ids": [],
"entity_attention_mask": [],
"entity_to_mask": [],
}
mmap_label_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
X_dict["entity_input_ids"] = torch.from_numpy(
mmap_label_file["entity_input_ids"]
)
X_dict["entity_token_type_ids"] = torch.from_numpy(
mmap_label_file["entity_token_type_ids"]
)
X_dict["entity_attention_mask"] = torch.from_numpy(
mmap_label_file["entity_attention_mask"]
)
X_dict["entity_to_mask"] = torch.from_numpy(mmap_label_file["entity_to_mask"])
return X_dict
def __getitem__(self, index):
r"""Get item by index.
Args:
            index (int): The index of the item.
Returns:
Tuple[Dict[str, Any], Dict[str, Tensor]]: Tuple of x_dict and y_dict
"""
x_dict = {name: feature[index] for name, feature in self.X_dict.items()}
return x_dict
def __getstate__(self):
state = self.__dict__.copy()
del state["X_dict"]
del state["Y_dict"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
return state
def __repr__(self):
return f"Bootleg Entity Dataset. Data at {self.save_entity_dataset_name}."
class CandGenContextDataset(CandGenDataset):
"""CandGen Dataset class for context encoding.
Args:
main_args: input config
name: internal dataset name
dataset: dataset file
use_weak_label: whether to use weakly labeled mentions or not
tokenizer: sentence tokenizer
entity_symbols: entity database class
dataset_threads: number of threads to use
split: data split
is_bert: is the tokenizer a BERT or not
dataset_range: dataset range for subsetting (used in candidate generation)
Returns:
"""
def __init__(
self,
main_args,
name,
dataset,
use_weak_label,
tokenizer,
entity_symbols,
dataset_threads,
split="test",
is_bert=True,
dataset_range=None,
):
super(CandGenContextDataset, self).__init__(
main_args=main_args,
name=name,
dataset=dataset,
use_weak_label=use_weak_label,
tokenizer=tokenizer,
entity_symbols=entity_symbols,
dataset_threads=dataset_threads,
split=split,
is_bert=is_bert,
)
self.X_entity_dict = None
self.Y_dict = None
if dataset_range is not None:
self.data_range = dataset_range
else:
self.data_range = list(range(len(next(iter(self.X_dict.values())))))
def __getitem__(self, index):
r"""Get item by index.
Args:
            index (int): The index of the item.
Returns:
Tuple[Dict[str, Any], Dict[str, Tensor]]: Tuple of x_dict and y_dict
"""
index = self.data_range[index]
x_dict = {name: feature[index] for name, feature in self.X_dict.items()}
return x_dict
def __getstate__(self):
"""Get state method"""
state = self.__dict__.copy()
del state["X_dict"]
return state
def __setstate__(self, state):
"""Set state method"""
self.__dict__.update(state)
self.X_dict, _ = self.build_data_dicts(
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
)
return state
def __repr__(self):
"""Repr method"""
return f"Bootleg Context Dataset. Data at {self.save_dataset_name}. "
| bootleg-master | cand_gen/dataset.py |
"""Bootleg run command."""
import argparse
import logging
import os
import subprocess
import sys
from copy import copy
import emmental
import torch
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from rich.logging import RichHandler
from transformers import AutoTokenizer
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.data import get_slicedatasets
from bootleg.symbols.constants import DEV_SPLIT, TEST_SPLIT, TRAIN_SPLIT
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import data_utils
from bootleg.utils.model_utils import count_parameters
from bootleg.utils.utils import (
dump_yaml_file,
load_yaml_file,
recurse_redict,
write_to_file,
)
from cand_gen.data import get_dataloaders
from cand_gen.task_config import CANDGEN_TASK
from cand_gen.tasks import candgen_task
from cand_gen.utils.parser.parser_utils import parse_boot_and_emm_args
logger = logging.getLogger(__name__)
def parse_cmdline_args():
"""Takes an input config file and parses it into the correct subdictionary
groups for the model.
    Returns:
        parsed Dict config
        path to the original config file
"""
# Parse cmdline args to specify config and mode
cli_parser = argparse.ArgumentParser(
description="Bootleg CLI Config",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cli_parser.add_argument(
"--config_script",
type=str,
default="",
help="Should mimic the config_args found in utils/parser/bootleg_args.py with parameters you want to override."
"You can also override the parameters from config_script by passing them in directly after config_script. "
"E.g., --train_config.batch_size 5",
)
cli_parser.add_argument("--local_rank", type=int, default=-1)
# you can add other args that will override those in the config_script
# parse_known_args returns 'args' that are the same as what parse_args() returns
# and 'unknown' which are args that the parser doesn't recognize but you want to keep.
# 'unknown' are what we pass on to our override any args from the second phase of arg parsing from the json file
cli_args, unknown = cli_parser.parse_known_args()
if len(cli_args.config_script) == 0:
raise ValueError("You must pass a config script via --config.")
config = parse_boot_and_emm_args(cli_args.config_script, unknown)
# Modify the local rank param from the cli args
config.learner_config.local_rank = int(os.getenv("LOCAL_RANK", cli_args.local_rank))
return config, cli_args.config_script
def setup(config, run_config_path=None):
"""
    Set up the distributed backend and save configuration files.
Args:
config: config
run_config_path: path for original run config
Returns:
"""
# torch.multiprocessing.set_sharing_strategy("file_system")
# spawn method must be fork to work with Meta.config
torch.multiprocessing.set_start_method("fork", force=True)
"""
ulimit -n 500000
python3 -m torch.distributed.launch --nproc_per_node=2 bootleg/run.py --config_script ...
"""
log_level = logging.getLevelName(config.run_config.log_level.upper())
emmental.init(
log_dir=config["meta_config"]["log_path"],
config=config,
use_exact_log_path=config["meta_config"]["use_exact_log_path"],
local_rank=config.learner_config.local_rank,
level=log_level,
)
log = logging.getLogger()
# Remove streaming handlers and use rich
log.handlers = [h for h in log.handlers if not type(h) is logging.StreamHandler]
log.addHandler(RichHandler())
# Set up distributed backend
emmental.Meta.init_distributed_backend()
cmd_msg = " ".join(sys.argv)
# Recast to dictionaries for emmental - will remove Dotteddicts
emmental.Meta.config = recurse_redict(copy(emmental.Meta.config))
    # Log configuration into files
if config.learner_config.local_rank in [0, -1]:
write_to_file(f"{emmental.Meta.log_path}/cmd.txt", cmd_msg)
dump_yaml_file(
f"{emmental.Meta.log_path}/parsed_config.yaml", emmental.Meta.config
)
# Dump the run config (does not contain defaults)
if run_config_path is not None:
dump_yaml_file(
f"{emmental.Meta.log_path}/run_config.yaml",
load_yaml_file(run_config_path),
)
log_rank_0_info(logger, f"COMMAND: {cmd_msg}")
log_rank_0_info(
logger, f"Saving config to {emmental.Meta.log_path}/parsed_config.yaml"
)
git_hash = "Not able to retrieve git hash"
try:
git_hash = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=tformat:%h-%ad", "--date=short"]
).strip()
except subprocess.CalledProcessError:
pass
log_rank_0_info(logger, f"Git Hash: {git_hash}")
def configure_optimizer():
"""Configures the optimizer for Bootleg. By default, we use
SparseDenseAdam. We always change the parameter group for layer norms
following standard BERT finetuning methods.
Returns:
"""
# Specify parameter group for Adam BERT
def grouped_parameters(model):
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
return [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": emmental.Meta.config["learner_config"][
"optimizer_config"
]["l2"],
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
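    # Register the grouping function with Emmental's optimizer config so weight decay is applied to all parameters except biases and LayerNorm weights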
emmental.Meta.config["learner_config"]["optimizer_config"][
"parameters"
] = grouped_parameters
return
# TODO: optimize slices so we split them based on max aliases (save A LOT of memory)
def run_model(config, run_config_path=None):
"""
Main run method for Emmental Bootleg models.
Args:
config: parsed model config
run_config_path: original config path (for saving)
Returns:
"""
# Set up distributed backend and save configuration files
setup(config, run_config_path)
# Load entity symbols
log_rank_0_info(logger, "Loading entity symbols...")
entity_symbols = EntitySymbols.load_from_cache(
load_dir=os.path.join(
config.data_config.entity_dir, config.data_config.entity_map_dir
),
alias_cand_map_dir=config.data_config.alias_cand_map,
alias_idx_dir=config.data_config.alias_idx_map,
)
# Create tasks
tasks = [CANDGEN_TASK]
# Create splits for data loaders
data_splits = [TRAIN_SPLIT, DEV_SPLIT, TEST_SPLIT]
    # Slices are only used for eval, so we only build slice datasets for the dev/test splits
slice_splits = [DEV_SPLIT, TEST_SPLIT]
# Create tokenizer
context_tokenizer = AutoTokenizer.from_pretrained(
config.data_config.word_embedding.bert_model
)
data_utils.add_special_tokens(context_tokenizer)
# Gets dataloaders
dataloaders = get_dataloaders(
config,
tasks,
data_splits,
entity_symbols,
context_tokenizer,
)
slice_datasets = get_slicedatasets(config, slice_splits, entity_symbols)
configure_optimizer()
# Create models and add tasks
log_rank_0_info(logger, "Starting Bootleg Model")
model_name = "Bootleg"
model = EmmentalModel(name=model_name)
model.add_task(
candgen_task.create_task(
config,
len(context_tokenizer),
slice_datasets,
)
)
# Print param counts
log_rank_0_debug(logger, "PARAMS WITH GRAD\n" + "=" * 30)
total_params = count_parameters(model, requires_grad=True, logger=logger)
log_rank_0_info(logger, f"===> Total Params With Grad: {total_params}")
log_rank_0_debug(logger, "PARAMS WITHOUT GRAD\n" + "=" * 30)
total_params = count_parameters(model, requires_grad=False, logger=logger)
log_rank_0_info(logger, f"===> Total Params Without Grad: {total_params}")
# Load the best model from the pretrained model
if config["model_config"]["model_path"] is not None:
model.load(config["model_config"]["model_path"])
# Train model
emmental_learner = EmmentalLearner()
emmental_learner._set_optimizer(model)
# Save first checkpoint
if config.learner_config.local_rank in [0, -1]:
model.save(f"{emmental.Meta.log_path}/checkpoint_0.0.model.pth")
emmental_learner.learn(model, dataloaders)
if config.learner_config.local_rank in [0, -1]:
model.save(f"{emmental.Meta.log_path}/last_model.pth")
# If just finished training a model
if config.learner_config.local_rank in [0, -1]:
scores = model.score(dataloaders[1:])
# Save metrics and models
log_rank_0_info(logger, f"Saving metrics to {emmental.Meta.log_path}")
log_rank_0_info(logger, f"Metrics: {scores}")
scores["log_path"] = emmental.Meta.log_path
write_to_file(f"{emmental.Meta.log_path}/train_metrics.txt", scores)
else:
scores = {}
return scores
if __name__ == "__main__":
config, run_config_path = parse_cmdline_args()
run_model(config, run_config_path)
| bootleg-master | cand_gen/train.py |
"""Bootleg run command."""
import argparse
import logging
import os
import subprocess
import sys
from collections import defaultdict
from copy import copy
from pathlib import Path
import emmental
import faiss
import numpy as np
import torch
import ujson
from emmental.model import EmmentalModel
from rich.logging import RichHandler
from rich.progress import track
from transformers import AutoTokenizer
from bootleg import log_rank_0_info
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import data_utils
from bootleg.utils.utils import (
dump_yaml_file,
load_yaml_file,
recurse_redict,
write_to_file,
)
from cand_gen.data import get_context_dataloader, get_entity_dataloader
from cand_gen.task_config import CANDGEN_TASK
from cand_gen.tasks import context_gen_task, entity_gen_task
from cand_gen.utils.parser.parser_utils import parse_boot_and_emm_args
logger = logging.getLogger(__name__)
def parse_cmdline_args():
"""Takes an input config file and parses it into the correct subdictionary
groups for the model.
    Returns:
        parsed Dict config
        path to the original config file
        whether to only dump entity embeddings
        optional path to already-dumped entity embeddings
        top-k number of candidates to retrieve
"""
# Parse cmdline args to specify config and mode
cli_parser = argparse.ArgumentParser(
description="Bootleg CLI Config",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cli_parser.add_argument(
"--config_script",
type=str,
default="",
help="Should mimic the config_args found in utils/parser/bootleg_args.py with parameters you want to override."
"You can also override the parameters from config_script by passing them in directly after config_script. "
"E.g., --train_config.batch_size 5",
)
cli_parser.add_argument(
"--entity_embs_only",
action="store_true",
help="If true will only generate a static embedding file for all entity embeddings. Will not run with context"
"to generate candidates",
)
cli_parser.add_argument(
"--entity_embs_path",
type=str,
default=None,
help="If already dumped entity embeddings, can provide path here",
)
cli_parser.add_argument(
"--topk",
default=10,
type=int,
help="TopK entities to retrieve. Use spaces to deliminate multiple topks",
)
# you can add other args that will override those in the config_script
# parse_known_args returns 'args' that are the same as what parse_args() returns
# and 'unknown' which are args that the parser doesn't recognize but you want to keep.
# 'unknown' are what we pass on to our override any args from the second phase of arg parsing from the json file
cli_args, unknown = cli_parser.parse_known_args()
if len(cli_args.config_script) == 0:
raise ValueError("You must pass a config script via --config.")
config = parse_boot_and_emm_args(cli_args.config_script, unknown)
# Modify the local rank param from the cli args
config.learner_config.local_rank = int(os.getenv("LOCAL_RANK", -1))
return (
config,
cli_args.config_script,
cli_args.entity_embs_only,
cli_args.entity_embs_path,
cli_args.topk,
)
def setup(config, run_config_path=None):
"""
    Set up the distributed backend and save configuration files.
Args:
config: config
run_config_path: path for original run config
Returns:
"""
torch.multiprocessing.set_sharing_strategy("file_system")
# spawn method must be fork to work with Meta.config
torch.multiprocessing.set_start_method("fork", force=True)
"""
ulimit -n 500000
python3 -m torch.distributed.launch --nproc_per_node=2 bootleg/run.py --config_script ...
"""
log_level = logging.getLevelName(config.run_config.log_level.upper())
emmental.init(
log_dir=config["meta_config"]["log_path"],
config=config,
use_exact_log_path=config["meta_config"]["use_exact_log_path"],
local_rank=config.learner_config.local_rank,
level=log_level,
)
log = logging.getLogger()
# Remove streaming handlers and use rich
log.handlers = [h for h in log.handlers if not type(h) is logging.StreamHandler]
log.addHandler(RichHandler())
# Set up distributed backend
emmental.Meta.init_distributed_backend()
cmd_msg = " ".join(sys.argv)
# Recast to dictionaries for emmental - will remove Dotteddicts
emmental.Meta.config = recurse_redict(copy(emmental.Meta.config))
    # Log configuration into files
if config.learner_config.local_rank in [0, -1]:
write_to_file(f"{emmental.Meta.log_path}/cmd.txt", cmd_msg)
dump_yaml_file(
f"{emmental.Meta.log_path}/parsed_config.yaml", emmental.Meta.config
)
# Dump the run config (does not contain defaults)
if run_config_path is not None:
dump_yaml_file(
f"{emmental.Meta.log_path}/run_config.yaml",
load_yaml_file(run_config_path),
)
log_rank_0_info(logger, f"COMMAND: {cmd_msg}")
log_rank_0_info(
logger, f"Saving config to {emmental.Meta.log_path}/parsed_config.yaml"
)
git_hash = "Not able to retrieve git hash"
try:
git_hash = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=tformat:%h-%ad", "--date=short"]
).strip()
except subprocess.CalledProcessError:
pass
log_rank_0_info(logger, f"Git Hash: {git_hash}")
def gen_entity_embeddings(config, context_tokenizer, entity_symbols):
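    # Run the entity encoder over every entity in the database and return its predictions (one embedding per entity) along with the dataloader and model for reuse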
# Create tasks
tasks = [CANDGEN_TASK]
# Gets dataloader - will set the split to be TEST even though there is no eval file used to generate entities
dataloader = get_entity_dataloader(
config,
tasks,
entity_symbols,
context_tokenizer,
)
# Create models and add tasks
log_rank_0_info(logger, "Starting Bootleg Model")
model_name = "Bootleg"
model = EmmentalModel(name=model_name)
model.add_task(
entity_gen_task.create_task(
config,
len(context_tokenizer),
)
)
# Load the best model from the pretrained model
if config["model_config"]["model_path"] is not None:
model.load(config["model_config"]["model_path"])
# This happens inside EmmentalLearner for training
if (
config["learner_config"]["local_rank"] == -1
and config["model_config"]["dataparallel"]
):
model._to_dataparallel()
preds = model.predict(dataloader, return_preds=True, return_action_outputs=False)
return preds, dataloader, model
def gen_context_embeddings(
config, context_tokenizer, entity_symbols, dataset_range=None, model=None
):
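    # Encode the sentence contexts for the given dataset range; a previously built model can be passed in so repeated chunks do not reload the encoder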
# Create tasks
tasks = [CANDGEN_TASK]
# Gets dataloader - will set the split to be TEST even though there is no eval file used to generate entities
dataloader = get_context_dataloader(
config, tasks, entity_symbols, context_tokenizer, dataset_range
)
# Create models and add tasks
if model is None:
log_rank_0_info(logger, "Starting Bootleg Model")
model_name = "Bootleg"
model = EmmentalModel(name=model_name)
model.add_task(
context_gen_task.create_task(
config,
len(context_tokenizer),
)
)
# Load the best model from the pretrained model
if config["model_config"]["model_path"] is not None:
model.load(config["model_config"]["model_path"])
# This happens inside EmmentalLearner for training
if (
config["learner_config"]["local_rank"] == -1
and config["model_config"]["dataparallel"]
):
model._to_dataparallel()
preds = model.predict(dataloader, return_preds=True, return_action_outputs=False)
return preds, dataloader, model
def run_model(
config, run_config_path=None, entity_embs_only=False, entity_embs_path=None, topk=30
):
"""
Main run method for Emmental Bootleg model.
    Args:
        config: parsed model config
        run_config_path: original config path (for saving)
        entity_embs_only: whether to only generate and save entity embeddings
        entity_embs_path: optional path to previously dumped entity embeddings
        topk: number of candidates to retrieve per mention
    Returns:
        path to the entity embedding file if entity_embs_only, otherwise the candidate and metric file paths
"""
# Set up distributed backend and save configuration files
setup(config, run_config_path)
# Load entity symbols
log_rank_0_info(logger, "Loading entity symbols...")
entity_symbols = EntitySymbols.load_from_cache(
load_dir=os.path.join(
config.data_config.entity_dir, config.data_config.entity_map_dir
),
alias_cand_map_dir=config.data_config.alias_cand_map,
alias_idx_dir=config.data_config.alias_idx_map,
)
qid2eid = entity_symbols.get_qid2eid_dict()
eid2qid = {v: k for k, v in qid2eid.items()}
assert len(qid2eid) == len(eid2qid), "Duplicate EIDs detected"
# GENERATE ENTITY EMBEDDINGS
# Create tokenizer
context_tokenizer = AutoTokenizer.from_pretrained(
config.data_config.word_embedding.bert_model
)
data_utils.add_special_tokens(context_tokenizer)
out_emb_file = entity_embs_path
if entity_embs_path is None:
log_rank_0_info(
logger, "Gathering embeddings for all entities. Will save for reuse."
)
preds, _, _ = gen_entity_embeddings(config, context_tokenizer, entity_symbols)
final_out_emb_file = os.path.join(
emmental.Meta.log_path, "entity_embeddings.npy"
)
log_rank_0_info(logger, f"Saving entity embeddings into {final_out_emb_file}")
log_rank_0_info(
logger,
"Use the entity profile's ```get_eid``` command to get the emb ids for QIDs",
)
np.save(final_out_emb_file, np.array(preds["probs"][CANDGEN_TASK]))
out_emb_file = final_out_emb_file
del preds
else:
assert Path(entity_embs_path).exists(), f"{entity_embs_path} must exist"
if entity_embs_only:
return out_emb_file
log_rank_0_info(logger, "Loading embeddings for cand gen.")
entity_embs = np.load(out_emb_file)
log_rank_0_info(logger, "Building index...")
if torch.cuda.device_count() > 0 and config["model_config"]["device"] >= 0:
if config["model_config"]["dataparallel"]:
print("DATAPARALLEL FAISS")
faiss_cpu_index = faiss.IndexFlatIP(entity_embs.shape[-1])
faiss_index = faiss.index_cpu_to_all_gpus(faiss_cpu_index)
else:
print("GPU FAISS")
faiss_cpu_index = faiss.IndexFlatIP(entity_embs.shape[-1])
res = faiss.StandardGpuResources()
faiss_index = faiss.index_cpu_to_gpu(res, 0, faiss_cpu_index)
else:
print("CPU FAISS")
faiss_index = faiss.IndexFlatIP(entity_embs.shape[-1])
faiss_index.add(entity_embs)
log_rank_0_info(logger, "Searching...")
recall_k = [1, 2, 5, 10, 20, 30, 40, 50]
total_cnt = 0
cnt_k = {i: 0 for i in recall_k}
# Make sure data is prepped
context_dataloader = get_context_dataloader(
config,
[CANDGEN_TASK],
entity_symbols,
context_tokenizer,
)
total_samples = len(context_dataloader.dataset)
topk_candidates = {}
context_model = None
nn_chunk = config["run_config"]["dump_preds_accumulation_steps"]
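    # Encode contexts and query the index in chunks of nn_chunk examples so prediction and search buffers stay bounded in memory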
for i in range(int(np.ceil(total_samples / nn_chunk))):
st = i * nn_chunk
ed = min((i + 1) * nn_chunk, total_samples)
context_preds, context_dataloader, context_model = gen_context_embeddings(
config,
context_tokenizer,
entity_symbols,
dataset_range=list(range(st, ed)),
model=context_model,
)
res = {
"context_ids": context_preds["uids"][CANDGEN_TASK],
"context_features": np.array(context_preds["probs"][CANDGEN_TASK]),
}
        # Query the index for the top-k nearest entity embeddings for each context embedding in the chunk
D, Is = faiss_index.search(res["context_features"], topk)
for j in range(Is.shape[0]):
# No need to offset by st+j as the range offset is accounted for in dataset
example = context_dataloader.dataset[j]
sent_id = int(example["sent_idx"])
alias_id = int(example["alias_orig_list_pos"])
gt_eid = int(example["for_dump_gold_eid"])
gt = eid2qid.get(gt_eid, "Q-1")
topk_nn = [eid2qid.get(k, "Q-1") for k in Is[j]]
assert tuple([sent_id, alias_id]) not in topk_candidates
topk_candidates[tuple([sent_id, alias_id])] = [
sent_id,
alias_id,
gt,
topk_nn[:topk],
D[j].tolist()[:topk],
]
total_cnt += 1
try:
idx = topk_nn.index(gt)
for ll in recall_k:
if idx < ll:
cnt_k[ll] += 1
except ValueError:
pass
assert len(topk_candidates) == total_samples, "Missing samples"
for k in recall_k:
cnt_k[k] /= total_cnt
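    # cnt_k[k] is now recall@k: the fraction of mentions whose gold entity appears in the top-k retrieved candidates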
print(cnt_k, total_cnt)
# Get test dataset filename
file_name = Path(config.data_config.test_dataset.file).stem
metrics_file = Path(emmental.Meta.log_path) / f"{file_name}_candgen_metrics.txt"
write_to_file(
metrics_file,
cnt_k,
)
sent2output = defaultdict(list)
for (sent_id, alias_id), v in track(
topk_candidates.items(), description="Grouping by sentence"
):
sent2output[sent_id].append(v)
sent2output = dict(sent2output)
for sent_id, v in track(sent2output.items(), description="Sorting sentences"):
v = sorted(v, key=lambda x: x[1])
sent2output[sent_id] = v
candidates_file = (
Path(emmental.Meta.log_path) / f"{file_name}_{topk}_candidates.jsonl"
)
log_rank_0_info(logger, f"Saving to {candidates_file}")
with open(candidates_file, "w") as f:
for sent_id, list_of_values in sent2output.items():
sent_ids, alias_ids, gts, cands, probs = list(zip(*list_of_values))
json_obj = {
"sent_idx_unq": sent_id,
"alias_idxs": list(alias_ids),
"qids": list(gts),
"cands": list(cands),
"probs": list(probs),
}
f.write(ujson.dumps(json_obj) + "\n")
return candidates_file, metrics_file
if __name__ == "__main__":
(
config,
run_config_path,
entity_embs_only,
entity_embs_path,
topk,
) = parse_cmdline_args()
run_model(config, run_config_path, entity_embs_only, entity_embs_path, topk)
| bootleg-master | cand_gen/eval.py |
"""Data"""
import logging
import os
from emmental import Meta
from emmental.data import EmmentalDataLoader, emmental_collate_fn
from torch.utils.data import DistributedSampler, RandomSampler
from bootleg import log_rank_0_info
from bootleg.data import bootleg_collate_fn
from cand_gen.dataset import CandGenContextDataset, CandGenDataset, CandGenEntityDataset
from cand_gen.task_config import BATCH_CANDS_LABEL
logger = logging.getLogger(__name__)
def get_dataloaders(
args,
tasks,
splits,
entity_symbols,
tokenizer,
):
"""Get the dataloaders.
Args:
args: main args
tasks: task names
        splits: data splits to generate dataloaders for
        entity_symbols: entity symbols
        tokenizer: sentence tokenizer
Returns: list of dataloaders
"""
task_to_label_dict = {t: BATCH_CANDS_LABEL for t in tasks}
is_bert = True
datasets = {}
for split in splits:
dataset_path = os.path.join(
args.data_config.data_dir, args.data_config[f"{split}_dataset"].file
)
datasets[split] = CandGenDataset(
main_args=args,
name="Bootleg",
dataset=dataset_path,
use_weak_label=args.data_config[f"{split}_dataset"].use_weak_label,
tokenizer=tokenizer,
entity_symbols=entity_symbols,
dataset_threads=args.run_config.dataset_threads,
split=split,
is_bert=is_bert,
)
dataloaders = []
for split, dataset in datasets.items():
if split in args.learner_config.train_split:
dataset_sampler = (
RandomSampler(dataset)
if Meta.config["learner_config"]["local_rank"] == -1
else DistributedSampler(
dataset, seed=Meta.config["meta_config"]["seed"]
)
)
else:
dataset_sampler = None
if Meta.config["learner_config"]["local_rank"] != -1:
log_rank_0_info(
logger,
"You are using distributed computing for eval. We are not using a distributed sampler. "
"Please use DataParallel and not DDP.",
)
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=dataset,
sampler=dataset_sampler,
split=split,
collate_fn=bootleg_collate_fn,
batch_size=args.train_config.batch_size
if split in args.learner_config.train_split
or args.run_config.eval_batch_size is None
else args.run_config.eval_batch_size,
num_workers=args.run_config.dataloader_threads,
pin_memory=False,
)
)
log_rank_0_info(
logger,
f"Built dataloader for {split} set with {len(dataset)} and {args.run_config.dataloader_threads} threads "
f"samples (Shuffle={split in args.learner_config.train_split}, "
f"Batch size={dataloaders[-1].batch_size}).",
)
return dataloaders
def get_entity_dataloader(
args,
tasks,
entity_symbols,
tokenizer,
):
"""Get the dataloaders.
Args:
args: main args
tasks: task names
entity_symbols: entity symbols
Returns: list of dataloaders
"""
task_to_label_dict = {t: None for t in tasks}
split = "test"
dataset_path = os.path.join(
args.data_config.data_dir, args.data_config[f"{split}_dataset"].file
)
dataset = CandGenEntityDataset(
main_args=args,
name="Bootleg",
dataset=dataset_path,
tokenizer=tokenizer,
entity_symbols=entity_symbols,
dataset_threads=args.run_config.dataset_threads,
split=split,
)
dataset_sampler = None
if Meta.config["learner_config"]["local_rank"] != -1:
log_rank_0_info(
logger,
"You are using distributed computing for eval. We are not using a distributed sampler. "
"Please use DataParallel and not DDP.",
)
dataloader = EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=dataset,
sampler=dataset_sampler,
split=split,
collate_fn=emmental_collate_fn,
batch_size=args.train_config.batch_size
if split in args.learner_config.train_split
or args.run_config.eval_batch_size is None
else args.run_config.eval_batch_size,
num_workers=args.run_config.dataloader_threads,
pin_memory=False,
)
log_rank_0_info(
logger,
f"Built dataloader for {split} set with {len(dataset)} and {args.run_config.dataloader_threads} threads "
f"samples (Shuffle={split in args.learner_config.train_split}, "
f"Batch size={dataloader.batch_size}).",
)
return dataloader
def get_context_dataloader(
args,
tasks,
entity_symbols,
tokenizer,
dataset_range=None,
):
"""Get the dataloaders.
Args:
args: main args
tasks: task names
entity_symbols: entity symbols
tokenizer: tokenizer
dataset_range: the subset of the dataset to wrap
    Returns: context dataloader
"""
task_to_label_dict = {t: None for t in tasks}
split = "test"
is_bert = True
dataset_path = os.path.join(
args.data_config.data_dir, args.data_config[f"{split}_dataset"].file
)
dataset = CandGenContextDataset(
main_args=args,
name="Bootleg",
dataset=dataset_path,
use_weak_label=args.data_config[f"{split}_dataset"].use_weak_label,
tokenizer=tokenizer,
entity_symbols=entity_symbols,
dataset_threads=args.run_config.dataset_threads,
split=split,
is_bert=is_bert,
dataset_range=dataset_range,
)
dataset_sampler = None
if Meta.config["learner_config"]["local_rank"] != -1:
log_rank_0_info(
logger,
"You are using distributed computing for eval. We are not using a distributed sampler. "
"Please use DataParallel and not DDP.",
)
dataloader = EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=dataset,
sampler=dataset_sampler,
split=split,
collate_fn=emmental_collate_fn,
batch_size=args.train_config.batch_size
if split in args.learner_config.train_split
or args.run_config.eval_batch_size is None
else args.run_config.eval_batch_size,
num_workers=args.run_config.dataloader_threads,
pin_memory=False,
)
log_rank_0_info(
logger,
f"Built dataloader for {split} set with {len(dataset)} and {args.run_config.dataloader_threads} threads "
f"samples (Shuffle={split in args.learner_config.train_split}, "
f"Batch size={dataloader.batch_size}).",
)
return dataloader
| bootleg-master | cand_gen/data.py |
import torch
import torch.nn.functional as F
from emmental.scorer import Scorer
from emmental.task import Action, EmmentalTask
from torch import nn
from transformers import AutoModel
from bootleg.layers.bert_encoder import Encoder
from bootleg.scorer import BootlegSlicedScorer
from cand_gen.task_config import CANDGEN_TASK
class DisambigLoss:
def __init__(self, normalize, temperature):
self.normalize = normalize
self.temperature = temperature
def batch_cands_disambig_output(self, intermediate_output_dict):
"""Function to return the probs for a task in Emmental.
Args:
intermediate_output_dict: output dict from Emmental task flow
        Returns: NED probabilities over the in-batch candidate entities (num mentions x num batch entities)
"""
out = intermediate_output_dict["context_encoder"][0]
ent_out = intermediate_output_dict["entity_encoder"][0]
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
ent_out = F.normalize(ent_out, p=2, dim=-1)
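        # In-batch scoring: dot product between every context embedding and every entity embedding in the batch, scaled by temperature, then softmaxed over the batch entities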
score = torch.mm(out, ent_out.t()) / self.temperature
return F.softmax(score, dim=-1)
def batch_cands_disambig_loss(self, intermediate_output_dict, Y):
"""Returns the entity disambiguation loss on prediction heads.
Args:
            intermediate_output_dict: output dict from the Emmental task flow
Y: gold labels
Returns: loss
"""
# Grab the first value of training (when doing distributed training, we will have one per process)
training = intermediate_output_dict["context_encoder"][1].item()
assert type(training) is bool
out = intermediate_output_dict["context_encoder"][0]
ent_out = intermediate_output_dict["entity_encoder"][0]
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
ent_out = F.normalize(ent_out, p=2, dim=-1)
score = torch.mm(out, ent_out.t()) / self.temperature
labels = Y
masked_labels = labels.reshape(out.shape[0])
if not training:
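            # At eval time, labels of -2 mark mentions that should not be scored; remap them to -1 so CrossEntropyLoss (ignore_index=-1) skips them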
label_mask = labels == -2
masked_labels = torch.where(
~label_mask, labels, torch.ones_like(labels) * -1
)
masked_labels = masked_labels.reshape(out.shape[0])
temp = nn.CrossEntropyLoss(ignore_index=-1)(score, masked_labels.long())
return temp
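# A rough sketch of the in-batch objective above (shapes are illustrative, not exact):
#   out:     (num_mentions, hidden)        context embeddings for the batch
#   ent_out: (num_batch_entities, hidden)  entity embeddings for the batch candidates
#   score[i, j] = <out[i], ent_out[j]> / temperature
# Each mention is trained with cross entropy to pick its gold entity out of all entities
# in the batch (in-batch negatives); labels of -1 are ignored.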
def create_task(args, len_context_tok, slice_datasets=None):
"""Returns an EmmentalTask for named entity disambiguation (NED).
Args:
args: args
        len_context_tok: number of tokens in the tokenizer
slice_datasets: slice datasets used in scorer (default None)
Returns: EmmentalTask for NED
"""
    disambig_loss = DisambigLoss(
        args.model_config.normalize, args.model_config.temperature
    )
    loss_func = disambig_loss.batch_cands_disambig_loss
    output_func = disambig_loss.batch_cands_disambig_output
# Create sentence encoder
context_model = AutoModel.from_pretrained(
args.data_config.word_embedding.bert_model
)
context_model.encoder.layer = context_model.encoder.layer[
: args.data_config.word_embedding.context_layers
]
context_model.resize_token_embeddings(len_context_tok)
context_model = Encoder(context_model, args.model_config.hidden_size)
entity_model = AutoModel.from_pretrained(args.data_config.word_embedding.bert_model)
entity_model.encoder.layer = entity_model.encoder.layer[
: args.data_config.word_embedding.entity_layers
]
entity_model.resize_token_embeddings(len_context_tok)
entity_model = Encoder(entity_model, args.model_config.hidden_size)
sliced_scorer = BootlegSlicedScorer(
args.data_config.train_in_candidates, slice_datasets
)
# Create module pool and combine with embedding module pool
module_pool = nn.ModuleDict(
{
"context_encoder": context_model,
"entity_encoder": entity_model,
}
)
# Create task flow
task_flow = [
Action(
name="entity_encoder",
module="entity_encoder",
inputs=[
("_input_", "entity_cand_input_ids"),
("_input_", "entity_cand_attention_mask"),
("_input_", "entity_cand_token_type_ids"),
],
),
Action(
name="context_encoder",
module="context_encoder",
inputs=[
("_input_", "input_ids"),
("_input_", "token_type_ids"),
("_input_", "attention_mask"),
],
),
]
return EmmentalTask(
name=CANDGEN_TASK,
module_pool=module_pool,
task_flow=task_flow,
loss_func=loss_func,
output_func=output_func,
require_prob_for_eval=False,
require_pred_for_eval=True,
# action_outputs are used to stitch together sentence fragments
action_outputs=[
("_input_", "sent_idx"),
("_input_", "subsent_idx"),
("_input_", "alias_orig_list_pos"),
("_input_", "for_dump_gold_cand_K_idx_train"),
("entity_encoder", 0), # entity embeddings
],
scorer=Scorer(
customize_metric_funcs={
f"{CANDGEN_TASK}_scorer": sliced_scorer.bootleg_score
}
),
)
| bootleg-master | cand_gen/tasks/candgen_task.py |
import torch.nn.functional as F
from emmental.scorer import Scorer
from emmental.task import Action, EmmentalTask
from torch import nn
from transformers import AutoModel
from bootleg.layers.bert_encoder import Encoder
from cand_gen.task_config import CANDGEN_TASK
class ContextGenOutput:
"""Context gen for output."""
def __init__(self, normalize):
"""Context gen for output initializer."""
self.normalize = normalize
def entity_output_func(self, intermediate_output_dict):
"""Context output func."""
ctx_out = intermediate_output_dict["context_encoder"][0]
if self.normalize:
ctx_out = F.normalize(ctx_out, p=2, dim=-1)
return ctx_out
def create_task(args, len_context_tok):
"""Returns an EmmentalTask for a forward pass through the entity encoder only.
Args:
args: args
len_context_tok: number of tokens in the tokenizer
    Returns: EmmentalTask for context embedding extraction
"""
# Create sentence encoder
context_model = AutoModel.from_pretrained(
args.data_config.word_embedding.bert_model
)
context_model.encoder.layer = context_model.encoder.layer[
: args.data_config.word_embedding.context_layers
]
context_model.resize_token_embeddings(len_context_tok)
context_model = Encoder(context_model, args.model_config.hidden_size)
# Create module pool and combine with embedding module pool
module_pool = nn.ModuleDict(
{
"context_encoder": context_model,
}
)
# Create task flow
task_flow = [
Action(
name="context_encoder",
module="context_encoder",
inputs=[
("_input_", "input_ids"),
("_input_", "token_type_ids"),
("_input_", "attention_mask"),
],
),
]
return EmmentalTask(
name=CANDGEN_TASK,
module_pool=module_pool,
task_flow=task_flow,
loss_func=None,
output_func=ContextGenOutput(args.model_config.normalize).entity_output_func,
require_prob_for_eval=False,
require_pred_for_eval=True,
scorer=Scorer(),
)
| bootleg-master | cand_gen/tasks/context_gen_task.py |
import torch.nn.functional as F
from emmental.scorer import Scorer
from emmental.task import Action, EmmentalTask
from torch import nn
from transformers import AutoModel
from bootleg.layers.bert_encoder import Encoder
from cand_gen.task_config import CANDGEN_TASK
class EntityGenOutput:
"""Entity gen for output."""
def __init__(self, normalize):
"""Entity gen for output initializer."""
self.normalize = normalize
def entity_output_func(self, intermediate_output_dict):
"""Entity output func."""
ent_out = intermediate_output_dict["entity_encoder"][0]
if self.normalize:
ent_out = F.normalize(ent_out, p=2, dim=-1)
return ent_out
def create_task(args, len_context_tok):
"""Returns an EmmentalTask for a forward pass through the entity encoder only.
Args:
args: args
len_context_tok: number of tokens in the tokenizer
Returns: EmmentalTask for entity embedding extraction
"""
entity_model = AutoModel.from_pretrained(args.data_config.word_embedding.bert_model)
entity_model.encoder.layer = entity_model.encoder.layer[
: args.data_config.word_embedding.entity_layers
]
entity_model.resize_token_embeddings(len_context_tok)
entity_model = Encoder(entity_model, args.model_config.hidden_size)
# Create module pool and combine with embedding module pool
module_pool = nn.ModuleDict(
{
"entity_encoder": entity_model,
}
)
# Create task flow
task_flow = [
Action(
name="entity_encoder",
module="entity_encoder",
inputs=[
("_input_", "entity_input_ids"),
("_input_", "entity_attention_mask"),
("_input_", "entity_token_type_ids"),
],
),
]
return EmmentalTask(
name=CANDGEN_TASK,
module_pool=module_pool,
task_flow=task_flow,
loss_func=None,
output_func=EntityGenOutput(args.model_config.normalize).entity_output_func,
require_prob_for_eval=False,
require_pred_for_eval=True,
scorer=Scorer(),
)
| bootleg-master | cand_gen/tasks/entity_gen_task.py |
"""
Merge contextual candidates for NED.
This file
1. Reads in raw wikipedia sentences from /lfs/raiders7/0/lorr1/sentences
2. Reads in map of WPID-Title-QID from /lfs/raiders7/0/lorr1/title_to_all_ids.jsonl
3. Computes frequencies for alias-QID over Wikipedia. Keeps only alias-QID mentions which occur > args.min_frequency
4. Merges alias-QID map with alias-QID map extracted from Wikidata
5. Saves alias-qid map as alias_to_qid_filter.json to args.data_dir
After this, run remove_bad_aliases.py
Example run command:
python3.6 -m contextual_embeddings.bootleg_data_prep.curate_aliases
"""
import argparse
import glob
import multiprocessing
import os
import shutil
import time
import numpy as np
import ujson
import ujson as json
from rich.progress import track
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
def get_arg_parser():
"""Get arg parser."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--contextual_cand_data",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203/files",
help="Contextual candidate files",
)
parser.add_argument(
"--entity_dump",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203/entity_db",
help="Entity dump",
)
parser.add_argument(
"--data_dir",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203",
help="Data files",
)
parser.add_argument(
"--out_subdir",
type=str,
default="text",
help="Save data dir",
)
parser.add_argument("--no_add_gold_qid_to_train", action="store_true")
parser.add_argument("--max_candidates", type=int, default=int(30))
parser.add_argument("--processes", type=int, default=int(1))
return parser
def init_process(entity_dump_f):
"""Multiprocessing initializer."""
global ed_global
ed_global = EntitySymbols.load_from_cache(load_dir=entity_dump_f)
def merge_data(
num_processes,
no_add_gold_qid_to_train,
max_candidates,
file_pairs,
entity_dump_f,
):
"""Merge contextual cand data."""
    # file_pairs is (input file, cand map file, output file, is_train)
# Chunk file for parallel writing
create_ex_indir = os.path.join(
os.path.dirname(file_pairs[0]), "_bootleg_temp_indir"
)
utils.ensure_dir(create_ex_indir)
create_ex_indir_cands = os.path.join(
os.path.dirname(file_pairs[0]), "_bootleg_temp_indir2"
)
utils.ensure_dir(create_ex_indir_cands)
create_ex_outdir = os.path.join(
os.path.dirname(file_pairs[0]), "_bootleg_temp_outdir"
)
utils.ensure_dir(create_ex_outdir)
print("Counting lines")
total_input = sum(1 for _ in open(file_pairs[0]))
total_input_cands = sum(1 for _ in open(file_pairs[1]))
assert (
total_input_cands == total_input
), f"{total_input} lines of orig data != {total_input_cands} of cand data"
chunk_input_size = int(np.ceil(total_input / num_processes))
total_input_from_chunks, input_files_dict = utils.chunk_file(
file_pairs[0], create_ex_indir, chunk_input_size
)
total_input_cands_from_chunks, input_files_cands_dict = utils.chunk_file(
file_pairs[1], create_ex_indir_cands, chunk_input_size
)
input_files = list(input_files_dict.keys())
input_cand_files = list(input_files_cands_dict.keys())
assert len(input_cand_files) == len(input_files)
input_file_lines = [input_files_dict[k] for k in input_files]
input_cand_file_lines = [input_files_cands_dict[k] for k in input_cand_files]
for p_l, p_r in zip(input_file_lines, input_cand_file_lines):
assert (
p_l == p_r
), f"The matching chunk files don't have matching sizes {p_l} versus {p_r}"
output_files = [
in_file_name.replace(create_ex_indir, create_ex_outdir)
for in_file_name in input_files
]
assert (
total_input == total_input_from_chunks
), f"Lengths of files {total_input} doesn't match {total_input_from_chunks}"
assert (
total_input_cands == total_input_cands_from_chunks
), f"Lengths of files {total_input_cands} doesn't match {total_input_cands_from_chunks}"
# file_pairs is input file, cand map file, output file, is_train
input_args = [
[
no_add_gold_qid_to_train,
max_candidates,
input_files[i],
input_file_lines[i],
input_cand_files[i],
output_files[i],
file_pairs[3],
]
for i in range(len(input_files))
]
pool = multiprocessing.Pool(
processes=num_processes, initializer=init_process, initargs=[entity_dump_f]
)
new_alias2qids = {}
total_seen = 0
total_dropped = 0
for res in pool.imap(merge_data_hlp, input_args, chunksize=1):
temp_alias2qids, seen, dropped = res
total_seen += seen
total_dropped += dropped
for k in temp_alias2qids:
assert k not in new_alias2qids, f"{k}"
new_alias2qids[k] = temp_alias2qids[k]
pool.close()
pool.join()
print(
f"Overall Recall for {file_pairs[0]}: {(total_seen - total_dropped) / total_seen} for seeing {total_seen}"
)
# Merge output files to final file
print("Merging output files")
with open(file_pairs[2], "wb") as outfile:
for filename in glob.glob(os.path.join(create_ex_outdir, "*")):
if filename == file_pairs[2]:
# don't want to copy the output into the output
continue
with open(filename, "rb") as readfile:
shutil.copyfileobj(readfile, outfile)
# Remove temporary files/folders
shutil.rmtree(create_ex_indir)
shutil.rmtree(create_ex_indir_cands)
shutil.rmtree(create_ex_outdir)
return new_alias2qids
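# --- Illustrative sketch (added for exposition; not part of the original script).
# merge_data above relies on utils.chunk_file to split the input and candidate files
# into line chunks of equal size so each worker processes matching pieces; this
# standalone helper shows the same idea without the bootleg dependency.
def _example_chunk_lines(path, out_dir, lines_per_chunk):
    """Write path's lines into numbered chunk files of at most lines_per_chunk lines each."""
    os.makedirs(out_dir, exist_ok=True)
    chunk_to_num_lines = {}

    def _flush(buf, idx):
        out_path = os.path.join(out_dir, f"chunk_{idx}.jsonl")
        with open(out_path, "w") as f_out:
            f_out.writelines(buf)
        chunk_to_num_lines[out_path] = len(buf)

    buffer, chunk_idx = [], 0
    with open(path) as f_in:
        for line in f_in:
            buffer.append(line)
            if len(buffer) == lines_per_chunk:
                _flush(buffer, chunk_idx)
                buffer, chunk_idx = [], chunk_idx + 1
    if buffer:
        _flush(buffer, chunk_idx)
    return chunk_to_num_lines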
def merge_data_hlp(args):
"""Merge data multiprocessing helper function."""
(
no_add_gold_qid_to_train,
max_candidates,
input_file,
total_input,
input_cand_file,
output_file,
is_train,
) = args
sent2cands = {}
sent2probs = {}
new_alias2qids = {}
with open(input_cand_file, "r") as f_in:
for line in track(f_in, total=total_input, description="Processing cand data"):
line = ujson.loads(line)
sent2probs[line["sent_idx_unq"]] = line["probs"]
sent2cands[line["sent_idx_unq"]] = line["cands"]
total_dropped = 0
total_seen = 0
total_len = 0
with open(input_file) as f_in, open(output_file, "w") as f_out:
tag = os.path.splitext(os.path.basename(input_file))[0]
for line in track(f_in, total=total_input, description="Processing data"):
line = ujson.loads(line)
sent_idx_unq = line["sent_idx_unq"]
            if sent_idx_unq not in sent2cands:
                assert (
                    len(line["aliases"]) == 0
                ), f"{sent_idx_unq} not in cand maps but there are aliases"
            # Default to empty lists: the assert above guarantees such sentences
            # have no aliases, so the loop below is simply a no-op for them
            cands = sent2cands.get(sent_idx_unq, [])
            probs = sent2probs.get(sent_idx_unq, [])
assert len(cands) == len(
line["aliases"]
), f"The length of aliases does not match cands in {sent_idx_unq}"
assert len(probs) == len(
line["aliases"]
), f"The length of aliases does not match probs in {sent_idx_unq}"
new_als, new_qids, new_spans, new_golds = [], [], [], []
new_slices = {}
j = 0
for i in range(len(line["aliases"])):
total_seen += 1
new_al = f"al_{sent_idx_unq}_{i}_{tag}"
new_cand_pairs = [
[c, p]
for c, p in zip(cands[i], probs[i])
if ed_global.qid_exists(c)
]
final_cand_pairs = new_cand_pairs[:max_candidates]
total_len += len(final_cand_pairs)
                # We are training in candidates and the gold is not in the list, so swap it in for the lowest-scored candidate
if (
is_train
and not no_add_gold_qid_to_train
and line["qids"][i] not in [p[0] for p in final_cand_pairs]
):
final_cand_pairs[-1] = [line["qids"][i], final_cand_pairs[-1][1]]
new_alias2qids[new_al] = final_cand_pairs
new_als.append(new_al)
new_qids.append(line["qids"][i])
new_spans.append(line["spans"][i])
new_golds.append(line["gold"][i])
for slice_name in line.get("slices", {}):
if slice_name not in new_slices:
new_slices[slice_name] = {}
new_slices[slice_name][str(j)] = line["slices"][slice_name][str(i)]
j += 1
line["old_aliases"] = line["aliases"][:]
line["aliases"] = new_als
line["qids"] = new_qids
line["spans"] = new_spans
line["gold"] = new_golds
line["slices"] = new_slices
f_out.write(ujson.dumps(line) + "\n")
print(
f"Total Seen: {total_seen}, Total Dropped: {total_dropped}, "
f"Recall: {(total_seen - total_dropped) / total_seen}, "
f"Avg Cand Len: {total_len / (total_seen)} for {input_file}"
)
return new_alias2qids, total_seen, total_dropped
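# --- Illustrative sketch (added for exposition; not part of the original script).
# It replays the candidate-truncation and gold-injection step from merge_data_hlp
# above on a toy list, independent of the entity dump.
def _example_gold_injection(cand_pairs, gold_qid, max_candidates):
    """Keep the top max_candidates [qid, prob] pairs; swap the gold QID into the last slot if missing."""
    final_cand_pairs = cand_pairs[:max_candidates]
    if final_cand_pairs and gold_qid not in [p[0] for p in final_cand_pairs]:
        final_cand_pairs[-1] = [gold_qid, final_cand_pairs[-1][1]]
    # e.g. _example_gold_injection([["Q1", 0.9], ["Q2", 0.5], ["Q3", 0.1]], "Q7", 2)
    # returns [["Q1", 0.9], ["Q7", 0.5]]
    return final_cand_pairs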
def main():
"""Run."""
gl_start = time.time()
multiprocessing.set_start_method("spawn")
args = get_arg_parser().parse_args()
print(json.dumps(vars(args), indent=4))
utils.ensure_dir(args.data_dir)
out_dir = os.path.join(args.data_dir, args.out_subdir)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir, exist_ok=True)
# Reading in files
in_files_train = glob.glob(os.path.join(args.data_dir, "*.jsonl"))
in_files_cand = glob.glob(os.path.join(args.contextual_cand_data, "*.jsonl"))
assert len(in_files_train) > 0, f"We didn't find any train files at {args.data_dir}"
assert (
len(in_files_cand) > 0
), f"We didn't find any contextual files at {args.contextual_cand_data}"
in_files = []
for file in in_files_train:
file_name = os.path.basename(file)
tag = os.path.splitext(file_name)[0]
is_train = "train" in tag
if is_train:
print(f"{file_name} is a training dataset...will be processed as such")
pair = None
for f in in_files_cand:
if tag in f:
pair = f
break
assert pair is not None, f"{file_name} name, {tag} tag"
out_file = os.path.join(out_dir, file_name)
in_files.append([file, pair, out_file, is_train])
final_cand_map = {}
max_cands = 0
for all_inputs in in_files:
print(
f"Reading in {all_inputs[0]} with cand maps {all_inputs[1]} and dumping to {all_inputs[2]}"
)
new_alias2qids = merge_data(
args.processes,
args.no_add_gold_qid_to_train,
args.max_candidates,
all_inputs,
os.path.join(args.entity_dump, "entity_mappings"),
)
for al in new_alias2qids:
assert al not in final_cand_map, f"{al} is already in final_cand_map"
final_cand_map[al] = new_alias2qids[al]
max_cands = max(max_cands, len(final_cand_map[al]))
print("Buidling new entity symbols")
entity_dump = EntitySymbols.load_from_cache(
load_dir=os.path.join(args.entity_dump, "entity_mappings")
)
entity_dump_new = EntitySymbols(
max_candidates=max_cands,
alias2qids=final_cand_map,
qid2title=entity_dump.get_qid2title_dict(),
)
# Copying over entity_db
shutil.copytree(
os.path.join(args.entity_dump),
os.path.join(out_dir, "entity_db"),
dirs_exist_ok=True,
)
out_dir = os.path.join(out_dir, "entity_db/entity_mappings")
entity_dump_new.save(out_dir)
print(f"Finished in {time.time() - gl_start}s")
if __name__ == "__main__":
main()
| bootleg-master | cand_gen/utils/merge_contextual_cands.py |
"""Parses a Booleg input config into a DottedDict of config values (with
defaults filled in) for running a model."""
import argparse
import os
from bootleg.utils.classes.dotted_dict import create_bool_dotted_dict
from bootleg.utils.parser.emm_parse_args import (
parse_args as emm_parse_args,
parse_args_to_config as emm_parse_args_to_config,
)
from bootleg.utils.parser.parser_utils import (
add_nested_flags_from_config,
flatten_nested_args_for_parser,
load_commented_json_file,
merge_configs,
reconstructed_nested_args,
recursive_keys,
)
from bootleg.utils.utils import load_yaml_file
from cand_gen.utils.parser.candgen_args import config_args
def get_boot_config(config, parser_hierarchy=None, parser=None, unknown=None):
"""
Returns a parsed Bootleg config from config. Config can be a path to a config file or an already loaded dictionary.
    The high-level workflow:
    1. Reads the Bootleg default config (config_args) and adds its params to an arg parser,
       flattening all hierarchical values into "." values
       E.g., data_config -> word_embedding -> layers becomes --data_config.word_embedding.layers
    2. Flattens the given config values into the "." format
    3. Adds any unknown values from the first arg parser that parses the config script.
       Allows the user to pass --data_config.word_embedding.layers on the command line to overwrite values in the file
    4. Parses the flattened args w.r.t. the arg parser
    5. Reconstructs the args back into their hierarchical form
Args:
config: model specific config
parser_hierarchy: Dict of hierarchy of config (or None)
parser: arg parser (or None)
unknown: unknown arg values passed from command line to be added to config and overwrite values in file
Returns: parsed config
"""
if unknown is None:
unknown = []
if parser_hierarchy is None:
parser_hierarchy = {}
if parser is None:
parser = argparse.ArgumentParser()
add_nested_flags_from_config(parser, config_args, parser_hierarchy, prefix="")
if type(config) is str:
assert os.path.splitext(config)[1] in [
".json",
".yaml",
], "We only accept json or yaml ending for configs"
if os.path.splitext(config)[1] == ".json":
params = load_commented_json_file(config)
else:
params = load_yaml_file(config)
else:
assert (
type(config) is dict
), "We only support loading configs that are paths to json/yaml files or preloaded configs."
params = config
all_keys = list(recursive_keys(parser_hierarchy))
new_params = flatten_nested_args_for_parser(params, [], groups=all_keys, prefix="")
# update with new args
# unknown must have ["--arg1", "value1", "--arg2", "value2"] as we don't have any action_true args
assert len(unknown) % 2 == 0
assert all(
unknown[idx].startswith(("-", "--")) for idx in range(0, len(unknown), 2)
)
for idx in range(1, len(unknown), 2):
# allow passing -1 for emmental.device argument
assert (not unknown[idx].startswith(("-", "--"))) or (
unknown[idx - 1] == "--emmental.device" and unknown[idx] == "-1"
)
for idx in range(0, len(unknown), 2):
arg = unknown[idx]
# If override one you already have in json
if arg in new_params:
idx2 = new_params.index(arg)
new_params[idx2 : idx2 + 2] = unknown[idx : idx + 2]
# If override one that is in bootleg_args.py by not in json
else:
new_params.extend(unknown[idx : idx + 2])
args = parser.parse_args(new_params)
top_names = {}
reconstructed_nested_args(args, top_names, parser_hierarchy, prefix="")
# final_args = argparse.Namespace(**top_names)
final_args = create_bool_dotted_dict(top_names)
# turn_to_dotdicts(final_args)
return final_args
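# --- Illustrative sketch (added for exposition; not part of the original module).
# It demonstrates the "." flattening convention described in the get_boot_config
# docstring on a plain dict, without touching the real Bootleg parser machinery.
def _example_dot_flatten(nested, prefix=""):
    """Flatten {"data_config": {"word_embedding": {"layers": 1}}} into
    {"--data_config.word_embedding.layers": 1} style flags."""
    flat = {}
    for key, value in nested.items():
        dotted = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(_example_dot_flatten(value, dotted))
        else:
            flat[f"--{dotted}"] = value
    return flat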
def parse_boot_and_emm_args(config_script, unknown=None):
"""
Merges the Emmental config with the Bootleg config.
As we have an emmental: ... level in our config for emmental commands,
we need to parse those with the Emmental parser and then merge the Bootleg only config values
with the Emmental ones.
Args:
config_script: config script for Bootleg and Emmental args
unknown: unknown arg values passed from command line to overwrite file values
Returns: parsed merged Bootleg and Emmental config
"""
if unknown is None:
unknown = []
config_parser = argparse.ArgumentParser(
description="Bootleg Config",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Modified parse_args to have 'emmental.group' prefixes. This represents a hierarchy in our parser
config_parser, parser_hierarchy = emm_parse_args(parser=config_parser)
# Add Bootleg args and parse
all_args = get_boot_config(config_script, parser_hierarchy, config_parser, unknown)
# These have emmental -> config group -> arg structure for emmental.
    # Must remove that hierarchy to convert to the internal Emmental hierarchy
emm_args = {}
for k, v in all_args["emmental"].items():
emm_args[k] = v
del all_args["emmental"]
# create and add Emmental hierarchy
config = emm_parse_args_to_config(create_bool_dotted_dict(emm_args))
    # Merge configs back (merge works on dicts so must convert to dict first)
config = create_bool_dotted_dict(merge_configs(all_args, config))
return config
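# --- Illustrative usage sketch (added for exposition; not part of the original module).
# The config path is a hypothetical placeholder; the override follows the
# ["--flag", "value"] pairing that get_boot_config above expects for unknown args.
def _example_parse_with_override():
    """Parse a hypothetical YAML config and override one value as if from the command line."""
    config_path = "configs/candgen.yaml"  # hypothetical path
    overrides = ["--train_config.batch_size", "64"]
    return parse_boot_and_emm_args(config_path, unknown=overrides)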
| bootleg-master | cand_gen/utils/parser/parser_utils.py |
"""Bootleg default configuration parameters.
In the json file, everything is a string or number. In this python file,
if the default is a boolean, it will be parsed as such. If the default
is a dictionary, True and False strings will become booleans. Otherwise
they will stay strings.
"""
import multiprocessing
config_args = {
"run_config": {
"spawn_method": (
"forkserver",
"multiprocessing spawn method. forkserver will save memory but have slower startup costs.",
),
"eval_batch_size": (128, "batch size for eval"),
"dump_preds_accumulation_steps": (
10000,
"number of eval steps to accumulate the output tensors for before saving results to file",
),
"dataloader_threads": (16, "data loader threads to feed gpus"),
"log_level": ("info", "logging level"),
"dataset_threads": (
int(multiprocessing.cpu_count() * 0.9),
"data set threads for prepping data",
),
},
# Parameters for hyperparameter tuning
"train_config": {
"batch_size": (32, "batch size"),
},
"model_config": {
"hidden_size": (300, "hidden dimension for the embeddings before scoring"),
"normalize": (False, "normalize embeddings before dot product"),
"temperature": (1.0, "temperature for softmax in loss"),
},
"data_config": {
"eval_slices": ([], "slices for evaluation"),
"train_in_candidates": (
True,
"Train in candidates (if False, this means we include NIL entity)",
),
"data_dir": ("data", "where training, testing, and dev data is stored"),
"data_prep_dir": (
"prep",
"directory where data prep files are saved inside data_dir",
),
"entity_dir": (
"entity_data",
"where entity profile information and prepped embedding data is stored",
),
"entity_prep_dir": (
"prep",
"directory where prepped embedding data is saved inside entity_dir",
),
"entity_map_dir": (
"entity_mappings",
"directory where entity json mappings are saved inside entity_dir",
),
"alias_cand_map": (
"alias2qids",
"name of alias candidate map file, should be saved in entity_dir/entity_map_dir",
),
"alias_idx_map": (
"alias2id",
"name of alias index map file, should be saved in entity_dir/entity_map_dir",
),
"qid_cnt_map": (
"qid2cnt.json",
"name of alias index map file, should be saved in data_dir",
),
"max_seq_len": (128, "max token length sentences"),
"max_seq_window_len": (64, "max window around an entity"),
"max_ent_len": (128, "max token length for entire encoded entity"),
"max_ent_aka_len": (20, "max token length for alternate names"),
"overwrite_preprocessed_data": (False, "overwrite preprocessed data"),
"print_examples_prep": (True, "whether to print examples during prep or not"),
"use_entity_desc": (True, "whether to use entity descriptions or not"),
"use_entity_akas": (
True,
"whether to use entity alternates names from the candidates or not",
),
"train_dataset": {
"file": ("train.jsonl", ""),
"use_weak_label": (True, "Use weakly labeled mentions"),
},
"dev_dataset": {
"file": ("dev.jsonl", ""),
"use_weak_label": (True, "Use weakly labeled mentions"),
},
"test_dataset": {
"file": ("test.jsonl", ""),
"use_weak_label": (True, "Use weakly labeled mentions"),
},
"word_embedding": {
"bert_model": ("bert-base-uncased", ""),
"context_layers": (1, ""),
"entity_layers": (1, ""),
"cache_dir": (
"pretrained_bert_models",
"Directory where word embeddings are cached",
),
},
},
}
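# --- Illustrative sketch (added for exposition; not part of the original defaults).
# Every leaf in config_args above is a (default, help-string) tuple; this hedged
# helper shows one way to strip the help strings and keep just the defaults,
# e.g. _example_defaults_only(config_args)["train_config"]["batch_size"] == 32.
def _example_defaults_only(args_tree):
    """Recursively replace (default, help) tuples with only the default value."""
    defaults = {}
    for key, value in args_tree.items():
        if isinstance(value, dict):
            defaults[key] = _example_defaults_only(value)
        else:
            defaults[key] = value[0]
    return defaults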
| bootleg-master | cand_gen/utils/parser/candgen_args.py |
"""Test entity."""
import os
import shutil
import unittest
from pathlib import Path
import torch
from bootleg.layers.alias_to_ent_encoder import AliasEntityTable
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.symbols.kg_symbols import KGSymbols
from bootleg.symbols.type_symbols import TypeSymbols
from bootleg.utils import utils
from bootleg.utils.classes.dotted_dict import DottedDict
class TypeSymbolsTest(unittest.TestCase):
"""Test type symbols."""
def setUp(self) -> None:
"""Set up."""
self.save_dir = Path("tests/data/entity_loader/entity_db_save")
self.save_dir.mkdir(exist_ok=True, parents=True)
def tearDown(self) -> None:
"""Tear down."""
if os.path.exists(self.save_dir):
shutil.rmtree(self.save_dir)
def test_type_init(self):
"""Test type init."""
qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
max_types = 2
type_symbols = TypeSymbols(qid2typenames, max_types=max_types)
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall"],
"Q789": [],
}
gold_type_vocab = {"animal", "animall", "dog", "drop"}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertIsNone(type_symbols._typename2qids)
self.assertSetEqual(gold_type_vocab, type_symbols.get_all_types())
max_types = 4
type_symbols = TypeSymbols(qid2typenames, max_types=max_types, edit_mode=True)
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
gold_typename2qids = {
"animal": {"Q123", "Q567"},
"animall": {"Q567"},
"dog": {"Q345"},
"drop": {"Q567"},
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
def test_type_load_and_save(self):
"""Test type load and save."""
qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
max_types = 2
type_symbols = TypeSymbols(qid2typenames, max_types=max_types)
type_symbols.save(self.save_dir, prefix="test")
type_symbols_2 = TypeSymbols.load_from_cache(self.save_dir, prefix="test")
self.assertEqual(type_symbols_2.max_types, type_symbols.max_types)
self.assertDictEqual(
type_symbols_2.get_qid2typename_dict(), type_symbols.get_qid2typename_dict()
)
self.assertIsNone(type_symbols._typename2qids)
self.assertIsNone(type_symbols_2._typename2qids)
self.assertSetEqual(
type_symbols_2.get_all_types(), type_symbols.get_all_types()
)
def test_type_add_remove_typemap(self):
"""Test type add remove typemap."""
qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
max_types = 3
type_symbols = TypeSymbols(qid2typenames, max_types=max_types, edit_mode=False)
# Check if fails with edit_mode = False
with self.assertRaises(AttributeError) as context:
type_symbols.add_type("Q789", "animal")
assert type(context.exception) is AttributeError
type_symbols = TypeSymbols(qid2typenames, max_types=max_types, edit_mode=True)
# Add new type
type_symbols.add_type("Q789", "annnimal")
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": ["annnimal"],
}
gold_typename2qids = {
"animal": {"Q123", "Q567"},
"animall": {"Q567"},
"annnimal": {"Q789"},
"dog": {"Q345"},
"drop": {"Q567"},
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
        # Check that nothing happens when the QID-type pair doesn't exist and the operation still goes through
type_symbols.remove_type("Q345", "animal")
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
# Now actually remove something
type_symbols.remove_type("Q789", "annnimal")
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
gold_typename2qids = {
"animal": {"Q123", "Q567"},
"animall": {"Q567"},
"annnimal": set(),
"dog": {"Q345"},
"drop": {"Q567"},
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
        # Add to a full QID where we must replace. We do not bring back the old type if we later remove the replacement.
type_symbols.add_type("Q567", "dog")
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "dog"],
"Q789": [],
}
gold_typename2qids = {
"animal": {"Q123", "Q567"},
"animall": {"Q567"},
"annnimal": set(),
"dog": {"Q345", "Q567"},
"drop": set(),
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
type_symbols.remove_type("Q567", "dog")
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall"],
"Q789": [],
}
gold_typename2qids = {
"animal": {"Q123", "Q567"},
"animall": {"Q567"},
"annnimal": set(),
"dog": {"Q345"},
"drop": set(),
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
def test_add_entity(self):
"""Test add entity."""
qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
max_types = 3
type_symbols = TypeSymbols(qid2typenames, max_types=max_types, edit_mode=True)
# Add to a previously empty QID
type_symbols.add_entity("Q910", ["annnimal", "animal", "dog", "drop"])
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
"Q910": ["annnimal", "animal", "dog"], # Max types limits new types added
}
gold_typename2qids = {
"animal": {"Q123", "Q567", "Q910"},
"animall": {"Q567"},
"annnimal": {"Q910"},
"dog": {"Q345", "Q910"},
"drop": {"Q567"},
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
def test_reidentify_entity(self):
"""Test reidentiy entity."""
qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
max_types = 3
type_symbols = TypeSymbols(qid2typenames, max_types=max_types, edit_mode=True)
type_symbols.reidentify_entity("Q567", "Q911")
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q911": ["animal", "animall", "drop"],
"Q789": [],
}
gold_typename2qids = {
"animal": {"Q123", "Q911"},
"animall": {"Q911"},
"dog": {"Q345"},
"drop": {"Q911"},
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
def test_prune_to_entities(self):
"""Test prune to entities."""
qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
"Q567": ["animal", "animall", "drop"],
"Q789": [],
}
max_types = 3
type_symbols = TypeSymbols(qid2typenames, max_types=max_types, edit_mode=True)
type_symbols.prune_to_entities({"Q123", "Q345"})
gold_qid2typenames = {
"Q123": ["animal"],
"Q345": ["dog"],
}
gold_typename2qids = {
"animal": {"Q123"},
"animall": set(),
"dog": {"Q345"},
"drop": set(),
}
self.assertDictEqual(gold_qid2typenames, type_symbols.get_qid2typename_dict())
self.assertDictEqual(gold_typename2qids, type_symbols._typename2qids)
class KGSymbolsTest(unittest.TestCase):
"""Kg symbols test."""
def setUp(self) -> None:
"""Set up."""
self.save_dir = Path("tests/data/entity_loader/entity_db_save")
self.save_dir.mkdir(exist_ok=True, parents=True)
def tearDown(self) -> None:
"""Tear down."""
if os.path.exists(self.save_dir):
shutil.rmtree(self.save_dir)
def test_kg_init(self):
"""Test kg init."""
qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
}
max_connections = 1
kg_symbols = KGSymbols(qid2relations, max_connections=max_connections)
gold_qid2relations = {
"Q123": {"sibling": ["Q345"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
}
gold_allrelations = {"sibling"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertIsNone(kg_symbols._obj2head)
self.assertSetEqual(gold_allrelations, kg_symbols.get_all_relations())
def test_kg_load_and_save(self):
"""Test kg load and save."""
qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
}
max_connections = 1
kg_symbols = KGSymbols(qid2relations, max_connections=max_connections)
kg_symbols.save(self.save_dir, prefix="test")
kg_symbols_2 = KGSymbols.load_from_cache(self.save_dir, prefix="test")
self.assertEqual(kg_symbols_2.max_connections, kg_symbols.max_connections)
self.assertDictEqual(
kg_symbols_2.get_qid2relations_dict(), kg_symbols.get_qid2relations_dict()
)
self.assertIsNone(kg_symbols_2._obj2head)
max_connections = 2
kg_symbols = KGSymbols(qid2relations, max_connections=max_connections)
kg_symbols.save(self.save_dir, prefix="test")
kg_symbols_2 = KGSymbols.load_from_cache(self.save_dir, prefix="test")
self.assertEqual(kg_symbols_2.max_connections, kg_symbols.max_connections)
self.assertDictEqual(
kg_symbols_2.get_qid2relations_dict(), kg_symbols.get_qid2relations_dict()
)
self.assertDictEqual(qid2relations, kg_symbols.get_qid2relations_dict())
self.assertIsNone(kg_symbols_2._obj2head)
def test_relation_add_remove_kgmapping(self):
"""Test relation add remoce kg mapping."""
qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
}
max_connections = 2
kg_symbols = KGSymbols(qid2relations, max_connections=max_connections)
# Check if fails with edit_mode = False
with self.assertRaises(AttributeError) as context:
kg_symbols.add_relation("Q789", "sibling", "Q123")
assert type(context.exception) is AttributeError
kg_symbols = KGSymbols(
qid2relations, max_connections=max_connections, edit_mode=True
)
kg_symbols.add_relation("Q789", "sibling", "Q123")
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {"sibling": ["Q123"]},
}
gold_obj2head = {
"Q123": {"Q789", "Q567", "Q345"},
"Q345": {"Q123"},
"Q567": {"Q123"},
}
gold_allrelationes = {"sibling"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
kg_symbols.add_relation("Q123", "sibling", "Q789")
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q789"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {"sibling": ["Q123"]},
}
gold_obj2head = {
"Q123": {"Q789", "Q567", "Q345"},
"Q345": {"Q123"},
"Q789": {"Q123"},
}
gold_allrelationes = {"sibling"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
kg_symbols.remove_relation("Q123", "sibling", "Q789")
gold_qid2relations = {
"Q123": {"sibling": ["Q345"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {"sibling": ["Q123"]},
}
gold_obj2head = {"Q123": {"Q789", "Q567", "Q345"}, "Q345": {"Q123"}}
gold_allrelationes = {"sibling"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
# Check nothing changes with bad remove that doesn't exist
kg_symbols.remove_relation("Q789", "siblinggg", "Q123")
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
# Check the new relation is added
kg_symbols.add_relation("Q789", "siblinggg", "Q567")
gold_qid2relations = {
"Q123": {"sibling": ["Q345"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {"sibling": ["Q123"], "siblinggg": ["Q567"]},
}
gold_obj2head = {
"Q123": {"Q789", "Q567", "Q345"},
"Q345": {"Q123"},
"Q567": {"Q789"},
}
gold_allrelationes = {"sibling", "siblinggg"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
# Check nothing changes with relation pair that doesn't exist
kg_symbols.remove_relation("Q567", "siblinggg", "Q789")
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
kg_symbols.remove_relation("Q789", "sibling", "Q123")
gold_qid2relations = {
"Q123": {"sibling": ["Q345"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {"siblinggg": ["Q567"]},
}
gold_obj2head = {"Q123": {"Q567", "Q345"}, "Q345": {"Q123"}, "Q567": {"Q789"}}
gold_allrelationes = {"sibling", "siblinggg"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
def test_add_entity(self):
"""Test add entity."""
qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
}
max_connections = 2
kg_symbols = KGSymbols(
qid2relations, max_connections=max_connections, edit_mode=True
)
kg_symbols.add_entity("Q910", {"siblinggg": ["Q567", "Q123", "Q345"]})
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
"Q910": {"siblinggg": ["Q567", "Q123"]}, # Max connections limits to 2
}
gold_obj2head = {
"Q123": {"Q910", "Q567", "Q345"},
"Q345": {"Q123"},
"Q567": {"Q123", "Q910"},
}
gold_allrelationes = {"sibling", "siblinggg"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
        # Check we can't add an entity that already exists
with self.assertRaises(ValueError) as context:
kg_symbols.add_entity("Q910", {"sibling": ["Q567", "Q123", "Q345"]})
assert type(context.exception) is ValueError
kg_symbols.add_entity("Q911", {"sibling": ["Q567", "Q123", "Q345"]})
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
"Q910": {"siblinggg": ["Q567", "Q123"]}, # Max connections limits to 2
"Q911": {"sibling": ["Q567", "Q123"]}, # Max connections limits to 2
}
gold_obj2head = {
"Q123": {"Q910", "Q567", "Q345", "Q911"},
"Q345": {"Q123"},
"Q567": {"Q123", "Q910", "Q911"},
}
gold_allrelationes = {"sibling", "siblinggg"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
def test_reidentify_entities(self):
"""Test reidentify entities."""
qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"], "sib": ["Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"], "sib": ["Q123", "Q567"]},
"Q789": {},
}
max_connections = 2
kg_symbols = KGSymbols(
qid2relations, max_connections=max_connections, edit_mode=True
)
kg_symbols.reidentify_entity("Q567", "Q911")
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q911"], "sib": ["Q911"]},
"Q345": {"sibling": ["Q123"]},
"Q911": {"sibling": ["Q123"], "sib": ["Q123", "Q911"]},
"Q789": {},
}
gold_obj2head = {
"Q123": {"Q911", "Q345"},
"Q345": {"Q123"},
"Q911": {"Q123", "Q911"},
}
gold_allrelationes = {"sibling", "sib"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
with self.assertRaises(ValueError) as context:
kg_symbols.reidentify_entity("Q912", "Q913")
assert type(context.exception) is ValueError
kg_symbols.reidentify_entity("Q789", "Q912")
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q911"], "sib": ["Q911"]},
"Q345": {"sibling": ["Q123"]},
"Q911": {"sibling": ["Q123"], "sib": ["Q123", "Q911"]},
"Q912": {},
}
gold_obj2head = {
"Q123": {"Q911", "Q345"},
"Q345": {"Q123"},
"Q911": {"Q123", "Q911"},
}
gold_allrelationes = {"sibling", "sib"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
def test_prune_to_entities(self):
"""Test prune to entities."""
qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q567"]},
"Q567": {"sibling": ["Q123"]},
"Q789": {},
}
max_connections = 2
kg_symbols = KGSymbols(
qid2relations, max_connections=max_connections, edit_mode=True
)
kg_symbols.prune_to_entities({"Q345", "Q567"})
gold_qid2relations = {
"Q345": {"sibling": ["Q567"]},
"Q567": {},
}
gold_obj2head = {"Q567": {"Q345"}}
gold_allrelationes = {"sibling"}
self.assertDictEqual(gold_qid2relations, kg_symbols.get_qid2relations_dict())
self.assertDictEqual(gold_obj2head, kg_symbols._obj2head)
self.assertSetEqual(gold_allrelationes, kg_symbols.get_all_relations())
class EntitySymbolTest(unittest.TestCase):
"""Entity symbol test class."""
def test_create_entities(self):
"""Test create entities."""
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
trueqid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
}
trueqid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
# the non-candidate class is included in entity_dump
trueqid2eid = {"Q1": 1, "Q2": 2, "Q3": 3, "Q4": 4}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
entity_symbols = EntitySymbols(
max_candidates=3,
alias2qids=truealias2qids,
qid2title=trueqid2title,
qid2desc=trueqid2desc,
)
self.assertEqual(entity_symbols.max_candidates, 3)
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols.get_alias2qids_dict(), truealias2qids)
self.assertDictEqual(entity_symbols.get_qid2title_dict(), trueqid2title)
self.assertDictEqual(entity_symbols._qid2desc, trueqid2desc)
self.assertDictEqual(entity_symbols.get_qid2eid_dict(), trueqid2eid)
self.assertDictEqual(entity_symbols._alias2id.to_dict(), truealias2id)
self.assertIsNone(entity_symbols._qid2aliases)
# Test load from dump
temp_save_dir = "tests/data/entity_loader_test"
entity_symbols.save(temp_save_dir)
entity_symbols = EntitySymbols.load_from_cache(temp_save_dir)
self.assertEqual(entity_symbols.max_candidates, 3)
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols.get_alias2qids_dict(), truealias2qids)
self.assertDictEqual(entity_symbols.get_qid2title_dict(), trueqid2title)
self.assertDictEqual(entity_symbols._qid2desc, trueqid2desc)
self.assertDictEqual(entity_symbols.get_qid2eid_dict(), trueqid2eid)
self.assertDictEqual(entity_symbols._alias2id.to_dict(), truealias2id)
self.assertIsNone(entity_symbols._qid2aliases)
shutil.rmtree(temp_save_dir)
# Test edit mode
entity_symbols = EntitySymbols(
max_candidates=3,
alias2qids=truealias2qids,
qid2title=trueqid2title,
qid2desc=trueqid2desc,
edit_mode=True,
)
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias4"},
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
def test_getters(self):
"""Test getters."""
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
trueqid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
}
trueqid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
entity_symbols = EntitySymbols(
max_candidates=3,
alias2qids=truealias2qids,
qid2title=trueqid2title,
qid2desc=trueqid2desc,
)
self.assertEqual(entity_symbols.get_qid(1), "Q1")
self.assertSetEqual(
set(entity_symbols.get_all_aliases()),
{"alias1", "multi word alias2", "alias3", "alias4"},
)
self.assertEqual(entity_symbols.get_eid("Q3"), 3)
self.assertListEqual(entity_symbols.get_qid_cands("alias1"), ["Q1", "Q4"])
self.assertListEqual(
entity_symbols.get_qid_cands("alias1", max_cand_pad=True),
["Q1", "Q4", "-1"],
)
self.assertListEqual(
entity_symbols.get_eid_cands("alias1", max_cand_pad=True), [1, 4, -1]
)
self.assertEqual(entity_symbols.get_title("Q1"), "alias1")
self.assertEqual(entity_symbols.get_desc("Q1"), "d alias1")
self.assertEqual(entity_symbols.get_alias_idx("alias1"), 0)
self.assertEqual(entity_symbols.get_alias_from_idx(2), "alias3")
self.assertEqual(entity_symbols.alias_exists("alias3"), True)
self.assertEqual(entity_symbols.alias_exists("alias5"), False)
self.assertDictEqual(
entity_symbols.get_all_alias_vocabtrie().to_dict(),
{"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1},
)
def test_add_remove_mention(self):
"""Test add remove mention."""
alias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2], ["Q3", 1]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
qid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
}
qid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
max_candidates = 3
# the non-candidate class is included in entity_dump
trueqid2eid = {"Q1": 1, "Q2": 2, "Q3": 3, "Q4": 4}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
entity_symbols = EntitySymbols(
max_candidates=max_candidates,
alias2qids=alias2qids,
qid2title=qid2title,
qid2desc=qid2desc,
)
self.assertEqual(entity_symbols.max_candidates, 3)
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols.get_alias2qids_dict(), truealias2qids)
self.assertDictEqual(entity_symbols.get_qid2title_dict(), qid2title)
self.assertDictEqual(entity_symbols._qid2desc, qid2desc)
self.assertDictEqual(entity_symbols.get_qid2eid_dict(), trueqid2eid)
self.assertDictEqual(entity_symbols._alias2id.to_dict(), truealias2id)
self.assertIsNone(entity_symbols._qid2aliases)
# Check if fails with edit_mode = False
with self.assertRaises(AttributeError) as context:
entity_symbols.add_mention("Q2", "alias3", 31.0)
assert type(context.exception) is AttributeError
entity_symbols = EntitySymbols(
max_candidates=max_candidates,
alias2qids=alias2qids,
qid2title=qid2title,
qid2desc=qid2desc,
edit_mode=True,
)
# Check nothing changes if pair doesn't exist
entity_symbols.remove_mention("Q3", "alias1")
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias4"},
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
self.assertEqual(entity_symbols.max_candidates, 3)
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols._qid2title, qid2title)
self.assertDictEqual(entity_symbols._qid2desc, qid2desc)
self.assertDictEqual(entity_symbols._qid2eid, trueqid2eid)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
# ADD Q2 ALIAS 3
entity_symbols.add_mention("Q2", "alias3", 31.0)
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias4", "alias3"},
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q2", 31.0], ["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 3: "alias4", 1: "multi word alias2"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# ADD Q1 ALIAS 4
entity_symbols.add_mention("Q1", "alias4", 31.0)
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3", "alias4"},
"Q2": {"multi word alias2", "alias3"},
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q2", 31.0], ["Q1", 30.0]],
"alias4": [["Q1", 31.0], ["Q4", 20], ["Q3", 15.0]],
}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 3: "alias4", 1: "multi word alias2"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# REMOVE Q3 ALIAS 4
entity_symbols.remove_mention("Q3", "alias4")
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3", "alias4"},
"Q2": {"multi word alias2", "alias3"},
"Q3": set(),
"Q4": {"alias1", "multi word alias2", "alias4"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q2", 31.0], ["Q1", 30.0]],
"alias4": [["Q1", 31.0], ["Q4", 20]],
}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 3: "alias4", 1: "multi word alias2"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# REMOVE Q4 ALIAS 4
entity_symbols.remove_mention("Q4", "alias4")
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3", "alias4"},
"Q2": {"multi word alias2", "alias3"},
"Q3": set(),
"Q4": {"alias1", "multi word alias2"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q2", 31.0], ["Q1", 30.0]],
"alias4": [["Q1", 31.0]],
}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 3: "alias4", 1: "multi word alias2"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# REMOVE Q1 ALIAS 4
entity_symbols.remove_mention("Q1", "alias4")
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias3"},
"Q3": set(),
"Q4": {"alias1", "multi word alias2"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q2", 31.0], ["Q1", 30.0]],
}
truealias2id = {"alias1": 0, "alias3": 2, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 1: "multi word alias2"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 3)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# ADD Q1 BLIAS 0
entity_symbols.add_mention("Q1", "blias0", 11)
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3", "blias0"},
"Q2": {"multi word alias2", "alias3"},
"Q3": set(),
"Q4": {"alias1", "multi word alias2"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q2", 31.0], ["Q1", 30.0]],
"blias0": [["Q1", 11.0]],
}
truealias2id = {"alias1": 0, "alias3": 2, "multi word alias2": 1, "blias0": 4}
trueid2alias = {0: "alias1", 2: "alias3", 1: "multi word alias2", 4: "blias0"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 4)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# SET SCORE Q2 ALIAS3
# Check if fails not a pair
with self.assertRaises(ValueError) as context:
entity_symbols.set_score("Q2", "alias1", 2)
assert type(context.exception) is ValueError
entity_symbols.set_score("Q2", "alias3", 2)
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3", "blias0"},
"Q2": {"multi word alias2", "alias3"},
"Q3": set(),
"Q4": {"alias1", "multi word alias2"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q1", 30.0], ["Q2", 2]],
"blias0": [["Q1", 11.0]],
}
truealias2id = {"alias1": 0, "alias3": 2, "multi word alias2": 1, "blias0": 4}
trueid2alias = {0: "alias1", 2: "alias3", 1: "multi word alias2", 4: "blias0"}
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 4)
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
# MAKE SURE CHANGES TAKE EFFECT
temp_save_dir = "tests/data/entity_loader_test"
entity_symbols.save(temp_save_dir)
entity_symbols = EntitySymbols.load_from_cache(temp_save_dir)
self.assertEqual(entity_symbols.max_eid, 4)
self.assertEqual(entity_symbols.max_alid, 4)
self.assertIsNone(entity_symbols._qid2aliases)
self.assertDictEqual(entity_symbols.get_alias2qids_dict(), truealias2qids)
self.assertDictEqual(entity_symbols._alias2id.to_dict(), truealias2id)
self.assertDictEqual(
{
entity_symbols.get_alias_idx(a): a
for a in entity_symbols.get_all_aliases()
},
trueid2alias,
)
shutil.rmtree(temp_save_dir)
def test_add_entity(self):
"""Test add entity."""
alias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2], ["Q3", 1]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
qid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
}
qid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
max_candidates = 3
entity_symbols = EntitySymbols(
max_candidates=max_candidates,
alias2qids=alias2qids,
qid2title=qid2title,
qid2desc=qid2desc,
edit_mode=True,
)
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias4"},
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
trueqid2eid = {"Q1": 1, "Q2": 2, "Q3": 3, "Q4": 4}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 3: "alias4", 1: "multi word alias2"}
truemax_eid = 4
truemax_alid = 3
truenum_entities = 4
truenum_entities_with_pad_and_nocand = 6
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._qid2eid, trueqid2eid)
self.assertDictEqual(
entity_symbols._eid2qid, {v: i for i, v in trueqid2eid.items()}
)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._qid2title, qid2title)
self.assertDictEqual(entity_symbols._qid2desc, qid2desc)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
self.assertEqual(entity_symbols.max_eid, truemax_eid)
self.assertEqual(entity_symbols.max_alid, truemax_alid)
self.assertEqual(entity_symbols.num_entities, truenum_entities)
self.assertEqual(
entity_symbols.num_entities_with_pad_and_nocand,
truenum_entities_with_pad_and_nocand,
)
# Add entity
entity_symbols.add_entity(
"Q5", [["multi word alias2", 1.5], ["alias5", 20.0]], "Snake", "d Snake"
)
qid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
"Q5": "Snake",
}
qid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
"Q5": "d Snake",
}
trueqid2aliases = {
"Q1": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias4"},
"Q3": {"alias4"},
"Q4": {"alias1", "alias4"},
"Q5": {"multi word alias2", "alias5"},
}
truealias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [
["Q2", 5.0],
["Q1", 3],
["Q5", 1.5],
], # adding new entity-mention pair - we override scores to add it. Hence Q4 is removed
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
"alias5": [["Q5", 20]],
}
trueqid2eid = {"Q1": 1, "Q2": 2, "Q3": 3, "Q4": 4, "Q5": 5}
truealias2id = {
"alias1": 0,
"alias3": 2,
"alias4": 3,
"multi word alias2": 1,
"alias5": 4,
}
trueid2alias = {
0: "alias1",
2: "alias3",
3: "alias4",
1: "multi word alias2",
4: "alias5",
}
truemax_eid = 5
truemax_alid = 4
truenum_entities = 5
truenum_entities_with_pad_and_nocand = 7
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._qid2eid, trueqid2eid)
self.assertDictEqual(
entity_symbols._eid2qid, {v: i for i, v in trueqid2eid.items()}
)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
self.assertEqual(entity_symbols.max_eid, truemax_eid)
self.assertEqual(entity_symbols.max_alid, truemax_alid)
self.assertEqual(entity_symbols.num_entities, truenum_entities)
self.assertEqual(
entity_symbols.num_entities_with_pad_and_nocand,
truenum_entities_with_pad_and_nocand,
)
def test_reidentify_entity(self):
"""Test reidentify entities."""
alias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2], ["Q3", 1]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
qid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
}
qid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
max_candidates = 3
entity_symbols = EntitySymbols(
max_candidates=max_candidates,
alias2qids=alias2qids,
qid2title=qid2title,
qid2desc=qid2desc,
edit_mode=True,
)
entity_symbols.reidentify_entity("Q1", "Q7")
trueqid2aliases = {
"Q7": {"alias1", "multi word alias2", "alias3"},
"Q2": {"multi word alias2", "alias4"},
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
truealias2qids = {
"alias1": [["Q7", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q7", 3], ["Q4", 2]],
"alias3": [["Q7", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
trueqid2eid = {"Q7": 1, "Q2": 2, "Q3": 3, "Q4": 4}
truealias2id = {"alias1": 0, "alias3": 2, "alias4": 3, "multi word alias2": 1}
trueid2alias = {0: "alias1", 2: "alias3", 3: "alias4", 1: "multi word alias2"}
truemax_eid = 4
truenum_entities = 4
truenum_entities_with_pad_and_nocand = 6
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._qid2eid, trueqid2eid)
self.assertDictEqual(
entity_symbols._eid2qid, {v: i for i, v in trueqid2eid.items()}
)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._qid2title, qid2title)
self.assertDictEqual(entity_symbols._qid2desc, qid2desc)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
self.assertEqual(entity_symbols.max_eid, truemax_eid)
self.assertEqual(entity_symbols.num_entities, truenum_entities)
self.assertEqual(
entity_symbols.num_entities_with_pad_and_nocand,
truenum_entities_with_pad_and_nocand,
)
def test_prune_to_entities(self):
"""Test prune to entities."""
alias2qids = {
"alias1": [["Q1", 10.0], ["Q4", 6]],
"multi word alias2": [["Q2", 5.0], ["Q1", 3], ["Q4", 2], ["Q3", 1]],
"alias3": [["Q1", 30.0]],
"alias4": [["Q4", 20], ["Q3", 15.0], ["Q2", 1]],
}
qid2title = {
"Q1": "alias1",
"Q2": "multi alias2",
"Q3": "word alias3",
"Q4": "nonalias4",
}
qid2desc = {
"Q1": "d alias1",
"Q2": "d multi alias2",
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
max_candidates = 3
entity_symbols = EntitySymbols(
max_candidates=max_candidates,
alias2qids=alias2qids,
qid2title=qid2title,
qid2desc=qid2desc,
edit_mode=True,
)
entity_symbols.prune_to_entities({"Q3", "Q4"})
trueqid2title = {
"Q3": "word alias3",
"Q4": "nonalias4",
}
trueqid2desc = {
"Q3": "d word alias3",
"Q4": "d nonalias4",
}
trueqid2aliases = {
"Q3": {"alias4"},
"Q4": {"alias1", "multi word alias2", "alias4"},
}
truealias2qids = {
"alias1": [["Q4", 6]],
"multi word alias2": [["Q4", 2]],
"alias4": [["Q4", 20], ["Q3", 15.0]],
}
trueqid2eid = {"Q3": 1, "Q4": 2}
truealias2id = {"alias1": 0, "alias4": 1, "multi word alias2": 2}
trueid2alias = {0: "alias1", 1: "alias4", 2: "multi word alias2"}
truemax_eid = 2
truemax_alid = 2
truenum_entities = 2
truenum_entities_with_pad_and_nocand = 4
self.assertDictEqual(entity_symbols._qid2aliases, trueqid2aliases)
self.assertDictEqual(entity_symbols._qid2eid, trueqid2eid)
self.assertDictEqual(
entity_symbols._eid2qid, {v: i for i, v in trueqid2eid.items()}
)
self.assertDictEqual(entity_symbols._alias2qids, truealias2qids)
self.assertDictEqual(entity_symbols._qid2title, trueqid2title)
self.assertDictEqual(entity_symbols._qid2desc, trueqid2desc)
self.assertDictEqual(entity_symbols._alias2id, truealias2id)
self.assertDictEqual(entity_symbols._id2alias, trueid2alias)
self.assertEqual(entity_symbols.max_eid, truemax_eid)
self.assertEqual(entity_symbols.max_alid, truemax_alid)
self.assertEqual(entity_symbols.num_entities, truenum_entities)
self.assertEqual(
entity_symbols.num_entities_with_pad_and_nocand,
truenum_entities_with_pad_and_nocand,
)
class AliasTableTest(unittest.TestCase):
"""Alias table test."""
def setUp(self):
"""Set up."""
entity_dump_dir = "tests/data/entity_loader/entity_data/entity_mappings"
self.entity_symbols = EntitySymbols.load_from_cache(
entity_dump_dir, alias_cand_map_dir="alias2qids"
)
self.config = {
"data_config": {
"train_in_candidates": False,
"entity_dir": "tests/data/entity_loader/entity_data",
"entity_prep_dir": "prep",
"alias_cand_map": "alias2qids.json",
"max_aliases": 3,
"data_dir": "tests/data/entity_loader",
"overwrite_preprocessed_data": True,
},
"run_config": {"distributed": False},
}
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.config["data_config"]["entity_dir"],
self.config["data_config"]["entity_prep_dir"],
)
if utils.exists_dir(dir):
shutil.rmtree(dir)
def test_setup_notincand(self):
"""Test setup not in canddiate."""
self.alias_entity_table = AliasEntityTable(
DottedDict(self.config["data_config"]), self.entity_symbols
)
gold_alias2entity_table = torch.tensor(
[
[0, 1, 4, -1],
[0, 2, 1, 4],
[0, 1, -1, -1],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[0, 4, 3, 2],
[-1, -1, -1, -1],
[-1, -1, -1, -1],
]
)
assert torch.equal(
gold_alias2entity_table.long(),
self.alias_entity_table.alias2entity_table.long(),
)
def test_setup_incand(self):
"""Test setup in candidate."""
self.config["data_config"]["train_in_candidates"] = True
self.alias_entity_table = AliasEntityTable(
DottedDict(self.config["data_config"]), self.entity_symbols
)
gold_alias2entity_table = torch.tensor(
[
[1, 4, -1],
[2, 1, 4],
[1, -1, -1],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[4, 3, 2],
[-1, -1, -1],
[-1, -1, -1],
]
)
assert torch.equal(
gold_alias2entity_table.long(),
self.alias_entity_table.alias2entity_table.long(),
)
def test_forward(self):
"""Test forward."""
self.alias_entity_table = AliasEntityTable(
DottedDict(self.config["data_config"]), self.entity_symbols
)
# idx 1 is multi word alias 2, idx 0 is alias 1
actual_indices = self.alias_entity_table.forward(torch.tensor([[[0, 1, -2]]]))
# 0 is for non-candidate, -1 is for padded value
expected_tensor = torch.tensor(
[[[[0, 1, 4, -1], [0, 2, 1, 4], [-1, -1, -1, -1]]]]
)
assert torch.equal(actual_indices.long(), expected_tensor.long())
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_entity/test_entity.py |
"""Test entity profile."""
import os
import shutil
import unittest
from pathlib import Path
import emmental
import numpy as np
import torch
import ujson
from pydantic import ValidationError
from bootleg.run import run_model
from bootleg.symbols.entity_profile import EntityProfile
from bootleg.utils.parser import parser_utils
class EntityProfileTest(unittest.TestCase):
"""Entity profile test."""
def setUp(self) -> None:
"""Set up."""
self.dir = Path("tests/data/entity_profile_test")
self.save_dir = Path(self.dir / "entity_db_save")
self.save_dir.mkdir(exist_ok=True, parents=True)
self.save_dir2 = Path(self.dir / "entity_db_save2")
self.save_dir2.mkdir(exist_ok=True, parents=True)
self.profile_file = Path(self.dir / "raw_data/entity_profile.jsonl")
self.profile_file.parent.mkdir(exist_ok=True, parents=True)
self.data_dir = self.dir / "data"
self.train_data = self.data_dir / "train.jsonl"
self.train_data.parent.mkdir(exist_ok=True, parents=True)
self.arg_file = self.dir / "args.json"
def tearDown(self) -> None:
"""Tear down."""
if os.path.exists(self.dir):
shutil.rmtree(self.dir, ignore_errors=True)
def write_data(self, file, data):
"""Write data to file."""
with open(file, "w") as out_f:
for d in data:
out_f.write(ujson.dumps(d) + "\n")
def test_profile_load_simple(self):
"""Test profile load simple."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"description": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
# Missing type system
{
"entity_id": "Q567",
"mentions": [["catt", 6.5], ["animal", 3.3]],
"title": "Catt",
"description": "Catt",
"types": {"hyena": ["animal", "animall"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
# No KG/Types
{
"entity_id": "Q789",
"mentions": [["animal", 12.2]],
"title": "Dogg",
},
]
self.write_data(self.profile_file, data)
gold_qid2title = {"Q123": "Dog", "Q345": "Cat", "Q567": "Catt", "Q789": "Dogg"}
gold_qid2desc = {"Q123": "Dog", "Q345": "Cat", "Q567": "Catt", "Q789": ""}
gold_alias2qids = {
"dog": [["Q123", 10.0]],
"dogg": [["Q123", 7.0]],
"cat": [["Q345", 10.0]],
"catt": [["Q345", 7.0], ["Q567", 6.5]],
"animal": [["Q789", 12.2], ["Q123", 4.0], ["Q567", 3.3], ["Q345", 3.0]],
}
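# Candidate lists above are sorted by mention score in descending order, which is what
# _read_profile_file is expected to produce.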
gold_type_systems = {
"hyena": {
"Q123": ["animal"],
"Q345": ["animal"],
"Q567": ["animal", "animall"],
"Q789": [],
},
"wiki": {"Q123": ["dog"], "Q345": ["cat"], "Q567": [], "Q789": []},
}
gold_qid2relations = {
"Q123": {"sibling": ["Q345", "Q567"]},
"Q345": {"sibling": ["Q123"]},
"Q567": {"sibling": ["Q123"]},
}
(
qid2title,
qid2desc,
alias2qids,
type_systems,
qid2relations,
) = EntityProfile._read_profile_file(self.profile_file)
self.assertDictEqual(gold_qid2title, qid2title)
self.assertDictEqual(gold_qid2desc, qid2desc)
self.assertDictEqual(gold_alias2qids, alias2qids)
self.assertDictEqual(gold_type_systems, type_systems)
self.assertDictEqual(gold_qid2relations, qid2relations)
# Test loading/saving from jsonl
ep = EntityProfile.load_from_jsonl(self.profile_file, edit_mode=True)
ep.save_to_jsonl(self.profile_file)
read_in_data = [ujson.loads(li) for li in open(self.profile_file)]
assert len(read_in_data) == len(data)
for qid_obj in data:
found_other_obj = None
for possible_match in read_in_data:
if qid_obj["entity_id"] == possible_match["entity_id"]:
found_other_obj = possible_match
break
assert found_other_obj is not None
self.assertDictEqual(qid_obj, found_other_obj)
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
# Extra QID
{
"entity_id": "Q123",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"description": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
with self.assertRaises(ValueError) as context:
EntityProfile._read_profile_file(self.profile_file)
assert type(context.exception) is ValueError
assert "is already in our dump" in str(context.exception)
data = [
# Relation in wrong format
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relationnn": "sibling", "objject": "Q345"},
],
}
]
self.write_data(self.profile_file, data)
with self.assertRaises(ValueError) as context:
EntityProfile._read_profile_file(self.profile_file)
assert type(context.exception) is ValueError
assert "it must be a JSON with keys relation and object" in str(
context.exception
)
def test_profile_load_jsonl_errors(self):
"""Test profile load from jsonl."""
data = [
{
"entity_id": 123,
"mentions": [["dog"], ["dogg"], ["animal"]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
]
self.write_data(self.profile_file, data)
with self.assertRaises(ValidationError) as context:
EntityProfile._read_profile_file(self.profile_file)
assert type(context.exception) is ValidationError
def test_profile_dump_load(self):
"""Test profile load from dump."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=5, edit_mode=True
)
entity_profile.save(self.save_dir2)
# Test load correctly
entity_profile2 = EntityProfile.load_from_cache(self.save_dir2)
self.assertSetEqual(
set(entity_profile.get_all_qids()), set(entity_profile2.get_all_qids())
)
self.assertSetEqual(
set(entity_profile.get_all_typesystems()),
set(entity_profile2.get_all_typesystems()),
)
for type_sys in entity_profile.get_all_typesystems():
self.assertSetEqual(
set(entity_profile.get_all_types(type_sys)),
set(entity_profile2.get_all_types(type_sys)),
)
for qid in entity_profile.get_all_qids():
self.assertDictEqual(
entity_profile.get_relations_tails_for_qid(qid),
entity_profile2.get_relations_tails_for_qid(qid),
)
# Test load with no types or kgs
entity_profile2 = EntityProfile.load_from_cache(
self.save_dir2, no_type=True, no_kg=True
)
self.assertSetEqual(
set(entity_profile.get_all_qids()), set(entity_profile2.get_all_qids())
)
assert len(entity_profile2.get_all_typesystems()) == 0
self.assertIsNone(entity_profile2._kg_symbols)
# Testing that the functions still work despite not loading them
assert entity_profile2.get_relations_tails_for_qid("Q123") is None
# Test load with no types or kgs
entity_profile2 = EntityProfile.load_from_cache(
self.save_dir2, no_kg=True, type_systems_to_load=["wiki"]
)
self.assertSetEqual(
set(entity_profile.get_all_qids()), set(entity_profile2.get_all_qids())
)
assert entity_profile2.get_all_typesystems() == ["wiki"]
self.assertSetEqual(
set(entity_profile.get_all_types("wiki")),
set(entity_profile2.get_all_types("wiki")),
)
self.assertIsNone(entity_profile2._kg_symbols)
# Assert error loading type system that is not there
with self.assertRaises(ValueError) as context:
entity_profile2.get_all_types("hyena")
assert type(context.exception) is ValueError
assert "type system hyena is not one" in str(context.exception)
def test_checks(self):
"""Test checks."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=5
)
with self.assertRaises(AttributeError) as context:
entity_profile.add_relation("Q345", "sibling", "Q123")
assert type(context.exception) is AttributeError
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=5, edit_mode=True
)
with self.assertRaises(ValueError) as context:
entity_profile.add_relation("Q789", "sibling", "Q123")
assert type(context.exception) is ValueError
assert "is not in our dump" in str(context.exception)
with self.assertRaises(ValueError) as context:
entity_profile.add_relation(qid="Q789", relation="sibling", qid2="Q123")
assert type(context.exception) is ValueError
assert "is not in our dump" in str(context.exception)
with self.assertRaises(ValueError) as context:
entity_profile.add_type(qid="Q345", type="sibling", type_system="blah")
assert type(context.exception) is ValueError
assert "type system blah is not one" in str(context.exception)
with self.assertRaises(ValueError) as context:
entity_profile.get_types(qid="Q345", type_system="blah")
assert type(context.exception) is ValueError
assert "type system blah is not one" in str(context.exception)
def test_getters(self):
"""Test getters."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [
{"relation": "sibling", "object": "Q123"},
{"relation": "sibbbling", "object": "Q123"},
],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=3, edit_mode=True
)
self.assertEqual(entity_profile.get_eid("Q345"), 2)
self.assertTrue(entity_profile.mention_exists("cat"))
self.assertFalse(entity_profile.mention_exists("dat"))
self.assertListEqual(entity_profile.get_qid_cands("cat"), ["Q345"])
self.assertListEqual(
entity_profile.get_qid_count_cands("cat"), [["Q345", 10.0]]
)
self.assertSetEqual(
set(entity_profile.get_all_mentions()),
{"dog", "dogg", "animal", "cat", "catt"},
)
self.assertSetEqual(
set(entity_profile.get_mentions("Q345")), {"animal", "cat", "catt"}
)
self.assertSetEqual(
set(entity_profile.get_entities_of_type("cat", "wiki")), {"Q345"}
)
self.assertEqual(entity_profile.num_entities_with_pad_and_nocand, 4)
self.assertSetEqual(
entity_profile.get_relations_between("Q345", "Q123"),
{"sibling", "sibbbling"},
)
def test_add_entity(self):
"""Test add entity."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=3, edit_mode=True
)
entity_profile.save(self.save_dir2)
# Test bad format
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(["bad format"])
assert type(context.exception) is ValueError
assert "The input to update_entity needs to be a dictionary" in str(
context.exception
)
new_entity = {
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
}
# Test already existing entity
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(new_entity)
assert type(context.exception) is ValueError
assert "The entity Q345 already exists" in str(context.exception)
new_entity = {
"entity_id": "Q789",
"mentions": [["snake", 10.0], ["animal", 3.0]],
"title": "Snake",
"description": "Snake",
"types": {"hyena": ["animal"], "new_sys": ["snakey"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
}
# Test can't update qid not in dump
with self.assertRaises(ValueError) as context:
entity_profile.update_entity(new_entity)
assert type(context.exception) is ValueError
assert "The entity Q789 is not in our dump" in str(context.exception)
# Test new type system
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(new_entity)
assert type(context.exception) is ValueError
assert "When adding a new entity, you must use the same type system" in str(
context.exception
)
new_entity = {
"entity_id": "Q789",
"mentions": [["snake", 10.0], ["animal", 3.0]],
"title": "Snake",
"description": "Snake",
"types": {"hyena": ["animal"]},
"relations": [{"relatiion": "sibbbling", "object": "Q123"}],
}
# Test new bad relation format
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(new_entity)
assert type(context.exception) is ValueError
assert (
"For each value in relations, it must be a JSON with keys relation and object"
in str(context.exception)
)
new_entity = {
"entity_id": "Q790",
"mentions": [["snake", 10.0], ["animal", 3.0]],
"title": "Snake",
"description": "Snake",
"types": {"hyena": ["animal"]},
"relations": [{"relation": "sibbbling", "object": "Q123"}],
}
entity_profile.add_entity(new_entity)
self.assertTrue(entity_profile.qid_exists("Q790"))
self.assertEqual(entity_profile.get_title("Q790"), "Snake")
self.assertEqual(entity_profile.get_desc("Q790"), "Snake")
self.assertListEqual(
entity_profile.get_mentions_with_scores("Q790"),
[["snake", 10.0], ["animal", 3.0]],
)
self.assertListEqual(entity_profile.get_types("Q790", "hyena"), ["animal"])
self.assertListEqual(entity_profile.get_types("Q790", "wiki"), [])
self.assertSetEqual(
entity_profile.get_relations_between("Q790", "Q123"), {"sibbbling"}
)
new_entity = {
"entity_id": "Q789",
"mentions": [["snakke", 10.0], ["animal", 3.0]],
"title": "Snake",
"description": "Snake",
"types": {"hyena": ["animal"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
}
# Assert it is added
entity_profile.add_entity(new_entity)
self.assertTrue(entity_profile.qid_exists("Q789"))
self.assertEqual(entity_profile.get_title("Q789"), "Snake")
self.assertEqual(entity_profile.get_desc("Q789"), "Snake")
self.assertListEqual(
entity_profile.get_mentions_with_scores("Q789"),
[["snakke", 10.0], ["animal", 3.0]],
)
self.assertListEqual(entity_profile.get_types("Q789", "hyena"), ["animal"])
self.assertListEqual(entity_profile.get_types("Q789", "wiki"), [])
self.assertSetEqual(
entity_profile.get_relations_between("Q789", "Q123"), {"sibling"}
)
# Update entity
new_entity = {
"entity_id": "Q789",
"mentions": [["snake", 10.0], ["animal", 3.0]],
"title": "Snake",
"description": "Snake",
"types": {"hyena": ["animal"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
}
# Assert it is added
entity_profile.update_entity(new_entity)
self.assertTrue(entity_profile.qid_exists("Q789"))
self.assertEqual(entity_profile.get_title("Q789"), "Snake")
self.assertEqual(entity_profile.get_desc("Q789"), "Snake")
self.assertListEqual(
entity_profile.get_mentions_with_scores("Q789"),
[["snake", 10.0], ["animal", 3.0]],
)
self.assertListEqual(entity_profile.get_types("Q789", "hyena"), ["animal"])
self.assertListEqual(entity_profile.get_types("Q789", "wiki"), [])
self.assertSetEqual(
entity_profile.get_relations_between("Q789", "Q123"), {"sibling"}
)
# Check that no_kg still works with load_from_cache
entity_profile2 = EntityProfile.load_from_cache(
self.save_dir2, no_kg=True, edit_mode=True
)
entity_profile2.add_entity(new_entity)
self.assertTrue(entity_profile2.qid_exists("Q789"))
self.assertEqual(entity_profile2.get_title("Q789"), "Snake")
self.assertEqual(entity_profile2.get_desc("Q789"), "Snake")
self.assertListEqual(
entity_profile2.get_mentions_with_scores("Q789"),
[["snake", 10.0], ["animal", 3.0]],
)
self.assertListEqual(entity_profile2.get_types("Q789", "hyena"), ["animal"])
self.assertListEqual(entity_profile2.get_types("Q789", "wiki"), [])
self.assertSetEqual(
entity_profile.get_relations_between("Q789", "Q123"), {"sibling"}
)
def test_reidentify_entity(self):
"""Test reidentify entity."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=5, edit_mode=True
)
entity_profile.save(self.save_dir2)
with self.assertRaises(ValueError) as context:
entity_profile.reidentify_entity("Q123", "Q345")
assert type(context.exception) is ValueError
assert "The entity Q345 already exists" in str(context.exception)
with self.assertRaises(ValueError) as context:
entity_profile.reidentify_entity("Q125", "Q911")
assert type(context.exception) is ValueError
assert "The entity Q125 is not in our dump" in str(context.exception)
entity_profile.reidentify_entity("Q123", "Q911")
self.assertTrue(entity_profile.qid_exists("Q911"))
self.assertFalse(entity_profile.qid_exists("Q123"))
self.assertEqual(entity_profile.get_title("Q911"), "Dog")
self.assertEqual(entity_profile.get_desc("Q911"), "Dog")
self.assertListEqual(
entity_profile.get_mentions_with_scores("Q911"),
[["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
)
self.assertListEqual(entity_profile.get_types("Q911", "hyena"), ["animal"])
self.assertListEqual(entity_profile.get_types("Q911", "wiki"), ["dog"])
self.assertSetEqual(
entity_profile.get_relations_between("Q911", "Q345"), {"sibling"}
)
self.assertSetEqual(
entity_profile.get_relations_between("Q911", "Q567"), {"sibling"}
)
# Check that no_kg still works with load_from_cache
entity_profile2 = EntityProfile.load_from_cache(
self.save_dir2, no_kg=True, edit_mode=True
)
entity_profile2.reidentify_entity("Q123", "Q911")
self.assertTrue(entity_profile2.qid_exists("Q911"))
self.assertFalse(entity_profile2.qid_exists("Q123"))
self.assertEqual(entity_profile2.get_title("Q911"), "Dog")
self.assertEqual(entity_profile2.get_desc("Q911"), "Dog")
self.assertListEqual(
entity_profile2.get_mentions_with_scores("Q911"),
[["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
)
self.assertListEqual(entity_profile2.get_types("Q911", "hyena"), ["animal"])
self.assertListEqual(entity_profile2.get_types("Q911", "wiki"), ["dog"])
self.assertIsNone(entity_profile2.get_relations_between("Q911", "Q345"))
def test_prune_to_entities(self):
"""Test prune to entities."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=5, edit_mode=True
)
entity_profile.save(self.save_dir2)
with self.assertRaises(ValueError) as context:
entity_profile.prune_to_entities({"Q123", "Q567"})
assert type(context.exception) is ValueError
assert "The entity Q567 does not exist" in str(context.exception)
entity_profile.prune_to_entities({"Q123"})
self.assertTrue(entity_profile.qid_exists("Q123"))
self.assertFalse(entity_profile.qid_exists("Q345"))
self.assertListEqual(
entity_profile.get_mentions_with_scores("Q123"),
[["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
)
self.assertListEqual(entity_profile.get_types("Q123", "hyena"), ["animal"])
self.assertListEqual(entity_profile.get_types("Q123", "wiki"), ["dog"])
self.assertSetEqual(entity_profile.get_relations_between("Q123", "Q567"), set())
# Check that no_kg still works with load_from_cache
entity_profile2 = EntityProfile.load_from_cache(
self.save_dir2, no_kg=True, edit_mode=True
)
entity_profile2.prune_to_entities({"Q123"})
self.assertTrue(entity_profile2.qid_exists("Q123"))
self.assertFalse(entity_profile2.qid_exists("Q345"))
self.assertListEqual(
entity_profile2.get_mentions_with_scores("Q123"),
[["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
)
self.assertListEqual(entity_profile2.get_types("Q123", "hyena"), ["animal"])
self.assertListEqual(entity_profile2.get_types("Q123", "wiki"), ["dog"])
self.assertSetEqual(entity_profile.get_relations_between("Q123", "Q567"), set())
def test_end2end(self):
"""Test end2end."""
# ======================
# PART 1: TRAIN A SMALL MODEL WITH ONE PROFILE DUMP
# ======================
# Generate entity profile data
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"description": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
# Missing type system
{
"entity_id": "Q567",
"mentions": [["catt", 6.5], ["animal", 3.3]],
"title": "Catt",
"description": "Catt",
"types": {"hyena": ["animal", "animall"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
# No KG/Types
{
"entity_id": "Q789",
"mentions": [["animal", 12.2]],
"title": "Dogg",
},
]
# Generate train data
train_data = [
{
"sent_idx_unq": 1,
"sentence": "I love animals and dogs",
"qids": ["Q567", "Q123"],
"aliases": ["animal", "dog"],
"gold": [True, True],
"char_spans": [[7, 14], [19, 23]],
}
]
self.write_data(self.profile_file, data)
self.write_data(self.train_data, train_data)
entity_profile = EntityProfile.load_from_jsonl(self.profile_file)
# Dump profile data in format for model
entity_profile.save(self.save_dir)
# Setup model args to read the data/new profile data
raw_args = {
"emmental": {
"n_epochs": 1,
},
"run_config": {
"dataloader_threads": 0,
"dataset_threads": 1,
},
"model_config": {"hidden_size": 10},
"train_config": {"batch_size": 2},
"data_config": {
"entity_dir": str(self.save_dir),
"max_seq_len": 7,
"data_dir": str(self.data_dir),
"word_embedding": {
"context_layers": 1,
"entity_layers": 1,
"cache_dir": str(self.save_dir / "retrained_bert_model"),
},
"train_dataset": {"file": "train.jsonl"},
"dev_dataset": {"file": "train.jsonl"},
"test_dataset": {"file": "train.jsonl"},
},
}
with open(self.arg_file, "w") as out_f:
ujson.dump(raw_args, out_f)
args = parser_utils.parse_boot_and_emm_args(str(self.arg_file))
# This _MUST_ get passed the args so it gets a random seed set
emmental.init(log_dir=str(self.dir / "temp_log"), config=args)
if not os.path.exists(emmental.Meta.log_path):
os.makedirs(emmental.Meta.log_path)
# ======================
# PART 2: RUN MODEL
# ======================
scores = run_model(mode="train", config=args)
saved_model_path1 = f"{emmental.Meta.log_path}/last_model.pth"
assert type(scores) is dict
# ======================
# PART 3: MODIFY PROFILE AND LOAD PRETRAINED MODEL AND TRAIN FOR MORE
# ======================
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, edit_mode=True
)
entity_profile.add_type("Q123", "cat", "wiki")
entity_profile.remove_type("Q123", "dog", "wiki")
entity_profile.add_mention("Q123", "cat", 100.0)
# Dump profile data in format for model
entity_profile.save(self.save_dir2)
# Modify arg paths
args["data_config"]["entity_dir"] = str(self.save_dir2)
# Load pretrained model
args["model_config"]["model_path"] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"]["model_path"] = saved_model_path1
# Init another run
emmental.init(log_dir=str(self.dir / "temp_log"), config=args)
if not os.path.exists(emmental.Meta.log_path):
os.makedirs(emmental.Meta.log_path)
scores = run_model(mode="train", config=args)
assert type(scores) is dict
# ======================
# PART 4: VERIFY CHANGES IN THE MODEL WERE AS EXPECTED
# ======================
# Check that the alias mappings are different
alias2entity_table1 = torch.from_numpy(
np.memmap(
self.save_dir / "prep" / "alias2entity_table_alias2qids_InC1.pt",
dtype="int64",
mode="r",
shape=(5, 30),
)
)
gold_alias2entity_table1 = torch.tensor(
[
[1] + [-1] * 29,
[1] + [-1] * 29,
[4, 1, 3, 2] + [-1] * 26,
[2] + [-1] * 29,
[2, 3] + [-1] * 28,
]
)
assert torch.equal(alias2entity_table1, gold_alias2entity_table1)
# The change is the "cat" alias has entity 1 added to the beginning
# It used to only point to Q345 which is entity 2
alias2entity_table2 = torch.from_numpy(
np.memmap(
self.save_dir2 / "prep" / "alias2entity_table_alias2qids_InC1.pt",
dtype="int64",
mode="r",
shape=(5, 30),
)
)
gold_alias2entity_table2 = torch.tensor(
[
[1] + [-1] * 29,
[1] + [-1] * 29,
[4, 1, 3, 2] + [-1] * 26,
[1, 2] + [-1] * 28,
[2, 3] + [-1] * 28,
]
)
assert torch.equal(alias2entity_table2, gold_alias2entity_table2)
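# Concretely, only the "cat" row differs between the two tables: [2, -1, ...] becomes
# [1, 2, -1, ...] because add_mention("Q123", "cat", 100.0) puts Q123 (eid 1) ahead of
# Q345 (eid 2).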
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_entity/test_entity_profile.py |
"""End2end test."""
import os
import shutil
import unittest
import emmental
import ujson
from bootleg.run import run_model
from bootleg.utils import utils
from bootleg.utils.parser import parser_utils
class TestEnd2End(unittest.TestCase):
"""Test end to end."""
def setUp(self) -> None:
"""Set up."""
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_end2end.json"
)
# This _MUST_ get passed the args so it gets a random seed set
emmental.init(log_dir="tests/temp_log", config=self.args)
if not os.path.exists(emmental.Meta.log_path):
os.makedirs(emmental.Meta.log_path)
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join("tests/temp_log")
if os.path.exists(dir):
shutil.rmtree(dir, ignore_errors=True)
def test_end2end(self):
"""End2end base test."""
# Just setting this for testing pipelines
scores = run_model(mode="train", config=self.args)
assert type(scores) is dict
assert len(scores) > 0
assert scores["model/all/dev/loss"] < 1.1
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
result_file = run_model(mode="dump_preds", config=self.args)
assert os.path.exists(result_file)
results = [ujson.loads(li) for li in open(result_file)]
assert 19 == len(results) # 19 output rows for the 18 test sentences
assert len([f for li in results for f in li["entity_ids"]]) == 52
# Doubling up a test here to also test accumulation steps
def test_end2end_accstep(self):
"""Test end2end with accumulation steps."""
# Just setting this for testing pipelines
self.args.data_config.dump_preds_accumulation_steps = 2
self.args.run_config.dataset_threads = 2
scores = run_model(mode="train", config=self.args)
assert type(scores) is dict
assert len(scores) > 0
assert scores["model/all/dev/loss"] < 1.1
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
result_file = run_model(mode="dump_preds", config=self.args)
assert os.path.exists(result_file)
results = [ujson.loads(li) for li in open(result_file)]
assert 19 == len(results) # 19 output rows for the 18 test sentences
assert len([f for li in results for f in li["entity_ids"]]) == 52
# Doubling up a test here to also test greater than 1 eval batch size
def test_end2end_evalbatch(self):
"""Test end2end with eval batch size."""
self.args.data_config.dump_preds_accumulation_steps = 2
self.args.run_config.dataset_threads = 2
self.args.run_config.eval_batch_size = 2
scores = run_model(mode="train", config=self.args)
assert type(scores) is dict
assert len(scores) > 0
assert scores["model/all/dev/loss"] < 1.1
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
result_file = run_model(mode="dump_preds", config=self.args)
assert os.path.exists(result_file)
results = [ujson.loads(li) for li in open(result_file)]
assert 19 == len(results) # 19 output rows for the 18 test sentences
assert len([f for li in results for f in li["entity_ids"]]) == 52
shutil.rmtree("tests/temp", ignore_errors=True)
def test_end2end_bert_long_context(self):
"""Test end2end with longer sentence context."""
self.args.data_config.max_seq_len = 256
self.args.run_config.dump_preds_num_data_splits = 4
scores = run_model(mode="train", config=self.args)
assert type(scores) is dict
assert len(scores) > 0
assert scores["model/all/dev/loss"] < 1.1
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
result_file = run_model(mode="dump_preds", config=self.args)
assert os.path.exists(result_file)
results = [ujson.loads(li) for li in open(result_file)]
assert 19 == len(results) # 19 output rows for the 18 test sentences
assert len([f for li in results for f in li["entity_ids"]]) == 52
shutil.rmtree("tests/temp", ignore_errors=True)
def test_end2end_train_in_cands_false(self):
"""End2end base test."""
# Just setting this for testing pipelines
self.args.data_config.train_in_candidates = False
self.args.data_config.train_dataset.file = "end2end_train_not_in_cand.jsonl"
scores = run_model(mode="train", config=self.args)
assert type(scores) is dict
assert len(scores) > 0
assert scores["model/all/dev/loss"] < 1.5
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
result_file = run_model(mode="dump_preds", config=self.args)
assert os.path.exists(result_file)
results = [ujson.loads(li) for li in open(result_file)]
assert 19 == len(results) # 19 output rows for the 18 test sentences
assert len([f for li in results for f in li["entity_ids"]]) == 52
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_end_to_end/test_end_to_end.py |
"""Test mention extraction."""
import os
import tempfile
import unittest
from pathlib import Path
import ujson
from bootleg.symbols.entity_symbols import EntitySymbols
class MentionExtractionTest(unittest.TestCase):
"""Mention extraction test."""
def setUp(self) -> None:
"""Set up."""
self.test_dir = tempfile.TemporaryDirectory()
def tearDown(self) -> None:
"""Tear down."""
self.test_dir.cleanup()
def write_data(self, file, data):
"""Write data."""
Path(file).parent.mkdir(parents=True, exist_ok=True)
with open(file, "w") as out_f:
for line in data:
out_f.write(ujson.dumps(line) + "\n")
def test_mention_extraction(self):
"""Test that mention extraction runs without crashing."""
in_file = Path(self.test_dir.name) / "train.jsonl"
out_file = Path(self.test_dir.name) / "train_out.jsonl"
entity_db = Path(self.test_dir.name) / "entity_db" / "entity_mappings"
alias2qids = {
"happy": [["Q1", 1.0], ["Q2", 1.0], ["Q3", 1.0]],
"cow": [["Q4", 1.0], ["Q5", 1.0], ["Q6", 1.0]],
"batman": [["Q7", 1.0], ["Q8", 1.0]],
}
qid2title = {
"Q1": "aack",
"Q2": "back",
"Q3": "cack",
"Q4": "dack",
"Q5": "eack",
"Q6": "fack",
"Q7": "gack",
"Q8": "hack",
}
mock_entity_db = EntitySymbols(alias2qids, qid2title)
mock_entity_db.save(entity_db)
data = [
{
"sentence": "happy cow batman",
}
] * 100
self.write_data(in_file, data)
os.system(
f"python3 bootleg/end2end/extract_mentions.py "
f"--in_file {str(in_file)} "
f"--out_file {str(out_file)} "
f"--entity_db {str(entity_db)} "
f"--num_workers 1 "
f"--num_chunks 10"
)
assert out_file.exists()
out_data = [ln for ln in open(out_file)]
assert len(out_data) == 100
os.system(
f"python3 bootleg/end2end/extract_mentions.py "
f"--in_file {str(in_file)} "
f"--out_file {str(out_file)} "
f"--entity_db {str(entity_db)} "
f"--num_workers 2 "
f"--num_chunks 10"
)
assert out_file.exists()
out_data = [ln for ln in open(out_file)]
assert len(out_data) == 100
| bootleg-master | tests/test_end_to_end/test_mention_extraction.py |
"""Test generate entities."""
import os
import shutil
import unittest
import emmental
import numpy as np
import torch
import ujson
import bootleg.extract_all_entities as extract_all_entities
import bootleg.run as run
from bootleg.utils import utils
from bootleg.utils.parser import parser_utils
class TestGenEntities(unittest.TestCase):
"""Test generate entites."""
def setUp(self) -> None:
"""Set up."""
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_end2end.json"
)
# This _MUST_ get passed the args so it gets a random seed set
emmental.init(log_dir="tests/temp_log", config=self.args)
if not os.path.exists(emmental.Meta.log_path):
os.makedirs(emmental.Meta.log_path)
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join("tests/temp_log")
if os.path.exists(dir):
shutil.rmtree(dir, ignore_errors=True)
def test_end2end(self):
"""Test end to end."""
# For the collate functions and dataloaders to play nicely, the multiprocessing start method must be "fork" (this is set in run.py)
torch.multiprocessing.set_start_method("fork", force=True)
# Train and save model
run.run_model(mode="train", config=self.args)
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
out_emb_file = extract_all_entities.run_model(config=self.args)
assert os.path.exists(out_emb_file)
embs = np.load(out_emb_file)
assert list(embs.shape) == [6, 32]
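# Shape note (assumption from the test config): 6 rows = 4 entities + pad + "no candidate",
# and 32 is the entity embedding hidden size.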
final_result_file = run.run_model(
mode="dump_preds", config=self.args, entity_emb_file=out_emb_file
)
lines = [ujson.loads(ln) for ln in open(final_result_file)]
final_result_file = run.run_model(
mode="dump_preds", config=self.args, entity_emb_file=None
)
lines_no_emb_file = [ujson.loads(ln) for ln in open(final_result_file)]
assert len(lines) == len(lines_no_emb_file)
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_end_to_end/test_gen_entities.py |
"""Test annotator."""
import os
import shutil
import unittest
import emmental
import torch
from bootleg import extract_all_entities
from bootleg.end2end.bootleg_annotator import BootlegAnnotator
from bootleg.run import run_model
from bootleg.utils import utils
from bootleg.utils.parser import parser_utils
class TestEnd2End(unittest.TestCase):
"""Test annotator end to end."""
def setUp(self) -> None:
"""Set up."""
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_end2end.json"
)
# This _MUST_ get passed the args so it gets a random seed set
emmental.init(log_dir="tests/temp_log", config=self.args)
if not os.path.exists(emmental.Meta.log_path):
os.makedirs(emmental.Meta.log_path)
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join("tests/temp_log")
if os.path.exists(dir):
shutil.rmtree(dir, ignore_errors=True)
def test_annotator(self):
"""Test annotator end to end."""
torch.multiprocessing.set_start_method("fork", force=True)
# Just to make it go faster
self.args["learner_config"]["n_epochs"] = 1
# First train some model so we have it stored
run_model(mode="train", config=self.args)
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
out_emb_file = extract_all_entities.run_model(config=self.args)
ann = BootlegAnnotator(
config=self.args, verbose=True, extract_method="ngram_spacy"
)
# TEST SINGLE TEXT
# Res should have alias1
res = ann.label_mentions(
"alias1 and alias2 and multi word alias3 I have no idea"
)
gold_ans = {
"qids": [["Q1"]],
"titles": [["alias1"]],
"cands": [[["Q1", "Q4", "-1"]]],
"char_spans": [[[0, 6]]],
"aliases": [["alias1"]],
}
for k in gold_ans:
# In case model doesn't learn the right pattern (happens on GitHub for some reason),
# Do not test qids or titles
if k in ["char_spans", "aliases", "cands"]:
self.assertListEqual(gold_ans[k], res[k])
# TEST LONG TEXT
# Res should have alias1
res = ann.label_mentions(
[
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea",
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea",
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea",
]
)
gold_ans = {
"qids": [["Q1"] * 8] * 3,
"titles": [["alias1"] * 8] * 3,
"cands": [[["Q1", "Q4", "-1"]] * 8] * 3,
"char_spans": [
[
[0, 6],
[56, 62],
[112, 118],
[168, 174],
[224, 230],
[280, 286],
[336, 342],
[392, 398],
],
]
* 3,
"aliases": [["alias1"] * 8] * 3,
}
for k in gold_ans:
# In case model doesn't learn the right pattern (happens on GitHub for some reason),
# Do not test qids or titles
if k in ["char_spans", "aliases", "cands"]:
self.assertListEqual(gold_ans[k], res[k])
# TEST RETURN EMBS
ann.return_embs = True
res = ann.label_mentions(
"alias1 and alias2 and multi word alias3 I have no idea"
)
assert "embs" in res
assert res["embs"][0][0].shape[0] == 32
assert list(res["cand_embs"][0][0].shape) == [3, 32]
# TEST RETURN EMBS
ann.return_embs = False
ann.return_ctx_embs = True
res = ann.label_mentions(
"alias1 and alias2 and multi word alias3 I have no idea"
)
assert "ctx_embs" in res
assert res["ctx_embs"][0][0].shape[0] == 32
# TEST CUSTOM CANDS
ann.return_embs = False
extracted_exs = [
{
"sentence": "alias1 and alias2 and multi word alias3 I have no idea",
"aliases": ["alias3"],
"char_spans": [[0, 6]],
"cands": [["Q3"]],
},
{
"sentence": "alias1 and alias2 and multi word alias3 I have no idea. "
"alias1 and alias2 and multi word alias3 I have no idea. ",
"aliases": ["alias1", "alias3", "alias1"],
"char_spans": [[0, 6], [11, 17], [22, 39]],
"cands": [["Q2"], ["Q3"], ["Q2"]],
},
]
res = ann.label_mentions(extracted_examples=extracted_exs)
gold_ans = {
"qids": [["Q3"], ["Q2", "Q3", "Q2"]],
"titles": [
["word alias3"],
["multi alias2", "word alias3", "multi alias2"],
],
"cands": [
[["Q3", "-1", "-1"]],
[["Q2", "-1", "-1"], ["Q3", "-1", "-1"], ["Q2", "-1", "-1"]],
],
"char_spans": [[[0, 6]], [[0, 6], [11, 17], [22, 39]]],
"aliases": [["alias3"], ["alias1", "alias3", "alias1"]],
}
for k in gold_ans:
self.assertListEqual(gold_ans[k], res[k])
ann = BootlegAnnotator(
config=self.args,
verbose=True,
entity_emb_file=out_emb_file,
extract_method="ngram_spacy",
)
ann.return_embs = True
# TEST SINGLE TEXT
# Res should have alias1
res = ann.label_mentions(
"alias1 and alias2 and multi word alias3 I have no idea"
)
assert "embs" in res
gold_ans = {
"qids": [["Q1"]],
"titles": [["alias1"]],
"cands": [[["Q1", "Q4", "-1"]]],
"char_spans": [[[0, 6]]],
"aliases": [["alias1"]],
}
for k in gold_ans:
# In case model doesn't learn the right pattern (happens on GitHub for some reason),
# Do not test qids or titles
if k in ["char_spans", "aliases", "cands"]:
self.assertListEqual(gold_ans[k], res[k])
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_end_to_end/test_annotator.py |
"""Test scorer."""
import unittest
import numpy as np
from bootleg.scorer import BootlegSlicedScorer
class BootlegMockScorer(BootlegSlicedScorer):
"""Bootleg mock scorer class."""
def __init__(self, train_in_candidates):
"""Mock initializer."""
self.mock_slices = {
0: {"all": [1], "slice_1": [0]},
1: {"all": [1], "slice_1": [1]},
2: {"all": [1], "slice_1": [0]},
3: {"all": [1], "slice_1": [0]},
4: {"all": [1], "slice_1": [1]},
5: {"all": [1], "slice_1": [0]},
}
self.train_in_candidates = train_in_candidates
def get_slices(self, uid):
"""Get slices."""
return self.mock_slices[uid]
class TestScorer(unittest.TestCase):
"""Scorer test."""
def test_bootleg_scorer(self):
"""Test scorer."""
# batch = 6
scorer = BootlegMockScorer(train_in_candidates=True)
golds = np.array([0, -2, 1, -1, 0, 3])
probs = np.array([])
preds = np.array([1, 2, 0, 1, 0, 3])
uids = np.array([0, 1, 2, 3, 4, 5])
res = scorer.bootleg_score(golds, probs, preds, uids)
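# Informal reading of the expected numbers below: gold -1 marks a mention that is skipped
# entirely, gold -2 marks a "not in candidates" (NC) mention, acc_boot scores the model
# predictions, and acc_pop scores a popularity baseline that always predicts the top-scored
# candidate.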
gold_res = {}
slice_name = "all"
gold_res[f"{slice_name}/total_men"] = 5
gold_res[f"{slice_name}/total_notNC_men"] = 4
gold_res[f"{slice_name}/acc_boot"] = 2 / 5
gold_res[f"{slice_name}/acc_notNC_boot"] = 2 / 4
gold_res[f"{slice_name}/acc_pop"] = 2 / 5
gold_res[f"{slice_name}/acc_notNC_pop"] = 2 / 4
slice_name = "slice_1"
gold_res[f"{slice_name}/total_men"] = 2
gold_res[f"{slice_name}/total_notNC_men"] = 1
gold_res[f"{slice_name}/acc_boot"] = 1 / 2
gold_res[f"{slice_name}/acc_notNC_boot"] = 1 / 1
gold_res[f"{slice_name}/acc_pop"] = 1 / 2
gold_res[f"{slice_name}/acc_notNC_pop"] = 1 / 1
self.assertDictEqual(res, gold_res)
def test_bootleg_scorer_notincand(self):
"""Test scorer non in candidate."""
# batch = 6
scorer = BootlegMockScorer(train_in_candidates=False)
golds = np.array([0, 3, 2, -1, 1, 4])
probs = np.array([])
preds = np.array([0, 3, 0, 1, 2, 4])
uids = np.array([0, 1, 2, 3, 4, 5])
res = scorer.bootleg_score(golds, probs, preds, uids)
gold_res = {}
slice_name = "all"
gold_res[f"{slice_name}/total_men"] = 5
gold_res[f"{slice_name}/total_notNC_men"] = 4
gold_res[f"{slice_name}/acc_boot"] = 3 / 5
gold_res[f"{slice_name}/acc_notNC_boot"] = 2 / 4
gold_res[f"{slice_name}/acc_pop"] = 1 / 5
gold_res[f"{slice_name}/acc_notNC_pop"] = 1 / 4
slice_name = "slice_1"
gold_res[f"{slice_name}/total_men"] = 2
gold_res[f"{slice_name}/total_notNC_men"] = 2
gold_res[f"{slice_name}/acc_boot"] = 1 / 2
gold_res[f"{slice_name}/acc_notNC_boot"] = 1 / 2
gold_res[f"{slice_name}/acc_pop"] = 1 / 2
gold_res[f"{slice_name}/acc_notNC_pop"] = 1 / 2
self.assertDictEqual(res, gold_res)
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_scorer/test_scorer.py |
"""Test eval utils."""
import os
import shutil
import tempfile
import unittest
import jsonlines
import numpy as np
import torch
import ujson
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import eval_utils
from bootleg.utils.classes.nested_vocab_tries import (
TwoLayerVocabularyScoreTrie,
VocabularyTrie,
)
from bootleg.utils.eval_utils import write_data_labels
class EntitySymbolsSubclass(EntitySymbols):
"""Mock entity symbols class."""
def __init__(self):
"""Entity symbols initializer."""
self.max_candidates = 2
# Used if we need to do any string searching for aliases. This keeps track of the largest n-gram needed.
self.max_alias_len = 1
self._qid2title = {"Q1": "a b c d e", "Q2": "f", "Q3": "dd a b", "Q4": "x y z"}
self._qid2desc = None
self._qid2eid = VocabularyTrie(input_dict={"Q1": 1, "Q2": 2, "Q3": 3, "Q4": 4})
self._alias2id = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6}
alias2qids = {
"a": [["Q1", 10.0], ["Q4", 6]],
"b": [["Q2", 5.0], ["Q1", 3]],
"c": [["Q1", 30.0], ["Q2", 3]],
"d": [["Q4", 20], ["Q3", 15.0]],
"e": [["Q1", 10.0], ["Q4", 6]],
"f": [["Q2", 5.0], ["Q1", 3]],
"g": [["Q1", 30.0], ["Q2", 3]],
}
self._alias2qids = TwoLayerVocabularyScoreTrie(
input_dict=alias2qids,
vocabulary=self._qid2title,
max_value=self.max_candidates,
)
self.num_entities = len(self._qid2eid)
self.num_entities_with_pad_and_nocand = self.num_entities + 2
self.alias_cand_map_dir = "alias2qids"
self.alias_idx_dir = "alias2qids"
class EvalUtils(unittest.TestCase):
"""Eval utils test."""
# tests if we match standard torch fns where expected
def test_masked_class_logsoftmax_basic(self):
"""Test masked class softmax."""
# shape batch x M x K
# model outputs
preds = torch.tensor([[[2.0, 2.0, 1.0], [3.0, 5.0, 4.0]]])
# all that matters for this test is that the below is non-negative
# since negative indicates masking
entity_ids = torch.tensor([[[1, 3, 4], [5, 3, 1]]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(pred=preds, mask=mask)
torch_logsoftmax = torch.nn.LogSoftmax(dim=2)
torch_log_preds = torch_logsoftmax(preds)
assert torch.allclose(torch_log_preds, pred_log_preds)
# if we mask one of the candidates, we should no longer
# get the same result as torch fn which doesn't mask
entity_ids = torch.tensor([[[1, 3, 4], [5, 3, -1]]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(pred=preds, mask=mask)
assert not torch.allclose(torch_log_preds, pred_log_preds)
# make sure masked positions are approximately zero when the log-probs are exponentiated
assert torch.allclose(
torch.tensor([[[0.422319, 0.422319, 0.155362], [0.119203, 0.880797, 0.0]]]),
torch.exp(pred_log_preds),
)
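# A minimal sketch of what masked_class_logsoftmax is assumed to compute (not necessarily
# the actual implementation): a log-softmax whose normalizer only runs over unmasked
# candidates, e.g.
#   out = (pred + (mask + 1e-45).log()) - (pred + (mask + 1e-45).log()).logsumexp(dim=2, keepdim=True)
# so exp(out) at a masked position is ~0, matching the check above.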
# combines with loss fn to see if we match torch cross entropy where expected
def test_masked_class_logsoftmax_with_loss(self):
"""Test masked class softmax with loss."""
# shape batch x M x K
# model outputs
preds = torch.tensor([[[2.0, 2.0, 1.0], [3.0, 5.0, 4.0]]])
# all that matters for this test is that the below is non-negative
# since negative indicates masking
entity_ids = torch.tensor([[[1, 3, 4], [5, 3, 1]]])
true_entity_class = torch.tensor([[0, 1]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(
pred=preds, mask=mask
).transpose(1, 2)
pred_loss = torch.nn.NLLLoss(ignore_index=-1)(pred_log_preds, true_entity_class)
torch_loss_fn = torch.nn.CrossEntropyLoss()
# predictions need to be batch_size x K x M
torch_loss = torch_loss_fn(preds.transpose(1, 2), true_entity_class)
assert torch.allclose(torch_loss, pred_loss)
# tests if masking is done correctly
def test_masked_class_logsoftmax_masking(self):
"""Test masked class softmax masking."""
preds = torch.tensor([[[2.0, 4.0, 1.0], [3.0, 5.0, 4.0]]])
entity_ids = torch.tensor([[[1, 3, -1], [5, -1, -1]]])
first_sample = torch.tensor([[2.0, 4.0]])
denom_0 = torch.log(torch.sum(torch.exp(first_sample)))
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
# we only need to match on non-masked values
expected_log_probs = torch.tensor(
[
[
[first_sample[0][0] - denom_0, first_sample[0][1] - denom_0, 0],
[0, 0, 0],
]
]
)
pred_log_preds = (
eval_utils.masked_class_logsoftmax(pred=preds, mask=mask) * mask
)
assert torch.allclose(expected_log_probs, pred_log_preds)
# check the case where the entire row is masked out
def test_masked_class_logsoftmax_grads_full_mask(self):
"""Test masked class softmax gradients full mask."""
preds = torch.tensor([[[2.0, 4.0], [3.0, 5.0], [1.0, 4.0]]], requires_grad=True)
# batch x M x K
entity_ids = torch.tensor([[[1, -1], [-1, -1], [4, 5]]])
# batch x M
true_entity_class = torch.tensor([[0, -1, 1]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(
pred=preds, mask=mask
).transpose(1, 2)
pred_loss = torch.nn.NLLLoss(ignore_index=-1)(pred_log_preds, true_entity_class)
pred_loss.backward()
actual_grad = preds.grad
true_entity_class_expanded = true_entity_class.unsqueeze(-1).expand_as(
entity_ids
)
masked_actual_grad = torch.where(
(entity_ids != -1) & (true_entity_class_expanded != -1),
torch.ones_like(preds),
actual_grad,
)
# just put 1's where we want non-zeros and use mask above to only compare padded gradients
expected_grad = torch.tensor([[[1.0, 0.0], [0.0, 0.0], [1.0, 1.0]]])
assert torch.allclose(expected_grad, masked_actual_grad)
# check the case where the entire row is masked out
def test_masked_class_logsoftmax_grads_excluded_alias(self):
"""Test masked class softmax gradients excluding alias."""
preds = torch.tensor([[[2.0, 4.0], [1.0, 4.0], [8.0, 2.0]]], requires_grad=True)
# batch x M x K
entity_ids = torch.tensor([[[1, -1], [4, 5], [8, 9]]])
# batch x M
true_entity_class = torch.tensor([[0, -1, 1]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(
pred=preds, mask=mask
).transpose(1, 2)
pred_loss = torch.nn.NLLLoss(ignore_index=-1)(pred_log_preds, true_entity_class)
pred_loss.backward()
actual_grad = preds.grad
true_entity_class_expanded = true_entity_class.unsqueeze(-1).expand_as(
entity_ids
)
masked_actual_grad = torch.where(
(entity_ids != -1) & (true_entity_class_expanded != -1),
torch.ones_like(preds),
actual_grad,
)
# just put 1's where we want non-zeros and use mask above to only compare padded gradients
expected_grad = torch.tensor([[[1.0, 0.0], [0.0, 0.0], [1.0, 1.0]]])
assert torch.allclose(expected_grad, masked_actual_grad)
# compare grads with and without masking
def test_masked_class_logsoftmax_grads(self):
"""Test masked class softmax grads."""
# check gradients on preds since that will go back into the rest of the network
preds = torch.tensor(
[[[2.0, 4.0, 1.0], [3.0, 5.0, 4.0], [1.0, 4.0, 6.0]]], requires_grad=True
)
entity_ids = torch.tensor([[[1, 3, -1], [5, -1, -1], [4, 5, 6]]])
true_entity_class = torch.tensor([[1, 0, 2]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(
pred=preds, mask=mask
).transpose(1, 2)
pred_loss = torch.nn.NLLLoss(ignore_index=-1)(pred_log_preds, true_entity_class)
pred_loss.backward()
actual_grad = preds.grad
# we want zero grads on masked candidates
masked_actual_grad = torch.where(
entity_ids > 0, torch.ones_like(preds), actual_grad
)
# just put 1's where we want non-zeros and use mask above to only compare padded gradients
expected_grad = torch.tensor(
[[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 1.0]]]
)
assert torch.allclose(expected_grad, masked_actual_grad)
# we want to match pytorch when NOT using masking
# zero out the gradient to call backward again
preds.grad.zero_()
# no masking now
entity_ids = torch.tensor([[[1, 3, 1], [5, 4, 8], [4, 5, 6]]])
true_entity_class = torch.tensor([[1, 0, 2]])
mask = torch.where(
entity_ids < 0, torch.zeros_like(preds), torch.ones_like(preds)
)
pred_log_preds = eval_utils.masked_class_logsoftmax(
pred=preds, mask=mask
).transpose(1, 2)
pred_loss = torch.nn.NLLLoss(ignore_index=-1)(pred_log_preds, true_entity_class)
pred_loss.backward()
# clone so we can call backward again and zero out the grad
actual_grad = preds.grad.clone()
preds.grad.zero_()
torch_loss_fn = torch.nn.CrossEntropyLoss()
torch_loss = torch_loss_fn(preds.transpose(1, 2), true_entity_class)
torch_loss.backward()
torch_grad = preds.grad
assert torch.allclose(torch_grad, actual_grad)
def test_merge_subsentences(self):
"""Test merge subsentences in eval."""
test_full_emb_file = tempfile.NamedTemporaryFile()
test_merged_emb_file = tempfile.NamedTemporaryFile()
cache_folder = tempfile.TemporaryDirectory()
num_examples = 7
total_num_mentions = 7
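# 7 = 2 mentions in sentence 0 plus 5 in sentence 1 (see the data rows constructed below).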
K = 3
hidden_size = 2
# create full embedding file
storage_type_full = np.dtype(
[
("K", int),
("hidden_size", int),
("sent_idx", int),
("subsent_idx", int),
("alias_list_pos", int, 1),
("final_loss_true", int, 1),
("final_loss_pred", int, 1),
("final_loss_prob", float, 1),
("final_loss_cand_probs", float, K),
]
)
full_emb = np.memmap(
test_full_emb_file.name,
dtype=storage_type_full,
mode="w+",
shape=(num_examples,),
)
full_emb["hidden_size"] = hidden_size
full_emb["K"] = K
full_emb[0]["sent_idx"] = 0
full_emb[0]["subsent_idx"] = 0
full_emb[0]["alias_list_pos"] = 0
full_emb[0]["final_loss_true"] = 0
full_emb[1]["sent_idx"] = 0
full_emb[1]["subsent_idx"] = 1
full_emb[1]["alias_list_pos"] = 1
full_emb[1]["final_loss_true"] = 1
full_emb[2]["sent_idx"] = 1
full_emb[2]["subsent_idx"] = 0
full_emb[2]["alias_list_pos"] = 0
full_emb[2]["final_loss_true"] = 1
full_emb[3]["sent_idx"] = 1
full_emb[3]["subsent_idx"] = 1
full_emb[3]["alias_list_pos"] = 1
full_emb[3]["final_loss_true"] = 1
full_emb[4]["sent_idx"] = 1
full_emb[4]["subsent_idx"] = 2
full_emb[4]["alias_list_pos"] = 2
full_emb[4]["final_loss_true"] = 1
full_emb[5]["sent_idx"] = 1
full_emb[5]["subsent_idx"] = 3
full_emb[5]["alias_list_pos"] = 3
full_emb[5]["final_loss_true"] = 1
full_emb[6]["sent_idx"] = 1
full_emb[6]["subsent_idx"] = 4
full_emb[6]["alias_list_pos"] = 4
full_emb[6]["final_loss_true"] = 1
# create merged embedding file
storage_type_merged = np.dtype(
[
("hidden_size", int),
("sent_idx", int),
("alias_list_pos", int),
("final_loss_pred", int),
("final_loss_prob", float),
("final_loss_cand_probs", float, K),
]
)
# create data file -- just needs aliases and sentence indices
data = [
{
"aliases": ["a", "b"],
"char_spans": [[0, 1], [2, 3]],
"sentence": "a b c d e f g",
"sent_idx_unq": 0,
},
{
"aliases": ["c", "d", "e", "f", "g"],
"char_spans": [[4, 5], [6, 7], [8, 9], [10, 11], [14, 15]],
"sentence": "a b c d e f g",
"sent_idx_unq": 1,
},
]
        # Keys are strings for the trie
sent_idx2num_mentions = {"0": 2, "1": 5}
temp_file = tempfile.NamedTemporaryFile(delete=False).name
with jsonlines.open(temp_file, "w") as f:
for row in data:
f.write(row)
# assert that output of merge_subsentences is correct
num_processes = 1
eval_utils.merge_subsentences(
num_processes,
sent_idx2num_mentions,
cache_folder.name,
test_merged_emb_file.name,
storage_type_merged,
test_full_emb_file.name,
storage_type_full,
)
bootleg_merged_emb = np.memmap(
test_merged_emb_file.name, dtype=storage_type_merged, mode="r+"
)
assert len(bootleg_merged_emb) == total_num_mentions
# Try with multiprocessing
num_processes = 5
eval_utils.merge_subsentences(
num_processes,
sent_idx2num_mentions,
cache_folder.name,
test_merged_emb_file.name,
storage_type_merged,
test_full_emb_file.name,
storage_type_full,
)
bootleg_merged_emb = np.memmap(
test_merged_emb_file.name, dtype=storage_type_merged, mode="r+"
)
assert len(bootleg_merged_emb) == total_num_mentions
# clean up
if os.path.exists(temp_file):
os.remove(temp_file)
test_full_emb_file.close()
test_merged_emb_file.close()
cache_folder.cleanup()
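    # --- Illustrative sketch (assumption, not original code) ---
    # The string-keyed sent_idx -> number-of-mentions map used above could also be
    # derived from the jsonlines rows instead of being hard-coded.
    @staticmethod
    def _build_sent_idx2num_mentions(rows):
        return {str(row["sent_idx_unq"]): len(row["aliases"]) for row in rows}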
def test_write_out_subsentences(self):
"""Test write out subsentences in eval."""
merged_entity_emb_file = tempfile.NamedTemporaryFile()
out_file = tempfile.NamedTemporaryFile()
data_file = tempfile.NamedTemporaryFile()
cache_folder = tempfile.TemporaryDirectory()
entity_dir = "tests/entity_db"
entity_map_dir = "entity_mappings"
entity_symbols = EntitySymbolsSubclass()
entity_symbols.save(save_dir=os.path.join(entity_dir, entity_map_dir))
total_num_mentions = 7
K = 2
hidden_size = 2
# create data file -- just needs aliases and sentence indices
data = [
{
"aliases": ["a", "b"],
"char_spans": [[0, 1], [2, 3]],
"sentence": "a b c d e f g",
"sent_idx_unq": 0,
},
{
"aliases": ["c", "d", "e", "f", "g"],
"char_spans": [[4, 5], [6, 7], [8, 9], [10, 11], [14, 15]],
"sentence": "a b c d e f g",
"sent_idx_unq": 1,
},
]
        # Dict keys are strings for the trie
sent_idx2rows = {"0": data[0], "1": data[1]}
with jsonlines.open(data_file.name, "w") as f:
for row in data:
f.write(row)
merged_storage_type = np.dtype(
[
("hidden_size", int),
("sent_idx", int),
("alias_list_pos", int),
("entity_emb", float, hidden_size),
("final_loss_pred", int),
("final_loss_prob", float),
("final_loss_cand_probs", float, K),
]
)
merged_entity_emb = np.memmap(
merged_entity_emb_file.name,
dtype=merged_storage_type,
mode="w+",
shape=(total_num_mentions,),
)
        # 2 sentences: the 1st has 2 mentions and the 2nd has 5 mentions - 7 mentions total
merged_entity_emb["hidden_size"] = hidden_size
# first men
merged_entity_emb[0]["sent_idx"] = 0
merged_entity_emb[0]["alias_list_pos"] = 0
merged_entity_emb[0]["entity_emb"] = np.array([0, 1])
merged_entity_emb[0]["final_loss_pred"] = 1
merged_entity_emb[0]["final_loss_prob"] = 0.9
merged_entity_emb[0]["final_loss_cand_probs"] = np.array([0.1, 0.9])
# second men
merged_entity_emb[1]["sent_idx"] = 0
merged_entity_emb[1]["alias_list_pos"] = 1
merged_entity_emb[1]["entity_emb"] = np.array([2, 3])
merged_entity_emb[1]["final_loss_pred"] = 1
merged_entity_emb[1]["final_loss_prob"] = 0.9
merged_entity_emb[1]["final_loss_cand_probs"] = np.array([0.1, 0.9])
# third men
merged_entity_emb[2]["sent_idx"] = 1
merged_entity_emb[2]["alias_list_pos"] = 0
merged_entity_emb[2]["entity_emb"] = np.array([4, 5])
merged_entity_emb[2]["final_loss_pred"] = 0
merged_entity_emb[2]["final_loss_prob"] = 0.9
merged_entity_emb[2]["final_loss_cand_probs"] = np.array([0.9, 0.1])
# fourth men
merged_entity_emb[3]["sent_idx"] = 1
merged_entity_emb[3]["alias_list_pos"] = 1
merged_entity_emb[3]["entity_emb"] = np.array([6, 7])
merged_entity_emb[3]["final_loss_pred"] = 0
merged_entity_emb[3]["final_loss_prob"] = 0.9
merged_entity_emb[3]["final_loss_cand_probs"] = np.array([0.9, 0.1])
# fifth men
merged_entity_emb[4]["sent_idx"] = 1
merged_entity_emb[4]["alias_list_pos"] = 2
merged_entity_emb[4]["entity_emb"] = np.array([10, 11])
merged_entity_emb[4]["final_loss_pred"] = 1
merged_entity_emb[4]["final_loss_prob"] = 0.9
merged_entity_emb[4]["final_loss_cand_probs"] = np.array([0.1, 0.9])
# sixth men
merged_entity_emb[5]["sent_idx"] = 1
merged_entity_emb[5]["alias_list_pos"] = 3
merged_entity_emb[5]["entity_emb"] = np.array([12, 13])
merged_entity_emb[5]["final_loss_pred"] = 1
merged_entity_emb[5]["final_loss_prob"] = 0.9
merged_entity_emb[5]["final_loss_cand_probs"] = np.array([0.1, 0.9])
# seventh men
merged_entity_emb[6]["sent_idx"] = 1
merged_entity_emb[6]["alias_list_pos"] = 4
merged_entity_emb[6]["entity_emb"] = np.array([14, 15])
merged_entity_emb[6]["final_loss_pred"] = 1
merged_entity_emb[6]["final_loss_prob"] = 0.9
merged_entity_emb[6]["final_loss_cand_probs"] = np.array([0.1, 0.9])
num_processes = 1
train_in_candidates = True
max_candidates = 2
"""
"a":[["Q1",10.0],["Q4",6]],
"b":[["Q2",5.0],["Q1",3]],
"c":[["Q1",30.0],["Q2",3]],
"d":[["Q4",20],["Q3",15.0]],
"e":[["Q1",10.0],["Q4",6]],
"f":[["Q2",5.0],["Q1",3]],
"g":[["Q1",30.0],["Q2",3]]
"""
gold_lines = [
{
"sent_idx_unq": 0,
"aliases": ["a", "b"],
"char_spans": [[0, 1], [2, 3]],
"sentence": "a b c d e f g",
"qids": ["Q4", "Q1"],
"probs": [0.9, 0.9],
"cands": [["Q1", "Q4"], ["Q2", "Q1"]],
"cand_probs": [[0.1, 0.9], [0.1, 0.9]],
"entity_ids": [4, 1],
},
{
"sent_idx_unq": 1,
"aliases": ["c", "d", "e", "f", "g"],
"char_spans": [[4, 5], [6, 7], [8, 9], [10, 11], [14, 15]],
"sentence": "a b c d e f g",
"qids": ["Q1", "Q4", "Q4", "Q1", "Q2"],
"probs": [0.9, 0.9, 0.9, 0.9, 0.9],
"cands": [
["Q1", "Q2"],
["Q4", "Q3"],
["Q1", "Q4"],
["Q2", "Q1"],
["Q1", "Q2"],
],
"cand_probs": [
[0.9, 0.1],
[0.9, 0.1],
[0.1, 0.9],
[0.1, 0.9],
[0.1, 0.9],
],
"entity_ids": [1, 4, 4, 1, 2],
},
]
write_data_labels(
num_processes=num_processes,
merged_entity_emb_file=merged_entity_emb_file.name,
merged_storage_type=merged_storage_type,
sent_idx2row=sent_idx2rows,
cache_folder=cache_folder.name,
out_file=out_file.name,
entity_dump=entity_symbols,
train_in_candidates=train_in_candidates,
max_candidates=max_candidates,
trie_candidate_map_folder=None,
trie_qid2eid_file=None,
)
all_lines = []
with open(out_file.name) as check_f:
for line in check_f:
all_lines.append(ujson.loads(line))
assert len(all_lines) == len(gold_lines)
all_lines_sent_idx_map = {line["sent_idx_unq"]: line for line in all_lines}
gold_lines_sent_idx_map = {line["sent_idx_unq"]: line for line in gold_lines}
assert len(all_lines_sent_idx_map) == len(gold_lines_sent_idx_map)
for sent_idx in all_lines_sent_idx_map:
self.assertDictEqual(
gold_lines_sent_idx_map[sent_idx],
all_lines_sent_idx_map[sent_idx],
f"{ujson.dumps(gold_lines_sent_idx_map[sent_idx], indent=4)} VS "
f"{ujson.dumps(all_lines_sent_idx_map[sent_idx], indent=4)}",
)
# TRY MULTIPROCESSING
num_processes = 2
        # dump the candidate-map and qid2eid tries to disk for the multiprocessing workers to load
trie_candidate_map_folder = tempfile.TemporaryDirectory()
trie_qid2eid_folder = tempfile.TemporaryDirectory()
entity_symbols._qid2eid.dump(trie_qid2eid_folder.name)
entity_symbols._alias2qids.dump(trie_candidate_map_folder.name)
write_data_labels(
num_processes=num_processes,
merged_entity_emb_file=merged_entity_emb_file.name,
merged_storage_type=merged_storage_type,
sent_idx2row=sent_idx2rows,
cache_folder=cache_folder.name,
out_file=out_file.name,
entity_dump=entity_symbols,
train_in_candidates=train_in_candidates,
max_candidates=max_candidates,
trie_candidate_map_folder=trie_candidate_map_folder.name,
trie_qid2eid_file=trie_qid2eid_folder.name,
)
all_lines = []
with open(out_file.name) as check_f:
for line in check_f:
all_lines.append(ujson.loads(line))
assert len(all_lines) == len(gold_lines)
all_lines_sent_idx_map = {line["sent_idx_unq"]: line for line in all_lines}
gold_lines_sent_idx_map = {line["sent_idx_unq"]: line for line in gold_lines}
assert len(all_lines_sent_idx_map) == len(gold_lines_sent_idx_map)
for sent_idx in all_lines_sent_idx_map:
self.assertDictEqual(
gold_lines_sent_idx_map[sent_idx],
all_lines_sent_idx_map[sent_idx],
f"{ujson.dumps(gold_lines_sent_idx_map[sent_idx], indent=4)} VS "
f"{ujson.dumps(all_lines_sent_idx_map[sent_idx], indent=4)}",
)
# clean up
if os.path.exists(entity_dir):
shutil.rmtree(entity_dir, ignore_errors=True)
merged_entity_emb_file.close()
out_file.close()
data_file.close()
trie_candidate_map_folder.cleanup()
cache_folder.cleanup()
trie_qid2eid_folder.cleanup()
| bootleg-master | tests/test_utils/test_eval_utils.py |
"""Test preprocessing utils."""
import os
import tempfile
import unittest
from pathlib import Path
import ujson
from bootleg.symbols.entity_symbols import EntitySymbols
class PreprocessingUtils(unittest.TestCase):
"""Preprocessing utils test."""
def setUp(self) -> None:
"""Set up."""
self.test_dir = tempfile.TemporaryDirectory()
def tearDown(self) -> None:
"""Tear down."""
self.test_dir.cleanup()
def write_data(self, file, data):
"""Write data."""
Path(file).parent.mkdir(parents=True, exist_ok=True)
with open(file, "w") as out_f:
for line in data:
out_f.write(ujson.dumps(line) + "\n")
def test_get_train_qid_counts(self):
"""Test get train qid counts."""
in_file = Path(self.test_dir.name) / "train.jsonl"
out_file = Path(self.test_dir.name) / "train_counts_out.json"
data = [{"qids": [f"Q{i}" for i in range(5)]}] * 100
self.write_data(in_file, data)
os.system(
f"python3 bootleg/utils/preprocessing/get_train_qid_counts.py "
f"--train_file {in_file} "
f"--out_file {out_file}"
)
res = ujson.load(open(out_file, "r"))
assert len(res) == 5
for k in res:
assert res[k] == 100
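    # --- Illustrative sketch (assumption, not original code) ---
    # The counting behavior this test expects from get_train_qid_counts.py, expressed
    # inline: tally every QID across all rows of the jsonl file.
    @staticmethod
    def _count_train_qids(rows):
        from collections import Counter
        counts = Counter()
        for row in rows:
            counts.update(row["qids"])
        return dict(counts)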
def test_compute_statistics(self):
"""Test compute statistics."""
in_file = Path(self.test_dir.name) / "train.jsonl"
entity_db = Path(self.test_dir.name) / "entity_db" / "entity_mappings"
alias2qids = {
"happy": [["Q1", 1.0], ["Q2", 1.0], ["Q3", 1.0]],
"cow": [["Q4", 1.0], ["Q5", 1.0], ["Q6", 1.0]],
"batman": [["Q7", 1.0], ["Q8", 1.0]],
}
qid2title = {
"Q1": "aack",
"Q2": "back",
"Q3": "cack",
"Q4": "dack",
"Q5": "eack",
"Q6": "fack",
"Q7": "gack",
"Q8": "hack",
}
mock_entity_db = EntitySymbols(alias2qids, qid2title)
mock_entity_db.save(entity_db)
data = [
{
"qids": ["Q1", "Q4", "Q7"],
"unswap_aliases": ["happy", "cow", "batman"],
"sentence": "happy cow batman",
}
] * 100
self.write_data(in_file, data)
os.system(
f"python3 bootleg/utils/preprocessing/compute_statistics.py "
f"--data_dir {self.test_dir.name} "
f"--save_dir {self.test_dir.name}"
)
out_dir = Path(self.test_dir.name) / "stats"
assert out_dir.exists()
alias_cnts = ujson.load(open(out_dir / "alias_counts.json"))
assert len(alias_cnts) == 3
assert all(v == 100 for v in alias_cnts.values())
def test_sample_eval_data(self):
"""Test sample eval data."""
in_file = Path(self.test_dir.name) / "train.jsonl"
data = [
{
"qids": ["Q1", "Q4", "Q7"],
"sent_idx_unq": i,
"aliases": ["happy", "cow", "batman"],
"gold": [True, True, False],
"slices": {"slice_1": {"0": 1.0, "1": 1.0, "2": 1.0}},
"sentence": "happy cow batman",
}
for i in range(100)
]
self.write_data(in_file, data)
os.system(
f"python3 bootleg/utils/preprocessing/sample_eval_data.py "
f"--data_dir {self.test_dir.name} "
f"--slice slice_1 --file train.jsonl --out_file_name train_out.jsonl --min_sample_size 10"
)
out_file = Path(self.test_dir.name) / "train_out.jsonl"
assert out_file.exists()
alias_out = [ln for ln in open(out_file)]
assert len(alias_out) == 10
| bootleg-master | tests/test_utils/test_preprocessing.py |
"""Test class utils."""
import tempfile
import unittest
from bootleg.end2end.annotator_utils import DownloadProgressBar
from bootleg.utils.classes.nested_vocab_tries import (
ThreeLayerVocabularyTrie,
TwoLayerVocabularyScoreTrie,
VocabularyTrie,
)
class UtilClasses(unittest.TestCase):
"""Class util test."""
def test_vocab_trie(self):
"""Test vocab trie."""
input_dict = {"a": 2, "b": 3, "c": -1}
tri = VocabularyTrie(input_dict=input_dict)
self.assertDictEqual(tri.to_dict(), input_dict)
self.assertEqual(tri["b"], 3)
self.assertEqual(tri["c"], -1)
self.assertEqual(tri.get_key(-1), "c")
self.assertEqual(tri.get_key(2), "a")
self.assertTrue(tri.is_key_in_trie("b"))
self.assertFalse(tri.is_key_in_trie("f"))
self.assertTrue("b" in tri)
self.assertTrue("f" not in tri)
self.assertTrue(tri.is_value_in_trie(-1))
self.assertFalse(tri.is_value_in_trie(6))
self.assertEqual(tri.get_max_id(), 3)
self.assertEqual(len(tri), 3)
save_path = tempfile.TemporaryDirectory()
tri.dump(save_path.name)
tri2 = VocabularyTrie(load_dir=save_path.name)
        self.assertDictEqual(tri2.to_dict(), input_dict)
self.assertEqual(tri2["b"], 3)
self.assertEqual(tri2["c"], -1)
self.assertEqual(tri2.get_key(-1), "c")
self.assertEqual(tri2.get_key(2), "a")
self.assertTrue(tri2.is_key_in_trie("b"))
self.assertFalse(tri2.is_key_in_trie("f"))
self.assertTrue("b" in tri2)
self.assertTrue("f" not in tri2)
self.assertTrue(tri2.is_value_in_trie(-1))
self.assertFalse(tri2.is_value_in_trie(6))
self.assertEqual(tri2.get_max_id(), 3)
self.assertEqual(len(tri2), 3)
save_path.cleanup()
def test_paired_vocab_trie(self):
"""Test paired vocab trie."""
for with_scores in [True, False]:
raw_input_dict = {"a": ["1", "4", "5"], "b": ["5", "2"], "c": []}
vocabulary = {"1": 1, "2": 2, "4": 3, "5": 4}
input_dict = {}
score = 1.0 if with_scores else 0.0
for k, lst in list(raw_input_dict.items()):
input_dict[k] = [[it, score] for it in lst]
if with_scores:
tri = TwoLayerVocabularyScoreTrie(
input_dict=input_dict, vocabulary=vocabulary, max_value=3
)
else:
tri = TwoLayerVocabularyScoreTrie(
input_dict=raw_input_dict, vocabulary=vocabulary, max_value=3
)
self.assertDictEqual(tri.to_dict(keep_score=True), input_dict)
self.assertDictEqual(tri.to_dict(keep_score=False), raw_input_dict)
self.assertEqual(tri.get_value("b"), [["5", score], ["2", score]])
self.assertTrue(tri.is_key_in_trie("b"))
self.assertFalse(tri.is_key_in_trie("f"))
self.assertSetEqual(set(input_dict.keys()), set(tri.keys()))
self.assertSetEqual(set(vocabulary.keys()), set(tri.vocab_keys()))
save_path = tempfile.TemporaryDirectory()
tri.dump(save_path.name)
tri2 = TwoLayerVocabularyScoreTrie(load_dir=save_path.name)
self.assertDictEqual(tri2.to_dict(keep_score=True), input_dict)
self.assertDictEqual(tri2.to_dict(keep_score=False), raw_input_dict)
self.assertEqual(tri2.get_value("b"), [["5", score], ["2", score]])
self.assertTrue(tri2.is_key_in_trie("b"))
self.assertFalse(tri2.is_key_in_trie("f"))
self.assertSetEqual(set(input_dict.keys()), set(tri2.keys()))
self.assertSetEqual(set(vocabulary.keys()), set(tri2.vocab_keys()))
save_path.cleanup()
def test_dict_vocab_trie(self):
"""Test paired vocab trie."""
raw_input_dict = {
"q1": {"a": ["1", "4", "5"], "b": ["3", "5"]},
"q2": {"b": ["5", "2"]},
}
key_vocabulary = {"a": 1, "b": 2, "c": 3}
value_vocabulary = {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5}
tri = ThreeLayerVocabularyTrie(
input_dict=raw_input_dict,
key_vocabulary=key_vocabulary,
value_vocabulary=value_vocabulary,
max_value=6,
)
self.assertDictEqual(tri.to_dict(), raw_input_dict)
self.assertDictEqual(tri.get_value("q1"), raw_input_dict["q1"])
self.assertTrue(tri.is_key_in_trie("q2"))
self.assertFalse(tri.is_key_in_trie("q3"))
self.assertSetEqual(set(raw_input_dict.keys()), set(tri.keys()))
self.assertSetEqual(set(key_vocabulary.keys()), set(tri.key_vocab_keys()))
self.assertSetEqual(set(value_vocabulary.keys()), set(tri.value_vocab_keys()))
save_path = tempfile.TemporaryDirectory()
tri.dump(save_path.name)
tri2 = ThreeLayerVocabularyTrie(load_dir=save_path.name)
self.assertDictEqual(tri2.to_dict(), raw_input_dict)
self.assertDictEqual(tri2.get_value("q1"), raw_input_dict["q1"])
self.assertTrue(tri2.is_key_in_trie("q2"))
self.assertFalse(tri2.is_key_in_trie("q3"))
self.assertSetEqual(set(raw_input_dict.keys()), set(tri2.keys()))
self.assertSetEqual(set(key_vocabulary.keys()), set(tri2.key_vocab_keys()))
self.assertSetEqual(set(value_vocabulary.keys()), set(tri2.value_vocab_keys()))
save_path.cleanup()
def test_download_progress_bar(self):
"""Test download progress bar."""
pbar = DownloadProgressBar()
pbar(1, 5, 10)
assert pbar.pbar is not None
| bootleg-master | tests/test_utils/test_util_classes.py |
"""Test entity dataset."""
import os
import shutil
import unittest
import ujson
from transformers import AutoTokenizer
from bootleg.dataset import BootlegDataset
from bootleg.symbols.constants import SPECIAL_TOKENS
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.symbols.type_symbols import TypeSymbols
from bootleg.utils import utils
from bootleg.utils.parser import parser_utils
class DataEntityLoader(unittest.TestCase):
"""Entity data loader."""
def setUp(self):
"""Set up."""
# tests that the sampling is done correctly on indices
# load data from directory
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_entity_data.json"
)
self.tokenizer = AutoTokenizer.from_pretrained(
"bert-base-cased",
do_lower_case=False,
use_fast=True,
cache_dir="tests/data/emb_data/pretrained_bert_models",
)
self.tokenizer.add_special_tokens(SPECIAL_TOKENS)
self.is_bert = True
self.entity_symbols = EntitySymbols.load_from_cache(
os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_map_dir
),
alias_cand_map_dir=self.args.data_config.alias_cand_map,
alias_idx_dir=self.args.data_config.alias_idx_map,
)
self.entity_temp_dir = "tests/data/entity_loader/entity_data_test"
self.temp_file_name = "tests/data/data_loader/test_data.jsonl"
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
try:
if utils.exists_dir(dir):
shutil.rmtree(dir)
if os.path.exists(self.temp_file_name):
os.remove(self.temp_file_name)
if os.path.exists(self.entity_temp_dir):
shutil.rmtree(self.entity_temp_dir)
except Exception:
pass
def test_load_type_data(self):
"""
Test load type data.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2]
}
TYPE VOCAB
{
"T1": 1,
"T2": 2,
"T3": 3
}
"""
qid2typename_gold = {"Q1": ["T1", "T2"], "Q2": ["T3"], "Q3": [], "Q4": ["T2"]}
type_symbols = TypeSymbols.load_from_cache(
os.path.join(
self.args.data_config.entity_dir,
self.args.data_config.entity_type_data.type_symbols_dir,
)
)
self.assertDictEqual(type_symbols.get_qid2typename_dict(), qid2typename_gold)
def test_load_type_data_extra_entity(self):
"""
Test load type data extra entity.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2],
"Q5": [3]
}
"""
temp_type_data = {"Q1": [1, 2], "Q2": [3], "Q3": [], "Q4": [2], "Q5": [2]}
file = "tests/data/emb_data/temp_type_mapping.json"
with open(file, "w") as out_f:
ujson.dump(temp_type_data, out_f)
self.args.data_config.entity_type_data.type_file = "temp_type_mapping.json"
qid2typename_gold = {"Q1": ["T1", "T2"], "Q2": ["T3"], "Q3": [], "Q4": ["T2"]}
type_symbols = TypeSymbols.load_from_cache(
os.path.join(
self.args.data_config.entity_dir,
self.args.data_config.entity_type_data.type_symbols_dir,
)
)
self.assertDictEqual(type_symbols.get_qid2typename_dict(), qid2typename_gold)
if os.path.exists(file):
os.remove(file)
def test_simple_entity_data(self):
"""
Test simple entity data.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2]
}
"""
max_seq_len = 7
max_ent_len = 10
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_ent_len = max_ent_len
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"char_spans": [[0, 6], [10, 27]],
"gold": [True, True],
}
]
# THERE ARE NO DESCRIPTIONS BUT THE SEP TOKEN IS STILL ADDED WITH EMPTY DESC
X_entity_dict = self.tokenizer(
[
"[SEP]",
"alias1 [ent_type] T1 [ent_type] T2",
"multi alias2 [ent_type] T3",
"word alias3 [ent_type]",
"nonalias4 [ent_type] T2",
"[SEP]",
],
max_length=max_ent_len,
padding="max_length",
truncation=True,
add_special_tokens=True,
)
gold_entity_to_mask = [
[0 for _ in range(len(inp))] for inp in X_entity_dict["input_ids"]
]
gold_entity_to_mask[1][1:3] = [1, 1]
gold_entity_to_mask[2][1:4] = [1, 1, 1]
gold_entity_to_mask[3][1:4] = [1, 1, 1]
gold_entity_to_mask[4][1:5] = [1, 1, 1, 1]
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=True,
load_entity_data=True,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
self.assertListEqual(
X_entity_dict["input_ids"],
dataset.X_entity_dict["entity_input_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["token_type_ids"],
dataset.X_entity_dict["entity_token_type_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["attention_mask"],
dataset.X_entity_dict["entity_attention_mask"].tolist(),
)
self.assertListEqual(
gold_entity_to_mask,
dataset.X_entity_dict["entity_to_mask"].tolist(),
)
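    # --- Illustrative sketch (assumption, not original code) ---
    # The entity input strings asserted above follow a "<title> [ent_type] T1
    # [ent_type] T2 ..." pattern, with a bare [ent_type] token when an entity has no
    # types. A minimal builder for that pattern:
    @staticmethod
    def _build_entity_text(title, type_names, max_types=None):
        if max_types is not None:
            type_names = type_names[:max_types]
        pieces = [title]
        if type_names:
            for type_name in type_names:
                pieces.extend(["[ent_type]", type_name])
        else:
            pieces.append("[ent_type]")
        return " ".join(pieces)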
def test_max_ent_type_len(self):
"""
Test max entity type length.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2]
}
"""
max_seq_len = 7
max_ent_len = 10
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_ent_len = max_ent_len
self.args.data_config.entity_type_data.max_ent_type_len = 1
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"char_spans": [[0, 6], [10, 27]],
"gold": [True, True],
}
]
# THERE ARE NO DESCRIPTIONS
X_entity_dict = self.tokenizer(
[
"[SEP]",
"alias1 [ent_type] T1",
"multi alias2 [ent_type] T3",
"word alias3 [ent_type]",
"nonalias4 [ent_type] T2",
"[SEP]",
],
max_length=max_ent_len,
padding="max_length",
truncation=True,
add_special_tokens=True,
)
gold_entity_to_mask = [
[0 for _ in range(len(inp))] for inp in X_entity_dict["input_ids"]
]
gold_entity_to_mask[1][1:3] = [1, 1]
gold_entity_to_mask[2][1:4] = [1, 1, 1]
gold_entity_to_mask[3][1:4] = [1, 1, 1]
gold_entity_to_mask[4][1:5] = [1, 1, 1, 1]
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=True,
load_entity_data=True,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
self.assertListEqual(
X_entity_dict["input_ids"],
dataset.X_entity_dict["entity_input_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["token_type_ids"],
dataset.X_entity_dict["entity_token_type_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["attention_mask"],
dataset.X_entity_dict["entity_attention_mask"].tolist(),
)
self.assertListEqual(
gold_entity_to_mask,
dataset.X_entity_dict["entity_to_mask"].tolist(),
)
def test_desc_entity_data(self):
"""
Test entity description data.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2]
}
"""
self.args.data_config.use_entity_desc = True
        # For this test, we copy the entity data to a fresh directory so that pytest's
        # multiprocessing tests do not also pick up the qid2desc file written below
shutil.copytree("tests/data/entity_loader/entity_data", self.entity_temp_dir)
self.args.data_config.entity_dir = self.entity_temp_dir
qid2desc = {
"Q1": "testing desc",
"Q3": "words",
}
out_file = (
"tests/data/entity_loader/entity_data_test/entity_mappings/qid2desc.json"
)
with open(out_file, "w") as out_f:
ujson.dump(qid2desc, out_f)
entity_symbols = EntitySymbols.load_from_cache(
os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_map_dir
),
alias_cand_map_dir=self.args.data_config.alias_cand_map,
alias_idx_dir=self.args.data_config.alias_idx_map,
)
max_seq_len = 7
max_ent_len = 7
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_ent_len = max_ent_len
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"char_spans": [[0, 6], [10, 27]],
"gold": [True, True],
}
]
# THERE ARE DESCRIPTIONS
X_entity_dict = self.tokenizer(
[
"[SEP]",
"alias1 [ent_type] T1 [ent_type] T2 [ent_desc] testing desc",
"multi alias2 [ent_type] T3 [ent_desc]",
"word alias3 [ent_type] [ent_desc] words",
"nonalias4 [ent_type] T2 [ent_desc]",
"[SEP]",
],
max_length=max_ent_len,
padding="max_length",
truncation=True,
add_special_tokens=True,
)
gold_entity_to_mask = [
[0 for _ in range(len(inp))] for inp in X_entity_dict["input_ids"]
]
gold_entity_to_mask[1][1:3] = [1, 1]
gold_entity_to_mask[2][1:4] = [1, 1, 1]
gold_entity_to_mask[3][1:4] = [1, 1, 1]
gold_entity_to_mask[4][1:5] = [1, 1, 1, 1]
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=True,
load_entity_data=True,
tokenizer=self.tokenizer,
entity_symbols=entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
self.assertListEqual(
X_entity_dict["input_ids"],
dataset.X_entity_dict["entity_input_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["token_type_ids"],
dataset.X_entity_dict["entity_token_type_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["attention_mask"],
dataset.X_entity_dict["entity_attention_mask"].tolist(),
)
self.assertListEqual(
gold_entity_to_mask,
dataset.X_entity_dict["entity_to_mask"].tolist(),
)
def test_entity_kg_data(self):
"""
Test entity KG data.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2]
}
KG LABELS
{
"Q1": {"rel1": ["Q2"]},
"Q3": {"rel2": ["Q2"]}
}
"""
max_seq_len = 7
max_ent_len = 10
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_ent_len = max_ent_len
self.args.data_config.entity_kg_data.use_entity_kg = True
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"char_spans": [[0, 6], [10, 27]],
"gold": [True, True],
}
]
# THERE ARE NO DESCRIPTIONS BUT THE SEP TOKEN IS STILL ADDED WITH EMPTY DESC
X_entity_dict = self.tokenizer(
[
"[SEP]",
"alias1 [ent_type] T1 [ent_type] T2 [ent_kg] rel1 multi alias2",
"multi alias2 [ent_type] T3 [ent_kg]",
"word alias3 [ent_type] [ent_kg] rel2 multi alias2",
"nonalias4 [ent_type] T2 [ent_kg]",
"[SEP]",
],
max_length=max_ent_len,
padding="max_length",
truncation=True,
add_special_tokens=True,
)
gold_entity_to_mask = [
[0 for _ in range(len(inp))] for inp in X_entity_dict["input_ids"]
]
gold_entity_to_mask[1][1:3] = [1, 1]
gold_entity_to_mask[2][1:4] = [1, 1, 1]
gold_entity_to_mask[3][1:4] = [1, 1, 1]
gold_entity_to_mask[4][1:5] = [1, 1, 1, 1]
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=True,
load_entity_data=True,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
self.assertListEqual(
X_entity_dict["input_ids"],
dataset.X_entity_dict["entity_input_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["token_type_ids"],
dataset.X_entity_dict["entity_token_type_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["attention_mask"],
dataset.X_entity_dict["entity_attention_mask"].tolist(),
)
self.assertListEqual(
gold_entity_to_mask,
dataset.X_entity_dict["entity_to_mask"].tolist(),
)
def test_multiprocess_entity_data(self):
"""
Test multiprocessing entity data.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
ENTITY TITLE
{
"Q1":"alias1",
"Q2":"multi alias2",
"Q3":"word alias3",
"Q4":"nonalias4"
}
TYPE LABELS
{
"Q1": [1, 2],
"Q2": [3],
"Q3": [],
"Q4": [2]
}
"""
max_seq_len = 7
max_ent_len = 10
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_ent_len = max_ent_len
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"char_spans": [[0, 6], [10, 27]],
"gold": [True, True],
}
]
# THERE ARE NO DESCRIPTIONS
X_entity_dict = self.tokenizer(
[
"[SEP]",
"alias1 [ent_type] T1 [ent_type] T2",
"multi alias2 [ent_type] T3",
"word alias3 [ent_type]",
"nonalias4 [ent_type] T2",
"[SEP]",
],
max_length=max_ent_len,
padding="max_length",
truncation=True,
add_special_tokens=True,
)
gold_entity_to_mask = [
[0 for _ in range(len(inp))] for inp in X_entity_dict["input_ids"]
]
gold_entity_to_mask[1][1:3] = [1, 1]
gold_entity_to_mask[2][1:4] = [1, 1, 1]
gold_entity_to_mask[3][1:4] = [1, 1, 1]
gold_entity_to_mask[4][1:5] = [1, 1, 1, 1]
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=True,
load_entity_data=True,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=3,
split="train",
is_bert=True,
)
self.assertListEqual(
X_entity_dict["input_ids"],
dataset.X_entity_dict["entity_input_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["token_type_ids"],
dataset.X_entity_dict["entity_token_type_ids"].tolist(),
)
self.assertListEqual(
X_entity_dict["attention_mask"],
dataset.X_entity_dict["entity_attention_mask"].tolist(),
)
self.assertListEqual(
gold_entity_to_mask,
dataset.X_entity_dict["entity_to_mask"].tolist(),
)
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_data/test_entity_data.py |
"""Test slice data."""
import os
import shutil
import unittest
import numpy as np
import torch
from bootleg.slicing.slice_dataset import BootlegSliceDataset
from bootleg.symbols.constants import FINAL_LOSS
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
from bootleg.utils.parser import parser_utils
def assert_data_dicts_equal(dict_l, dict_r):
"""Assert dicts are equal."""
for k in dict_l:
assert k in dict_r
if type(dict_l[k]) is torch.Tensor:
assert torch.equal(dict_l[k].float(), dict_r[k].float())
elif type(dict_l[k]) is np.ndarray:
np.testing.assert_array_equal(dict_l[k], dict_r[k])
else:
assert dict_l[k] == dict_r[k]
for k in dict_r:
assert k in dict_l
def assert_slice_data_equal(gold_data, data):
"""Assert slice data is equal."""
assert len(gold_data) == len(data)
assert len(gold_data[0].tolist()[0]) == len(data[0].tolist()[0])
assert len(gold_data[0].tolist()[0][0]) == len(data[0].tolist()[0][0])
for i in range(len(gold_data)):
for j in range(len(gold_data[i].tolist()[0])): # number of slices
for k in range(len(gold_data[i].tolist()[0][0])): # number of columns
np.testing.assert_allclose(
gold_data[i].tolist()[0][j][k],
data[i].tolist()[0][j][k],
err_msg=f"i j k {i} {j} {k}",
)
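# Note (illustrative, derived from the tests below): each slice record packs, per
# sentence, the sentence index, a sub-sentence index, a 0/1 incidence vector over
# aliases, and per-alias probability labels; -1.0 marks padded or unscored alias
# positions, and FINAL_LOSS is the slice covering every scored alias.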
class DataSlice(unittest.TestCase):
"""Slice data test."""
def setUp(self):
"""Set up."""
# tests that the sampling is done correctly on indices
# load data from directory
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_data.json"
)
self.entity_symbols = EntitySymbols.load_from_cache(
os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_map_dir
),
alias_cand_map_dir=self.args.data_config.alias_cand_map,
)
self.temp_file_name = "tests/data/data_loader/test_slice_data.jsonl"
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
if os.path.exists(self.temp_file_name):
os.remove(self.temp_file_name)
def test_simple_dataset(self):
"""
Test simple dataset.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_aliases = 4
self.args.data_config.max_aliases = max_aliases
self.args.data_config.eval_slices = ["slice1"]
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"spans": [[0, 6], [10, 27]],
"slices": {"slice1": {"0": 0.9, "1": 0.3}},
"gold": [True, True],
}
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 2),
("prob_labels", float, 2),
]
)
storage_type = np.dtype(
[(slice_name, slice_dt, 1) for slice_name in [FINAL_LOSS, "slice1"]]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1], [1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS SLICE
np.rec.array([0, 0, [1, 0], [0.9, 0.3]], dtype=slice_dt), # SLICE1 SLICE
]
gold_data = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
# res = np.vstack((mat1))
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
def test_single_mention_dataset(self):
"""
Test single mention dataset.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.eval_slices = ["slice1"]
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias1 or multi word alias2",
"spans": [[0, 6], [10, 27]],
"slices": {"slice1": {"0": 0.9, "1": 0.3}},
"gold": [True, True],
}
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 2),
("prob_labels", float, 2),
]
)
storage_type = np.dtype(
[(slice_name, slice_dt, 1) for slice_name in [FINAL_LOSS, "slice1"]]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1], [1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS SLICE
np.rec.array([0, 0, [1, 0], [0.9, 0.3]], dtype=slice_dt), # SLICE1 SLICE
]
mat1 = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
gold_data = mat1
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
def test_long_aliases(self):
"""
Test long number aliases.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
# Test 1: even though this sentence was split into multiple parts, the slices remain intact
# as an entire sentence
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [True, True, True],
}
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1, 1], [1.0, 1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array([0, 0, [1, 0, 0], [0.9, 0.3, 0.5]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [0, 0, 1], [0.0, 0.0, 1.0]], dtype=slice_dt), # slice2
]
gold_data = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
# res = np.vstack((mat1))
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
# Test2: everything should remain the same even if the split is train
use_weak_label = True
split = "train"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
        # Test 3: when we add another sentence with fewer aliases, its rows are padded to the
        # largest alias count found in any sentence, even if that exceeds max_aliases
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [True, True, True],
},
{
"aliases": ["alias3"],
"qids": ["Q1"],
"sent_idx_unq": "1",
"sentence": "alias3",
"spans": [[0, 1]],
"slices": {"slice1": {"0": 0.4}, "slice2": {"0": 1.0}},
"gold": [True],
},
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1, 1], [1.0, 1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array([0, 0, [1, 0, 0], [0.9, 0.3, 0.5]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [0, 0, 1], [0.0, 0.0, 1.0]], dtype=slice_dt), # slice2
]
ex2 = [
np.rec.array(
[1, 0, [1, 0, 0], [1.0, -1.0, -1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array(
[1, 0, [0, 0, 0], [0.4, -1.0, -1.0]], dtype=slice_dt
), # slice1
np.rec.array(
[1, 0, [1, 0, 0], [1.0, -1.0, -1.0]], dtype=slice_dt
), # slice2
]
mat1 = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
mat2 = np.rec.array(ex2, dtype=storage_type).reshape(1, 1)
gold_data = np.vstack((mat1, mat2))
gold_sent_to_row_id_dict = {0: [0], 1: [1]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
def test_non_gold_aliases(self):
"""
Test non-gold aliases.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
        # Test 1: for a dev split, the False golds do not count as aliases to score and are not placed in any slice
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [False, False, True],
}
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [0, 0, 1], [-1.0, -1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array(
[0, 0, [0, 0, 0], [-1.0, -1.0, 0.5]], dtype=slice_dt
), # slice1
np.rec.array(
[0, 0, [0, 0, 1], [-1.0, -1.0, 1.0]], dtype=slice_dt
), # slice2
]
gold_data = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
# res = np.vstack((mat1))
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
# Test2: everything should remain as it was with a split of train (i.e. FALSE golds are treated as TRUE)
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1, 1], [1.0, 1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array([0, 0, [1, 0, 0], [0.9, 0.3, 0.5]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [0, 0, 1], [0.0, 0.0, 1.0]], dtype=slice_dt), # slice2
]
gold_data = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
# res = np.vstack((mat1))
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "train"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
        # Test 3: when all anchors are False, we keep the slice rows but no alias is placed in any slice
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [False, False, False],
},
{
"aliases": ["alias3"],
"qids": ["Q1"],
"sent_idx_unq": "1",
"sentence": "alias3",
"spans": [[0, 1]],
"slices": {"slice1": {"0": 0.4}, "slice2": {"0": 1.0}},
"gold": [False],
},
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [0, 0, 0], [-1.0, -1.0, -1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array(
[0, 0, [0, 0, 0], [-1.0, -1.0, -1.0]], dtype=slice_dt
), # slice1
np.rec.array(
[0, 0, [0, 0, 0], [-1.0, -1.0, -1.0]], dtype=slice_dt
), # slice2
]
ex2 = [
np.rec.array(
[1, 0, [0, 0, 0], [-1.0, -1.0, -1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array(
[1, 0, [0, 0, 0], [-1.0, -1.0, -1.0]], dtype=slice_dt
), # slice1
np.rec.array(
[1, 0, [0, 0, 0], [-1.0, -1.0, -1.0]], dtype=slice_dt
), # slice2
]
mat1 = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
mat2 = np.rec.array(ex2, dtype=storage_type).reshape(1, 1)
gold_data = np.vstack((mat1, mat2))
gold_sent_to_row_id_dict = {0: [0], 1: [1]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
def test_non_gold_no_weak_label_aliases(self):
"""
Test non gold aliases without weak labels.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
# Test 0: when use weak labels is FALSE and all golds are TRUE, nothing should change
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [True, True, True],
}
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1, 1], [1.0, 1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array([0, 0, [1, 0, 0], [0.9, 0.3, 0.5]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [0, 0, 1], [0.0, 0.0, 1.0]], dtype=slice_dt), # slice2
]
gold_data = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
# res = np.vstack((mat1))
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = False
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
        # Test 1: the False golds are dropped, leaving one alias to score. However, since the
        # memmap needs at least 2 aliases per row to store them as an array, each slice row is padded to a minimum of two
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [False, False, True],
}
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 2),
("prob_labels", float, 2),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array([0, 0, [1, 0], [1.0, -1.0]], dtype=slice_dt), # FINAL LOSS
np.rec.array([0, 0, [0, 0], [0.5, -1.0]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [1, 0], [1.0, -1.0]], dtype=slice_dt), # slice2
]
gold_data = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
# res = np.vstack((mat1))
gold_sent_to_row_id_dict = {0: [0]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = False
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
# Test2: nothing should be different for a split of train
split = "train"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
        # Test 3: when all anchors are False, we keep the slice rows but no alias is placed in any slice
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [False, False, False],
},
{
"aliases": ["alias3"],
"qids": ["Q1"],
"sent_idx_unq": "1",
"sentence": "alias3",
"spans": [[0, 1]],
"slices": {"slice1": {"0": 0.4}, "slice2": {"0": 1.0}},
"gold": [True],
},
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 2),
("prob_labels", float, 2),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array([0, 0, [0, 0], [-1.0, -1.0]], dtype=slice_dt), # FINAL LOSS
np.rec.array([0, 0, [0, 0], [-1.0, -1.0]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [0, 0], [-1.0, -1.0]], dtype=slice_dt), # slice2
]
ex2 = [
np.rec.array([1, 0, [1, 0], [1.0, -1.0]], dtype=slice_dt), # FINAL LOSS
np.rec.array([1, 0, [0, 0], [0.4, -1.0]], dtype=slice_dt), # slice1
np.rec.array([1, 0, [1, 0], [1.0, -1.0]], dtype=slice_dt), # slice2
]
mat1 = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
mat2 = np.rec.array(ex2, dtype=storage_type).reshape(1, 1)
gold_data = np.vstack((mat1, mat2))
gold_sent_to_row_id_dict = {0: [0], 1: [1]}
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = False
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
def test_multiple_processes(self):
"""
Test multiple processes.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 5
max_aliases = 2
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.eval_slices = ["slice1", "slice2"]
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"spans": [[0, 6], [7, 13], [14, 20]],
"slices": {
"slice1": {"0": 0.9, "1": 0.3, "2": 0.5},
"slice2": {"0": 0.0, "1": 0.0, "2": 1.0},
},
"gold": [True, True, True],
},
{
"aliases": ["alias3"],
"qids": ["Q1"],
"sent_idx_unq": "1",
"sentence": "alias3",
"spans": [[0, 1]],
"slices": {"slice1": {"0": 0.4}, "slice2": {"0": 1.0}},
"gold": [True],
},
]
slice_dt = np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, 3),
("prob_labels", float, 3),
]
)
storage_type = np.dtype(
[
(slice_name, slice_dt, 1)
for slice_name in [FINAL_LOSS, "slice1", "slice2"]
]
)
ex1 = [
np.rec.array(
[0, 0, [1, 1, 1], [1.0, 1.0, 1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array([0, 0, [1, 0, 0], [0.9, 0.3, 0.5]], dtype=slice_dt), # slice1
np.rec.array([0, 0, [0, 0, 1], [0.0, 0.0, 1.0]], dtype=slice_dt), # slice2
]
ex2 = [
np.rec.array(
[1, 0, [1, 0, 0], [1.0, -1.0, -1.0]], dtype=slice_dt
), # FINAL LOSS
np.rec.array(
[1, 0, [0, 0, 0], [0.4, -1.0, -1.0]], dtype=slice_dt
), # slice1
np.rec.array(
[1, 0, [1, 0, 0], [1.0, -1.0, -1.0]], dtype=slice_dt
), # slice2
]
mat1 = np.rec.array(ex1, dtype=storage_type).reshape(1, 1)
mat2 = np.rec.array(ex2, dtype=storage_type).reshape(1, 1)
gold_data = np.vstack((mat1, mat2))
# As we are doing multiprocessing in this test, the order may be reversed
# This is cleaner than an order independent equality check of recarrays
gold_data_rev_order = np.vstack((mat2, mat1))
gold_sent_to_row_id_dict = {0: [0], 1: [1]}
        gold_sent_to_row_id_dict_rev_order = {0: [1], 1: [0]}  # row ids swap when the data order flips
utils.write_jsonl(self.temp_file_name, input_data)
use_weak_label = True
split = "dev"
dataset = BootlegSliceDataset(
self.args,
self.temp_file_name,
use_weak_label,
self.entity_symbols,
dataset_threads=1,
split=split,
)
try:
assert_slice_data_equal(gold_data, dataset.data)
self.assertDictEqual(gold_sent_to_row_id_dict, dataset.sent_to_row_id_dict)
except AssertionError:
assert_slice_data_equal(gold_data_rev_order, dataset.data)
self.assertDictEqual(
gold_sent_to_row_id_dict_rev_order, dataset.sent_to_row_id_dict
)
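        # Illustrative note (assumption): an order-independent alternative to the
        # try/except above would key rows by their stored sent_idx, e.g.
        #   rows_by_sent = {int(rec[FINAL_LOSS]["sent_idx"]): rec for rec in dataset.data}
        # and then compare rows_by_sent[0] / rows_by_sent[1] against mat1 / mat2 directly.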
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_data/test_slice_data.py |
"""Test data."""
import os
import shutil
import unittest
from collections import defaultdict
import numpy as np
import torch
from transformers import AutoTokenizer
from bootleg.dataset import BootlegDataset, extract_context
from bootleg.symbols.constants import SPECIAL_TOKENS
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
from bootleg.utils.parser import parser_utils
def adjust_sentence(sentence, max_len, max_window_len, span, tokenizer):
"""Tokenize and adjust sentence for max length."""
context = extract_context(span, sentence, max_window_len, tokenizer)
new_span = [
context.index("[ent_start]"),
context.index("[ent_end]") + len("[ent_end]"),
]
encoded = tokenizer(
context,
is_split_into_words=False,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=max_len,
return_overflowing_tokens=False,
)
return encoded, new_span
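# Note (illustrative): extract_context is expected to return a window of the sentence
# with the mention wrapped in [ent_start] ... [ent_end] markers; new_span records the
# character offsets of that wrapped mention inside the returned context, which is what
# char_to_token is later applied to in prep_dicts.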
def get_uniq_ids(sent_i, num_aliases, guid_dtype, max_aliases=1):
"""Get unique ids."""
res = []
for i in range(num_aliases):
res.append(np.array((sent_i, i, [i]), dtype=guid_dtype(max_aliases)))
return res
def assert_data_dicts_equal(dict_l, dict_r):
"""Assert data dicts are equals."""
for k in dict_l:
assert k in dict_r, f"Key is {k}"
if type(dict_l[k]) is torch.Tensor:
assert torch.allclose(dict_l[k].float(), dict_r[k].float()), f"Key is {k}"
elif type(dict_l[k]) is np.ndarray:
np.testing.assert_array_equal(dict_l[k], dict_r[k])
elif type(dict_l[k]) is list:
assert len(dict_l[k]) == len(dict_r[k]), f"Key is {k}"
for item_l, item_r in zip(dict_l[k], dict_r[k]):
if (
type(item_l) is np.ndarray
                ):  # special case: lists of UIDs loaded from the mmap file come back in a slightly different shape
for subitem_l, subitem_r in zip(
item_l.tolist()[0], item_r.tolist()
):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f"Key is {k}"
else:
assert subitem_l == subitem_r, f"Key is {k}"
else:
assert item_l == item_r, f"Key is {k}"
else:
assert dict_l[k] == dict_r[k], f"Key is {k}"
for k in dict_r:
assert k in dict_l, f"Key is {k}"
class DataLoader(unittest.TestCase):
"""Data test."""
def setUp(self):
"""Set up."""
# tests that the sampling is done correctly on indices
# load data from directory
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_data.json"
)
self.tokenizer = AutoTokenizer.from_pretrained(
"bert-base-cased",
do_lower_case=False,
use_fast=True,
cache_dir="tests/data/emb_data/pretrained_bert_models",
)
self.tokenizer.add_special_tokens(SPECIAL_TOKENS)
self.is_bert = True
self.entity_symbols = EntitySymbols.load_from_cache(
os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_map_dir
),
alias_cand_map_dir=self.args.data_config.alias_cand_map,
)
self.temp_file_name = "tests/data/data_loader/test_data.jsonl"
self.guid_dtype = lambda max_aliases: np.dtype(
[
("sent_idx", "i8", 1),
("subsent_idx", "i8", 1),
("alias_orig_list_pos", "i8", (max_aliases,)),
]
)
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
if os.path.exists(self.temp_file_name):
os.remove(self.temp_file_name)
def prep_dicts(
self,
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak,
train_in_cands,
input_data,
):
"""Prep data dicts."""
X_dict, Y_dict = defaultdict(list), defaultdict(list)
for i, inp in enumerate(input_data):
guids = get_uniq_ids(i, len(inp["aliases"]), self.guid_dtype)
for j in range(len(inp["aliases"])):
if use_weak is False and inp["gold"][j] is False:
continue
X_dict["guids"].append(guids[j])
tok_sent, new_span = adjust_sentence(
inp["sentence"],
max_seq_len,
max_window_len,
inp["char_spans"][j],
self.tokenizer,
)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict["sent_idx"].append(i)
X_dict["subsent_idx"].append(j)
if inp["aliases"][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp["aliases"][j])
X_dict["alias_idx"].append(alias_idx)
X_dict["alias_orig_list_pos"].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp["qids"][j])
else:
gold_eid = -1
# Set gold EID to NC if cand_idx is 0 and train_in_cands False
if gold_cand_idx[i][j] == 0 and not train_in_cands:
gold_eid = 0
X_dict["gold_eid"].append(gold_eid)
X_dict["for_dump_gold_eid"].append(
self.entity_symbols.get_eid(inp["qids"][j])
)
word_mask_scores = [-1 for _ in range(len(tok_sent["input_ids"]))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
# -1 to index the [ent_end] characters, not the space after
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent["input_ids"])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [
1 for _ in range(new_span_start, new_span_end)
]
X_dict["word_qid_cnt_mask_score"].append(word_mask_scores)
X_dict["for_dump_gold_cand_K_idx_train"].append(
gold_cand_idx_train[i][j]
)
Y_dict["gold_cand_K_idx"].append(gold_cand_idx[i][j])
for k in X_dict:
if k == "guids":
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
return X_dict, Y_dict
def test_get_sentidx(self):
"""Test get sentidx to row id getter."""
max_seq_len = 50
max_window_len = 10
max_aliases = 1
use_weak_label = True
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias'-1 or multi word alias2",
"char_spans": [[0, 8], [12, 29]],
"gold": [True, True],
}
]
gold_sentidx2rowid = {"0": [0, 1]}
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
sentidx2rowid = dataset.get_sentidx_to_rowids()
self.assertDictEqual(gold_sentidx2rowid, sentidx2rowid)
def test_simple_dataset(self):
"""
Test simple dataset.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias'-1 or multi word alias2",
"char_spans": [[0, 8], [12, 29]],
"gold": [True, True],
}
]
gold_cand_idx_train = [[0, 2]]
gold_cand_idx = [[0, 2]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_in_candidate_flag(self):
"""
Test in candidates.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 30
max_aliases = 1
max_window_len = 10
split = "train"
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
# Test 1: the code fails because it's training and Q3 is not a candidate of multi word alias2
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q3"],
"sent_idx_unq": 0,
"sentence": "alias'-1 or multi word alias2",
"char_spans": [[0, 8], [12, 29]],
"gold": [True, True],
}
]
use_weak_label = True
utils.write_jsonl(self.temp_file_name, input_data)
with self.assertRaises(Exception) as context:
BootlegDataset(
self.args.data_config,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
self.assertTrue(type(context.exception) == AssertionError)
        # Test 2: the code passes because the split is dev and Q3 is not a candidate of multi word alias2
split = "dev"
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q3"],
"sent_idx_unq": 0,
"sentence": "alias'-1 or multi word alias2",
"char_spans": [[0, 8], [12, 29]],
"gold": [True, True],
}
]
gold_cand_idx_train = [[0, -2]]
gold_cand_idx = [[0, -2]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 3: the code passes because it's training but train in candidates is False
# and Q3 is not a candidate of multi word alias2
split = "train"
self.args.data_config.train_in_candidates = False
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q3"],
"sent_idx_unq": 0,
"sentence": "alias'-1 or multi word alias2",
"char_spans": [[0, 8], [12, 29]],
"gold": [True, True],
}
]
gold_cand_idx_train = [[1, 0]]
gold_cand_idx = [[1, 0]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_nonmatch_alias(self):
"""
Test aliases not in dict.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias0", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias0 or multi word alias2",
"char_spans": [[0, 6], [10, 27]],
"gold": [True, True],
}
]
gold_cand_idx_train = [[-2, 2]]
gold_cand_idx = [[-2, 2]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
with self.assertRaises(Exception) as context:
dataset = BootlegDataset(
self.args.data_config,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
self.assertTrue(type(context.exception) == AssertionError)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="test",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Assert the -2 stays even though train_in_cands is False
self.args.data_config.train_in_candidates = False
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="test",
is_bert=True,
)
X_dict["for_dump_gold_cand_K_idx_train"] = torch.tensor([-2, 3])
Y_dict["gold_cand_K_idx"] = torch.tensor([-2, 3])
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_long_sentences(self):
"""
Test long sentences.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
# Test 1: the sentence is long and has far apart aliases so it gets split up into two subsentences
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias3 cat cat cat cat cat cat alias4",
"char_spans": [[0, 6], [31, 37]],
"gold": [True, True],
}
]
gold_cand_idx_train = [[0, 0]]
gold_cand_idx = [[0, 0]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
        # Test 2: the sentence is long but there is only one alias, so the sentence gets windowed
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": 0,
"sentence": "alias3 cat alias4 cat cat cat cat",
"char_spans": [[0, 6], [11, 17]],
"gold": [True, True],
}
]
gold_cand_idx_train = [[0, 0]]
gold_cand_idx = [[0, 0]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_long_aliases(self):
"""
Test large number aliases.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [True, True, True],
}
]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_non_gold_aliases(self):
"""
Test non-gold aliases.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
# Test 1: the gold of False should be untouched for train, with only one True gold
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [True, False, False],
}
]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 2: the gold of False should be untouched for train, with all False golds
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [False, False, False],
}
]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 3: with the split of "dev", the subsentences should remain unchanged
# but the true index in Y_dict should be -1
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = "dev"
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [True, False, False],
}
]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, -1, -1]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 4: with the split of dev, all true indices should be -1 but the sentences should still be used
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = "dev"
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [False, False, False],
}
]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[-1, -1, -1]]
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_non_gold_no_weak_label_aliases(self):
"""
Test non gold aliases without weak labels.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
# Test 0: with all TRUE golds, use weak label of FALSE doesn't change anything
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [True, True, True],
}
]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = False
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 1: now that weak label is set to False, the golds of False should be removed for split of "train"
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [True, False, False],
}
]
gold_cand_idx_train = [[0, -1, -1]]
gold_cand_idx = [[0, -1, -1]]
use_weak_label = False
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 2: now that weak label is set to False, the sentence with all golds of False
# should be removed for "train".
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [False, False, False],
},
{
"aliases": ["alias3"],
"qids": ["Q1"],
"sent_idx_unq": 1,
"sentence": "alias3",
"char_spans": [[0, 1]],
"gold": [True],
},
]
gold_cand_idx_train = [[-1, -1, -1], [0]]
gold_cand_idx = [[-1, -1, -1], [0]]
use_weak_label = False
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split="train",
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 3: with the split of "dev", nothing should change from test 1 above where we were using "train"
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = "dev"
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [True, False, False],
}
]
gold_cand_idx_train = [[0, -1, -1]]
gold_cand_idx = [[0, -1, -1]]
use_weak_label = False
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
# Test 4: with the split of dev, all true indices should be -1 but the sentences should still be used
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = "dev"
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias3", "alias4", "alias3"],
"qids": ["Q1", "Q4", "Q1"],
"sent_idx_unq": 0,
"sentence": "alias3 alias4 alias3",
"char_spans": [[0, 6], [7, 13], [14, 20]],
"gold": [False, False, False],
},
{
"aliases": ["alias3"],
"qids": ["Q1"],
"sent_idx_unq": 1,
"sentence": "alias3",
"char_spans": [[0, 1]],
"gold": [True],
},
]
gold_cand_idx_train = [[-1, -1, -1], [0]]
gold_cand_idx = [[-1, -1, -1], [0]]
use_weak_label = False
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=1,
split=split,
is_bert=True,
)
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
def test_multiple_sentences(self):
"""
Test multiple sentences at once with multiprocessing.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
# Test 1: the gold of False should be untouched for train
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [
{
"aliases": ["alias1", "multi word alias2"],
"qids": ["Q1", "Q4"],
"sent_idx_unq": i,
"sentence": "alias'-1 or multi word alias2",
"char_spans": [[0, 8], [12, 29]],
"gold": [True, True],
}
for i in range(53)
]
assert len(input_data) == 53
gold_cand_idx_train = [[0, 2]] * 53
gold_cand_idx = [[0, 2]] * 53
use_weak_label = True
X_dict, Y_dict = self.prep_dicts(
max_seq_len,
max_window_len,
gold_cand_idx,
gold_cand_idx_train,
use_weak_label,
self.args.data_config.train_in_candidates,
input_data,
)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(
self.args,
name="Bootleg_test",
dataset=self.temp_file_name,
use_weak_label=use_weak_label,
load_entity_data=False,
tokenizer=self.tokenizer,
entity_symbols=self.entity_symbols,
dataset_threads=2,
split="train",
is_bert=True,
)
        # Using multiple threads means the prepped data comes back in arbitrary chunk order, so sort by (sent_idx, subsent_idx)
sort_arr = np.array(np.zeros(53 * 2), dtype=[("x", "<i8"), ("y", "<i8")])
sort_arr["x"] = dataset.X_dict["sent_idx"]
sort_arr["y"] = dataset.X_dict["subsent_idx"]
sort_idx = np.argsort(sort_arr, order=["x", "y"])
for key in list(dataset.X_dict.keys()):
dataset.X_dict[key] = dataset.X_dict[key][sort_idx]
for key in list(dataset.Y_dict.keys()):
dataset.Y_dict[key] = dataset.Y_dict[key][sort_idx]
assert_data_dicts_equal(X_dict, dataset.X_dict)
assert_data_dicts_equal(Y_dict, dataset.Y_dict)
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_data/test_data.py |
"""Test entity embedding generation."""
import os
import shutil
import unittest
import emmental
import torch
import cand_gen.eval as eval
import cand_gen.train as train
from bootleg.utils import utils
from cand_gen.utils.parser import parser_utils
class TestGenEntities(unittest.TestCase):
"""Test entity generation."""
def setUp(self) -> None:
"""Set up."""
self.args = parser_utils.parse_boot_and_emm_args(
"tests/run_args/test_candgen.json"
)
# This _MUST_ get passed the args so it gets a random seed set
emmental.init(log_dir="tests/temp_log", config=self.args)
if not os.path.exists(emmental.Meta.log_path):
os.makedirs(emmental.Meta.log_path)
def tearDown(self) -> None:
"""Tear down."""
dir = os.path.join(
self.args.data_config.data_dir, self.args.data_config.data_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join(
self.args.data_config.entity_dir, self.args.data_config.entity_prep_dir
)
if utils.exists_dir(dir):
shutil.rmtree(dir, ignore_errors=True)
dir = os.path.join("tests/temp_log")
if os.path.exists(dir):
shutil.rmtree(dir, ignore_errors=True)
def test_end2end(self):
"""Test end2end entity generation."""
        # For the collate and dataloaders to play nicely, the multiprocessing start method must be fork (this is set in run.py)
torch.multiprocessing.set_start_method("fork", force=True)
# Train and save model
train.run_model(config=self.args)
self.args["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
emmental.Meta.config["model_config"][
"model_path"
] = f"{emmental.Meta.log_path}/last_model.pth"
candidates_file, metrics_file = eval.run_model(config=self.args)
assert os.path.exists(candidates_file)
        assert os.path.exists(metrics_file)
num_sents = len([_ for _ in open(candidates_file)])
assert num_sents == 17
if __name__ == "__main__":
unittest.main()
| bootleg-master | tests/test_cand_gen/test_eval.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath(""))
sys.path.insert(0, os.path.abspath("."))
# sys.path.insert(0, os.path.abspath(".."))
# sys.path.insert(0, os.path.abspath("../.."))
# sys.setrecursionlimit(1500)
# -- Project information -----------------------------------------------------
project = "Bootleg"
copyright = "2021, Laurel Orr"
author = "Laurel Orr"
# The full version, including alpha/beta/rc tags
release = "v1.1.0dev1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
"nbsphinx",
"recommonmark",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {"navigation_depth": 2}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| bootleg-master | docs/source/conf.py |
"""Bootleg run command."""
import argparse
import itertools
import logging
import os
import shutil
import subprocess
import sys
import warnings
from copy import copy
import emmental
import numpy as np
import torch
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from rich.logging import RichHandler
from transformers import AutoTokenizer
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.data import get_dataloaders, get_slicedatasets
from bootleg.symbols.constants import DEV_SPLIT, TEST_SPLIT, TRAIN_SPLIT
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.task_config import NED_TASK
from bootleg.tasks import ned_task
from bootleg.utils import data_utils, eval_utils, utils
from bootleg.utils.eval_utils import collect_and_merge_results, dump_model_outputs
from bootleg.utils.model_utils import count_parameters
from bootleg.utils.parser.parser_utils import parse_boot_and_emm_args
from bootleg.utils.utils import (
dump_yaml_file,
load_yaml_file,
recurse_redict,
write_to_file,
)
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
def parse_cmdline_args():
"""
Take an input config file and parse it into the correct subdictionary groups for the model.
Returns:
model run mode of train, eval, or dumping
parsed Dict config
path to original config path
"""
# Parse cmdline args to specify config and mode
cli_parser = argparse.ArgumentParser(
description="Bootleg CLI Config",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cli_parser.add_argument(
"--config_script",
type=str,
default="",
help="Should mimic the config_args found in utils/parser/bootleg_args.py with parameters you want to override."
"You can also override the parameters from config_script by passing them in directly after config_script. "
"E.g., --train_config.batch_size 5",
)
cli_parser.add_argument(
"--mode",
type=str,
default="train",
choices=["train", "eval", "dump_preds"],
)
cli_parser.add_argument(
"--entity_emb_file",
type=str,
default=None,
help="Path to dumped entity embeddings (see ```extract_all_entities.py``` for how). Used in eval and dumping",
)
# you can add other args that will override those in the config_script
# parse_known_args returns 'args' that are the same as what parse_args() returns
# and 'unknown' which are args that the parser doesn't recognize but you want to keep.
# 'unknown' are what we pass on to our override any args from the second phase of arg parsing from the json file
cli_args, unknown = cli_parser.parse_known_args()
if len(cli_args.config_script) == 0:
raise ValueError("You must pass a config script via --config.")
config = parse_boot_and_emm_args(cli_args.config_script, unknown)
# Modify the local rank param from the cli args
config.learner_config.local_rank = int(os.getenv("LOCAL_RANK", -1))
mode = cli_args.mode
entity_emb_file = cli_args.entity_emb_file
return mode, config, cli_args.config_script, entity_emb_file
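# Illustrative sketch (not part of the original module): a typical single-process
# invocation might look like (paths are hypothetical)
#   python3 bootleg/run.py --config_script configs/my_config.yaml --mode eval
# and any config value can be overridden on the command line after --config_script,
# e.g. --train_config.batch_size 5.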
def setup(config, run_config_path=None):
"""
Set distributed backend and save configuration files.
Args:
config: config
run_config_path: path for original run config
"""
# torch.multiprocessing.set_sharing_strategy("file_system")
    # start method must be fork to work with Meta.config
torch.multiprocessing.set_start_method("fork", force=True)
"""
ulimit -n 500000
python3 -m torch.distributed.launch --nproc_per_node=2 bootleg/run.py --config_script ...
"""
log_level = logging.getLevelName(config.run_config.log_level.upper())
emmental.init(
log_dir=config["meta_config"]["log_path"],
config=config,
use_exact_log_path=config["meta_config"]["use_exact_log_path"],
local_rank=config.learner_config.local_rank,
level=log_level,
)
log = logging.getLogger()
# Remove streaming handlers and use rich
log.handlers = [h for h in log.handlers if not type(h) is logging.StreamHandler]
log.addHandler(RichHandler())
# Set up distributed backend
emmental.Meta.init_distributed_backend()
cmd_msg = " ".join(sys.argv)
# Recast to dictionaries for emmental - will remove Dotteddicts
emmental.Meta.config = recurse_redict(copy(emmental.Meta.config))
    # Log configuration into files
if config.learner_config.local_rank in [0, -1]:
write_to_file(f"{emmental.Meta.log_path}/cmd.txt", cmd_msg)
dump_yaml_file(
f"{emmental.Meta.log_path}/parsed_config.yaml", emmental.Meta.config
)
# Dump the run config (does not contain defaults)
if run_config_path is not None:
dump_yaml_file(
f"{emmental.Meta.log_path}/run_config.yaml",
load_yaml_file(run_config_path),
)
log_rank_0_info(logger, f"COMMAND: {cmd_msg}")
log_rank_0_info(
logger, f"Saving config to {emmental.Meta.log_path}/parsed_config.yaml"
)
git_hash = "Not able to retrieve git hash"
try:
git_hash = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=tformat:%h-%ad", "--date=short"]
).strip()
except subprocess.CalledProcessError:
pass
log_rank_0_info(logger, f"Git Hash: {git_hash}")
def configure_optimizer():
"""
Configure the optimizer for Bootleg.
Args:
config: config
"""
# Specify parameter group for Adam BERT
def grouped_parameters(model):
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
return [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": emmental.Meta.config["learner_config"][
"optimizer_config"
]["l2"],
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
emmental.Meta.config["learner_config"]["optimizer_config"][
"parameters"
] = grouped_parameters
return
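# Illustrative sketch (not part of the original module): grouped_parameters inside
# configure_optimizer above splits the model parameters into two optimizer groups so that
# bias and LayerNorm parameters are exempt from L2 weight decay. Roughly, for a
# hypothetical EmmentalModel `model`:
#   groups = grouped_parameters(model)
#   # groups[0]["weight_decay"] == learner_config.optimizer_config.l2
#   # groups[1]["weight_decay"] == 0.0
# The callable is stored in Emmental's optimizer config so it can be applied to the model
# when the optimizer is built.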
# TODO: optimize slices so we split them based on max aliases (save A LOT of memory)
def run_model(mode, config, run_config_path=None, entity_emb_file=None):
"""
Run Emmental Bootleg models.
Args:
mode: run mode (train, eval, dump_preds)
config: parsed model config
run_config_path: original config path (for saving)
entity_emb_file: file for dumped entity embeddings
"""
# torch.multiprocessing.set_sharing_strategy("file_system")
# Set up distributed backend and save configuration files
setup(config, run_config_path)
# Load entity symbols
log_rank_0_info(logger, "Loading entity symbols...")
entity_symbols = EntitySymbols.load_from_cache(
load_dir=os.path.join(
config.data_config.entity_dir, config.data_config.entity_map_dir
),
alias_cand_map_dir=config.data_config.alias_cand_map,
alias_idx_dir=config.data_config.alias_idx_map,
)
# Create tasks
tasks = [NED_TASK]
# Create splits for data loaders
data_splits = [TRAIN_SPLIT, DEV_SPLIT, TEST_SPLIT]
# Slices are for eval so we only split on test/dev
slice_splits = [DEV_SPLIT, TEST_SPLIT]
# If doing eval, only run on test data
if mode in ["eval"]:
data_splits = [TEST_SPLIT]
slice_splits = [TEST_SPLIT]
elif mode in ["dump_preds"]:
data_splits = [TEST_SPLIT]
slice_splits = []
# We only do dumping if weak labels is True
if config.data_config[f"{TEST_SPLIT}_dataset"].use_weak_label is False:
raise ValueError(
"When calling dump_preds, we require use_weak_label to be True."
)
load_entity_data = True
if mode == "train":
assert (
entity_emb_file is None
), "We do not accept entity_emb_file when training."
else:
# If we are doing eval with the entity embeddings, do not create/load entity token data
if entity_emb_file is not None:
load_entity_data = False
# Batch cands is for training
use_batch_cands = mode == "train"
# Create tokenizer
context_tokenizer = AutoTokenizer.from_pretrained(
config.data_config.word_embedding.bert_model
)
data_utils.add_special_tokens(context_tokenizer)
# Gets dataloaders
dataloaders = get_dataloaders(
config,
tasks,
use_batch_cands,
load_entity_data,
data_splits,
entity_symbols,
context_tokenizer,
)
slice_datasets = get_slicedatasets(config, slice_splits, entity_symbols)
configure_optimizer()
# Create models and add tasks
log_rank_0_info(logger, "Starting Bootleg Model")
model_name = "Bootleg"
model = EmmentalModel(name=model_name)
model.add_task(
ned_task.create_task(
config,
use_batch_cands,
len(context_tokenizer),
slice_datasets,
entity_emb_file,
)
)
# Print param counts
if mode == "train":
log_rank_0_debug(logger, "PARAMS WITH GRAD\n" + "=" * 30)
total_params = count_parameters(model, requires_grad=True, logger=logger)
log_rank_0_info(logger, f"===> Total Params With Grad: {total_params}")
log_rank_0_debug(logger, "PARAMS WITHOUT GRAD\n" + "=" * 30)
total_params = count_parameters(model, requires_grad=False, logger=logger)
log_rank_0_info(logger, f"===> Total Params Without Grad: {total_params}")
# Load the best model from the pretrained model
if config["model_config"]["model_path"] is not None:
model.load(config["model_config"]["model_path"])
# Train model
if mode == "train":
emmental_learner = EmmentalLearner()
emmental_learner._set_optimizer(model)
# Save first checkpoint
if config.learner_config.local_rank in [0, -1]:
model.save(f"{emmental.Meta.log_path}/checkpoint_0.0.model.pth")
emmental_learner.learn(model, dataloaders)
if config.learner_config.local_rank in [0, -1]:
model.save(f"{emmental.Meta.log_path}/last_model.pth")
# Multi-gpu DataParallel eval (NOT distributed)
if mode in ["eval", "dump_preds"]:
# This happens inside EmmentalLearner for training
if (
config["learner_config"]["local_rank"] == -1
and config["model_config"]["dataparallel"]
):
model._to_dataparallel()
# If just finished training a model or in eval mode, run eval
if mode in ["train", "eval"]:
if config.learner_config.local_rank in [0, -1]:
if mode == "train":
# Skip the TRAIN dataloader
scores = model.score(dataloaders[1:])
else:
scores = model.score(dataloaders)
# Save metrics and models
log_rank_0_info(logger, f"Saving metrics to {emmental.Meta.log_path}")
log_rank_0_info(logger, f"Metrics: {scores}")
scores["log_path"] = emmental.Meta.log_path
write_to_file(f"{emmental.Meta.log_path}/{mode}_metrics.txt", scores)
eval_utils.write_disambig_metrics_to_csv(
f"{emmental.Meta.log_path}/{mode}_disambig_metrics.csv", scores
)
else:
scores = {}
return scores
# If you want detailed dumps, save model outputs
assert mode in [
"dump_preds",
], 'Mode must be "dump_preds"'
assert (
len(dataloaders) == 1
), "We should only have length 1 dataloaders for dump_preds!"
final_result_file = None
# Get emmental action output for entity embeddings
num_dump_file_splits = config.run_config.dump_preds_num_data_splits
if config.learner_config.local_rank in [0, -1]:
# Setup files/folders
filename = os.path.basename(dataloaders[0].dataset.raw_filename)
eval_folder = eval_utils.get_eval_folder(filename)
temp_eval_folder = os.path.join(eval_folder, "_cache")
utils.ensure_dir(temp_eval_folder)
log_rank_0_debug(
logger,
f"Will split {os.path.join(config.data_config.data_dir, filename)} int {num_dump_file_splits} splits.",
)
# Chunk file into splits if desired
if num_dump_file_splits > 1:
chunk_prep_dir = os.path.join(temp_eval_folder, "_data_split_in")
utils.ensure_dir(chunk_prep_dir)
total_input = sum(
1 for _ in open(os.path.join(config.data_config.data_dir, filename))
)
chunk_input = int(np.ceil(total_input / num_dump_file_splits))
log_rank_0_debug(
logger,
f"Chunking up {total_input} lines into subfiles of size {chunk_input} lines",
)
total_input_from_chunks, input_files_dict = utils.chunk_file(
os.path.join(config.data_config.data_dir, filename),
chunk_prep_dir,
chunk_input,
)
input_files = list(input_files_dict.keys())
else:
input_files = [os.path.join(config.data_config.data_dir, filename)]
# Before running dump, we need to collect a mapping from sent_idx to prepped dataset indexes. We don't
        # want to re-prep that data, and we have no guarantees about the order of the prepped data w.r.t. these chunks.
sent_idx2preppedids = dataloaders[0].dataset.get_sentidx_to_rowids()
# For each split, run dump preds
output_files = []
total_mentions_seen = 0
for input_id, input_filename in enumerate(input_files):
sentidx2num_mentions, sent_idx2row = eval_utils.get_sent_idx2num_mens(
input_filename
)
log_rank_0_debug(logger, "Done collecting sentence to mention map")
dataloader = get_dataloaders(
config,
tasks,
use_batch_cands,
load_entity_data,
data_splits,
entity_symbols,
context_tokenizer,
dataset_offsets={
data_splits[0]: list(
itertools.chain(
*[
sent_idx2preppedids.get(sent_id, [])
for sent_id in sentidx2num_mentions
]
)
)
},
)[0]
input_file_save_folder = os.path.join(
temp_eval_folder, f"_data_out_{input_id}"
)
saved_dump_memmap, save_dump_memmap_config = dump_model_outputs(
model,
dataloader,
config,
sentidx2num_mentions,
input_file_save_folder,
entity_symbols,
NED_TASK,
config.run_config.overwrite_eval_dumps,
)
log_rank_0_debug(
logger,
f"Saving intermediate files to {saved_dump_memmap} and {save_dump_memmap_config}",
)
del dataloader
result_file, mentions_seen = collect_and_merge_results(
saved_dump_memmap,
save_dump_memmap_config,
config,
sentidx2num_mentions,
sent_idx2row,
input_file_save_folder,
entity_symbols,
)
log_rank_0_info(
logger,
f"{mentions_seen} mentions seen. Bootleg labels saved at {result_file}",
)
# Collect results
total_mentions_seen += mentions_seen
assert (
result_file not in output_files
), f"{result_file} already in output_files"
output_files.append(result_file)
# Merge results
final_result_file = eval_utils.get_result_file(eval_folder)
with open(final_result_file, "wb") as outfile:
for filename in output_files:
with open(filename, "rb") as readfile:
shutil.copyfileobj(readfile, outfile)
log_rank_0_info(
logger,
f"Saved final bootleg outputs at {final_result_file}. "
f"Removing cached folder {temp_eval_folder}",
)
eval_utils.try_rmtree(temp_eval_folder)
return final_result_file
if __name__ == "__main__":
mode, config, run_config_path, entity_emb_file = parse_cmdline_args()
run_model(mode, config, run_config_path, entity_emb_file)
| bootleg-master | bootleg/run.py |
"""Bootleg version."""
__version__ = "1.1.1dev0"
| bootleg-master | bootleg/_version.py |
"""Emmental task constants."""
NED_TASK = "NED"
BATCH_CANDS_LABEL = "gold_unq_eid_idx"
CANDS_LABEL = "gold_cand_K_idx"
| bootleg-master | bootleg/task_config.py |
"""Print functions for distributed computation."""
import torch
def log_rank_0_info(logger, message):
"""If distributed is initialized log info only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
logger.info(message)
else:
logger.info(message)
def log_rank_0_debug(logger, message):
"""If distributed is initialized log debug only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
logger.debug(message)
else:
logger.debug(message)
| bootleg-master | bootleg/__init__.py |
"""Bootleg NED Dataset."""
import logging
import multiprocessing
import os
import re
import shutil
import sys
import time
import traceback
import warnings
from collections import defaultdict
import numpy as np
import torch
import ujson
from emmental.data import EmmentalDataset
from tqdm.auto import tqdm
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.layers.alias_to_ent_encoder import AliasEntityTable
from bootleg.symbols.constants import ANCHOR_KEY, PAD_ID, STOP_WORDS
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.symbols.kg_symbols import KGSymbols
from bootleg.symbols.type_symbols import TypeSymbols
from bootleg.utils import data_utils, utils
warnings.filterwarnings(
"ignore",
message="Could not import the lzma module. Your installed Python is incomplete. "
"Attempting to use lzma compression will result in a RuntimeError.",
)
warnings.filterwarnings(
"ignore",
message="FutureWarning: Passing (type, 1) or '1type'*",
)
logger = logging.getLogger(__name__)
# Removes warnings about TOKENIZERS_PARALLELISM
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class InputExample(object):
"""A single training/test example for prediction."""
def __init__(
self,
sent_idx,
subsent_idx,
alias_list_pos,
alias_to_predict,
span,
phrase,
alias,
qid,
qid_cnt_mask_score,
):
"""Init InputExample."""
assert (
type(sent_idx) is int
), f"We need the sentence index is an int. You have {type(sent_idx)}"
self.sent_idx = int(sent_idx)
self.subsent_idx = subsent_idx
self.alias_list_pos = alias_list_pos
self.alias_to_predict = alias_to_predict
self.span = span
self.phrase = phrase
self.alias = alias
self.qid = qid
self.qid_cnt_mask_score = qid_cnt_mask_score
def to_dict(self):
"""Return dictionary of object."""
return {
"sent_idx": self.sent_idx,
"subsent_idx": self.subsent_idx,
"alias_list_pos": self.alias_list_pos,
"alias_to_predict": self.alias_to_predict,
"span": self.span,
"phrase": self.phrase,
"alias": self.alias,
"qid": self.qid,
"qid_cnt_mask_score": self.qid_cnt_mask_score,
}
@classmethod
def from_dict(cls, in_dict):
"""Create pobject from dictionary."""
return cls(
in_dict["sent_idx"],
in_dict["subsent_idx"],
in_dict["alias_list_pos"],
in_dict["alias_to_predict"],
in_dict["span"],
in_dict["phrase"],
in_dict["alias"],
in_dict["qid"],
in_dict["qid_cnt_mask_score"],
)
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
alias_idx,
word_input_ids,
word_token_type_ids,
word_attention_mask,
word_qid_cnt_mask_score,
gold_eid,
for_dump_gold_eid,
gold_cand_K_idx,
for_dump_gold_cand_K_idx_train,
alias_list_pos,
sent_idx,
subsent_idx,
guid,
):
"""Initialize InputFeature."""
self.alias_idx = alias_idx
self.word_input_ids = word_input_ids
self.word_token_type_ids = word_token_type_ids
self.word_attention_mask = word_attention_mask
self.word_qid_cnt_mask_score = word_qid_cnt_mask_score
self.gold_eid = gold_eid
self.for_dump_gold_eid = for_dump_gold_eid
self.gold_cand_K_idx = gold_cand_K_idx
self.for_dump_gold_cand_K_idx_train = for_dump_gold_cand_K_idx_train
self.alias_list_pos = alias_list_pos
self.sent_idx = sent_idx
self.subsent_idx = subsent_idx
self.guid = guid
def to_dict(self):
"""Return dictionary of object."""
return {
"alias_idx": self.alias_idx,
"word_input_ids": self.word_input_ids,
"word_token_type_ids": self.word_token_type_ids,
"word_attention_mask": self.word_attention_mask,
"word_qid_cnt_mask_score": self.word_qid_cnt_mask_score,
"gold_eid": self.gold_eid,
"for_dump_gold_eid": self.for_dump_gold_eid,
"gold_cand_K_idx": self.gold_cand_K_idx,
"for_dump_gold_cand_K_idx_train": self.for_dump_gold_cand_K_idx_train,
"alias_list_pos": self.alias_list_pos,
"sent_idx": self.sent_idx,
"subsent_idx": self.subsent_idx,
"guid": self.guid,
}
@classmethod
def from_dict(cls, in_dict):
"""Create pobject from dictionary."""
return cls(
alias_idx=in_dict["alias_idx"],
word_input_ids=in_dict["word_input_ids"],
word_token_type_ids=in_dict["word_token_type_ids"],
word_attention_mask=in_dict["word_attention_mask"],
word_qid_cnt_mask_score=in_dict["word_qid_cnt_mask_score"],
gold_eid=in_dict["gold_eid"],
for_dump_gold_eid=in_dict["for_dump_gold_eid"],
gold_cand_K_idx=in_dict["gold_cand_K_idx"],
for_dump_gold_cand_K_idx_train=in_dict["for_dump_gold_cand_K_idx_train"],
alias_list_pos=in_dict["alias_list_pos"],
sent_idx=in_dict["sent_idx"],
subsent_idx=in_dict["subsent_idx"],
guid=in_dict["guid"],
)
def extract_context(span, sentence, max_seq_window_len, tokenizer):
"""Extract the left and right context window around a span.
Args:
span: character span (left and right values)
sentence: sentence
max_seq_window_len: maximum window length around a span
tokenizer: tokenizer
Returns: context window
"""
start_token_pieces = []
if span[0] > 0 and sentence[span[0] - 1] != " ":
start_token_pieces.append(" ")
start_token_pieces.append("[ent_start]")
if span[0] < len(sentence) and sentence[span[0]] != " ":
start_token_pieces.append(" ")
end_token_pieces = []
if span[1] > 0 and sentence[span[1] - 1] != " ":
end_token_pieces.append(" ")
end_token_pieces.append("[ent_end]")
if span[1] < len(sentence) and sentence[span[1]] != " ":
end_token_pieces.append(" ")
char_window_around_mention = (
tokenizer.model_max_length * 20
) # Assumes max 20 chars per token
with_entity_toks = (
f"{sentence[max(0, span[0]-char_window_around_mention):span[0]]}"
f"{''.join(start_token_pieces)}"
f"{sentence[span[0]:span[1]]}"
f"{''.join(end_token_pieces)}"
f"{sentence[span[1]:span[1]+char_window_around_mention]}"
)
tokens = tokenizer.tokenize(with_entity_toks)
# New indexes including ent start and end
span_tok_l = tokens.index("[ent_start]")
span_tok_r = tokens.index("[ent_end]") + 1
# If more tokens to the right, shift weight there
if span_tok_l < len(tokens) - span_tok_r:
prev_context = tokens[max(0, span_tok_l - max_seq_window_len // 2) : span_tok_l]
# Add in the mention tokens
next_context = tokens[
span_tok_l : span_tok_r + max_seq_window_len - len(prev_context)
]
else:
next_context = tokens[span_tok_r : span_tok_r + max_seq_window_len // 2]
# Add in the mention tokens
prev_context = tokens[
max(0, span_tok_l - (max_seq_window_len - len(next_context))) : span_tok_r
]
context = tokenizer.convert_tokens_to_string(prev_context + next_context)
return context
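# Illustrative sketch (not part of the original module) of how extract_context might be
# called. The sentence and window size are assumptions for the example, and the tokenizer
# must already have the [ent_start]/[ent_end] special tokens added (as done elsewhere via
# data_utils.add_special_tokens) for the .index() lookups above to succeed:
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
#   tok.add_tokens(["[ent_start]", "[ent_end]"])
#   ctx = extract_context(
#       span=[0, 6], sentence="alias3 cat cat cat cat alias4", max_seq_window_len=10, tokenizer=tok
#   )
#   # ctx is a detokenized window containing "[ent_start] alias3 [ent_end]" plus roughly
#   # max_seq_window_len tokens around the mention, weighted toward the side with more context.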
def get_structural_entity_str(items, max_tok_len, sep_tok):
"""Return sep_tok joined list of items of strucutral resources.
Args:
items: list of structural resources
max_tok_len: maximum token length
sep_tok: token to separate out resources
Returns:
        result string, flag (0/1) indicating whether any items were dropped to stay within ``max_tok_len``
"""
i = 1
over_len = 0
while True:
res = f" {sep_tok} " + f" {sep_tok} ".join(items[:i])
if len(res.split()) > max_tok_len or i > len(items):
if i < len(items):
over_len = 1
res = f" {sep_tok} " + f" {sep_tok} ".join(items[: max(1, i - 1)])
break
i += 1
return res, over_len
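# Illustrative sketch (not part of the original module): with hypothetical inputs,
#   get_structural_entity_str(["a b", "c d", "e f"], max_tok_len=3, sep_tok="[ent_type]")
# returns (" [ent_type] a b", 1); items are greedily joined with the separator until the
# whitespace-token count would exceed max_tok_len, and the second value is a 0/1 flag
# marking that later items had to be dropped.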
def get_entity_string(
qid,
constants,
entity_symbols,
kg_symbols,
type_symbols,
):
"""
Get string representation of entity.
For each entity, generates a string that is fed into a language model to
    generate an entity embedding. Also returns the positions of tokens that match the
    entity title (even if they occur in the description).
Args:
qid: QID
constants: Dict of constants
entity_symbols: entity symbols
kg_symbols: kg symbols
type_symbols: type symbols
    Returns: entity string, title token spans, flag for types over max length, flag for relations over max length
"""
over_kg_len = 0
over_type_len = 0
desc_str = (
"[ent_desc] " + entity_symbols.get_desc(qid) if constants["use_desc"] else ""
)
title_str = entity_symbols.get_title(qid) if entity_symbols.qid_exists(qid) else ""
# To encourage mention similarity, we remove the (<type>) from titles
title_str = re.sub(r"(\(.*\))", r"", title_str).strip()
# To add kgs, sep by "[ent_kg]" and then truncate to max_ent_kg_len
# Then merge with description text
if constants["use_kg"]:
# Triples stores "relation tail_qid_title" (e.g. "is member of Manchester United" for qid = David Beckham)
triples = []
for rel, tail_qids in kg_symbols.get_relations_tails_for_qid(qid).items():
for tail_q in tail_qids:
if not entity_symbols.qid_exists(tail_q):
continue
triples.append(rel + " " + entity_symbols.get_title(tail_q))
kg_str, over_len = get_structural_entity_str(
triples,
constants["max_ent_kg_len"],
"[ent_kg]",
)
over_kg_len += over_len
desc_str = " ".join([kg_str, desc_str])
# To add types, sep by "[ent_type]" and then truncate to max_type_ent_len
# Then merge with description text
if constants["use_types"]:
type_str, over_len = get_structural_entity_str(
type_symbols.get_types(qid),
constants["max_ent_type_len"],
"[ent_type]",
)
over_type_len += over_len
desc_str = " ".join([type_str, desc_str])
ent_str = " ".join([title_str, desc_str])
# Remove double spaces
ent_split = ent_str.split()
ent_str = " ".join(ent_split)
title_spans = []
if len(title_str) > 0:
# Find all occurrences of title words in the ent_str (helps if description has abbreviated name)
# Make sure you don't mask any types or kg relations
title_pieces = set(title_str.split())
to_skip = False
for e_id, ent_w in enumerate(ent_split):
if ent_w == "[ent_type]":
to_skip = True
if ent_w == "[ent_desc]":
to_skip = False
if to_skip:
continue
if ent_w in title_pieces and ent_w not in STOP_WORDS:
title_spans.append(e_id)
# all_title_occ = re.finditer(f"({title_str})", ent_str)
# all_spaces = np.array([m.start() for m in re.finditer("\s", ent_str)])
# for match in all_title_occ:
# start_w = np.sum(all_spaces < match.start())
# end_w = np.sum(all_spaces <= match.end())
# for i in range(start_w, end_w):
# title_spans.append(i)
return ent_str, title_spans, over_type_len, over_kg_len
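# Illustrative sketch (not part of the original module): for a hypothetical QID titled
# "David Beckham (footballer)" with types and KG relations enabled, get_entity_string
# assembles roughly
#   "David Beckham [ent_type] <types> [ent_kg] <relation> <tail title> [ent_desc] <description>"
# (the parenthesized qualifier is stripped from the title), and title_spans holds the word
# positions of non-stopword title tokens, skipping everything between [ent_type] and [ent_desc].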
def create_examples_initializer(constants_dict, tokenizer):
"""Create examples multiprocessing initializer."""
global constants_global
constants_global = constants_dict
global tokenizer_global
tokenizer_global = tokenizer
def create_examples(
dataset,
create_ex_indir,
create_ex_outdir,
meta_file,
data_config,
dataset_threads,
use_weak_label,
split,
is_bert,
tokenizer,
):
"""Create examples from the raw input data.
Args:
dataset: data file to read
create_ex_indir: temporary directory where input files are stored
create_ex_outdir: temporary directory to store output files from method
meta_file: metadata file to save the file names/paths for the next step in prep pipeline
data_config: data config
dataset_threads: number of threads
use_weak_label: whether to use weak labeling or not
split: data split
is_bert: is the tokenizer a BERT one
tokenizer: tokenizer
"""
start = time.time()
num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
qidcnt_file = os.path.join(data_config.data_dir, data_config.qid_cnt_map)
log_rank_0_debug(logger, "Counting lines")
total_input = sum(1 for _ in open(dataset))
constants_dict = {
"is_bert": is_bert,
"use_weak_label": use_weak_label,
"split": split,
"qidcnt_file": qidcnt_file,
"max_seq_len": data_config.max_seq_len,
"max_seq_window_len": data_config.max_seq_window_len,
}
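# In the multiprocessing path below, constants_dict is shared with each worker via
# create_examples_initializer (stored as constants_global); with a single process it
# is passed to create_examples_single directly.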
if not os.path.exists(qidcnt_file):
log_rank_0_info(
logger, f"{qidcnt_file} does not exist. Using uniform counts..."
)
if num_processes == 1:
out_file_name = os.path.join(create_ex_outdir, os.path.basename(dataset))
res = create_examples_single(
in_file_idx=0,
in_file_name=dataset,
in_file_lines=total_input,
out_file_name=out_file_name,
constants_dict=constants_dict,
tokenizer=tokenizer,
)
files_and_counts = {}
total_output = res["total_lines"]
files_and_counts[res["output_filename"]] = res["total_lines"]
else:
log_rank_0_info(
logger, f"Starting to extract examples using {num_processes} processes"
)
chunk_input = int(np.ceil(total_input / num_processes))
log_rank_0_debug(
logger,
f"Chunking up {total_input} lines into subfiles of size {chunk_input} lines",
)
total_input_from_chunks, input_files_dict = utils.chunk_file(
dataset, create_ex_indir, chunk_input
)
input_files = list(input_files_dict.keys())
input_file_lines = [input_files_dict[k] for k in input_files]
output_files = [
in_file_name.replace(create_ex_indir, create_ex_outdir)
for in_file_name in input_files
]
assert (
total_input == total_input_from_chunks
), f"Total input lines {total_input} do not match chunked total {total_input_from_chunks}"
log_rank_0_debug(logger, "Done chunking files. Starting pool.")
pool = multiprocessing.Pool(
processes=num_processes,
initializer=create_examples_initializer,
initargs=[constants_dict, tokenizer],
)
total_output = 0
input_args = list(
zip(
list(range(len(input_files))),
input_files,
input_file_lines,
output_files,
)
)
# Store output files and counts for saving in next step
files_and_counts = {}
for res in pool.imap_unordered(create_examples_hlp, input_args, chunksize=1):
total_output += res["total_lines"]
files_and_counts[res["output_filename"]] = res["total_lines"]
pool.close()
pool.join()
utils.dump_json_file(
meta_file, {"num_mentions": total_output, "files_and_counts": files_and_counts}
)
log_rank_0_debug(
logger,
f"Done with extracting examples in {time.time() - start}. "
f"Total lines seen {total_input}. Total lines kept {total_output}.",
)
return
def create_examples_hlp(args):
"""Create examples multiprocessing helper."""
in_file_idx, in_file_name, in_file_lines, out_file_name = args
return create_examples_single(
in_file_idx,
in_file_name,
in_file_lines,
out_file_name,
constants_global,
tokenizer_global,
)
def create_examples_single(
in_file_idx, in_file_name, in_file_lines, out_file_name, constants_dict, tokenizer
):
"""Create examples."""
split = constants_dict["split"]
max_seq_window_len = constants_dict["max_seq_window_len"]
use_weak_label = constants_dict["use_weak_label"]
qidcnt_file = constants_dict["qidcnt_file"]
qid2cnt = {}
quantile_buckets = [float(i / 100) for i in list(range(0, 101, 5))]
# If not qid2cnt, the quantile_bucket will be 1.0
quants = np.array([-1 for _ in quantile_buckets])
quants[-1] = 0
if os.path.exists(qidcnt_file):
qid2cnt = ujson.load(open(qidcnt_file))
quants = np.quantile(list(qid2cnt.values()), quantile_buckets)
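# Later, sum(count > quants) indexes into quantile_buckets, mapping a raw QID count to
# a popularity score in [0, 1] where 1.0 corresponds to the most frequent entities.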
with open(out_file_name, "w", encoding="utf-8") as out_f:
total_subsents = 0
total_lines = 0
for ex in tqdm(
open(in_file_name, "r", encoding="utf-8"),
total=in_file_lines,
desc=f"{in_file_idx}",
position=in_file_idx,
):
total_lines += 1
line = ujson.loads(ex)
assert "sent_idx_unq" in line
assert "aliases" in line
assert "qids" in line
assert "char_spans" in line, (
'Require "char_spans" to be input. '
"See utils/preprocessing/convert_to_char_spans.py"
)
assert "sentence" in line
assert ANCHOR_KEY in line
sent_idx = line["sent_idx_unq"]
# aliases are assumed to be lower-cased in candidate map
aliases = [alias.lower() for alias in line["aliases"]]
qids = line["qids"]
spans = line["char_spans"]
phrase = line["sentence"]
assert (
len(spans) == len(aliases) == len(qids)
), "lengths of alias-related values not equal"
# For datasets, we see all aliases, unless use_weak_label is turned off
# aliases_seen_by_model = [i for i in range(len(aliases))]
anchor = [True for i in range(len(aliases))]
if ANCHOR_KEY in line:
anchor = line[ANCHOR_KEY]
assert len(aliases) == len(anchor)
assert all(isinstance(a, bool) for a in anchor)
for span in spans:
assert (
len(span) == 2
), f"Span should be len 2. Your span {span} is {len(span)}"
assert span[1] <= len(
phrase
), f"You have span {span} that is beyond the length of the sentence {phrase}"
if not use_weak_label:
aliases = [aliases[i] for i in range(len(anchor)) if anchor[i] is True]
qids = [qids[i] for i in range(len(anchor)) if anchor[i] is True]
spans = [spans[i] for i in range(len(anchor)) if anchor[i] is True]
# aliases_seen_by_model = [i for i in range(len(aliases))]
anchor = [True for i in range(len(aliases))]
# Happens if use weak labels is False
if len(aliases) == 0:
continue
for subsent_idx in range(len(aliases)):
span = spans[subsent_idx]
alias_anchor = anchor[subsent_idx]
alias = aliases[subsent_idx]
qid = qids[subsent_idx]
context = extract_context(span, phrase, max_seq_window_len, tokenizer)
# Get the percentile bucket between [0, 1]
# Large counts will be closer to 1
qid_cnt_mask_score = quantile_buckets[sum(qid2cnt.get(qid, 0) > quants)]
assert 0 <= qid_cnt_mask_score <= 1.0
new_span = [
context.index("[ent_start]"),
context.index("[ent_end]") + len("[ent_end]"),
]
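# new_span holds character offsets into the context window: the start of "[ent_start]"
# and the character just past "[ent_end]".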
# alias_to_predict_arr is an index into idxs_arr/anchor_arr/aliases_arr.
# It should only include true anchors if eval dataset.
# During training want to backpropagate on false anchors as well
if split != "train":
alias_to_predict = 0 if alias_anchor is True else -1
else:
alias_to_predict = 0
total_subsents += 1
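# Each sub-sentence example is serialized as one JSON line; a hypothetical record:
# {"sent_idx": 12, "subsent_idx": 0, "alias_list_pos": 0, "alias_to_predict": 0,
#  "span": [34, 71], "phrase": "... [ent_start] Lincoln [ent_end] ...",
#  "alias": "lincoln", "qid": "Q91", "qid_cnt_mask_score": 0.85}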
out_f.write(
ujson.dumps(
InputExample(
sent_idx=sent_idx,
subsent_idx=subsent_idx,
alias_list_pos=subsent_idx,
alias_to_predict=alias_to_predict,
span=new_span,
phrase=context,
alias=alias,
qid=qid,
qid_cnt_mask_score=qid_cnt_mask_score,
).to_dict(),
ensure_ascii=False,
)
+ "\n"
)
return {"total_lines": total_subsents, "output_filename": out_file_name}
def convert_examples_to_features_and_save_initializer(
tokenizer,
data_config,
save_dataset_name,
save_labels_name,
X_storage,
Y_storage,
):
"""Create examples multiprocessing initializer."""
global tokenizer_global
tokenizer_global = tokenizer
global entitysymbols_global
entitysymbols_global = EntitySymbols.load_from_cache(
load_dir=os.path.join(data_config.entity_dir, data_config.entity_map_dir),
alias_cand_map_dir=data_config.alias_cand_map,
alias_idx_dir=data_config.alias_idx_map,
)
global mmap_file_global
mmap_file_global = np.memmap(save_dataset_name, dtype=X_storage, mode="r+")
global mmap_label_file_global
mmap_label_file_global = np.memmap(save_labels_name, dtype=Y_storage, mode="r+")
def convert_examples_to_features_and_save(
meta_file,
guid_dtype,
data_config,
dataset_threads,
use_weak_label,
split,
is_bert,
save_dataset_name,
save_labels_name,
X_storage,
Y_storage,
tokenizer,
entity_symbols,
):
"""
Create features from examples.
Converts the prepped examples into input features and saves in memmap
files. These are used in the __get_item__ method.
Args:
meta_file: metadata file where input file paths are saved
guid_dtype: unique identifier dtype
data_config: data config
dataset_threads: number of threads
use_weak_label: whether to use weak labeling or not
split: data split
is_bert: is the tokenizer a BERT tokenizer
save_dataset_name: data features file name to save
save_labels_name: data labels file name to save
X_storage: data features storage type (for memmap)
Y_storage: data labels storage type (for memmap)
tokenizer: tokenizer
entity_symbols: entity symbols
"""
start = time.time()
num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
# One example per mention per candidate
total_input = utils.load_json_file(meta_file)["num_mentions"]
files_and_counts = utils.load_json_file(meta_file)["files_and_counts"]
# IMPORTANT: for distributed writing to memmap files, you must create them in w+ mode before being opened in r+
memmap_file = np.memmap(
save_dataset_name, dtype=X_storage, mode="w+", shape=(total_input,), order="C"
)
# Save -1 in sent_idx to check that things are loaded correctly later
memmap_file["sent_idx"][:] = -1
memmap_label_file = np.memmap(
save_labels_name, dtype=Y_storage, mode="w+", shape=(total_input,), order="C"
)
input_args = []
# Saves where in memap file to start writing
offset = 0
for i, in_file_name in enumerate(files_and_counts.keys()):
input_args.append(
{
"file_name": in_file_name,
"in_file_idx": i,
"in_file_lines": files_and_counts[in_file_name],
"save_file_offset": offset,
"ex_print_mod": int(np.ceil(total_input / 20)),
"guid_dtype": guid_dtype,
"is_bert": is_bert,
"use_weak_label": use_weak_label,
"split": split,
"max_seq_len": data_config.max_seq_len,
"train_in_candidates": data_config.train_in_candidates,
"print_examples": data_config.print_examples_prep,
}
)
offset += files_and_counts[in_file_name]
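# Each worker writes rows [save_file_offset, save_file_offset + in_file_lines) of the
# memmap, so parallel writes never overlap.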
if num_processes == 1:
assert len(input_args) == 1
total_output = convert_examples_to_features_and_save_single(
input_args[0],
tokenizer,
entity_symbols,
memmap_file,
memmap_label_file,
)
else:
log_rank_0_debug(
logger,
"Initializing pool. This may take a few minutes.",
)
pool = multiprocessing.Pool(
processes=num_processes,
initializer=convert_examples_to_features_and_save_initializer,
initargs=[
tokenizer,
data_config,
save_dataset_name,
save_labels_name,
X_storage,
Y_storage,
],
)
total_output = 0
for res in pool.imap_unordered(
convert_examples_to_features_and_save_hlp, input_args, chunksize=1
):
c = res
total_output += c
pool.close()
# Verify that sentences are unique and saved correctly
mmap_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
all_uniq_ids = set()
for i in tqdm(range(total_input), desc="Checking sentence uniqueness"):
assert mmap_file["sent_idx"][i] != -1, f"Index {i} has -1 sent idx"
uniq_id_without_al = str(
f"{mmap_file['sent_idx'][i]}.{mmap_file['subsent_idx'][i]}"
)
assert (
uniq_id_without_al not in all_uniq_ids
), f"Idx {uniq_id_without_al} is not unique and already in data"
all_uniq_ids.add(uniq_id_without_al)
log_rank_0_debug(
logger,
f"Done with extracting examples in {time.time() - start}. Total lines seen {total_input}. "
f"Total lines kept {total_output}.",
)
return
def convert_examples_to_features_and_save_hlp(input_dict):
"""Convert examples to features multiprocessing initializer."""
return convert_examples_to_features_and_save_single(
input_dict,
tokenizer_global,
entitysymbols_global,
mmap_file_global,
mmap_label_file_global,
)
def convert_examples_to_features_and_save_single(
input_dict,
tokenizer,
entitysymbols,
mmap_file,
mmap_label_file,
):
"""Convert examples to features multiprocessing helper."""
file_name = input_dict["file_name"]
in_file_idx = input_dict["in_file_idx"]
in_file_lines = input_dict["in_file_lines"]
save_file_offset = input_dict["save_file_offset"]
ex_print_mod = input_dict["ex_print_mod"]
guid_dtype = input_dict["guid_dtype"]
print_examples = input_dict["print_examples"]
max_seq_len = input_dict["max_seq_len"]
split = input_dict["split"]
train_in_candidates = input_dict["train_in_candidates"]
# if not train_in_candidates:
# raise NotImplementedError("train_in_candidates of False is not fully supported yet")
max_total_input_len = max_seq_len
total_saved_features = 0
for idx, in_line in tqdm(
enumerate(open(file_name, "r", encoding="utf-8")),
total=in_file_lines,
desc=f"Processing {file_name}",
position=in_file_idx,
):
example = InputExample.from_dict(ujson.loads(in_line))
example_idx = save_file_offset + idx
alias_to_predict = (
example.alias_to_predict
) # Stores -1 if dev data and false anchor
alias_list_pos = example.alias_list_pos
span_start_idx, span_end_idx = example.span
alias = example.alias
qid = example.qid
candidate_sentence_input_ids = (
np.ones(max_total_input_len) * tokenizer.pad_token_id
)
candidate_sentence_attn_msks = np.ones(max_total_input_len) * 0
candidate_sentence_token_type_ids = np.ones(max_total_input_len) * 0
candidate_mention_cnt_ratio = np.ones(max_total_input_len) * -1
# ===========================================================
# GET GOLD LABEL
# ===========================================================
# generate indexes into alias table; -2 if unk
if not entitysymbols.alias_exists(alias):
# if we do not have this alias in our set, we give it an index of -2, meaning we will
# always get it wrong in eval
assert split in ["test", "dev"], (
f"Expected split of 'test' or 'dev'. If you are training, "
f"the alias {alias} must be in our entity dump"
)
alias_trie_idx = -2
alias_qids = []
else:
alias_trie_idx = entitysymbols.get_alias_idx(alias)
alias_qids = entitysymbols.get_qid_cands(alias)
# EID used in generating labels in dataloader - will set to 0 for NC
eid = -1
# EID used in final prediction dumping - keep as gold EID
for_dump_eid = -1
if entitysymbols.qid_exists(qid):
eid = entitysymbols.get_eid(qid)
for_dump_eid = eid
if qid not in alias_qids:
# if we are not training in candidates, we only assign 0 correct id if the alias is in our map;
# otherwise we assign -2
if not train_in_candidates and alias_trie_idx != -2:
# set class label to be "not in candidate set"
gold_cand_K_idx = 0
eid = 0
else:
# if we are not using an NC (no candidate) class but are in eval mode, we allow the
# gold candidate to be missing from the candidate set and give it a true index of -2,
# meaning our model will always get this example incorrect
assert split in ["test", "dev"], (
f"Expected split of 'test' or 'dev' in sent {example.sent_idx}. If you are training, "
f"the QID {qid} must be in the candidate list for {alias} for "
f"data_args.train_in_candidates to be True"
)
gold_cand_K_idx = -2
else:
# Here we are getting the correct class label for training.
# Our training is "which of the max_entities entity candidates is the right one
# (class labels 1 to max_entities) or is it none of these (class label 0)".
# The "+ (not train_in_candidates)" offset ensures label 0 stays
# reserved for the "not in candidate set" class
gold_cand_K_idx = np.nonzero(np.array(alias_qids) == qid)[0][0] + (
not train_in_candidates
)
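# Hypothetical example: with train_in_candidates=False and alias_qids=["Q5", "Q91"],
# qid="Q91" yields gold_cand_K_idx = 1 + 1 = 2 (index 0 is reserved for the NC class).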
assert gold_cand_K_idx < entitysymbols.max_candidates + int(
not train_in_candidates
), (
f"The qid {qid} and alias {alias} is not in the top {entitysymbols.max_candidates} max candidates. "
f"The QID must be within max candidates."
)
# Create input IDs here to ensure each entity is truncated properly
inputs = tokenizer(
example.phrase,
is_split_into_words=False,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=max_seq_len,
return_overflowing_tokens=False,
)
# In the rare case that the pre-context goes beyond max_seq_len, retokenize starting from
# [ent_start] to guarantee the start/end tokens will be there
start_tok = inputs.char_to_token(span_start_idx)
if start_tok is None:
new_phrase = example.phrase[span_start_idx:]
# Adjust spans
span_dist = span_end_idx - span_start_idx
span_start_idx = 0
span_end_idx = span_start_idx + span_dist
inputs = tokenizer(
new_phrase,
is_split_into_words=False,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=max_seq_len,
return_overflowing_tokens=False,
)
if inputs.char_to_token(span_start_idx) is None:
print("REALLY BAD")
print(example)
new_span_start = inputs.char_to_token(span_start_idx) + 1
else:
# Includes the [ent_start]; we do not want to mask that so +1
new_span_start = start_tok + 1
# -1 to index the [ent_end] token, not the token after
end_tok = inputs.char_to_token(span_end_idx - 1)
if end_tok is None:
# [ent_end] was truncated off; fall back to the end of the sequence
new_span_end = len(inputs["input_ids"])
else:
new_span_end = end_tok
final_toks = tokenizer.convert_ids_to_tokens(inputs["input_ids"])
assert (
final_toks[new_span_start - 1] == "[ent_start]"
), f"{final_toks} {new_span_start} {new_span_end} {span_start_idx} {span_end_idx}"
assert (new_span_end == len(inputs["input_ids"])) or final_toks[
new_span_end
] == "[ent_end]", f"{final_toks} {new_span_start} {new_span_end} {span_start_idx} {span_end_idx}"
candidate_sentence_input_ids[: len(inputs["input_ids"])] = inputs["input_ids"]
candidate_mention_cnt_ratio[new_span_start:new_span_end] = [
example.qid_cnt_mask_score for _ in range(new_span_start, new_span_end)
]
candidate_sentence_attn_msks[: len(inputs["attention_mask"])] = inputs[
"attention_mask"
]
candidate_sentence_token_type_ids[: len(inputs["token_type_ids"])] = inputs[
"token_type_ids"
]
# this stores the true entity pos in the candidate list we use to compute loss -
# all anchors for train and true anchors for dev/test
# leave as -1 if it's not an alias we want to predict; we get these if we split a
# sentence and need to only predict subsets
example_true_cand_positions_for_loss = PAD_ID
# this stores the true entity pos in the candidate list for all alias seen by model -
# all anchors for both train and eval
example_true_entity_eid = PAD_ID
# checks if alias is gold or not - alias_to_predict will be -1 for non gold aliases for eval
if alias_to_predict == 0:
example_true_cand_positions_for_loss = gold_cand_K_idx
example_true_entity_eid = eid
example_true_cand_positions_for_train = gold_cand_K_idx
# drop example if we have nothing to predict (no valid aliases) -- make sure this doesn't cause
# problems when we start using unk aliases...
if alias_trie_idx == PAD_ID:
logging.error(
f"There were 0 aliases in this example {example}. This shouldn't happen."
)
sys.exit(0)
total_saved_features += 1
feature = InputFeatures(
alias_idx=alias_trie_idx,
word_input_ids=candidate_sentence_input_ids,
word_token_type_ids=candidate_sentence_token_type_ids,
word_attention_mask=candidate_sentence_attn_msks,
word_qid_cnt_mask_score=candidate_mention_cnt_ratio,
gold_eid=example_true_entity_eid,
for_dump_gold_eid=for_dump_eid, # Store the one that isn't -1 for non-gold aliases
gold_cand_K_idx=example_true_cand_positions_for_loss,
for_dump_gold_cand_K_idx_train=example_true_cand_positions_for_train,
alias_list_pos=alias_list_pos,
sent_idx=int(example.sent_idx),
subsent_idx=int(example.subsent_idx),
guid=np.array(
[
(
int(example.sent_idx),
int(example.subsent_idx),
[alias_list_pos],
)
],
dtype=guid_dtype,
),
)
# Write feature
# We are storing mmap file in column format, so column name first
mmap_file["sent_idx"][example_idx] = feature.sent_idx
mmap_file["subsent_idx"][example_idx] = feature.subsent_idx
mmap_file["guids"][example_idx] = feature.guid
mmap_file["alias_idx"][example_idx] = feature.alias_idx
mmap_file["input_ids"][example_idx] = feature.word_input_ids
mmap_file["token_type_ids"][example_idx] = feature.word_token_type_ids
mmap_file["attention_mask"][example_idx] = feature.word_attention_mask
mmap_file["word_qid_cnt_mask_score"][
example_idx
] = feature.word_qid_cnt_mask_score
mmap_file["alias_orig_list_pos"][example_idx] = feature.alias_list_pos
mmap_file["for_dump_gold_cand_K_idx_train"][
example_idx
] = feature.for_dump_gold_cand_K_idx_train
mmap_file["gold_eid"][example_idx] = feature.gold_eid
mmap_file["for_dump_gold_eid"][example_idx] = feature.for_dump_gold_eid
mmap_label_file["gold_cand_K_idx"][example_idx] = feature.gold_cand_K_idx
if example_idx % ex_print_mod == 0:
# Make one string for distributed computation consistency
output_str = ""
output_str += "*** Example ***" + "\n"
output_str += (
f"guid: {example.sent_idx} subsent {example.subsent_idx}"
+ "\n"
)
output_str += f"phrase toks: {example.phrase}" + "\n"
output_str += (
f"alias_to_predict: {example.alias_to_predict}" + "\n"
)
output_str += (
f"alias_list_pos: {example.alias_list_pos}" + "\n"
)
output_str += f"aliases: {example.alias}" + "\n"
output_str += f"qids: {example.qid}" + "\n"
output_str += "*** Feature ***" + "\n"
output_str += (
f"gold_cand_K_idx: {feature.gold_cand_K_idx}" + "\n"
)
output_str += f"gold_eid: {feature.gold_eid}" + "\n"
output_str += (
f"for_dump_gold_eid: {feature.for_dump_gold_eid}"
+ "\n"
)
output_str += (
f"for_dump_gold_cand_K_idx_train: {feature.for_dump_gold_cand_K_idx_train}"
+ "\n"
)
output_str += (
f"input_ids: {' '.join([str(x) for x in feature.word_input_ids])}"
+ "\n"
)
output_str += (
f"token_type_ids: {' '.join([str(x) for x in feature.word_token_type_ids])}"
+ "\n"
)
output_str += (
f"attention_mask: {' '.join([str(x) for x in feature.word_attention_mask])}"
+ "\n"
)
output_str += (
f"word_qid_cnt_mask_score: {' '.join([str(x) for x in feature.word_qid_cnt_mask_score])}"
+ "\n"
)
output_str += f"guid: {feature.guid}" + "\n"
if print_examples:
print(output_str)
mmap_file.flush()
mmap_label_file.flush()
return total_saved_features
def build_and_save_entity_inputs_initializer(
constants,
data_config,
save_entity_dataset_name,
X_entity_storage,
tokenizer,
):
"""Create entity features multiprocessing initializer."""
global type_symbols_global
if data_config.entity_type_data.use_entity_types:
type_symbols_global = TypeSymbols.load_from_cache(
load_dir=os.path.join(
data_config.entity_dir, data_config.entity_type_data.type_symbols_dir
)
)
else:
type_symbols_global = None
global kg_symbols_global
if data_config.entity_kg_data.use_entity_kg:
kg_symbols_global = KGSymbols.load_from_cache(
load_dir=os.path.join(
data_config.entity_dir, data_config.entity_kg_data.kg_symbols_dir
)
)
else:
kg_symbols_global = None
global mmap_entity_file_global
mmap_entity_file_global = np.memmap(
save_entity_dataset_name, dtype=X_entity_storage, mode="r+"
)
global constants_global
constants_global = constants
global tokenizer_global
tokenizer_global = tokenizer
global entitysymbols_global
entitysymbols_global = EntitySymbols.load_from_cache(
load_dir=os.path.join(data_config.entity_dir, data_config.entity_map_dir),
alias_cand_map_dir=data_config.alias_cand_map,
alias_idx_dir=data_config.alias_idx_map,
)
def build_and_save_entity_inputs(
save_entity_dataset_name,
X_entity_storage,
data_config,
dataset_threads,
tokenizer,
entity_symbols,
):
"""Create entity features.
Args:
save_entity_dataset_name: memmap filename to save the entity data
X_entity_storage: storage type for memmap file
data_config: data config
dataset_threads: number of threads
tokenizer: tokenizer
entity_symbols: entity symbols
"""
num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
# IMPORTANT: for distributed writing to memmap files, you must create them in w+
# mode before being opened in r+ mode by workers
memfile = np.memmap(
save_entity_dataset_name,
dtype=X_entity_storage,
mode="w+",
shape=(entity_symbols.num_entities_with_pad_and_nocand,),
order="C",
)
# We'll use the -1 to check that things were written correctly later because at
# the end, there should be no -1
memfile["entity_token_type_ids"][:] = -1
# The memfile corresponds to eids. As eid 0 and -1 are reserved for UNK/PAD
# we need to set the values. These get a single [SEP] for title [SEP] rest of entity
empty_ent = tokenizer(
"[SEP]",
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=data_config.max_ent_len,
)
memfile["entity_input_ids"][0] = empty_ent["input_ids"][:]
memfile["entity_token_type_ids"][0] = empty_ent["token_type_ids"][:]
memfile["entity_attention_mask"][0] = empty_ent["attention_mask"][:]
memfile["entity_to_mask"][0] = [0 for _ in range(len(empty_ent["input_ids"]))]
memfile["entity_input_ids"][-1] = empty_ent["input_ids"][:]
memfile["entity_token_type_ids"][-1] = empty_ent["token_type_ids"][:]
memfile["entity_attention_mask"][-1] = empty_ent["attention_mask"][:]
memfile["entity_to_mask"][-1] = [0 for _ in range(len(empty_ent["input_ids"]))]
constants = {
"train_in_candidates": data_config.train_in_candidates,
"max_ent_len": data_config.max_ent_len,
"max_ent_type_len": data_config.entity_type_data.max_ent_type_len,
"max_ent_kg_len": data_config.entity_kg_data.max_ent_kg_len,
"use_types": data_config.entity_type_data.use_entity_types,
"use_kg": data_config.entity_kg_data.use_entity_kg,
"use_desc": data_config.use_entity_desc,
"print_examples_prep": data_config.print_examples_prep,
}
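# These constants control get_entity_string (which structural pieces to include and
# their token budgets) and the overall max_ent_len used when tokenizing each entity string.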
if num_processes == 1:
if data_config.entity_type_data.use_entity_types:
type_symbols = TypeSymbols.load_from_cache(
load_dir=os.path.join(
data_config.entity_dir,
data_config.entity_type_data.type_symbols_dir,
)
)
else:
type_symbols = None
if data_config.entity_kg_data.use_entity_kg:
kg_symbols = KGSymbols.load_from_cache(
load_dir=os.path.join(
data_config.entity_dir, data_config.entity_kg_data.kg_symbols_dir
)
)
else:
kg_symbols = None
input_qids = list(entity_symbols.get_all_qids())
num_qids, overflowed = build_and_save_entity_inputs_single(
input_qids,
constants,
memfile,
type_symbols,
kg_symbols,
tokenizer,
entity_symbols,
)
else:
input_qids = list(entity_symbols.get_all_qids())
chunk_size = int(np.ceil(len(input_qids) / num_processes))
input_chunks = [
input_qids[i : i + chunk_size]
for i in range(0, len(input_qids), chunk_size)
]
log_rank_0_debug(logger, f"Starting pool with {num_processes} processes")
pool = multiprocessing.Pool(
processes=num_processes,
initializer=build_and_save_entity_inputs_initializer,
initargs=[
constants,
data_config,
save_entity_dataset_name,
X_entity_storage,
tokenizer,
],
)
cnt = 0
overflowed = 0
for res in tqdm(
pool.imap_unordered(
build_and_save_entity_inputs_hlp, input_chunks, chunksize=1
),
total=len(input_chunks),
desc="Building entity data",
):
c, overfl = res
cnt += c
overflowed += overfl
pool.close()
log_rank_0_debug(
logger,
f"{overflowed} out of {len(input_qids)} were overflowed",
)
memfile = np.memmap(save_entity_dataset_name, dtype=X_entity_storage, mode="r")
for i in tqdm(
range(entity_symbols.num_entities_with_pad_and_nocand),
desc="Verifying entity data",
):
assert all(memfile["entity_token_type_ids"][i] != -1), f"Memfile at {i} is -1."
memfile = None
return
def build_and_save_entity_inputs_hlp(input_qids):
"""Create entity features multiprocessing helper."""
return build_and_save_entity_inputs_single(
input_qids,
constants_global,
mmap_entity_file_global,
type_symbols_global,
kg_symbols_global,
tokenizer_global,
entitysymbols_global,
)
def build_and_save_entity_inputs_single(
input_qids,
constants,
memfile,
type_symbols,
kg_symbols,
tokenizer,
entity_symbols,
):
"""Create entity features."""
printed = 0
num_overflow = 0
for qid in tqdm(input_qids, desc="Processing entities"):
ent_str, title_spans, over_type_len, over_kg_len = get_entity_string(
qid,
constants,
entity_symbols,
kg_symbols,
type_symbols,
)
inputs = tokenizer(
ent_str.split(),
is_split_into_words=True,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=constants["max_ent_len"],
)
to_mask = [0 for _ in range(len(inputs["input_ids"]))]
for title_sp in title_spans:
title_toks = inputs.word_to_tokens(title_sp)
if title_toks is None:
continue
for i in range(title_toks.start, title_toks.end):
to_mask[i] = 1
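# to_mask now flags the wordpiece positions of the title tokens; these are the positions
# that _mask_entity_input_ids later masks with a popularity-dependent probability.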
# Heuristic overflow check: if the final token is [SEP] (not padding), the entity
# string filled max_ent_len and was likely truncated
if inputs["input_ids"][-1] == tokenizer.sep_token_id:
num_overflow += 1
if printed < 8 and constants["print_examples_prep"]:
print("QID:", qid)
print("TITLE:", entity_symbols.get_title(qid))
print("ENT STR:", ent_str)
print("INPUTS:", inputs)
print("TITLE SPANS:", title_spans)
print("TO MASK:", to_mask)
print(tokenizer.convert_ids_to_tokens(np.array(inputs["input_ids"])))
printed += 1
eid = entity_symbols.get_eid(qid)
for k, value in inputs.items():
memfile[f"entity_{k}"][eid] = value
memfile["entity_to_mask"][eid] = to_mask
memfile.flush()
return len(input_qids), num_overflow
class BootlegDataset(EmmentalDataset):
"""Bootleg Dataset class.
Args:
main_args: input config
name: internal dataset name
dataset: dataset file
use_weak_label: whether to use weakly labeled mentions or not
load_entity_data: whether to load entity data or not
tokenizer: sentence tokenizer
entity_symbols: entity database class
dataset_threads: number of threads to use
split: data split
is_bert: is the tokenizer a BERT or not
dataset_range: offset into dataset
"""
def __init__(
self,
main_args,
name,
dataset,
use_weak_label,
load_entity_data,
tokenizer,
entity_symbols,
dataset_threads,
split="train",
is_bert=True,
dataset_range=None,
):
"""Bootleg dataset initializer."""
log_rank_0_info(
logger,
f"Starting to build data for {split} from {dataset}",
)
global_start = time.time()
data_config = main_args.data_config
spawn_method = main_args.run_config.spawn_method
log_rank_0_debug(logger, f"Setting spawn method to be {spawn_method}")
orig_spawn = multiprocessing.get_start_method()
multiprocessing.set_start_method(spawn_method, force=True)
# Unique identifier is sentence index, subsentence index (due to sentence splitting), and aliases in split
guid_dtype = np.dtype(
[
("sent_idx", "i8", 1),
("subsent_idx", "i8", 1),
("alias_orig_list_pos", "i8", (1,)),
]
)
max_total_input_len = data_config.max_seq_len
# Storage for saving the data.
self.X_storage, self.Y_storage, self.X_entity_storage = (
[
("guids", guid_dtype, 1),
("sent_idx", "i8", 1),
("subsent_idx", "i8", 1),
("alias_idx", "i8", 1),
(
"input_ids",
"i8",
(max_total_input_len,),
),
(
"token_type_ids",
"i8",
(max_total_input_len,),
),
(
"attention_mask",
"i8",
(max_total_input_len,),
),
(
"word_qid_cnt_mask_score",
"float",
(max_total_input_len,),
),
("alias_orig_list_pos", "i8", 1),
(
"gold_eid",
"i8",
1,
), # What the eid of the gold entity is
(
"for_dump_gold_eid",
"i8",
1,
), # What the eid of the gold entity is independent of gold alias or not
(
"for_dump_gold_cand_K_idx_train",
"i8",
1,
), # Which of the K candidates is correct. Only used in dump_pred to stitch sub-sentences together
],
[
(
"gold_cand_K_idx",
"i8",
1,
), # Which of the K candidates is correct.
],
[
("entity_input_ids", "i8", (data_config.max_ent_len)),
("entity_token_type_ids", "i8", (data_config.max_ent_len)),
("entity_attention_mask", "i8", (data_config.max_ent_len)),
("entity_to_mask", "i8", (data_config.max_ent_len)),
],
)
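# X_storage describes the per-mention memmap record, Y_storage the per-mention label
# record, and X_entity_storage the per-entity record used for the entity token memmap.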
self.split = split
self.popularity_mask = data_config.popularity_mask
self.context_mask_perc = data_config.context_mask_perc
self.tokenizer = tokenizer
# Table to map from alias_idx to entity_cand_eid used in the __get_item__
self.alias2cands_model = AliasEntityTable(
data_config=data_config, entity_symbols=entity_symbols
)
# Total number of entities used in the __get_item__
self.num_entities_with_pad_and_nocand = (
entity_symbols.num_entities_with_pad_and_nocand
)
self.raw_filename = dataset
# Folder for all mmap saved files
save_dataset_folder = data_utils.get_save_data_folder(
data_config, use_weak_label, self.raw_filename
)
utils.ensure_dir(save_dataset_folder)
# Folder for entity mmap saved files
save_entity_folder = data_utils.get_emb_prep_dir(data_config)
utils.ensure_dir(save_entity_folder)
# Folder for temporary output files
temp_output_folder = os.path.join(
data_config.data_dir,
data_config.data_prep_dir,
f"prep_{split}_dataset_files",
)
utils.ensure_dir(temp_output_folder)
# Input step 1
create_ex_indir = os.path.join(temp_output_folder, "create_examples_input")
utils.ensure_dir(create_ex_indir)
# Input step 2
create_ex_outdir = os.path.join(temp_output_folder, "create_examples_output")
utils.ensure_dir(create_ex_outdir)
# Meta data saved files
meta_file = os.path.join(temp_output_folder, "meta_data.json")
# File for standard training data
self.save_dataset_name = os.path.join(save_dataset_folder, "ned_data.bin")
# File for standard labels
self.save_labels_name = os.path.join(save_dataset_folder, "ned_label.bin")
# File for entity token data
self.save_entity_dataset_name = None
# =======================================================================================
# =======================================================================================
# =======================================================================================
# STANDARD DISAMBIGUATION
# =======================================================================================
# =======================================================================================
# =======================================================================================
log_rank_0_debug(
logger,
f"Seeing if {self.save_dataset_name} exists and {self.save_labels_name} exists",
)
if (
data_config.overwrite_preprocessed_data
or (not os.path.exists(self.save_dataset_name))
or (not os.path.exists(self.save_labels_name))
):
st_time = time.time()
log_rank_0_info(
logger,
f"Building dataset from scratch. Saving to {save_dataset_folder}.",
)
create_examples(
dataset,
create_ex_indir,
create_ex_outdir,
meta_file,
data_config,
dataset_threads,
use_weak_label,
split,
is_bert,
tokenizer,
)
try:
convert_examples_to_features_and_save(
meta_file,
guid_dtype,
data_config,
dataset_threads,
use_weak_label,
split,
is_bert,
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
tokenizer,
entity_symbols,
)
log_rank_0_debug(
logger,
f"Finished prepping disambig training data in {time.time() - st_time}",
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error(traceback.format_exc())
logger.error("\n".join(tb.stack.format()))
os.remove(self.save_dataset_name)
os.remove(self.save_labels_name)
shutil.rmtree(save_dataset_folder, ignore_errors=True)
raise
log_rank_0_info(
logger,
f"Loading data from {self.save_dataset_name} and {self.save_labels_name}",
)
X_dict, Y_dict = self.build_data_dicts(
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
)
# =======================================================================================
# =======================================================================================
# =======================================================================================
# ENTITY TOKENS
# =======================================================================================
# =======================================================================================
# =======================================================================================
self.save_entity_dataset_name = os.path.join(
save_entity_folder,
f"entity_data"
f"_type{int(data_config.entity_type_data.use_entity_types)}"
f"_kg{int(data_config.entity_kg_data.use_entity_kg)}"
f"_desc{int(data_config.use_entity_desc)}.bin",
)
log_rank_0_debug(logger, f"Seeing if {self.save_entity_dataset_name} exists")
if load_entity_data:
if data_config.overwrite_preprocessed_data or (
not os.path.exists(self.save_entity_dataset_name)
):
st_time = time.time()
log_rank_0_info(logger, "Building entity data from scratch.")
try:
# Creating/saving data
build_and_save_entity_inputs(
self.save_entity_dataset_name,
self.X_entity_storage,
data_config,
dataset_threads,
tokenizer,
entity_symbols,
)
log_rank_0_debug(
logger, f"Finished prepping data in {time.time() - st_time}"
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error(traceback.format_exc())
logger.error("\n".join(tb.stack.format()))
os.remove(self.save_entity_dataset_name)
raise
X_entity_dict = self.build_data_entity_dicts(
self.save_entity_dataset_name, self.X_entity_storage
)
self.X_entity_dict = X_entity_dict
else:
self.X_entity_dict = None
log_rank_0_debug(logger, "Removing temporary output files")
shutil.rmtree(temp_output_folder, ignore_errors=True)
log_rank_0_info(
logger,
f"Final data initialization time for {split} is {time.time() - global_start}s",
)
self.dataset_range = (
list(range(len(X_dict[next(iter(X_dict.keys()))])))
if dataset_range is None
else dataset_range
)
# Set spawn back to original/default, which is "fork" or "spawn".
# This is needed for the Meta.config to be correctly passed in the collate_fn.
multiprocessing.set_start_method(orig_spawn, force=True)
super().__init__(name, X_dict=X_dict, Y_dict=Y_dict, uid="guids")
@classmethod
def build_data_dicts(
cls, save_dataset_name, save_labels_name, X_storage, Y_storage
):
"""Return the X_dict and Y_dict of inputs and labels.
Args:
save_dataset_name: memmap file name with inputs
save_labels_name: memmap file name with labels
X_storage: memmap storage for inputs
Y_storage: memmap storage labels
Returns: X_dict of inputs and Y_dict of labels for Emmental datasets
"""
X_dict, Y_dict = (
{
"guids": [],
"sent_idx": [],
"subsent_idx": [],
"alias_idx": [],
"input_ids": [],
"token_type_ids": [],
"attention_mask": [],
"word_qid_cnt_mask_score": [],
"alias_orig_list_pos": [], # list of original position in the alias list this example is (see eval)
"gold_eid": [], # List of gold entity eids
"for_dump_gold_eid": [],
"for_dump_gold_cand_K_idx_train": [], # list of gold indices without subsentence masking (see eval)
},
{
"gold_cand_K_idx": [],
},
)
mmap_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
mmap_label_file = np.memmap(save_labels_name, dtype=Y_storage, mode="r")
X_dict["sent_idx"] = torch.from_numpy(mmap_file["sent_idx"])
X_dict["subsent_idx"] = torch.from_numpy(mmap_file["subsent_idx"])
X_dict["guids"] = mmap_file["guids"] # uid doesn't need to be tensor
X_dict["alias_idx"] = torch.from_numpy(mmap_file["alias_idx"])
X_dict["input_ids"] = torch.from_numpy(mmap_file["input_ids"])
X_dict["token_type_ids"] = torch.from_numpy(mmap_file["token_type_ids"])
X_dict["attention_mask"] = torch.from_numpy(mmap_file["attention_mask"])
X_dict["word_qid_cnt_mask_score"] = torch.from_numpy(
mmap_file["word_qid_cnt_mask_score"]
)
X_dict["alias_orig_list_pos"] = torch.from_numpy(
mmap_file["alias_orig_list_pos"]
)
X_dict["gold_eid"] = torch.from_numpy(mmap_file["gold_eid"])
X_dict["for_dump_gold_eid"] = torch.from_numpy(mmap_file["for_dump_gold_eid"])
X_dict["for_dump_gold_cand_K_idx_train"] = torch.from_numpy(
mmap_file["for_dump_gold_cand_K_idx_train"]
)
Y_dict["gold_cand_K_idx"] = torch.from_numpy(mmap_label_file["gold_cand_K_idx"])
return X_dict, Y_dict
@classmethod
def build_data_entity_dicts(cls, save_dataset_name, X_storage):
"""Return the X_dict for the entity data.
Args:
save_dataset_name: memmap file name with entity data
X_storage: memmap storage type
Returns: Dict of labels
"""
X_dict = {
"entity_input_ids": [],
"entity_token_type_ids": [],
"entity_attention_mask": [],
"entity_to_mask": [],
}
mmap_label_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
X_dict["entity_input_ids"] = torch.from_numpy(
mmap_label_file["entity_input_ids"]
)
X_dict["entity_token_type_ids"] = torch.from_numpy(
mmap_label_file["entity_token_type_ids"]
)
X_dict["entity_attention_mask"] = torch.from_numpy(
mmap_label_file["entity_attention_mask"]
)
X_dict["entity_to_mask"] = torch.from_numpy(mmap_label_file["entity_to_mask"])
return X_dict
def get_sentidx_to_rowids(self):
"""Get mapping from sent idx to row id in X_dict.
Returns: Dict of sent idx to row id
"""
sentidx2rowids = defaultdict(list)
for i, sent_id in enumerate(self.X_dict["sent_idx"]):
# Saving/loading dict will convert numeric keys to strings - keep consistent
sentidx2rowids[str(sent_id.item())].append(i)
return dict(sentidx2rowids)
def __getitem__(self, index):
r"""Get item by index.
Args:
index(index): The index of the item.
Returns:
Tuple[Dict[str, Any], Dict[str, Tensor]]: Tuple of x_dict and y_dict
"""
index = self.dataset_range[index]
x_dict = {name: feature[index] for name, feature in self.X_dict.items()}
y_dict = {name: label[index] for name, label in self.Y_dict.items()}
# Mask the mention tokens
if self.split == "train" and self.popularity_mask:
input_ids = self._mask_input_ids(x_dict)
x_dict["input_ids"] = input_ids
# Get the entity_cand_eid
entity_cand_eid = self.alias2cands_model(x_dict["alias_idx"]).long()
if self.X_entity_dict is not None:
entity_cand_input_ids = []
entity_cand_token_type_ids = []
entity_cand_attention_mask = []
# Get the entity token ids
for eid in entity_cand_eid:
if self.split == "train" and self.popularity_mask:
entity_input_ids = self._mask_entity_input_ids(x_dict, eid)
else:
entity_input_ids = self.X_entity_dict["entity_input_ids"][eid]
entity_cand_input_ids.append(entity_input_ids)
entity_cand_token_type_ids.append(
self.X_entity_dict["entity_token_type_ids"][eid]
)
entity_cand_attention_mask.append(
self.X_entity_dict["entity_attention_mask"][eid]
)
# Create M x K x token length
x_dict["entity_cand_input_ids"] = torch.stack(entity_cand_input_ids, dim=0)
x_dict["entity_cand_token_type_ids"] = torch.stack(
entity_cand_token_type_ids, dim=0
)
x_dict["entity_cand_attention_mask"] = torch.stack(
entity_cand_attention_mask, dim=0
)
x_dict["entity_cand_eval_mask"] = entity_cand_eid == -1
# Handles the index errors with -1 indexing into an embedding
x_dict["entity_cand_eid"] = torch.where(
entity_cand_eid >= 0,
entity_cand_eid,
(
torch.ones_like(entity_cand_eid, dtype=torch.long)
* (self.num_entities_with_pad_and_nocand - 1)
),
)
# Add dummy gold_unq_eid_idx for Emmental init - this gets overwritten in the collator in data.py
y_dict["gold_unq_eid_idx"] = y_dict["gold_cand_K_idx"]
return x_dict, y_dict
def _mask_input_ids(self, x_dict):
"""
Mask input context ids.
Mask the entity mention with high probability, especially if rare.
Further mask context tokens with probability context_mask_perc.
"""
# Get core dump if you don't do this
input_ids = torch.clone(x_dict["input_ids"])
cnt_ratio = x_dict["word_qid_cnt_mask_score"]
probability_matrix = torch.full(cnt_ratio.shape, 0.0)
fill_v = 0.0
if torch.any((0.0 <= cnt_ratio) & (cnt_ratio < 0.5)):
fill_v = 0.5
elif torch.any((0.5 <= cnt_ratio) & (cnt_ratio < 0.65)):
fill_v = 0.62
elif torch.any((0.65 <= cnt_ratio) & (cnt_ratio < 0.8)):
fill_v = 0.73
elif torch.any((0.8 <= cnt_ratio) & (cnt_ratio < 0.95)):
fill_v = 0.84
elif torch.any(0.95 <= cnt_ratio):
fill_v = 0.95
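# The if/elif ladder above picks a single mask probability from the first matching
# popularity bucket; it is applied below to every mention position (cnt_ratio >= 0).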
probability_matrix.masked_fill_(cnt_ratio >= 0.0, value=fill_v)
masked_indices = torch.bernoulli(probability_matrix).bool()
input_ids.masked_fill_(
masked_indices,
value=self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token),
)
# Mask all tokens by context_mask_perc
if self.context_mask_perc > 0.0:
input_ids_clone = input_ids.clone()
# We sample a few tokens in each sequence
probability_matrix = torch.full(
input_ids_clone.shape, self.context_mask_perc
)
special_tokens_mask = self.tokenizer.get_special_tokens_mask(
input_ids.tolist(), already_has_special_tokens=True
)
probability_matrix.masked_fill_(
torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0
)
if self.tokenizer._pad_token is not None:
padding_mask = input_ids.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
input_ids_clone[masked_indices] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
input_ids = input_ids_clone
return input_ids
def _mask_entity_input_ids(self, x_dict, eid):
"""
Mask entity input ids.
Mask the entity to_mask index with high probability, especially if
mention is rare.
"""
# Get core dump if you don't do this
entity_input_ids = torch.clone(self.X_entity_dict["entity_input_ids"][eid])
cnt_ratio = x_dict["word_qid_cnt_mask_score"]
probability_matrix = torch.tensor(
self.X_entity_dict["entity_to_mask"][eid]
).float()
fill_v = 0.0
if torch.any((0.0 <= cnt_ratio) & (cnt_ratio < 0.5)):
fill_v = 0.5
elif torch.any((0.5 <= cnt_ratio) & (cnt_ratio < 0.65)):
fill_v = 0.62
elif torch.any((0.65 <= cnt_ratio) & (cnt_ratio < 0.8)):
fill_v = 0.73
elif torch.any((0.8 <= cnt_ratio) & (cnt_ratio < 0.95)):
fill_v = 0.84
elif torch.any(0.95 <= cnt_ratio):
fill_v = 0.95
probability_matrix.masked_fill_(probability_matrix > 0.0, value=fill_v)
masked_indices = torch.bernoulli(probability_matrix).bool()
entity_input_ids.masked_fill_(
masked_indices, value=self.tokenizer.convert_tokens_to_ids("[MASK]")
)
return entity_input_ids
def __getstate__(self):
"""Get state."""
state = self.__dict__.copy()
del state["X_dict"]
del state["Y_dict"]
return state
def __setstate__(self, state):
"""Set state."""
self.__dict__.update(state)
self.X_dict, self.Y_dict = self.build_data_dicts(
self.save_dataset_name,
self.save_labels_name,
self.X_storage,
self.Y_storage,
)
return state
def __repr__(self):
"""Repr."""
return (
f"Bootleg Dataset. Data at {self.save_dataset_name}. "
f"Labels at {self.save_labels_name}. "
)
def __len__(self):
"""Length."""
return len(self.dataset_range)
class BootlegEntityDataset(EmmentalDataset):
"""Bootleg Dataset class for entities.
Args:
main_args: input config
name: internal dataset name
dataset: dataset file
tokenizer: sentence tokenizer
entity_symbols: entity database class
dataset_threads: number of threads to use
split: data split
"""
def __init__(
self,
main_args,
name,
dataset,
tokenizer,
entity_symbols,
dataset_threads,
split="test",
):
"""Bootleg entity dataset initializer."""
assert split == "test", "Split must be test split for EntityDataset"
log_rank_0_info(
logger,
f"Starting to build data for {split} from {dataset}",
)
global_start = time.time()
data_config = main_args.data_config
spawn_method = main_args.run_config.spawn_method
log_rank_0_debug(logger, f"Setting spawn method to be {spawn_method}")
orig_spawn = multiprocessing.get_start_method()
multiprocessing.set_start_method(spawn_method, force=True)
# Storage for saving the data.
self.X_entity_storage = [
("entity_input_ids", "i8", (data_config.max_ent_len)),
("entity_token_type_ids", "i8", (data_config.max_ent_len)),
("entity_attention_mask", "i8", (data_config.max_ent_len)),
("entity_to_mask", "i8", (data_config.max_ent_len)),
]
self.split = split
self.popularity_mask = data_config.popularity_mask
self.context_mask_perc = data_config.context_mask_perc
self.tokenizer = tokenizer
# Table to map from alias_idx to entity_cand_eid used in the __get_item__
self.alias2cands_model = AliasEntityTable(
data_config=data_config, entity_symbols=entity_symbols
)
# Total number of entities used in the __get_item__
self.num_entities_with_pad_and_nocand = (
entity_symbols.num_entities_with_pad_and_nocand
)
# Folder for entity mmap saved files
save_entity_folder = data_utils.get_emb_prep_dir(data_config)
utils.ensure_dir(save_entity_folder)
# =======================================================================================
# =======================================================================================
# =======================================================================================
# ENTITY TOKENS
# =======================================================================================
# =======================================================================================
# =======================================================================================
self.save_entity_dataset_name = os.path.join(
save_entity_folder,
f"entity_data"
f"_type{int(data_config.entity_type_data.use_entity_types)}"
f"_kg{int(data_config.entity_kg_data.use_entity_kg)}"
f"_desc{int(data_config.use_entity_desc)}.bin",
)
log_rank_0_debug(logger, f"Seeing if {self.save_entity_dataset_name} exists")
if data_config.overwrite_preprocessed_data or (
not os.path.exists(self.save_entity_dataset_name)
):
st_time = time.time()
log_rank_0_info(logger, "Building entity data from scratch.")
try:
# Creating/saving data
build_and_save_entity_inputs(
self.save_entity_dataset_name,
self.X_entity_storage,
data_config,
dataset_threads,
tokenizer,
entity_symbols,
)
log_rank_0_debug(
logger, f"Finished prepping data in {time.time() - st_time}"
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error(traceback.format_exc())
logger.error("\n".join(tb.stack.format()))
os.remove(self.save_entity_dataset_name)
raise
X_entity_dict = self.build_data_entity_dicts(
self.save_entity_dataset_name, self.X_entity_storage
)
# Add the unique identified of EID (the embeddings are already in this order)
X_entity_dict["guids"] = torch.arange(len(X_entity_dict["entity_input_ids"]))
log_rank_0_info(
logger,
f"Final data initialization time for {split} is {time.time() - global_start}s",
)
# Set spawn back to original/default, which is "fork" or "spawn".
# This is needed for the Meta.config to be correctly passed in the collate_fn.
multiprocessing.set_start_method(orig_spawn, force=True)
super().__init__(name, X_dict=X_entity_dict, uid="guids")
@classmethod
def build_data_entity_dicts(cls, save_dataset_name, X_storage):
"""Return the X_dict for the entity data.
Args:
save_dataset_name: memmap file name with entity data
X_storage: memmap storage type
Returns: Dict of labels
"""
X_dict = {
"entity_input_ids": [],
"entity_token_type_ids": [],
"entity_attention_mask": [],
"entity_to_mask": [],
}
mmap_label_file = np.memmap(save_dataset_name, dtype=X_storage, mode="r")
X_dict["entity_input_ids"] = torch.from_numpy(
mmap_label_file["entity_input_ids"]
)
X_dict["entity_token_type_ids"] = torch.from_numpy(
mmap_label_file["entity_token_type_ids"]
)
X_dict["entity_attention_mask"] = torch.from_numpy(
mmap_label_file["entity_attention_mask"]
)
X_dict["entity_to_mask"] = torch.from_numpy(mmap_label_file["entity_to_mask"])
return X_dict
def __getitem__(self, index):
r"""Get item by index.
Args:
index(index): The index of the item.
Returns:
Dict[str, Any]: x_dict of entity features
"""
x_dict = {name: feature[index] for name, feature in self.X_dict.items()}
return x_dict
def __getstate__(self):
"""Get state."""
state = self.__dict__.copy()
del state["X_dict"]
del state["Y_dict"]
return state
def __setstate__(self, state):
"""Set state."""
self.__dict__.update(state)
return state
def __repr__(self):
"""Repr."""
return f"Bootleg Entity Dataset. Data at {self.save_entity_dataset_name}."
| bootleg-master | bootleg/dataset.py |
"""Bootleg run command."""
import argparse
import logging
import os
import subprocess
import sys
from copy import copy
import emmental
import numpy as np
import torch
from emmental.model import EmmentalModel
from rich.logging import RichHandler
from transformers import AutoTokenizer
from bootleg import log_rank_0_info
from bootleg.data import get_entity_dataloaders
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.task_config import NED_TASK
from bootleg.tasks import entity_gen_task
from bootleg.utils import data_utils
from bootleg.utils.parser.parser_utils import parse_boot_and_emm_args
from bootleg.utils.utils import (
dump_yaml_file,
load_yaml_file,
recurse_redict,
write_to_file,
)
logger = logging.getLogger(__name__)
def parse_cmdline_args():
"""
Parse command line.
Takes an input config file and parses it into the correct subdictionary
groups for the model.
Returns:
parsed Dict config
path to the original config file
"""
# Parse cmdline args to specify config and mode
cli_parser = argparse.ArgumentParser(
description="Bootleg CLI Config",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cli_parser.add_argument(
"--config_script",
type=str,
default="",
help="Should mimic the config_args found in utils/parser/bootleg_args.py with parameters you want to override."
"You can also override the parameters from config_script by passing them in directly after config_script. "
"E.g., --train_config.batch_size 5",
)
# you can add other args that will override those in the config_script
# parse_known_args returns 'args' that are the same as what parse_args() returns
# and 'unknown' which are args that the parser doesn't recognize but you want to keep.
# 'unknown' are what we pass on to our override any args from the second phase of arg parsing from the json file
cli_args, unknown = cli_parser.parse_known_args()
if len(cli_args.config_script) == 0:
raise ValueError("You must pass a config script via --config_script.")
config = parse_boot_and_emm_args(cli_args.config_script, unknown)
# Modify the local rank param from the cli args
config.learner_config.local_rank = int(os.getenv("LOCAL_RANK", -1))
return config, cli_args.config_script
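# Example invocation (hypothetical config path):
#   python3 bootleg/extract_all_entities.py --config_script configs/my_config.yaml \
#       --train_config.batch_size 32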
def setup(config, run_config_path=None):
"""
Set distributed backend and save configuration files.
Args:
config: config
run_config_path: path for original run config
"""
torch.multiprocessing.set_sharing_strategy("file_system")
# spawn method must be fork to work with Meta.config
torch.multiprocessing.set_start_method("fork", force=True)
"""
ulimit -n 500000
python3 -m torch.distributed.launch --nproc_per_node=2 bootleg/run.py --config_script ...
"""
log_level = logging.getLevelName(config.run_config.log_level.upper())
emmental.init(
log_dir=config["meta_config"]["log_path"],
config=config,
use_exact_log_path=config["meta_config"]["use_exact_log_path"],
local_rank=config.learner_config.local_rank,
level=log_level,
)
log = logging.getLogger()
# Remove streaming handlers and use rich
log.handlers = [h for h in log.handlers if not type(h) is logging.StreamHandler]
log.addHandler(RichHandler())
# Set up distributed backend
emmental.Meta.init_distributed_backend()
cmd_msg = " ".join(sys.argv)
# Recast to dictionaries for emmental - will remove Dotteddicts
emmental.Meta.config = recurse_redict(copy(emmental.Meta.config))
# Log configuration into files
if config.learner_config.local_rank in [0, -1]:
write_to_file(f"{emmental.Meta.log_path}/cmd.txt", cmd_msg)
dump_yaml_file(
f"{emmental.Meta.log_path}/parsed_config.yaml", emmental.Meta.config
)
# Dump the run config (does not contain defaults)
if run_config_path is not None:
dump_yaml_file(
f"{emmental.Meta.log_path}/run_config.yaml",
load_yaml_file(run_config_path),
)
log_rank_0_info(logger, f"COMMAND: {cmd_msg}")
log_rank_0_info(
logger, f"Saving config to {emmental.Meta.log_path}/parsed_config.yaml"
)
git_hash = "Not able to retrieve git hash"
try:
git_hash = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=tformat:%h-%ad", "--date=short"]
).strip()
except subprocess.CalledProcessError:
pass
log_rank_0_info(logger, f"Git Hash: {git_hash}")
def run_model(config, run_config_path=None):
"""
Run Emmental Bootleg model.
Args:
config: parsed model config
run_config_path: original config path (for saving)
"""
# Set up distributed backend and save configuration files
setup(config, run_config_path)
# Load entity symbols
log_rank_0_info(logger, "Loading entity symbols...")
entity_symbols = EntitySymbols.load_from_cache(
load_dir=os.path.join(
config.data_config.entity_dir, config.data_config.entity_map_dir
),
alias_cand_map_dir=config.data_config.alias_cand_map,
alias_idx_dir=config.data_config.alias_idx_map,
)
qid2eid = entity_symbols.get_qid2eid_dict()
eid2qid = {v: k for k, v in qid2eid.items()}
assert len(qid2eid) == len(eid2qid), "Duplicate EIDs detected"
# Create tasks
tasks = [NED_TASK]
# Create tokenizer
context_tokenizer = AutoTokenizer.from_pretrained(
config.data_config.word_embedding.bert_model
)
data_utils.add_special_tokens(context_tokenizer)
# Gets dataloader - will set the split to be TEST even though there is no eval file used to generate entities
dataloader = get_entity_dataloaders(
config,
tasks,
entity_symbols,
context_tokenizer,
)
# Create models and add tasks
log_rank_0_info(logger, "Starting Bootleg Model")
model_name = "Bootleg"
model = EmmentalModel(name=model_name)
model.add_task(
entity_gen_task.create_task(
config,
len(context_tokenizer),
)
)
# Load the best model from the pretrained model
if config["model_config"]["model_path"] is not None:
model.load(config["model_config"]["model_path"])
# This happens inside EmmentalLearner for training
if (
config["learner_config"]["local_rank"] == -1
and config["model_config"]["dataparallel"]
):
model._to_dataparallel()
preds = model.predict(dataloader, return_preds=True, return_action_outputs=False)
final_out_emb_file = os.path.join(emmental.Meta.log_path, "entity_embeddings.npy")
log_rank_0_info(logger, f"Saving entity embeddings into {final_out_emb_file}")
log_rank_0_info(
logger,
"Use the entity profile's ```get_eid``` command to get the emb ids for QIDs",
)
np.save(final_out_emb_file, np.array(preds["probs"][NED_TASK]))
return final_out_emb_file
if __name__ == "__main__":
config, run_config_path = parse_cmdline_args()
run_model(config, run_config_path)
| bootleg-master | bootleg/extract_all_entities.py |
"""Bootleg data creation."""
import copy
import logging
import os
from collections import defaultdict
from typing import Any, Dict, List, Tuple, Union
import torch
from emmental import Meta
from emmental.data import EmmentalDataLoader, emmental_collate_fn
from emmental.utils.utils import list_to_tensor
from torch.utils.data import DistributedSampler, RandomSampler
from bootleg import log_rank_0_info
from bootleg.dataset import BootlegDataset, BootlegEntityDataset
from bootleg.slicing.slice_dataset import BootlegSliceDataset
from bootleg.task_config import BATCH_CANDS_LABEL, CANDS_LABEL
logger = logging.getLogger(__name__)
def get_slicedatasets(args, splits, entity_symbols):
"""Get the slice datasets.
Args:
args: main args
splits: splits to get datasets for
entity_symbols: entity symbols
Returns: Dict of slice datasets
"""
datasets = {}
for split in splits:
dataset_path = os.path.join(
args.data_config.data_dir, args.data_config[f"{split}_dataset"].file
)
datasets[split] = BootlegSliceDataset(
main_args=args,
dataset=dataset_path,
use_weak_label=args.data_config[f"{split}_dataset"].use_weak_label,
entity_symbols=entity_symbols,
dataset_threads=args.run_config.dataset_threads,
split=split,
)
return datasets
def get_dataloaders(
args,
tasks,
use_batch_cands,
load_entity_data,
splits,
entity_symbols,
tokenizer,
dataset_offsets: Dict[str, List[int]] = None,
):
"""Get the dataloaders.
Args:
args: main args
tasks: task names
use_batch_cands: whether to use candidates across a batch (train and eval_batch_cands)
load_entity_data: whether to load entity data
splits: data splits to generate dataloaders for
        entity_symbols: entity symbols
        tokenizer: word tokenizer
dataset_offsets: [start, end] offsets for each split to index into the dataset. Dataset len is end-start.
If end is None, end is the length of the dataset.
Returns: list of dataloaders
"""
if dataset_offsets is None:
dataset_offsets = {split: None for split in splits}
task_to_label_dict = {
t: BATCH_CANDS_LABEL if use_batch_cands else CANDS_LABEL for t in tasks
}
is_bert = True
datasets = {}
for split in splits:
if dataset_offsets[split] is not None and not isinstance(
dataset_offsets[split], list
):
raise TypeError(
"dataset_offsets must be dict from split to list of indexes to subselect."
)
dataset_path = os.path.join(
args.data_config.data_dir, args.data_config[f"{split}_dataset"].file
)
datasets[split] = BootlegDataset(
main_args=args,
name="Bootleg",
dataset=dataset_path,
use_weak_label=args.data_config[f"{split}_dataset"].use_weak_label,
load_entity_data=load_entity_data,
tokenizer=tokenizer,
entity_symbols=entity_symbols,
dataset_threads=args.run_config.dataset_threads,
split=split,
is_bert=is_bert,
dataset_range=dataset_offsets[split],
)
dataloaders = []
for split, dataset in datasets.items():
if split in args.learner_config.train_split:
dataset_sampler = (
RandomSampler(dataset)
if Meta.config["learner_config"]["local_rank"] == -1
else DistributedSampler(
dataset, seed=Meta.config["meta_config"]["seed"]
)
)
else:
dataset_sampler = None
if Meta.config["learner_config"]["local_rank"] != -1:
log_rank_0_info(
logger,
"You are using distributed computing for eval. We are not using a distributed sampler. "
"Please use DataParallel and not DDP.",
)
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=dataset,
sampler=dataset_sampler,
split=split,
collate_fn=bootleg_collate_fn
if use_batch_cands
else emmental_collate_fn,
batch_size=args.train_config.batch_size
if split in args.learner_config.train_split
or args.run_config.eval_batch_size is None
else args.run_config.eval_batch_size,
num_workers=args.run_config.dataloader_threads,
pin_memory=False,
)
)
log_rank_0_info(
logger,
f"Built dataloader for {split} set with {len(dataset)} and {args.run_config.dataloader_threads} threads "
f"samples (Shuffle={split in args.learner_config.train_split}, "
f"Batch size={dataloaders[-1].batch_size}).",
)
return dataloaders
def get_entity_dataloaders(
args,
tasks,
entity_symbols,
tokenizer,
):
"""Get the entity dataloaders.
Args:
args: main args
tasks: task names
        entity_symbols: entity symbols
        tokenizer: word tokenizer
    Returns: dataloader
"""
task_to_label_dict = {t: None for t in tasks}
split = "test"
dataset_path = os.path.join(
args.data_config.data_dir, args.data_config[f"{split}_dataset"].file
)
dataset = BootlegEntityDataset(
main_args=args,
name="Bootleg",
dataset=dataset_path,
tokenizer=tokenizer,
entity_symbols=entity_symbols,
dataset_threads=args.run_config.dataset_threads,
split=split,
)
dataset_sampler = None
if Meta.config["learner_config"]["local_rank"] != -1:
log_rank_0_info(
logger,
"You are using distributed computing for eval. We are not using a distributed sampler. "
"Please use DataParallel and not DDP.",
)
dataloader = EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=dataset,
sampler=dataset_sampler,
split=split,
collate_fn=emmental_collate_fn,
batch_size=args.train_config.batch_size
if split in args.learner_config.train_split
or args.run_config.eval_batch_size is None
else args.run_config.eval_batch_size,
num_workers=args.run_config.dataloader_threads,
pin_memory=False,
)
log_rank_0_info(
logger,
f"Built dataloader for {split} set with {len(dataset)} and {args.run_config.dataloader_threads} threads "
f"samples (Shuffle={split in args.learner_config.train_split}, "
f"Batch size={dataloader.batch_size}).",
)
return dataloader
def bootleg_collate_fn(
batch: Union[
List[Tuple[Dict[str, Any], Dict[str, torch.Tensor]]], List[Dict[str, Any]]
]
) -> Union[Tuple[Dict[str, Any], Dict[str, torch.Tensor]], Dict[str, Any]]:
"""Collate function (modified from emmental collate fn).
The main
difference is our collate function merges candidates from across the batch for disambiguation.
Args:
batch: The batch to collate.
Returns:
The collated batch.
"""
X_batch: defaultdict = defaultdict(list)
# In Bootleg, we may have a nested dictionary in x_dict; we want to keep this structure but
# collate the subtensors
X_sub_batch: defaultdict = defaultdict(lambda: defaultdict(list))
Y_batch: defaultdict = defaultdict(list)
# Learnable batch should be a pair of dict, while non learnable batch is a dict
is_learnable = True if not isinstance(batch[0], dict) else False
if is_learnable:
for x_dict, y_dict in batch:
if isinstance(x_dict, dict) and isinstance(y_dict, dict):
for field_name, value in x_dict.items():
if isinstance(value, list):
X_batch[field_name] += value
elif isinstance(value, dict):
# We reinstantiate the field_name here
# This keeps the field_name key intact
if field_name not in X_sub_batch:
X_sub_batch[field_name] = defaultdict(list)
for sub_field_name, sub_value in value.items():
if isinstance(sub_value, list):
X_sub_batch[field_name][sub_field_name] += sub_value
else:
X_sub_batch[field_name][sub_field_name].append(
sub_value
)
else:
X_batch[field_name].append(value)
for label_name, value in y_dict.items():
if isinstance(value, list):
Y_batch[label_name] += value
else:
Y_batch[label_name].append(value)
else:
for x_dict in batch: # type: ignore
for field_name, value in x_dict.items(): # type: ignore
if isinstance(value, list):
X_batch[field_name] += value
elif isinstance(value, dict):
# We reinstantiate the field_name here
# This keeps the field_name key intact
if field_name not in X_sub_batch:
X_sub_batch[field_name] = defaultdict(list)
for sub_field_name, sub_value in value.items():
if isinstance(sub_value, list):
X_sub_batch[field_name][sub_field_name] += sub_value
else:
X_sub_batch[field_name][sub_field_name].append(sub_value)
else:
X_batch[field_name].append(value)
field_names = copy.deepcopy(list(X_batch.keys()))
for field_name in field_names:
values = X_batch[field_name]
# Only merge list of tensors
if isinstance(values[0], torch.Tensor):
item_tensor, item_mask_tensor = list_to_tensor(
values,
min_len=Meta.config["data_config"]["min_data_len"],
max_len=Meta.config["data_config"]["max_data_len"],
)
X_batch[field_name] = item_tensor
field_names = copy.deepcopy(list(X_sub_batch.keys()))
for field_name in field_names:
sub_field_names = copy.deepcopy(list(X_sub_batch[field_name].keys()))
for sub_field_name in sub_field_names:
values = X_sub_batch[field_name][sub_field_name]
# Only merge list of tensors
if isinstance(values[0], torch.Tensor):
item_tensor, item_mask_tensor = list_to_tensor(
values,
min_len=Meta.config["data_config"]["min_data_len"],
max_len=Meta.config["data_config"]["max_data_len"],
)
X_sub_batch[field_name][sub_field_name] = item_tensor
# Add sub batch to batch
for field_name in field_names:
X_batch[field_name] = dict(X_sub_batch[field_name])
if is_learnable:
for label_name, values in Y_batch.items():
Y_batch[label_name] = list_to_tensor(
values,
min_len=Meta.config["data_config"]["min_data_len"],
max_len=Meta.config["data_config"]["max_data_len"],
)[0]
# ACROSS BATCH CANDIDATE MERGING
# Turns from b x m x k to E where E is the number of unique entities
all_uniq_eids = []
all_uniq_eid_idx = []
label = []
for k, batch_eids in enumerate(X_batch["entity_cand_eid"]):
for j, eid in enumerate(batch_eids):
# Skip if already in batch or if it's the unk...we don't use masking in the softmax for batch_cands
# data loading (training and during train eval)
if (
eid in all_uniq_eids
or X_batch["entity_cand_eval_mask"][k][j].item() is True
):
continue
all_uniq_eids.append(eid)
all_uniq_eid_idx.append([k, j])
for eid in X_batch["gold_eid"]:
men_label = []
if eid not in all_uniq_eids:
men_label.append(-2)
else:
men_label.append(all_uniq_eids.index(eid))
label.append(men_label)
# Super rare edge case if doing eval during training on small batch sizes and have an entire batch
# where the alias is -2 (i.e., we don't have it in our dump)
if len(all_uniq_eids) == 0:
        # Fall back to the first candidate (index [0, 0]) in this case -> the model will get the
        # wrong answer anyway, which is what we want when the gold entity is not in our dump.
all_uniq_eid_idx = [[0, 0]]
all_uniq_eid_idx = torch.LongTensor(all_uniq_eid_idx)
assert len(all_uniq_eid_idx.size()) == 2 and all_uniq_eid_idx.size(1) == 2
for key in X_batch.keys():
# Don't transform the mask as that's only used for no batch cands
if (
key.startswith("entity_")
and key != "entity_cand_eval_mask"
and key != "entity_to_mask"
):
X_batch[key] = X_batch[key][all_uniq_eid_idx[:, 0], all_uniq_eid_idx[:, 1]]
# print("FINAL", X_batch["entity_cand_eid"])
Y_batch["gold_unq_eid_idx"] = torch.LongTensor(label)
if is_learnable:
return dict(X_batch), dict(Y_batch)
else:
return dict(X_batch)
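# Minimal sketch (not used by the pipeline) of the across-batch candidate merging done above,
# assuming toy integer EIDs: candidates are deduplicated across the batch and each gold EID is
# mapped to its index in the unique list (-2 when the gold entity is not among the candidates).
def _example_batch_candidate_merging():
    batch_cand_eids = [[3, 7, 9], [7, 11, 9]]  # B x K candidate EIDs (hypothetical)
    gold_eids = [7, 5]  # per-example gold EIDs (5 is missing from the candidates)
    uniq_eids = []
    for cands in batch_cand_eids:
        for eid in cands:
            if eid not in uniq_eids:
                uniq_eids.append(eid)
    labels = [uniq_eids.index(g) if g in uniq_eids else -2 for g in gold_eids]
    return uniq_eids, labels  # ([3, 7, 9, 11], [1, -2])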
| bootleg-master | bootleg/data.py |
"""Bootleg scorer."""
import logging
from collections import Counter
from typing import Dict, List, Optional
from numpy import ndarray
logger = logging.getLogger(__name__)
class BootlegSlicedScorer:
"""Sliced NED scorer init.
Args:
train_in_candidates: are we training assuming that all gold qids are in the candidates or not
slices_datasets: slice dataset (see slicing/slice_dataset.py)
"""
def __init__(self, train_in_candidates, slices_datasets=None):
"""Bootleg scorer initializer."""
self.train_in_candidates = train_in_candidates
self.slices_datasets = slices_datasets
def get_slices(self, uid):
"""
        Get slice incidence matrices for the uid.
        The uid has dtype np.dtype([('sent_idx', 'i8', 1), ('subsent_idx', 'i8', 1),
        ('alias_orig_list_pos', 'i8', max_aliases)]), where alias_orig_list_pos
        gives the mentions' original positions in the sentence.
Args:
uid: unique identifier of sentence
Returns: dictionary of slice_name -> matrix of 0/1 for if alias is in slice or not (-1 for no alias)
"""
if self.slices_datasets is None:
return {}
for split, dataset in self.slices_datasets.items():
sent_idx = uid["sent_idx"]
alias_orig_list_pos = uid["alias_orig_list_pos"]
if dataset.contains_sentidx(sent_idx):
return dataset.get_slice_incidence_arr(sent_idx, alias_orig_list_pos)
return {}
def bootleg_score(
self,
golds: ndarray,
probs: ndarray,
preds: Optional[ndarray],
uids: Optional[List[str]] = None,
) -> Dict[str, float]:
"""Scores the predictions using the gold labels and slices.
Args:
golds: gold labels
probs: probabilities
preds: predictions (max prob candidate)
uids: unique identifiers
Returns: dictionary of tensorboard compatible keys and metrics
"""
batch = golds.shape[0]
NO_MENTION = -1
NOT_IN_CANDIDATES = -2 if self.train_in_candidates else 0
res = {}
total = Counter()
total_in_cand = Counter()
correct_boot = Counter()
correct_pop_cand = Counter()
correct_boot_in_cand = Counter()
correct_pop_cand_in_cand = Counter()
assert (
len(uids) == batch
), f"Length of uids {len(uids)} does not match batch {batch} in scorer"
for row in range(batch):
gold = golds[row]
pred = preds[row]
uid = uids[row]
pop_cand = 0 + int(not self.train_in_candidates)
if gold == NO_MENTION:
continue
# Slices is dictionary of slice_name -> incidence array. Each array value is 1/0 for if in slice or not
slices = self.get_slices(uid)
for slice_name in slices:
assert (
slices[slice_name][0] != -1
), f"Something went wrong with slices {slices} and uid {uid}"
# Check if alias is in slice
if slices[slice_name][0] == 1:
total[slice_name] += 1
if gold != NOT_IN_CANDIDATES:
total_in_cand[slice_name] += 1
if gold == pred:
correct_boot[slice_name] += 1
if gold != NOT_IN_CANDIDATES:
correct_boot_in_cand[slice_name] += 1
if gold == pop_cand:
correct_pop_cand[slice_name] += 1
if gold != NOT_IN_CANDIDATES:
correct_pop_cand_in_cand[slice_name] += 1
for slice_name in total:
res[f"{slice_name}/total_men"] = total[slice_name]
res[f"{slice_name}/total_notNC_men"] = total_in_cand[slice_name]
res[f"{slice_name}/acc_boot"] = (
0
if total[slice_name] == 0
else correct_boot[slice_name] / total[slice_name]
)
res[f"{slice_name}/acc_notNC_boot"] = (
0
if total_in_cand[slice_name] == 0
else correct_boot_in_cand[slice_name] / total_in_cand[slice_name]
)
res[f"{slice_name}/acc_pop"] = (
0
if total[slice_name] == 0
else correct_pop_cand[slice_name] / total[slice_name]
)
res[f"{slice_name}/acc_notNC_pop"] = (
0
if total_in_cand[slice_name] == 0
else correct_pop_cand_in_cand[slice_name] / total_in_cand[slice_name]
)
return res
| bootleg-master | bootleg/scorer.py |
"""Task init."""
| bootleg-master | bootleg/tasks/__init__.py |
"""NED task definitions."""
import torch
import torch.nn.functional as F
from emmental.scorer import Scorer
from emmental.task import Action, EmmentalTask
from torch import nn
from transformers import AutoModel
from bootleg.layers.bert_encoder import Encoder
from bootleg.layers.static_entity_embeddings import EntityEmbedding
from bootleg.scorer import BootlegSlicedScorer
from bootleg.task_config import NED_TASK
from bootleg.utils import eval_utils
class DisambigLoss:
"""Disambiguation loss."""
def __init__(self, normalize, temperature, entity_encoder_key):
"""Disambiguation loss initializer."""
self.normalize = normalize
self.temperature = temperature
self.entity_encoder_key = entity_encoder_key
def disambig_output(self, intermediate_output_dict):
"""Return the probs for a task in Emmental.
Args:
intermediate_output_dict: output dict from Emmental task flow
Returns: NED probabilities for candidates (B x M x K)
"""
mask = intermediate_output_dict["_input_"]["entity_cand_eval_mask"]
out = intermediate_output_dict["context_encoder"][0].unsqueeze(1)
ent_out = intermediate_output_dict[self.entity_encoder_key][0]
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
ent_out = F.normalize(ent_out, p=2, dim=-1)
pred = torch.bmm(out, ent_out.transpose(-2, -1))
mask = mask.reshape(*pred.shape)
ret = eval_utils.masked_class_logsoftmax(
pred=pred, mask=~mask, temp=self.temperature
).squeeze(
1
) # Squeeze single alias
return ret.exp()
def disambig_loss(self, intermediate_output_dict, Y):
"""Return the entity disambiguation loss on prediction heads.
Args:
            intermediate_output_dict: output dict from the Emmental task flow
Y: gold labels
Returns: loss
"""
# Grab the first value of training (when doing distributed training, we will have one per process)
if len(intermediate_output_dict["context_encoder"][1].shape) <= 0:
training = intermediate_output_dict["context_encoder"][1].item()
else:
training = intermediate_output_dict["context_encoder"][1][0].item()
assert type(training) is bool
mask = intermediate_output_dict["_input_"]["entity_cand_eval_mask"]
out = intermediate_output_dict["context_encoder"][0].unsqueeze(1)
ent_out = intermediate_output_dict[self.entity_encoder_key][0]
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
ent_out = F.normalize(ent_out, p=2, dim=-1)
pred = torch.bmm(out, ent_out.transpose(-2, -1))
mask = mask.reshape(*pred.shape)
labels = Y
# During eval, even if our model does not predict a NIC candidate, we allow for a NIC gold QID
# This qid gets assigned the label of -2 and is always incorrect
# As NLLLoss assumes classes of 0 to #classes-1 except for pad idx, we manually mask
# the -2 labels for the loss computation only. As this is just for eval, it won't matter.
masked_labels = labels
if not training:
label_mask = labels == -2
masked_labels = torch.where(
~label_mask, labels, torch.ones_like(labels) * -1
)
log_probs = eval_utils.masked_class_logsoftmax(
pred=pred, mask=~mask, temp=self.temperature
).squeeze(
1
) # Squeeze single alias
loss = nn.NLLLoss(ignore_index=-1)(log_probs, masked_labels.long())
return loss
def batch_cands_disambig_output(self, intermediate_output_dict):
"""Return the probs for a task in Emmental.
Args:
intermediate_output_dict: output dict from Emmental task flow
Returns: NED probabilities for candidates (B x M x K)
"""
out = intermediate_output_dict["context_encoder"][0]
ent_out = intermediate_output_dict[self.entity_encoder_key][0]
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
ent_out = F.normalize(ent_out, p=2, dim=-1)
score = torch.mm(out, ent_out.t()) / self.temperature
return F.softmax(score, dim=-1)
def batch_cands_disambig_loss(self, intermediate_output_dict, Y):
"""Return the entity disambiguation loss on prediction heads.
Args:
            intermediate_output_dict: output dict from the Emmental task flow
Y: gold labels
Returns: loss
"""
# Grab the first value of training (when doing distributed training, we will have one per process)
training = intermediate_output_dict["context_encoder"][1].item()
assert type(training) is bool
out = intermediate_output_dict["context_encoder"][0]
ent_out = intermediate_output_dict[self.entity_encoder_key][0]
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
ent_out = F.normalize(ent_out, p=2, dim=-1)
score = torch.mm(out, ent_out.t()) / self.temperature
labels = Y
masked_labels = labels.reshape(out.shape[0])
if not training:
label_mask = labels == -2
masked_labels = torch.where(
~label_mask, labels, torch.ones_like(labels) * -1
)
masked_labels = masked_labels.reshape(out.shape[0])
temp = nn.CrossEntropyLoss(ignore_index=-1)(score, masked_labels.long())
return temp
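# Minimal sketch (illustrative only, with random tensors) of the batch-candidate scoring above:
# normalized context embeddings are scored against all unique entity embeddings in the batch via
# a dot product, and cross-entropy is taken over the unique-entity axis with -1 labels ignored.
def _example_batch_cands_scoring(temperature=0.05):
    ctx = F.normalize(torch.randn(4, 8), p=2, dim=-1)  # B x dim context embeddings
    ents = F.normalize(torch.randn(6, 8), p=2, dim=-1)  # E x dim unique entity embeddings
    score = torch.mm(ctx, ents.t()) / temperature  # B x E similarity scores
    labels = torch.tensor([0, 3, 2, -1])  # index into unique entities; -1 is ignored
    return nn.CrossEntropyLoss(ignore_index=-1)(score, labels)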
def create_task(
args, use_batch_cands, len_context_tok, slice_datasets=None, entity_emb_file=None
):
"""Return an EmmentalTask for named entity disambiguation (NED).
Args:
args: args
use_batch_cands: use batch candidates for training
len_context_tok: length of the context tokenizer
slice_datasets: slice datasets used in scorer (default None)
entity_emb_file: file for pretrained entity embeddings - used for EVAL only
Returns: EmmentalTask for NED
"""
if entity_emb_file is None:
entity_model = AutoModel.from_pretrained(
args.data_config.word_embedding.bert_model
)
entity_model.encoder.layer = entity_model.encoder.layer[
: args.data_config.word_embedding.entity_layers
]
entity_model.resize_token_embeddings(len_context_tok)
entity_model = Encoder(entity_model, args.model_config.hidden_size)
entity_inputs = [
("_input_", "entity_cand_input_ids"),
("_input_", "entity_cand_attention_mask"),
("_input_", "entity_cand_token_type_ids"),
]
entity_encoder_str = "entity_encoder"
else:
entity_model = EntityEmbedding(entity_emb_file)
entity_inputs = [("_input_", "entity_cand_eid")]
entity_encoder_str = "entity_encoder_static"
# Create sentence encoder
context_model = AutoModel.from_pretrained(
args.data_config.word_embedding.bert_model
)
context_model.encoder.layer = context_model.encoder.layer[
: args.data_config.word_embedding.context_layers
]
context_model.resize_token_embeddings(len_context_tok)
context_model = Encoder(context_model, args.model_config.hidden_size)
sliced_scorer = BootlegSlicedScorer(
args.data_config.train_in_candidates, slice_datasets
)
    disambig_loss = DisambigLoss(
        args.model_config.normalize, args.model_config.temperature, entity_encoder_str
    )
    output_func = disambig_loss.disambig_output
    loss_func = disambig_loss.disambig_loss
    if use_batch_cands:
        loss_func = disambig_loss.batch_cands_disambig_loss
        output_func = disambig_loss.batch_cands_disambig_output
# Create module pool and combine with embedding module pool
module_pool = nn.ModuleDict(
{
"context_encoder": context_model,
entity_encoder_str: entity_model,
}
)
# Create task flow
task_flow = [
Action(
name=entity_encoder_str, module=entity_encoder_str, inputs=entity_inputs
),
Action(
name="context_encoder",
module="context_encoder",
inputs=[
("_input_", "input_ids"),
("_input_", "token_type_ids"),
("_input_", "attention_mask"),
],
),
]
return EmmentalTask(
name=NED_TASK,
module_pool=module_pool,
task_flow=task_flow,
loss_func=loss_func,
output_func=output_func,
require_prob_for_eval=False,
require_pred_for_eval=True,
# action_outputs are used to stitch together sentence fragments
action_outputs=[
("_input_", "sent_idx"),
("_input_", "subsent_idx"),
("_input_", "alias_orig_list_pos"),
("_input_", "for_dump_gold_cand_K_idx_train"),
(entity_encoder_str, 0), # entity embeddings
],
scorer=Scorer(
customize_metric_funcs={f"{NED_TASK}_scorer": sliced_scorer.bootleg_score}
),
)
| bootleg-master | bootleg/tasks/ned_task.py |
"""Entity gen task definitions."""
import torch.nn.functional as F
from emmental.scorer import Scorer
from emmental.task import Action, EmmentalTask
from torch import nn
from transformers import AutoModel
from bootleg.layers.bert_encoder import Encoder
from bootleg.task_config import NED_TASK
class EntityGenOutput:
"""Entity gen for output."""
def __init__(self, normalize):
"""Entity gen for output initializer."""
self.normalize = normalize
def entity_output_func(self, intermediate_output_dict):
"""Entity output func."""
ent_out = intermediate_output_dict["entity_encoder"][0]
if self.normalize:
ent_out = F.normalize(ent_out, p=2, dim=-1)
return ent_out
def create_task(args, len_context_tok):
"""Return an EmmentalTask for entity encoder only.
Args:
args: args
len_context_tok: number of tokens in the tokenizer
Returns: EmmentalTask for entity embedding extraction
"""
entity_model = AutoModel.from_pretrained(args.data_config.word_embedding.bert_model)
entity_model.encoder.layer = entity_model.encoder.layer[
: args.data_config.word_embedding.entity_layers
]
entity_model.resize_token_embeddings(len_context_tok)
entity_model = Encoder(entity_model, args.model_config.hidden_size)
# Create module pool and combine with embedding module pool
module_pool = nn.ModuleDict(
{
"entity_encoder": entity_model,
}
)
# Create task flow
task_flow = [
Action(
name="entity_encoder",
module="entity_encoder",
inputs=[
("_input_", "entity_input_ids"),
("_input_", "entity_attention_mask"),
("_input_", "entity_token_type_ids"),
],
),
]
return EmmentalTask(
name=NED_TASK,
module_pool=module_pool,
task_flow=task_flow,
loss_func=None,
output_func=EntityGenOutput(args.model_config.normalize).entity_output_func,
require_prob_for_eval=False,
require_pred_for_eval=True,
scorer=Scorer(),
)
| bootleg-master | bootleg/tasks/entity_gen_task.py |
"""AliasEntityTable class."""
import logging
import os
import time
import numpy as np
import torch
import torch.nn as nn
from tqdm.auto import tqdm
from bootleg import log_rank_0_debug
from bootleg.utils import data_utils, utils
from bootleg.utils.model_utils import get_max_candidates
logger = logging.getLogger(__name__)
class AliasEntityTable(nn.Module):
"""Stores table of the K candidate entity ids for each alias.
Args:
data_config: data config
entity_symbols: entity symbols
"""
def __init__(self, data_config, entity_symbols):
"""Alias table initializer."""
super(AliasEntityTable, self).__init__()
self.num_entities_with_pad_and_nocand = (
entity_symbols.num_entities_with_pad_and_nocand
)
self.num_aliases_with_pad_and_unk = len(entity_symbols.get_all_aliases()) + 2
self.K = get_max_candidates(entity_symbols, data_config)
(self.alias2entity_table, self.prep_file,) = self.prep(
data_config,
entity_symbols,
num_aliases_with_pad_and_unk=self.num_aliases_with_pad_and_unk,
num_cands_K=self.K,
)
# self.alias2entity_table = model_utils.move_to_device(self.alias2entity_table)
        # Small check that loading was done correctly. This isn't a catch-all,
        # but it will catch if something went wrong with loading. -2 is for alias not in our set and -1 is pad.
assert torch.equal(
self.alias2entity_table[-2],
torch.ones_like(self.alias2entity_table[-1]) * -1,
), "The second to last row of the alias table isn't -1, something wasn't loaded right."
assert torch.equal(
self.alias2entity_table[-1],
torch.ones_like(self.alias2entity_table[-1]) * -1,
), "The last row of the alias table isn't -1, something wasn't loaded right."
@classmethod
def prep(
cls,
data_config,
entity_symbols,
num_aliases_with_pad_and_unk,
num_cands_K,
):
"""Preps the alias to entity EID table.
Args:
data_config: data config
entity_symbols: entity symbols
num_aliases_with_pad_and_unk: number of aliases including pad and unk
num_cands_K: number of candidates per alias (aka K)
Returns: torch Tensor of the alias to EID table, save pt file
"""
# we pass num_aliases_with_pad_and_unk and num_cands_K to remove the dependence on entity_symbols
# when the alias table is already prepped
data_shape = (num_aliases_with_pad_and_unk, num_cands_K)
# dependent on train_in_candidates flag
prep_dir = data_utils.get_emb_prep_dir(data_config)
alias_str = os.path.splitext(data_config.alias_cand_map.replace("/", "_"))[0]
prep_file = os.path.join(
prep_dir,
f"alias2entity_table_{alias_str}_InC{int(data_config.train_in_candidates)}.pt",
)
log_rank_0_debug(logger, f"Looking for alias table in {prep_file}")
if not data_config.overwrite_preprocessed_data and os.path.exists(prep_file):
log_rank_0_debug(logger, f"Loading alias table from {prep_file}")
start = time.time()
alias2entity_table = np.memmap(
prep_file, dtype="int64", mode="r+", shape=data_shape
)
log_rank_0_debug(
logger, f"Loaded alias table in {round(time.time() - start, 2)}s"
)
else:
start = time.time()
log_rank_0_debug(logger, "Building alias table")
utils.ensure_dir(prep_dir)
alias2entity_table = cls.build_alias_table(data_config, entity_symbols)
mmap_file = np.memmap(prep_file, dtype="int64", mode="w+", shape=data_shape)
mmap_file[:] = alias2entity_table[:]
mmap_file.flush()
log_rank_0_debug(
logger,
f"Finished building and saving alias table in {round(time.time() - start, 2)}s.",
)
alias2entity_table = torch.from_numpy(alias2entity_table)
return alias2entity_table, prep_file
@classmethod
def build_alias_table(cls, data_config, entity_symbols):
"""Construct the alias to EID table.
Args:
data_config: data config
entity_symbols: entity symbols
Returns: numpy array where row is alias ID and columns are EID
"""
# we need to include a non candidate entity option for each alias and a row for PAD alias and not in dump alias
# +2 is for PAD alias (last row) and not in dump alias (second to last row)
# - same as -2 entity ids being not in cand list
num_aliases_with_pad_and_unk = len(entity_symbols.get_all_aliases()) + 2
alias2entity_table = (
np.ones(
(
num_aliases_with_pad_and_unk,
get_max_candidates(entity_symbols, data_config),
)
)
* -1
)
for alias in tqdm(
entity_symbols.get_all_aliases(), desc="Iterating over aliases"
):
alias_id = entity_symbols.get_alias_idx(alias)
            # set all to -1 for padding and fill in with real values below
entity_list = np.ones(get_max_candidates(entity_symbols, data_config)) * -1
# set first column to zero
# if we are using noncandidate entity, this will remain a 0
# if we are not using noncandidate entities, this will get overwritten below.
entity_list[0] = 0
eid_cands = entity_symbols.get_eid_cands(alias)
            # entity_symbols gives us the entity ids (EIDs) directly
            # the first entry stays the non candidate class when train_in_candidates is False
entity_list[
(not data_config.train_in_candidates) : len(eid_cands)
+ (not data_config.train_in_candidates)
] = np.array(eid_cands)
alias2entity_table[alias_id, :] = entity_list
return alias2entity_table
def get_alias_eid_priors(self, alias_indices):
"""Return the prior scores of the given alias_indices.
Args:
alias_indices: alias indices (B x M)
Returns: entity candidate normalized scores (B x M x K x 1)
"""
candidate_entity_scores = (
self.alias2entityprior_table[alias_indices].unsqueeze(-1).float()
)
return candidate_entity_scores
def forward(self, alias_indices):
"""Model forward.
Args:
alias_indices: alias indices (B x M)
Returns: entity candidate EIDs (B x M x K)
"""
candidate_entity_ids = self.alias2entity_table[alias_indices]
return candidate_entity_ids
def __getstate__(self):
"""Get state."""
state = self.__dict__.copy()
# Not picklable
del state["alias2entity_table"]
del state["alias2entityprior_table"]
return state
def __setstate__(self, state):
"""Set state."""
self.__dict__.update(state)
self.alias2entity_table = torch.tensor(
np.memmap(
self.prep_file,
dtype="int64",
mode="r",
shape=(self.num_aliases_with_pad_and_unk, self.K),
)
)
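# Toy illustration (hypothetical EIDs) of the table layout built above: one row per alias with K
# columns of candidate EIDs, padded with -1, plus a not-in-dump row and a PAD row (both all -1)
# appended at the end; column 0 stays 0 for the NC class when train_in_candidates is False.
def _example_alias_table_layout(num_aliases=2, max_cands=3):
    table = np.ones((num_aliases + 2, max_cands)) * -1
    table[0, :] = [0, 11, 12]  # alias 0: NC slot followed by two candidate EIDs
    table[1, :2] = [0, 27]     # alias 1: NC slot and a single candidate EID
    return table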
| bootleg-master | bootleg/layers/alias_to_ent_encoder.py |
"""Entity embeddings."""
import logging
import numpy as np
import torch
logger = logging.getLogger(__name__)
class EntityEmbedding(torch.nn.Module):
"""Static entity embeddings class.
Args:
entity_emb_file: numpy file of entity embeddings
"""
def __init__(self, entity_emb_file):
"""Entity embedding initializer."""
super(EntityEmbedding, self).__init__()
embs = torch.FloatTensor(np.load(entity_emb_file))
# Add -1 padding row; not required as dump from Bootleg should include PAD entity but as a safety
embs = torch.cat([embs, torch.zeros(1, embs.shape[-1])], dim=0)
self.embeddings = torch.nn.Embedding.from_pretrained(embs, padding_idx=-1)
def forward(self, entity_cand_eid):
"""Model forward.
Args:
entity_cand_eid: entity candidate EIDs (B x M x K)
Returns: B x M x K x dim tensor of entity embeddings
"""
training_bool = (
torch.tensor([1], device=entity_cand_eid.device) * self.training
).bool()
return self.embeddings(entity_cand_eid), training_bool
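# Minimal usage sketch (hypothetical file name and sizes): dump a toy embedding matrix to disk,
# load it through EntityEmbedding, and look up a B x M x K tensor of candidate EIDs.
def _example_entity_embedding(tmp_file="toy_entity_embs.npy"):
    np.save(tmp_file, np.random.rand(5, 4).astype(np.float32))  # 5 entities, dim 4
    emb = EntityEmbedding(tmp_file)
    cand_eids = torch.tensor([[[0, 2, 4]]])  # B=1, M=1, K=3 candidate EIDs
    out, _training = emb(cand_eids)
    return out.shape  # torch.Size([1, 1, 3, 4])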
| bootleg-master | bootleg/layers/static_entity_embeddings.py |
"""Layer init."""
| bootleg-master | bootleg/layers/__init__.py |
"""BERT encoder."""
import torch
from torch import nn
class Encoder(nn.Module):
"""
Encoder module.
Return the CLS token of Transformer.
Args:
transformer: transformer
out_dim: out dimension to project to
"""
def __init__(self, transformer, out_dim):
"""BERT Encoder initializer."""
super(Encoder, self).__init__()
transformer_output_dim = transformer.embeddings.word_embeddings.weight.size(1)
self.linear = nn.Linear(transformer_output_dim, out_dim)
self.activation = nn.Tanh()
self.transformer = transformer
def forward(self, token_ids, segment_ids=None, attention_mask=None):
"""BERT Encoder forward."""
encoded_layers, pooled_output = self.transformer(
input_ids=token_ids.reshape(-1, token_ids.shape[-1]),
token_type_ids=segment_ids.reshape(-1, segment_ids.shape[-1]),
attention_mask=attention_mask.reshape(-1, attention_mask.shape[-1]),
return_dict=False,
)
full_embs = pooled_output.reshape(*token_ids.shape[:-1], -1)
embs = self.activation(self.linear(full_embs))
training_bool = (
torch.tensor([1], device=token_ids.device) * self.training
).bool()
return embs, training_bool
| bootleg-master | bootleg/layers/bert_encoder.py |
"""Model utils."""
import logging
from bootleg import log_rank_0_debug
logger = logging.getLogger(__name__)
def count_parameters(model, requires_grad, logger):
"""Count the number of parameters.
Args:
model: model to count
requires_grad: whether to look at grad or no grad params
logger: logger
"""
for p in [
p for p in model.named_parameters() if p[1].requires_grad is requires_grad
]:
log_rank_0_debug(
logger,
"{:s} {:d} {:.2f} MB".format(
p[0], p[1].numel(), p[1].numel() * 4 / 1024**2
),
)
return sum(
p.numel() for p in model.parameters() if p.requires_grad is requires_grad
)
def get_max_candidates(entity_symbols, data_config):
"""
Get max candidates.
    Returns the maximum number of candidates used in the model, taking into
    account train_in_candidates. If train_in_candidates is False, we add a NC
    entity candidate (for null candidate).
Args:
entity_symbols: entity symbols
data_config: data config
"""
return entity_symbols.max_candidates + int(not data_config.train_in_candidates)
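# Quick sketch with stand-in objects (SimpleNamespace instead of the real symbol/config classes):
# with 30 stored candidates and train_in_candidates=False, one extra NC slot is added.
def _example_get_max_candidates():
    from types import SimpleNamespace
    entity_symbols = SimpleNamespace(max_candidates=30)
    data_config = SimpleNamespace(train_in_candidates=False)
    return get_max_candidates(entity_symbols, data_config)  # 31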
| bootleg-master | bootleg/utils/model_utils.py |
"""Util init."""
| bootleg-master | bootleg/utils/__init__.py |
"""Bootleg data utils."""
import os
from bootleg.symbols.constants import FINAL_LOSS, SPECIAL_TOKENS
from bootleg.utils import utils
def correct_not_augmented_dict_values(gold, dict_values):
"""
Correct gold label dict values in data prep.
Modifies the dict_values to only contain those mentions that are gold
labels. The new dictionary has the alias indices be corrected to start at 0
and end at the number of gold mentions.
Args:
gold: List of T/F values if mention is gold label or not
dict_values: Dict of slice_name -> Dict[alias_idx] -> slice probability
Returns: adjusted dict_values such that only gold = True aliases are kept (dict is reindexed to start at 0)
"""
new_dict_values = {}
gold_idx = [i for i in range(len(gold)) if gold[i] is True]
for slice_name in list(dict_values.keys()):
alias_dict = dict_values[slice_name]
        # i will not be in gold_idx if it wasn't a gold label to begin with
new_dict_values[slice_name] = {
str(gold_idx.index(int(i))): alias_dict[i]
for i in alias_dict
if int(i) in gold_idx
}
if len(new_dict_values[slice_name]) <= 0:
del new_dict_values[slice_name]
return new_dict_values
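# Illustrative sketch (made-up slice probabilities): only gold mentions are kept and the alias
# indices are re-numbered to start at 0 over the remaining gold mentions.
def _example_correct_not_augmented_dict_values():
    gold = [True, False, True]
    dict_values = {"slice_a": {"0": 0.9, "1": 0.5, "2": 0.7}, "slice_b": {"1": 0.3}}
    return correct_not_augmented_dict_values(gold, dict_values)
    # {"slice_a": {"0": 0.9, "1": 0.7}}  (slice_b only had a non-gold mention and is dropped)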
# eval_slices must include FINAL_LOSS
def get_eval_slices(eval_slices):
"""
Get eval slices in data prep.
Given input eval slices (passed in config), ensure FINAL_LOSS is in the
eval slices. FINAL_LOSS gives overall metrics.
Args:
eval_slices: list of input eval slices
Returns: list of eval slices to use in the model
"""
slice_names = eval_slices[:]
# FINAL LOSS is in ALL MODELS for ALL SLICES
if FINAL_LOSS not in slice_names:
slice_names.insert(0, FINAL_LOSS)
return slice_names
def get_save_data_folder(data_args, use_weak_label, dataset):
"""
Get save data folder for the prepped data.
Args:
data_args: data config
use_weak_label: whether to use weak labelling or not
dataset: dataset name
Returns: folder string path
"""
name = os.path.splitext(os.path.basename(dataset))[0]
direct = os.path.dirname(dataset)
bert_mod = data_args.word_embedding.bert_model.replace("/", "_")
fold_name = (
f"{name}_{bert_mod}_L{data_args.max_seq_len}_E{data_args.max_ent_len}"
f"_W{data_args.max_seq_window_len}"
f"_T{data_args.entity_type_data.use_entity_types}"
f"_K{data_args.entity_kg_data.use_entity_kg}"
f"_D{data_args.use_entity_desc}"
f"_InC{int(data_args.train_in_candidates)}"
f"_Aug{int(use_weak_label)}"
)
return os.path.join(direct, data_args.data_prep_dir, fold_name)
def get_save_data_folder_candgen(data_args, use_weak_label, dataset):
"""Give save data folder for the prepped data.
Args:
data_args: data config
use_weak_label: whether to use weak labelling or not
dataset: dataset name
Returns: folder string path
"""
name = os.path.splitext(os.path.basename(dataset))[0]
direct = os.path.dirname(dataset)
bert_mod = data_args.word_embedding.bert_model.replace("/", "_")
fold_name = (
f"{name}_{bert_mod}_L{data_args.max_seq_len}_E{data_args.max_ent_len}"
f"_W{data_args.max_seq_window_len}"
f"_A{data_args.use_entity_akas}"
f"_D{data_args.use_entity_desc}"
f"_InC{int(data_args.train_in_candidates)}"
f"_Aug{int(use_weak_label)}"
)
return os.path.join(direct, data_args.data_prep_dir, fold_name)
def generate_slice_name(data_args, slice_names, use_weak_label, dataset):
"""
Generate name for slice datasets, taking into account the config eval slices.
Args:
data_args: data args
slice_names: slice names
use_weak_label: if using weak labels or not
dataset: dataset name
Returns: dataset name for saving slice data
"""
dataset_name = os.path.join(
get_save_data_folder(data_args, use_weak_label, dataset), "slices.pt"
)
names_for_dataset = str(hash(slice_names))
dataset_name = os.path.splitext(dataset_name)[0] + "_" + names_for_dataset + ".pt"
return dataset_name
def get_emb_prep_dir(data_config):
"""
Get embedding prep directory for saving prep files.
Args:
data_config: data config
Returns: directory path
"""
prep_dir = os.path.join(data_config.entity_dir, data_config.entity_prep_dir)
utils.ensure_dir(prep_dir)
return prep_dir
def get_data_prep_dir(data_config):
"""
Get data prep directory for saving prep files.
Args:
data_config: data config
Returns: directory path
"""
prep_dir = os.path.join(data_config.data_dir, data_config.data_prep_dir)
utils.ensure_dir(prep_dir)
return prep_dir
def get_chunk_dir(prep_dir):
"""
Get directory for saving data chunks.
Args:
prep_dir: prep directory
Returns: directory path
"""
return os.path.join(prep_dir, "chunks")
def add_special_tokens(tokenizer):
"""
Add special tokens.
Args:
tokenizer: tokenizer
"""
# Add standard tokens
tokenizer.add_special_tokens(SPECIAL_TOKENS)
def read_in_akas(entitysymbols):
"""Read in alias to QID mappings and generates a QID to list of alternate names.
Args:
entitysymbols: entity symbols
Returns: dictionary of QID to type names
"""
# take the first type; UNK type is 0
qid2aliases = {}
for al in entitysymbols.get_all_aliases():
for qid in entitysymbols.get_qid_cands(al):
if qid not in qid2aliases:
qid2aliases[qid] = set()
qid2aliases[qid].add(al)
# Turn into sets for dumping
for qid in qid2aliases:
qid2aliases[qid] = list(qid2aliases[qid])
return qid2aliases
| bootleg-master | bootleg/utils/data_utils.py |
"""Bootleg utils."""
import collections
import json
import logging
import math
import os
import pathlib
import shutil
import time
import unicodedata
from itertools import chain, islice
import marisa_trie
import ujson
import yaml
from bootleg import log_rank_0_info
from bootleg.symbols.constants import USE_LOWER, USE_STRIP
from bootleg.utils.classes.dotted_dict import DottedDict
logger = logging.getLogger(__name__)
def ensure_dir(d):
"""
Check if a directory exists. If not, it makes it.
Args:
d: path
"""
pathlib.Path(d).mkdir(exist_ok=True, parents=True)
def exists_dir(d):
"""
Check if directory exists.
Args:
d: path
"""
return pathlib.Path(d).exists()
def dump_json_file(filename, contents, ensure_ascii=False):
"""
Dump dictionary to json file.
Args:
filename: file to write to
contents: dictionary to save
ensure_ascii: ensure ascii
"""
filename = pathlib.Path(filename)
filename.parent.mkdir(exist_ok=True, parents=True)
with open(filename, "w") as f:
try:
ujson.dump(contents, f, ensure_ascii=ensure_ascii)
except OverflowError:
json.dump(contents, f, ensure_ascii=ensure_ascii)
def dump_yaml_file(filename, contents):
"""
Dump dictionary to yaml file.
Args:
filename: file to write to
contents: dictionary to save
"""
filename = pathlib.Path(filename)
filename.parent.mkdir(exist_ok=True, parents=True)
with open(filename, "w") as f:
yaml.dump(contents, f)
def load_json_file(filename):
"""
Load dictionary from json file.
Args:
filename: file to read from
Returns: Dict
"""
with open(filename, "r") as f:
contents = ujson.load(f)
return contents
def load_yaml_file(filename):
"""
Load dictionary from yaml file.
Args:
filename: file to read from
Returns: Dict
"""
with open(filename) as f:
contents = yaml.load(f, Loader=yaml.FullLoader)
return contents
def recurse_redict(d):
"""
Cast all DottedDict values in a dictionary to be dictionaries.
Useful for YAML dumping.
Args:
d: Dict
Returns: Dict with no DottedDicts
"""
d = dict(d)
for k, v in d.items():
if isinstance(v, (DottedDict, dict)):
d[k] = recurse_redict(dict(d[k]))
return d
def write_to_file(filename, value):
"""
Write generic value to a file.
If value is not string, will cast to str().
Args:
filename: file to write to
value: context to write
Returns: Dict
"""
ensure_dir(os.path.dirname(filename))
if not isinstance(value, str):
value = str(value)
fout = open(filename, "w")
fout.write(value + "\n")
fout.close()
def write_jsonl(filepath, values, ensure_ascii=False):
"""
Write List[Dict] data to jsonlines file.
Args:
filepath: file to write to
values: list of dictionary data to write
ensure_ascii: ensure_ascii for json
"""
with open(filepath, "w") as out_f:
for val in values:
out_f.write(ujson.dumps(val, ensure_ascii=ensure_ascii) + "\n")
return
def chunks(iterable, n):
"""
Chunk data.
chunks(ABCDE,2) => AB CD E.
Args:
iterable: iterable input
        n: number of items per chunk
Returns: next chunk
"""
iterable = iter(iterable)
while True:
try:
yield chain([next(iterable)], islice(iterable, n - 1))
except StopIteration:
return None
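# Quick usage sketch: each yielded chunk must be consumed before asking for the next one,
# since the chunks share the same underlying iterator.
def _example_chunks():
    return ["".join(c) for c in chunks("ABCDE", 2)]  # ["AB", "CD", "E"]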
def chunk_file(in_file, out_dir, num_lines, prefix="out_"):
"""
Chunk a file into num_lines chunks.
Args:
in_file: input file
out_dir: output directory
num_lines: number of lines in each chunk
prefix: prefix for output files in out_dir
Returns: total number of lines read, dictionary of output file path -> number of lines in that file (for tqdms)
"""
ensure_dir(out_dir)
out_files = {}
total_lines = 0
ending = os.path.splitext(in_file)[1]
with open(in_file) as bigfile:
i = 0
while True:
try:
lines = next(chunks(bigfile, num_lines))
except StopIteration:
break
except RuntimeError:
break
file_split = os.path.join(out_dir, f"{prefix}{i}{ending}")
total_file_lines = 0
i += 1
with open(file_split, "w") as f:
while True:
try:
line = next(lines)
except StopIteration:
break
total_lines += 1
total_file_lines += 1
f.write(line)
out_files[file_split] = total_file_lines
return total_lines, out_files
def create_single_item_trie(in_dict, out_file=""):
"""
Create marisa trie.
Creates a marisa trie from the input dictionary. We assume the
dictionary has string keys and integer values.
Args:
in_dict: Dict[str] -> Int
out_file: marisa file to save (useful for reading as memmap) (optional)
Returns: marisa trie of in_dict
"""
keys = []
values = []
for k in in_dict:
assert type(in_dict[k]) is int
keys.append(k)
# Tries require list of item for the record trie
values.append(tuple([in_dict[k]]))
fmt = "<l"
trie = marisa_trie.RecordTrie(fmt, zip(keys, values))
if out_file != "":
trie.save(out_file)
return trie
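# Small usage sketch (toy values): keys are strings and values are single integers packed
# into one-item records.
def _example_single_item_trie():
    trie = create_single_item_trie({"apple": 3, "banana": 7})
    return trie["apple"]  # [(3,)]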
def load_single_item_trie(file):
"""
Load a marisa trie with integer values from memmap file.
Args:
file: marisa input file
Returns: marisa trie
"""
assert exists_dir(file)
return marisa_trie.RecordTrie("<l").mmap(file)
def get_lnrm(s, strip=USE_STRIP, lower=USE_LOWER):
"""
Convert to lnrm form.
Convert a string to its lnrm form We form the lower-cased normalized
version l(s) of a string s by canonicalizing its UTF-8 characters,
eliminating diacritics, lower-casing the UTF-8 and throwing out all ASCII-
range characters that are not alpha-numeric.
from http://nlp.stanford.edu/pubs/subctackbp.pdf Section 2.3
Args:
s: input string
strip: boolean for stripping alias or not
lower: boolean for lowercasing alias or not
Returns: the lnrm form of the string
"""
if not strip and not lower:
return s
lnrm = str(s)
if lower:
lnrm = lnrm.lower()
if strip:
lnrm = unicodedata.normalize("NFD", lnrm)
lnrm = "".join(
[
x
for x in lnrm
if (not unicodedata.combining(x) and x.isalnum() or x == " ")
]
).strip()
# will remove if there are any duplicate white spaces e.g. "the alias is here"
lnrm = " ".join(lnrm.split())
return lnrm
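# Illustrative sketch of the normalization: lower-case, strip diacritics, drop non-alphanumeric
# ASCII, and collapse repeated whitespace.
def _example_get_lnrm():
    return get_lnrm("The Héroes'  Club", strip=True, lower=True)  # "the heroes club"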
def strip_nan(input_list):
"""
Replace float('nan') with nulls.
Used for ujson loading/dumping.
Args:
input_list: list of items to remove the Nans from
Returns: list or nested list where Nan is not None
"""
final_list = []
for item in input_list:
if isinstance(item, collections.abc.Iterable):
final_list.append(strip_nan(item))
else:
final_list.append(item if not math.isnan(item) else None)
return final_list
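# Quick sketch: NaNs are replaced with None recursively so ujson can dump the lists cleanly.
def _example_strip_nan():
    return strip_nan([1.0, float("nan"), [2.0, float("nan")]])  # [1.0, None, [2.0, None]]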
def try_rmtree(rm_dir):
"""
Try to remove a directory tree.
In the case a resource is open, rmtree will fail. This retries to rmtree
after 1 second waits for 5 times.
Args:
rm_dir: directory to remove
"""
num_retries = 0
max_retries = 5
while num_retries < max_retries:
try:
shutil.rmtree(rm_dir)
break
except OSError:
time.sleep(1)
num_retries += 1
if num_retries >= max_retries:
log_rank_0_info(
logger,
f"{rm_dir} was not able to be deleted. This is okay but will have to manually be removed.",
)
| bootleg-master | bootleg/utils/utils.py |
import logging
import string
from collections import namedtuple
from typing import List, Tuple, Union
import nltk
import spacy
from spacy.cli.download import download as spacy_download
from bootleg.symbols.constants import LANG_CODE
from bootleg.utils.utils import get_lnrm
logger = logging.getLogger(__name__)
span_tuple = namedtuple("Span", ["text", "start_char_idx", "end_char_idx"])
try:
nlp = spacy.load(f"{LANG_CODE}_core_web_sm")
except OSError:
logger.warning(
f"Spacy models {LANG_CODE}_core_web_sm not found. Downloading and installing."
)
try:
spacy_download(f"{LANG_CODE}_core_web_sm")
nlp = spacy.load(f"{LANG_CODE}_core_web_sm")
except OSError:
nlp = None
# We want this to pass gracefully in the case Readthedocs is trying to build.
# This will fail later on if a user is actually trying to run Bootleg without mention extraction
if nlp is not None:
ALL_STOPWORDS = nlp.Defaults.stop_words
nlp.max_length = 2097152
else:
ALL_STOPWORDS = {}
PUNC = string.punctuation
KEEP_POS = {"PROPN", "NOUN"} # ADJ, VERB, ADV, SYM
PLURAL = {"s", "'s"}
NER_CLASSES = {
"PERSON",
"NORP",
"ORG",
"GPE",
"LOC",
"PRODUCT",
"EVENT",
"WORK_OF_ART",
"LANGUAGE",
}
table = str.maketrans(
dict.fromkeys(PUNC)
) # OR {key: None for key in string.punctuation}
def is_noun_phrase(words: List[spacy.tokens.token.Token]) -> bool:
"""Check if noun phrase.
Must have a POS that is a noun.
"""
return any(g.pos_ in KEEP_POS for g in words)
def is_split_noun(
words: List[spacy.tokens.token.Token],
left_of: Union[None, spacy.tokens.token.Token],
right_of: Union[None, spacy.tokens.token.Token],
) -> bool:
"""Check if the words are a split noun.
If the first word is noun and left_of is noun
or if last word is noun and right_of is noun.
"""
if left_of is not None and words[0].pos_ in KEEP_POS and left_of.pos_ in KEEP_POS:
return True
if (
right_of is not None
and words[-1].pos_ in KEEP_POS
and right_of.pos_ in KEEP_POS
):
return True
return False
def bounded_by_stopword(
words: List[spacy.tokens.token.Token], start_word_idx: int
) -> bool:
"""Check if boundary word is stopword/plural/punc word.
    If the span starts or ends with a stopword/plural/punc word, return True,
    except when the first word is capitalized or the span starts the text.
"""
is_important_word = words[0].text[0].isupper() or start_word_idx == 0
if words[0].text.lower() in PLURAL or words[-1].text.lower() in PLURAL:
return True
if not is_important_word and (
words[0].text.lower() in ALL_STOPWORDS or words[0].text.lower() in PUNC
):
return True
if words[-1].text.lower() in ALL_STOPWORDS or words[-1].text.lower() in PUNC:
return True
return False
def is_numeric(words: List[spacy.tokens.token.Token]) -> bool:
"""Check if numeric word span."""
return get_lnrm(
" ".join(map(lambda x: x.text, words)), strip=True, lower=True
).isnumeric()
def iter_noun_phrases(
doc: spacy.tokens.doc.Doc, min_alias_len: int, max_alias_len: int
):
"""Yield noun phrase from spacy parsed doc."""
for n in range(max_alias_len, min_alias_len - 1, -1):
grams = nltk.ngrams(doc, n)
for start_word_idx, gram_words in enumerate(grams):
start_char_idx = gram_words[0].idx
end_char_idx = gram_words[-1].idx + len(gram_words[-1])
if not is_noun_phrase(gram_words):
continue
if is_split_noun(
gram_words,
doc[start_word_idx - 1] if start_word_idx > 0 else None,
doc[start_word_idx + n] if start_word_idx + n < len(doc) else None,
):
continue
if bounded_by_stopword(gram_words, start_word_idx):
continue
if is_numeric(gram_words):
continue
yield span_tuple(
" ".join(map(lambda x: x.text, gram_words)),
start_char_idx,
end_char_idx,
)
def ngram_spacy_extract_aliases(
text, all_aliases, min_alias_len=1, max_alias_len=6
) -> Tuple[List[str], List[Tuple[int, int]], List[Tuple[int, int]]]:
"""Extract aliases from the text.
Does ngram search using POS tags from spacy
Args:
text: text to extract aliases.
all_aliases: all possible aliases to consider as a mention
min_alias_len: minimum alias length
max_alias_len: maximum alias length
Returns:
list of aliases, list of span offsets, list of char span offsets.
"""
used_aliases = []
try:
doc = nlp(text, disable=["ner", "parser"])
except Exception:
return [], [], []
for noun_span in iter_noun_phrases(doc, min_alias_len, max_alias_len):
gram_attempt = get_lnrm(noun_span.text)
final_gram = None
if gram_attempt in all_aliases:
final_gram = gram_attempt
else:
joined_gram_merged_plural = get_lnrm(noun_span.text.replace(" 's", "'s"))
if joined_gram_merged_plural in all_aliases:
final_gram = joined_gram_merged_plural
else:
joined_gram_merged_noplural = get_lnrm(noun_span.text.replace("'s", ""))
if joined_gram_merged_noplural in all_aliases:
final_gram = joined_gram_merged_noplural
else:
joined_gram_merged_nopunc = get_lnrm(
joined_gram_merged_noplural.translate(table),
)
if joined_gram_merged_nopunc in all_aliases:
final_gram = joined_gram_merged_nopunc
if final_gram is not None:
keep = True
# Make sure we don't double add an alias. As we traverse a tree,
# we will always go largest to smallest.
for u_al in used_aliases:
u_j_st = u_al[1]
u_j_end = u_al[2]
if (
noun_span.start_char_idx < u_j_end
and noun_span.end_char_idx > u_j_st
):
keep = False
break
if not keep:
continue
used_aliases.append(
tuple([final_gram, noun_span.start_char_idx, noun_span.end_char_idx])
)
# Sort based on span order
aliases_for_sorting = sorted(used_aliases, key=lambda elem: [elem[1], elem[2]])
used_aliases = [a[0] for a in aliases_for_sorting]
chars = [[a[1], a[2]] for a in aliases_for_sorting]
# Backwards Compatibility: convert back to word spans
spans = [[len(text[: sp[0]].split()), len(text[: sp[1]].split())] for sp in chars]
assert all([sp[1] <= len(doc) for sp in spans]), f"{spans} {text}"
return used_aliases, spans, chars
def spacy_extract_aliases(
text, all_aliases, min_alias_len=1, max_alias_len=6
) -> Tuple[List[str], List[Tuple[int, int]], List[Tuple[int, int]]]:
"""Extract aliases from the text.
Does NER parsing using Spacy
Args:
text: text to extract aliases.
all_aliases: all possible aliases to consider as a mention
min_alias_len: minimum alias length
max_alias_len: maximum alias length
Returns:
list of aliases, list of span offsets, list of char span offsets.
"""
used_aliases = []
try:
doc = nlp(text)
except Exception:
return [], [], []
for ent in doc.ents:
if ent.label_ in NER_CLASSES:
gram_attempt = get_lnrm(ent.text)
if (
len(gram_attempt.split()) < min_alias_len
or len(gram_attempt.split()) > max_alias_len
):
continue
final_gram = None
if gram_attempt in all_aliases:
final_gram = gram_attempt
else:
joined_gram_merged_plural = get_lnrm(ent.text.replace(" 's", "'s"))
if joined_gram_merged_plural in all_aliases:
final_gram = joined_gram_merged_plural
else:
joined_gram_merged_noplural = get_lnrm(ent.text.replace("'s", ""))
if joined_gram_merged_noplural in all_aliases:
final_gram = joined_gram_merged_noplural
if final_gram is not None:
keep = True
# Make sure we don't double add an alias. As we traverse a tree,
# we will always go largest to smallest.
for u_al in used_aliases:
u_j_st = u_al[1]
u_j_end = u_al[2]
if ent.start_char < u_j_end and ent.end_char > u_j_st:
keep = False
break
if not keep:
continue
used_aliases.append(tuple([final_gram, ent.start_char, ent.end_char]))
# Sort based on span order
aliases_for_sorting = sorted(used_aliases, key=lambda elem: [elem[1], elem[2]])
used_aliases = [a[0] for a in aliases_for_sorting]
chars = [[a[1], a[2]] for a in aliases_for_sorting]
# Backwards Compatibility: convert back to word spans
spans = [[len(text[: sp[0]].split()), len(text[: sp[1]].split())] for sp in chars]
assert all([sp[1] <= len(doc) for sp in spans]), f"{spans} {text}"
return used_aliases, spans, chars
| bootleg-master | bootleg/utils/mention_extractor_utils.py |
"""Bootleg eval utils."""
import glob
import logging
import math
import multiprocessing
import os
import shutil
import time
from collections import defaultdict
import emmental
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import ujson
from emmental.utils.utils import array_to_numpy, prob_to_pred
from tqdm.auto import tqdm
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.task_config import NED_TASK
from bootleg.utils import data_utils, utils
from bootleg.utils.classes.nested_vocab_tries import (
TwoLayerVocabularyScoreTrie,
VocabularyTrie,
)
from bootleg.utils.utils import strip_nan, try_rmtree
logger = logging.getLogger(__name__)
def masked_class_logsoftmax(pred, mask, dim=2, temp=1.0, zero_delta=1e-45):
"""
Masked logsoftmax.
Mask of 0/False means mask value (ignore it)
Args:
pred: input tensor
mask: mask
dim: softmax dimension
temp: softmax temperature
zero_delta: small value to add so that vector + (mask+zero_delta).log() is not Nan for all 0s
Returns: masked softmax tensor
"""
assert temp > 0, "You can't have a temperature of 0"
# pred is batch x M x K
# https://github.com/allenai/allennlp/blob/b6cc9d39651273e8ec2a7e334908ffa9de5c2026/allennlp/nn/util.py#L272-L303
pred = pred / temp
pred = (
pred + (mask + zero_delta).log()
) # we could also do 1e-46 but I feel safer 1e-45
# WARNING: might need 5e-16 with FP16 and training
# compute softmax over the k dimension
return F.log_softmax(input=pred, dim=dim)
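# Minimal sketch with toy scores: masked candidates (mask=0) get probability ~0 after the
# masked log-softmax, while the unmasked candidates are normalized among themselves.
def _example_masked_class_logsoftmax():
    pred = torch.tensor([[[2.0, 1.0, 0.5]]])  # B x M x K scores
    mask = torch.tensor([[[1.0, 1.0, 0.0]]])  # last candidate is padding
    return masked_class_logsoftmax(pred, mask).exp()  # ~[[[0.73, 0.27, 0.00]]]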
def map_aliases_to_candidates(
train_in_candidates, max_candidates, alias_cand_map, aliases
):
"""
Get list of QID candidates for each alias.
Args:
        train_in_candidates: whether the model has a NC entity or not (assumes all gold QIDs are in candidate lists)
        max_candidates: maximum number of candidates per alias
        alias_cand_map: alias -> candidate qids in dict or TwoLayerVocabularyScoreTrie format
        aliases: list of aliases
    Returns: List of lists of QIDs
"""
not_tic = 1 - train_in_candidates
res = []
for al in aliases:
if isinstance(alias_cand_map, dict):
if al in alias_cand_map:
cands = [qid_pair[0] for qid_pair in alias_cand_map[al]]
else:
cands = ["-1"] * max_candidates
else:
if alias_cand_map.is_key_in_trie(al):
cands = alias_cand_map.get_value(al, keep_score=False)
else:
cands = ["-1"] * max_candidates
cands = cands + ["-1"] * (max_candidates - len(cands))
res.append(not_tic * ["NC"] + cands)
return res
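# Illustrative sketch (hypothetical alias map in dict form): each alias maps to its candidate
# QIDs padded with "-1" up to max_candidates; unknown aliases get an all "-1" row.
def _example_map_aliases_to_candidates():
    alias_cand_map = {"lincoln": [("Q91", 10.0), ("Q28260", 2.0)]}
    return map_aliases_to_candidates(
        train_in_candidates=True,
        max_candidates=3,
        alias_cand_map=alias_cand_map,
        aliases=["lincoln", "unseen alias"],
    )  # [["Q91", "Q28260", "-1"], ["-1", "-1", "-1"]]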
def map_candidate_qids_to_eid(candidate_qids, qid2eid):
"""
Get list of EID candidates for each alias.
Args:
candidate_qids: list of list of candidate QIDs
qid2eid: mapping of qid to entity id
Returns: List of lists EIDs
"""
res = []
for cand_list in candidate_qids:
res_cands = []
for q in cand_list:
if q == "NC":
res_cands.append(0)
elif q == "-1":
res_cands.append(1)
else:
                # qid2eid supports indexing for both dict and VocabularyTrie mappings
                res_cands.append(qid2eid[q])
res.append(res_cands)
return res
def get_eval_folder(file):
"""
Return eval folder for the given evaluation file.
Stored in log_path/filename/model_name.
Args:
file: eval file
Returns: eval folder
"""
return os.path.join(
emmental.Meta.log_path,
os.path.splitext(file)[0],
os.path.splitext(
os.path.basename(emmental.Meta.config["model_config"]["model_path"])
)[0],
)
def write_disambig_metrics_to_csv(file_path, dictionary):
"""Save disambiguation metrics in the dictionary to file_path.
Args:
file_path: file path
dictionary: dictionary of scores (output of Emmental score)
"""
# Only saving NED, ignore Type. dictionary has keys such as "NED/Bootleg/dev/unif_HD/total_men" which
# corresponds to task/dataset/split/slice/metric, and the value is the associated value for that metric as
# calculated on the dataset. Sort keys to ensure that the rest of the code below remains in the correct order
# across slices
all_keys = [x for x in sorted(dictionary.keys()) if x.startswith(NED_TASK)]
# This line uses endswith("total_men") because we are just trying to get 1 copy of each task/dataset/split/slice
# combo. We are not actually using the total_men information in this line below (could've used acc_boot instead,
# etc.)
task, dataset, split, slices = list(
zip(*[x.split("/")[:4] for x in all_keys if x.endswith("total_men")])
)
acc_boot = [dictionary[x] for x in all_keys if x.endswith("acc_boot")]
acc_boot_notNC = [dictionary[x] for x in all_keys if x.endswith("acc_notNC_boot")]
mentions = [dictionary[x] for x in all_keys if x.endswith("total_men")]
mentions_notNC = [dictionary[x] for x in all_keys if x.endswith("total_notNC_men")]
acc_pop = [dictionary[x] for x in all_keys if x.endswith("acc_pop")]
acc_pop_notNC = [dictionary[x] for x in all_keys if x.endswith("acc_notNC_pop")]
df_info = {
"task": task,
"dataset": dataset,
"split": split,
"slice": slices,
"mentions": mentions,
"mentions_notNC": mentions_notNC,
"acc_boot": acc_boot,
"acc_boot_notNC": acc_boot_notNC,
"acc_pop": acc_pop,
"acc_pop_notNC": acc_pop_notNC,
}
df = pd.DataFrame(data=df_info)
df.to_csv(file_path, index=False)
def get_sent_idx2num_mens(data_file):
"""Get the map from sentence index to number of mentions and to data.
Used for calculating offsets and chunking file.
Args:
data_file: eval file
Returns: Dict of sentence index -> number of mention per sentence, Dict of sentence index -> input line
"""
sent_idx2num_mens = {}
sent_idx2row = {}
total_num_mentions = 0
with open(data_file) as f:
for line in tqdm(
f,
total=sum([1 for _ in open(data_file)]),
desc="Getting sentidx2line mapping",
):
line = ujson.loads(line)
# keep track of the start idx in the condensed memory mapped file for each sentence (varying number of
# aliases)
assert (
line["sent_idx_unq"] not in sent_idx2num_mens
), f'Sentence indices must be unique. {line["sent_idx_unq"]} already seen.'
sent_idx2row[str(line["sent_idx_unq"])] = line
# Save as string for Marisa trie later
sent_idx2num_mens[str(line["sent_idx_unq"])] = len(line["aliases"])
# We include false aliases for debugging (and alias_pos includes them)
total_num_mentions += len(line["aliases"])
# print("INSIDE SENT MAP", str(line["sent_idx_unq"]), total_num_mentions)
log_rank_0_debug(
logger, f"Total number of mentions across all sentences: {total_num_mentions}"
)
return sent_idx2num_mens, sent_idx2row
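# Illustrative sketch (not part of the original module): each input line is a JSON object
# with at least "sent_idx_unq" and "aliases"; the returned dicts are keyed by the
# stringified sentence index. The file path and field values below are hypothetical.
#
#   line = {"sent_idx_unq": 0, "aliases": ["lincoln", "civil war"], ...}
#   sent_idx2num_mens, sent_idx2row = get_sent_idx2num_mens("dev.jsonl")
#   sent_idx2num_mens["0"]  # -> 2
#   sent_idx2row["0"]       # -> the full parsed line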
# Modified from
# https://github.com/SenWu/emmental/blob/master/src/emmental/model.py#L455
# to support dump_preds_accumulation_steps
@torch.no_grad()
def batched_pred_iter(
model,
dataloader,
dump_preds_accumulation_steps,
sent_idx2num_mens,
):
"""
Predict from dataloader.
Runs prediction while taking eval accumulation steps into account. Yields a new
prediction set after every dump_preds_accumulation_steps eval steps so results
can be written out incrementally.
If a sentence or batch doesn't have any mentions, it will not be returned by this method.
Recall that we split up sentences that are too long to feed to the model.
We use the sent_idx2num_mens dict to ensure we have full sentences evaluated before
returning, otherwise we'll have incomplete sentences to merge together when dumping.
Args:
model: model
dataloader: The dataloader to predict
dump_preds_accumulation_steps: Number of eval steps to run before returning
sent_idx2num_mens: Dict of sentence index to number of mentions
Returns:
Iterator over result dict.
"""
def collect_result(uid_d, gold_d, pred_d, prob_d, out_d, cur_sentidx_nummen):
"""Merge results for the sentences where all mentions have been evaluated."""
final_uid_d = defaultdict(list)
final_prob_d = defaultdict(list)
final_pred_d = defaultdict(list)
final_gold_d = defaultdict(list)
final_out_d = defaultdict(lambda: defaultdict(list))
sentidxs_finalized = []
# print("FINALIZE", cur_sentidx_nummen, [sent_idx2num_mens[str(k)] for k in cur_sentidx_nummen])
log_rank_0_debug(logger, f"Collecting {len(cur_sentidx_nummen)} results")
for sent_idx, cur_mention_set in cur_sentidx_nummen.items():
assert (
len(cur_mention_set) <= sent_idx2num_mens[str(sent_idx)]
), f"Too many mentions for {sent_idx}: {cur_mention_set} VS {sent_idx2num_mens[str(sent_idx)]}"
if len(cur_mention_set) == sent_idx2num_mens[str(sent_idx)]:
sentidxs_finalized.append(sent_idx)
for task_name in uid_d:
final_uid_d[task_name].extend(uid_d[task_name][sent_idx])
final_prob_d[task_name].extend(prob_d[task_name][sent_idx])
final_pred_d[task_name].extend(pred_d[task_name][sent_idx])
final_gold_d[task_name].extend(gold_d[task_name][sent_idx])
if task_name in out_d.keys():
for action_name in out_d[task_name].keys():
final_out_d[task_name][action_name].extend(
out_d[task_name][action_name][sent_idx]
)
# If batch size is close to 1 and accumulation step was close to 1,
# we may reach a point where there are no complete sentences
if len(sentidxs_finalized) == 0:
return {}, sentidxs_finalized
res = {
"uids": final_uid_d,
"golds": final_gold_d,
}
for task_name in final_prob_d.keys():
final_prob_d[task_name] = array_to_numpy(final_prob_d[task_name])
res["probs"] = final_prob_d
for task_name in final_pred_d.keys():
final_pred_d[task_name] = array_to_numpy(final_pred_d[task_name])
res["preds"] = final_pred_d
res["outputs"] = final_out_d
return res, sentidxs_finalized
model.eval()
# Will store sent_idx -> task_name -> list output
uid_dict = defaultdict(lambda: defaultdict(list))
prob_dict = defaultdict(lambda: defaultdict(list))
pred_dict = defaultdict(lambda: defaultdict(list))
gold_dict = defaultdict(lambda: defaultdict(list))
# Will store sent_idx -> task_name -> output key -> list output
out_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
# list of all finalized and yielded sentences
all_finalized_sentences = []
# Stores the current sent idx -> set of unique mention positions seen (sentences that aren't complete
# are held until they are)
cur_sentidx2_nummentions = dict()
num_eval_steps = 0
# Collect dataloader information
task_to_label_dict = dataloader.task_to_label_dict
uid = dataloader.uid
with torch.no_grad():
for batch_num, bdict in tqdm(
enumerate(dataloader),
total=len(dataloader),
desc=f"Evaluating {dataloader.data_name} ({dataloader.split})",
):
num_eval_steps += 1
X_bdict, Y_bdict = bdict
(
uid_bdict,
loss_bdict,
prob_bdict,
gold_bdict,
out_bdict,
) = model.forward( # type: ignore
X_bdict[uid],
X_bdict,
Y_bdict,
task_to_label_dict,
return_action_outputs=True,
return_probs=True,
)
assert (
NED_TASK in uid_bdict
), f"{NED_TASK} task needs to be returned in uid to get number of mentions"
for task_name in uid_bdict.keys():
for ex_idx in range(len(uid_bdict[task_name])):
# Recall that our uid is
# ============================
# guid_dtype = np.dtype(
# [
# ("sent_idx", "i8", 1),
# ("subsent_idx", "i8", 1),
# ("alias_orig_list_pos", "i8", 1),
# ]
# )
# ============================
# Index 0 -> sent_idx, Index 1 -> subsent_idx, Index 2 -> List of aliases positions
# (-1 means no mention in train example)
sent_idx = uid_bdict[task_name][ex_idx][0]
# Only increment for NED TASK
if task_name == NED_TASK:
# alias_pos_for_eval gives which mentions are meant to be evaluated in this batch (-1 means
# skip) for scoring. This will be different than the mentions seen by the model as we window
# sentences and a mention may be seen multiple times but only scored once. This includes for
# True and False anchors - we dump all anchors for analysis
alias_pos_for_eval = out_bdict[task_name][
"_input__for_dump_gold_cand_K_idx_train"
][ex_idx]
# This is the number of mentions - there should only be 1
assert len(uid_bdict[task_name][ex_idx][2]) == 1
alias_pos_in_og_list = uid_bdict[task_name][ex_idx][2][0]
if sent_idx not in cur_sentidx2_nummentions:
cur_sentidx2_nummentions[sent_idx] = set()
# Index 2 is index of alias positions in original list (-1 means no mention)
if alias_pos_for_eval != -1:
cur_sentidx2_nummentions[sent_idx].add(alias_pos_in_og_list)
uid_dict[task_name][sent_idx].extend(
uid_bdict[task_name][ex_idx : ex_idx + 1]
)
prob_dict[task_name][sent_idx].extend(prob_bdict[task_name][ex_idx : ex_idx + 1]) # type: ignore
pred_dict[task_name][sent_idx].extend( # type: ignore
prob_to_pred(prob_bdict[task_name][ex_idx : ex_idx + 1])
)
gold_dict[task_name][sent_idx].extend(
gold_bdict[task_name][ex_idx : ex_idx + 1]
)
if task_name in out_bdict.keys():
for action_name in out_bdict[task_name].keys():
out_dict[task_name][action_name][sent_idx].extend(
out_bdict[task_name][action_name][ex_idx : ex_idx + 1]
)
if num_eval_steps >= dump_preds_accumulation_steps:
# Collect the sentences that have all mentions collected
res, finalized_sent_idxs = collect_result(
uid_dict,
gold_dict,
pred_dict,
prob_dict,
out_dict,
cur_sentidx2_nummentions,
)
all_finalized_sentences.extend([str(s) for s in finalized_sent_idxs])
num_eval_steps = 0
for final_sent_i in finalized_sent_idxs:
assert final_sent_i in cur_sentidx2_nummentions
del cur_sentidx2_nummentions[final_sent_i]
for task_name in uid_dict.keys():
del uid_dict[task_name][final_sent_i]
del prob_dict[task_name][final_sent_i]
del pred_dict[task_name][final_sent_i]
del gold_dict[task_name][final_sent_i]
if task_name in out_dict.keys():
for action_name in out_dict[task_name].keys():
del out_dict[task_name][action_name][final_sent_i]
if len(res) > 0:
yield res
res, finalized_sent_idxs = collect_result(
uid_dict, gold_dict, pred_dict, prob_dict, out_dict, cur_sentidx2_nummentions
)
all_finalized_sentences.extend([str(s) for s in finalized_sent_idxs])
for final_sent_i in finalized_sent_idxs:
del cur_sentidx2_nummentions[final_sent_i]
if len(res) > 0:
# print("FINALIZED", finalized_sent_idxs)
yield res
assert (
len(cur_sentidx2_nummentions) == 0
), f"After eval, some sentences had left over mentions {cur_sentidx2_nummentions}"
assert set(all_finalized_sentences).intersection(sent_idx2num_mens.keys()) == set(
[k for k, v in sent_idx2num_mens.items() if v > 0]
), (
f"Some sentences are left over "
f"{[s for s in sent_idx2num_mens if s not in set(all_finalized_sentences) and sent_idx2num_mens[s] > 0]}"
)
return None
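# Illustrative sketch (not part of the original module): consuming the generator above.
# Each yielded dict only contains sentences whose mentions have all been evaluated, so
# results can be written out incrementally. The accumulation step count below is hypothetical.
#
#   for res_dict in batched_pred_iter(model, dataloader, 100, sent_idx2num_mens):
#       uids = res_dict["uids"][NED_TASK]
#       probs = res_dict["probs"][NED_TASK]  # numpy array, one row per mention
#       ...  # write predictions for this chunk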
def check_and_create_alias_cand_trie(save_folder, entity_symbols):
"""Create a mmap memory trie object for storing the alias-candidate mappings.
Args:
save_folder: save folder for alias trie
entity_symbols: entity symbols
"""
try:
TwoLayerVocabularyScoreTrie(load_dir=save_folder)
except FileNotFoundError:
log_rank_0_debug(
logger,
"Creating the alias candidate trie for faster parallel processing. "
"This is a one time cost",
)
alias_trie = entity_symbols._alias2qids
alias_trie.dump(save_folder)
return
def get_emb_file(save_folder):
"""Get the embedding numpy file for the batch.
Args:
save_folder: save folder
Returns: string
"""
return os.path.join(save_folder, "bootleg_emb_file.npy")
def get_result_file(save_folder):
"""Get the jsonl label file for the batch.
Args:
save_folder: save folder
Returns: string
"""
return os.path.join(save_folder, "bootleg_labels.jsonl")
def dump_model_outputs(
model,
dataloader,
config,
sentidx2num_mentions,
save_folder,
entity_symbols,
task_name,
overwrite_data,
):
"""Dump model outputs.
Args:
model: model
dataloader: data loader
config: config
sentidx2num_mentions: Dict from sentence idx to number of mentions
save_folder: save folder
entity_symbols: entity symbols
task_name: task name
overwrite_data: overwrite saved mmap files
Returns: memmap file name for saved outputs, dtype file name for loading the memmap file
"""
# write to file (M x hidden x size for each data point -- next step will deal with recovering original sentence
# indices for overflowing sentences)
unmerged_memmap_dir = os.path.join(save_folder, "model_outputs_mmap")
utils.ensure_dir(unmerged_memmap_dir)
final_unmerged_memmap = os.path.join(save_folder, "model_outputs_final.mmap")
emb_file_config = os.path.join(unmerged_memmap_dir, "model_outputs_config.npy")
if (
not overwrite_data
and os.path.exists(final_unmerged_memmap)
and os.path.exists(emb_file_config)
):
log_rank_0_info(
logger,
f"Skipping dumping model outputs. {final_unmerged_memmap} already exists and overwrite is False.",
)
return final_unmerged_memmap, emb_file_config
K = entity_symbols.max_candidates + (not config.data_config.train_in_candidates)
unmerged_storage_type = np.dtype(
[
("M", int),
("K", int),
("hidden_size", int),
("sent_idx", int),
("subsent_idx", int),
("alias_list_pos", int, 1),
("final_loss_true", int, 1),
("final_loss_pred", int, 1),
("final_loss_prob", float, 1),
("final_loss_cand_probs", float, K),
]
)
np.save(emb_file_config, unmerged_storage_type, allow_pickle=True)
item_size = np.memmap(
final_unmerged_memmap,
dtype=unmerged_storage_type,
mode="w+",
shape=(1,),
).nbytes
total_expected_size = item_size * len(dataloader.dataset) / 1024**3
log_rank_0_info(
logger,
f"Expected size is {total_expected_size}GB.",
)
data_arr = np.memmap(
final_unmerged_memmap,
dtype=unmerged_storage_type,
mode="w+",
shape=(len(dataloader.dataset),),
)
# Init sent_idx to -1 for debugging
data_arr[:]["sent_idx"] = -1
arr_idx = 0
for res_i, res_dict in enumerate(
batched_pred_iter(
model,
dataloader,
config.run_config.dump_preds_accumulation_steps,
sentidx2num_mentions,
)
):
batch_size = len(res_dict["uids"][task_name])
for i in tqdm(range(batch_size), total=batch_size, desc="Saving outputs"):
# res_dict["outputs"][task_name] is dict with keys ['_input__alias_orig_list_pos',
# 'bootleg_pred_1', '_input__sent_idx', '_input__for_dump_gold_cand_K_idx_train',
# '_input__subsent_idx', 0, 1]
sent_idx = res_dict["outputs"][task_name]["_input__sent_idx"][i]
# print("INSIDE LOOP", sent_idx, "AT", i)
subsent_idx = res_dict["outputs"][task_name]["_input__subsent_idx"][i]
alias_orig_list_pos = res_dict["outputs"][task_name][
"_input__alias_orig_list_pos"
][i]
gold_cand_K_idx_train = res_dict["outputs"][task_name][
"_input__for_dump_gold_cand_K_idx_train"
][i]
data_arr[arr_idx]["K"] = K
data_arr[arr_idx]["hidden_size"] = config.model_config.hidden_size
data_arr[arr_idx]["sent_idx"] = sent_idx
data_arr[arr_idx]["subsent_idx"] = subsent_idx
data_arr[arr_idx]["alias_list_pos"] = alias_orig_list_pos
# This will give all aliases seen by the model during training, independent of if it's gold or not
data_arr[arr_idx]["final_loss_true"] = gold_cand_K_idx_train
# get max for each alias, probs is K
max_probs = res_dict["probs"][task_name][i].max(axis=0)
pred_cands = res_dict["probs"][task_name][i].argmax(axis=0)
data_arr[arr_idx]["final_loss_pred"] = pred_cands
data_arr[arr_idx]["final_loss_prob"] = max_probs
data_arr[arr_idx]["final_loss_cand_probs"] = res_dict["probs"][task_name][
i
].reshape(1, -1)
arr_idx += 1
del res_dict
# Merge all memmap files
log_rank_0_info(
logger,
f"Finished dumping {len(dataloader.dataset)} samples to memmap file. Saved to {final_unmerged_memmap}",
)
# for i in range(len(mmap_file)):
# si = mmap_file[i]["sent_idx"]
# if -1 == si:
# import pdb
# pdb.set_trace()
# assert si != -1, f"{i} {mmap_file[i]}"
return final_unmerged_memmap, emb_file_config
def collect_and_merge_results(
unmerged_entity_emb_file,
emb_file_config,
config,
sent_idx2num_mens,
sent_idx2row,
save_folder,
entity_symbols,
):
"""Merge mentions, filter non-gold labels, and save to file.
Args:
unmerged_entity_emb_file: memmap file from dump step
emb_file_config: config file for loading memmap file
config: model config
sent_idx2num_mens: Dict sentence idx to number of mentions
sent_idx2row: Dict sentence idx to row of eval data
save_folder: folder to save results
entity_symbols: entity symbols
Returns: saved prediction file, total mentions seen
"""
num_processes = min(
config.run_config.dataset_threads, int(multiprocessing.cpu_count() * 0.9)
)
cache_dir = os.path.join(save_folder, "cache")
utils.ensure_dir(cache_dir)
trie_candidate_map_folder = None
trie_qid2eid_file = None
# Save the alias->QID candidate map and the QID->EID mapping in memory efficient structures for faster
# prediction dumping
if num_processes > 1:
entity_prep_dir = data_utils.get_emb_prep_dir(config.data_config)
trie_candidate_map_folder = os.path.join(
entity_prep_dir, "for_dumping_preds", "alias_cand_trie"
)
utils.ensure_dir(trie_candidate_map_folder)
check_and_create_alias_cand_trie(trie_candidate_map_folder, entity_symbols)
trie_qid2eid_file = os.path.join(
entity_prep_dir, "for_dumping_preds", "qid2eid_trie"
)
if not os.path.exists(trie_qid2eid_file):
assert isinstance(entity_symbols._qid2eid, VocabularyTrie)
entity_symbols._qid2eid.dump(trie_qid2eid_file)
# write to file (M x hidden x size for each data point -- next step will deal with recovering original sentence
# indices for overflowing sentences)
merged_entity_emb_file = os.path.join(save_folder, "entity_embs_unmerged.mmap")
K = entity_symbols.max_candidates + (not config.data_config.train_in_candidates)
merged_storage_type = np.dtype(
[
("hidden_size", int),
("sent_idx", int),
("alias_list_pos", int),
("final_loss_pred", int),
("final_loss_prob", float),
("final_loss_cand_probs", float, K),
]
)
unmerged_storage_type = np.dtype(
np.load(emb_file_config, allow_pickle=True).tolist()
)
result_file = get_result_file(save_folder)
log_rank_0_debug(logger, f"Writing predictions to {result_file}...")
merge_subsentences(
num_processes=num_processes,
subset_sent_idx2num_mens=sent_idx2num_mens,
cache_folder=cache_dir,
to_save_file=merged_entity_emb_file,
to_save_storage=merged_storage_type,
to_read_file=unmerged_entity_emb_file,
to_read_storage=unmerged_storage_type,
)
write_data_labels(
num_processes=num_processes,
merged_entity_emb_file=merged_entity_emb_file,
merged_storage_type=merged_storage_type,
sent_idx2row=sent_idx2row,
cache_folder=cache_dir,
out_file=result_file,
entity_dump=entity_symbols,
train_in_candidates=config.data_config.train_in_candidates,
max_candidates=entity_symbols.max_candidates,
trie_candidate_map_folder=trie_candidate_map_folder,
trie_qid2eid_file=trie_qid2eid_file,
)
filt_emb_data = np.memmap(
merged_entity_emb_file, dtype=merged_storage_type, mode="r"
)
total_mentions_seen = len(filt_emb_data)
filt_emb_data = None
# Cleanup cache - sometimes the file in cache_dir is still open so we need to retry to delete it
try_rmtree(cache_dir)
log_rank_0_debug(
logger,
f"Wrote predictions to {result_file} with {total_mentions_seen} mentions",
)
return result_file, total_mentions_seen
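# Illustrative sketch (not part of the original module): the typical dump-then-merge
# pipeline, with hypothetical variable names.
#
#   unmerged_file, dtype_file = dump_model_outputs(
#       model, dataloader, config, sent_idx2num_mens, save_folder, entity_symbols,
#       NED_TASK, overwrite_data=False,
#   )
#   result_file, n_mentions = collect_and_merge_results(
#       unmerged_file, dtype_file, config, sent_idx2num_mens, sent_idx2row,
#       save_folder, entity_symbols,
#   )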
def merge_subsentences(
num_processes,
subset_sent_idx2num_mens,
cache_folder,
to_save_file,
to_save_storage,
to_read_file,
to_read_storage,
):
"""
Merge and flatten sub-sentences back into full sentences.
Flattens all sentences back together over sub-sentences, removing the PAD
aliases from the data. I.e., converts from sent_idx -> array of values to
(sent_idx, alias_idx) -> value with varying numbers of aliases per
sentence.
Args:
num_processes: number of processes
subset_sent_idx2num_mens: Dict of sentence index to number of mentions for this batch
cache_folder: cache directory
to_save_file: memmap file to save results to
to_save_storage: save file storage type
to_read_file: memmap file to read predictions from
to_read_storage: read file storage type
"""
# Compute sent idx to offset so we know where to fill in mentions
cur_offset = 0
sentidx2offset = {}
for k, v in subset_sent_idx2num_mens.items():
sentidx2offset[k] = cur_offset
cur_offset += v
# print("Sent Idx, Num Mens, Offset", k, v, cur_offset)
total_num_mentions = cur_offset
# print("TOTAL", total_num_mentions)
full_pred_data = np.memmap(to_read_file, dtype=to_read_storage, mode="r")
K = int(full_pred_data[0]["K"])
hidden_size = int(full_pred_data[0]["hidden_size"])
# print("TOTAL MENS", total_num_mentions)
filt_emb_data = np.memmap(
to_save_file, dtype=to_save_storage, mode="w+", shape=(total_num_mentions,)
)
filt_emb_data["hidden_size"] = hidden_size
filt_emb_data["sent_idx"][:] = -1
filt_emb_data["alias_list_pos"][:] = -1
all_ids = list(range(0, len(full_pred_data)))
start = time.time()
if num_processes == 1:
seen_ids = merge_subsentences_single(
K,
hidden_size,
all_ids,
filt_emb_data,
full_pred_data,
sentidx2offset,
)
else:
# Get trie for sentence start map
trie_folder = os.path.join(cache_folder, "bootleg_sent_idx2num_mens")
utils.ensure_dir(trie_folder)
trie_file = os.path.join(trie_folder, "sentidx.marisa")
utils.create_single_item_trie(sentidx2offset, out_file=trie_file)
# Chunk up data
chunk_size = int(np.ceil(len(full_pred_data) / num_processes))
row_idx_set_chunks = [
all_ids[ids : ids + chunk_size]
for ids in range(0, len(full_pred_data), chunk_size)
]
# Start pool
input_args = [[K, hidden_size, chunk] for chunk in row_idx_set_chunks]
log_rank_0_debug(
logger, f"Merging sentences together with {num_processes} processes"
)
pool = multiprocessing.Pool(
processes=num_processes,
initializer=merge_subsentences_initializer,
initargs=[
to_save_file,
to_save_storage,
to_read_file,
to_read_storage,
trie_file,
],
)
seen_ids = set()
for sent_ids_seen in pool.imap_unordered(
merge_subsentences_hlp, input_args, chunksize=1
):
for emb_id in sent_ids_seen:
assert (
emb_id not in seen_ids
), f"{emb_id} already seen, something went wrong with sub-sentences"
seen_ids.add(emb_id)
pool.close()
pool.join()
filt_emb_data = np.memmap(to_save_file, dtype=to_save_storage, mode="r")
# for i in range(len(filt_emb_data)):
# si = filt_emb_data[i]["sent_idx"]
# al_test = filt_emb_data[i]["alias_list_pos"]
# if si == -1 or al_test == -1:
# print("BAD", i, filt_emb_data[i])
# import pdb
#
# pdb.set_trace()
logging.debug(f"Saw {len(seen_ids)} sentences")
logging.debug(f"Time to merge sub-sentences {time.time() - start}s")
return
def merge_subsentences_initializer(
to_write_file, to_write_storage, to_read_file, to_read_storage, sentidx2offset_file
):
"""Merge subsentences initializer for multiprocessing.
Args:
to_write_file: file to write
to_write_storage: mmap storage type
to_read_file: file to read
to_read_storage: mmap storage type
sentidx2offset_file: sentence index to offset in mmap data
"""
global filt_emb_data_global
filt_emb_data_global = np.memmap(to_write_file, dtype=to_write_storage, mode="r+")
global full_pred_data_global
full_pred_data_global = np.memmap(to_read_file, dtype=to_read_storage, mode="r")
global sentidx2offset_marisa_global
sentidx2offset_marisa_global = utils.load_single_item_trie(sentidx2offset_file)
def merge_subsentences_hlp(args):
"""Merge subsentences multiprocessing subprocess helper."""
K, hidden_size, r_idx_set = args
return merge_subsentences_single(
K,
hidden_size,
r_idx_set,
filt_emb_data_global,
full_pred_data_global,
sentidx2offset_marisa_global,
)
def merge_subsentences_single(
K,
hidden_size,
r_idx_set,
filt_emb_data,
full_pred_data,
sentidx2offset,
):
"""
Merge subsentences in a single process.
Flattens out the results from `full_pred_data` so each line of
`filt_emb_data` is one alias prediction.
Args:
K: number candidates
hidden_size: hidden size
r_idx_set: batch result index
filt_emb_data: mmap embedding file to write
full_pred_data: mmap result file to read
sentidx2offset: sentence to emb data offset
"""
seen_ids = set()
for r_idx in r_idx_set:
row = full_pred_data[r_idx]
# get corresponding row to start writing into condensed memory mapped file
sent_idx = str(row["sent_idx"])
if isinstance(sentidx2offset, dict):
sent_start_idx = sentidx2offset[sent_idx]
else:
# Get from Trie
sent_start_idx = sentidx2offset[sent_idx][0][0]
# print("R IDS", r_idx, row["sent_idx"], "START", sent_start_idx)
# for each VALID mention, need to write into original alias list pos in list
true_val = row["final_loss_true"]
alias_orig_pos = row["alias_list_pos"]
# Because we are using mentions that include both true and false golds, true_val == -1 only for
# padded mentions or sub-sentence mentions
if true_val != -1:
# print(
# "INSIDE MERGE", "I", i, "SENT", sent_idx, "TRUE", true_val, "ALIAS ORIG POS", alias_orig_pos,
# "START SENT IDX", sent_start_idx, "EMB ID", sent_start_idx + alias_orig_pos
# )
# id in condensed embedding
emb_id = sent_start_idx + alias_orig_pos
assert (
emb_id not in seen_ids
), f"{emb_id} already seen, something went wrong with sub-sentences"
seen_ids.add(emb_id)
filt_emb_data["sent_idx"][emb_id] = sent_idx
filt_emb_data["alias_list_pos"][emb_id] = alias_orig_pos
filt_emb_data["final_loss_pred"][emb_id] = row["final_loss_pred"]
filt_emb_data["final_loss_prob"][emb_id] = row["final_loss_prob"]
filt_emb_data["final_loss_cand_probs"][emb_id] = row[
"final_loss_cand_probs"
]
return seen_ids
def get_sental2embid(merged_entity_emb_file, merged_storage_type):
"""Get sent_idx, alias_idx mapping to emb idx for quick lookup.
Args:
merged_entity_emb_file: memmap file after merge sentences
merged_storage_type: file storage type
Returns: Dict of f"{sent_idx}_{alias_idx}" -> index in merged_entity_emb_file
"""
filt_emb_data = np.memmap(
merged_entity_emb_file, dtype=merged_storage_type, mode="r"
)
sental2embid = {}
for i, row in tqdm(
enumerate(filt_emb_data),
total=len(filt_emb_data),
desc="Getting sental2emb map",
):
sent_idx = row["sent_idx"]
alias_idx = row["alias_list_pos"]
assert (
sent_idx != -1 and alias_idx != -1
), f"{i} {row} Has Sent {sent_idx}, Al {alias_idx}"
# Keep as string for Marisa trie later
sental2embid[f"{sent_idx}_{alias_idx}"] = i
return sental2embid
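# Illustrative sketch (not part of the original module): the returned dict is keyed by
# "{sent_idx}_{alias_idx}" strings so it can later be stored in a marisa trie. The key
# below is hypothetical.
#
#   sental2embid = get_sental2embid(merged_entity_emb_file, merged_storage_type)
#   emb_row = sental2embid["12_0"]  # row in the merged memmap for sentence 12, alias 0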
def write_data_labels(
num_processes,
merged_entity_emb_file,
merged_storage_type,
sent_idx2row,
cache_folder,
out_file,
entity_dump,
train_in_candidates,
max_candidates,
trie_candidate_map_folder=None,
trie_qid2eid_file=None,
):
"""Take the flattened data from merge_subsentences and write out predictions.
Args:
num_processes: number of processes
merged_entity_emb_file: input memmap file after merge sentences
merged_storage_type: input file storage type
sent_idx2row: Dict of sentence idx to row relevant to this subbatch
cache_folder: folder to save temporary outputs
out_file: final output file for predictions
entity_dump: entity dump
train_in_candidates: if True, assumes gold QIDs are in the candidate lists (no NC entity is added)
max_candidates: maximum number of candidates
trie_candidate_map_folder: folder where trie of alias->candidate map is stored for parallel processing
trie_qid2eid_file: file where trie of qid->eid map is stored for parallel processing
"""
st = time.time()
sental2embid = get_sental2embid(merged_entity_emb_file, merged_storage_type)
log_rank_0_debug(logger, f"Finished getting sentence map {time.time() - st}s")
total_input = len(sent_idx2row)
if num_processes == 1:
filt_emb_data = np.memmap(
merged_entity_emb_file, dtype=merged_storage_type, mode="r"
)
write_data_labels_single(
sentidx2row=sent_idx2row,
output_file=out_file,
filt_emb_data=filt_emb_data,
sental2embid=sental2embid,
alias_cand_map=entity_dump.get_alias2qids_dict(),
qid2eid=entity_dump.get_qid2eid_dict(),
train_in_cands=train_in_candidates,
max_cands=max_candidates,
)
else:
assert (
trie_candidate_map_folder is not None
), "trie_candidate_map_folder is None and you have parallel turned on"
assert (
trie_qid2eid_file is not None
), "trie_qid2eid_file is None and you have parallel turned on"
# Get trie of sentence map
trie_folder = os.path.join(cache_folder, "bootleg_sental2embid")
utils.ensure_dir(trie_folder)
trie_file = os.path.join(trie_folder, "sentidx.marisa")
utils.create_single_item_trie(sental2embid, out_file=trie_file)
# Chunk file for parallel writing
# We do not use TemporaryFolders as the temp dir may not have enough space for large files
create_ex_indir = os.path.join(cache_folder, "_bootleg_eval_temp_indir")
utils.ensure_dir(create_ex_indir)
create_ex_outdir = os.path.join(cache_folder, "_bootleg_eval_temp_outdir")
utils.ensure_dir(create_ex_outdir)
chunk_input = int(np.ceil(total_input / num_processes))
logger.debug(
f"Chunking up {total_input} lines into subfiles of size {chunk_input} lines"
)
# Chunk up dictionary of data for parallel processing
input_files = []
i = 0
cur_lines = 0
file_split = os.path.join(create_ex_indir, f"out{i}.jsonl")
open_file = open(file_split, "w")
for s_idx in sent_idx2row:
if cur_lines >= chunk_input:
open_file.close()
input_files.append(file_split)
cur_lines = 0
i += 1
file_split = os.path.join(create_ex_indir, f"out{i}.jsonl")
open_file = open(file_split, "w")
line = sent_idx2row[s_idx]
open_file.write(ujson.dumps(line, ensure_ascii=False) + "\n")
cur_lines += 1
open_file.close()
input_files.append(file_split)
# Generate input/output pairs
output_files = [
in_file_name.replace(create_ex_indir, create_ex_outdir)
for in_file_name in input_files
]
log_rank_0_debug(logger, "Done chunking files. Starting pool")
pool = multiprocessing.Pool(
processes=num_processes,
initializer=write_data_labels_initializer,
initargs=[
merged_entity_emb_file,
merged_storage_type,
trie_file,
train_in_candidates,
max_candidates,
trie_candidate_map_folder,
trie_qid2eid_file,
],
)
input_args = list(zip(input_files, output_files))
total = 0
for res in pool.imap(write_data_labels_hlp, input_args, chunksize=1):
total += 1
pool.close()
pool.join()
# Merge output files to final file
log_rank_0_debug(logger, "Merging output files")
with open(out_file, "wb") as outfile:
for filename in glob.glob(os.path.join(create_ex_outdir, "*")):
if filename == out_file:
# don't want to copy the output into the output
continue
with open(filename, "rb") as readfile:
shutil.copyfileobj(readfile, outfile)
def write_data_labels_initializer(
merged_entity_emb_file,
merged_storage_type,
sental2embid_file,
train_in_candidates,
max_cands,
trie_candidate_map_folder,
trie_qid2eid_file,
):
"""
Write data labels multiprocessing initializer.
Args:
merged_entity_emb_file: flattened embedding input file
merged_storage_type: mmap storage type
sental2embid_file: sentence, alias -> embedding id mapping
train_in_candidates: train in candidates flag
max_cands: max candidates
trie_candidate_map_folder: alias trie folder
trie_qid2eid_file: qid to eid trie file
"""
global filt_emb_data_global
filt_emb_data_global = np.memmap(
merged_entity_emb_file, dtype=merged_storage_type, mode="r"
)
global sental2embid_global
sental2embid_global = utils.load_single_item_trie(sental2embid_file)
global alias_cand_trie_global
alias_cand_trie_global = TwoLayerVocabularyScoreTrie(
load_dir=trie_candidate_map_folder
)
global qid2eid_global
qid2eid_global = VocabularyTrie(load_dir=trie_qid2eid_file)
global train_in_candidates_global
train_in_candidates_global = train_in_candidates
global max_cands_global
max_cands_global = max_cands
def write_data_labels_hlp(args):
"""Write data labels multiprocess helper function."""
input_file, output_file = args
s_idx2row = {}
with open(input_file) as in_f:
for line in in_f:
line = ujson.loads(line)
s_idx2row[str(line["sent_idx_unq"])] = line
return write_data_labels_single(
s_idx2row,
output_file,
filt_emb_data_global,
sental2embid_global,
alias_cand_trie_global,
qid2eid_global,
train_in_candidates_global,
max_cands_global,
)
def write_data_labels_single(
sentidx2row,
output_file,
filt_emb_data,
sental2embid,
alias_cand_map,
qid2eid,
train_in_cands,
max_cands,
):
"""Write data labels single subprocess function.
Will take the alias predictions and merge them back by sentence to be written out.
Args:
sentidx2row: sentence index to raw eval data row
output_file: output file
filt_emb_data: mmap embedding data (one prediction per row)
sental2embid: sentence index, alias index -> embedding row id
alias_cand_map: alias to candidate map
qid2eid: qid to entity id map
train_in_cands: training in candidates flag
max_cands: maximum candidates
"""
with open(output_file, "w") as f_out:
for sent_idx in sentidx2row:
line = sentidx2row[sent_idx]
aliases = line["aliases"]
char_spans = line["char_spans"]
assert sent_idx == str(line["sent_idx_unq"])
qids = []
ctx_emb_ids = []
entity_ids = []
probs = []
cands = []
cand_probs = []
entity_cands_qid = map_aliases_to_candidates(
train_in_cands, max_cands, alias_cand_map, aliases
)
# eid is entity id
entity_cands_eid = map_candidate_qids_to_eid(entity_cands_qid, qid2eid)
for al_idx, alias in enumerate(aliases):
sent_idx_key = f"{sent_idx}_{al_idx}"
assert (
sent_idx_key in sental2embid
), f"Dumped prediction data does not match data file. Can not find {sent_idx} - {al_idx}"
if isinstance(sental2embid, dict):
emb_idx = sental2embid[sent_idx_key]
else:
# Get from Trie
emb_idx = sental2embid[sent_idx_key][0][0]
# We will concatenate all contextualized embeddings at the end and need the row id to be offset here
ctx_emb_ids.append(emb_idx)
prob = filt_emb_data[emb_idx]["final_loss_prob"]
prob = prob if not math.isnan(prob) else None
cand_prob = strip_nan(filt_emb_data[emb_idx]["final_loss_cand_probs"])
pred_cand = filt_emb_data[emb_idx]["final_loss_pred"]
eid = entity_cands_eid[al_idx][pred_cand]
qid = entity_cands_qid[al_idx][pred_cand]
qids.append(qid)
probs.append(prob)
cands.append(list(entity_cands_qid[al_idx]))
cand_probs.append(list(cand_prob))
entity_ids.append(eid)
line["qids"] = qids
line["probs"] = probs
line["cands"] = cands
line["cand_probs"] = cand_probs
line["entity_ids"] = entity_ids
line["char_spans"] = char_spans
f_out.write(ujson.dumps(line, ensure_ascii=False) + "\n")
| bootleg-master | bootleg/utils/eval_utils.py |
"""Classes init."""
| bootleg-master | bootleg/utils/classes/__init__.py |
"""Dotted dict class."""
import keyword
import re
import string
import ujson
class DottedDict(dict):
"""
Dotted dictionary.
Override for the dict object to allow referencing of keys as attributes, i.e. dict.key.
"""
def __init__(self, *args, **kwargs):
"""Dotted dict initializer."""
for arg in args:
if isinstance(arg, dict):
self._parse_input_(arg)
elif isinstance(arg, list):
for k, v in arg:
self.__setitem__(k, v)
elif hasattr(arg, "__iter__"):
for k, v in list(arg):
self.__setitem__(k, v)
if kwargs:
self._parse_input_(kwargs)
def __delattr__(self, item):
"""Delete attr."""
self.__delitem__(item)
def __delitem__(self, key):
"""Delete item."""
super(DottedDict, self).__delitem__(key)
del self.__dict__[key]
def __getattr__(self, attr):
"""Get attr."""
try:
return self.__dict__[attr]
# Do this to match python default behavior
except KeyError:
raise AttributeError(attr)
def __getitem__(self, key):
"""Get item."""
return self.__dict__[key]
def __repr__(self):
"""Wrap the returned dict in DottedDict() on output."""
return "{0}({1})".format(
type(self).__name__, super(DottedDict, self).__repr__()
)
def __setattr__(self, key, value):
"""Set attr."""
# No need to run _is_valid_identifier since a syntax error is raised if invalid attr name
self.__setitem__(key, value)
def __setitem__(self, key, value):
"""Set item."""
try:
self._is_valid_identifier_(key)
except ValueError:
if not keyword.iskeyword(key):
key = self._make_safe_(key)
else:
raise ValueError('Key "{0}" is a reserved keyword.'.format(key))
super(DottedDict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def _is_valid_identifier_(self, identifier):
"""Test the key name for valid identifier status as considered by the python lexer.
Also check that the key name is not a python keyword.
https://stackoverflow.com/questions/12700893/how-to-check-if-a-string-is-a-valid-python-identifier-including-keyword-check
"""
if re.match("[a-zA-Z_][a-zA-Z0-9_]*$", str(identifier)):
if not keyword.iskeyword(identifier):
return True
raise ValueError('Key "{0}" is not a valid identifier.'.format(identifier))
def _make_safe_(self, key):
"""Replace spaces and other invalid characters in the key with _ to make a valid attr name."""
key = str(key)
allowed = string.ascii_letters + string.digits + "_" + "/"
# Replace spaces with _
if " " in key:
key = key.replace(" ", "_")
# Find invalid characters for use of key as attr
diff = set(key).difference(set(allowed))
# Replace invalid characters with _
if diff:
for char in diff:
key = key.replace(char, "_")
# Add _ if key begins with int
try:
int(key[0])
except ValueError:
pass
else:
key = "_{0}".format(key)
return key
def _parse_input_(self, input_item):
"""Parse the input item if dict into the dotted_dict constructor."""
for key, value in input_item.items():
if isinstance(value, dict):
value = DottedDict(**{str(k): v for k, v in value.items()})
if isinstance(value, list):
_list = []
for item in value:
if isinstance(item, dict):
_list.append(DottedDict(item))
else:
_list.append(item)
value = _list
self.__setitem__(key, value)
def copy(self):
"""Ensure copy object is DottedDict, not dict."""
return type(self)(self)
def to_dict(self):
"""Recursive conversion back to dict."""
out = dict(self)
for key, value in out.items():
if value is self:
out[key] = out
elif hasattr(value, "to_dict"):
out[key] = value.to_dict()
elif isinstance(value, list):
_list = []
for item in value:
if hasattr(item, "to_dict"):
_list.append(item.to_dict())
else:
_list.append(item)
out[key] = _list
return out
class PreserveKeysDottedDict(dict):
"""
Override auto correction of key names to safe attr names.
Can result in errors when using attr name resolution.
"""
def __init__(self, *args, **kwargs):
"""Preserve keys DottedDict initializer."""
for arg in args:
if isinstance(arg, dict):
self._parse_input_(arg)
elif isinstance(arg, list):
for k, v in arg:
self.__setitem__(k, v)
elif hasattr(arg, "__iter__"):
for k, v in list(arg):
self.__setitem__(k, v)
if kwargs:
self._parse_input_(kwargs)
def __delattr__(self, item):
"""Delete attr."""
self.__delitem__(item)
def __delitem__(self, key):
"""Delete item."""
super(PreserveKeysDottedDict, self).__delitem__(key)
del self.__dict__[key]
def __getattr__(self, attr):
"""Get attr."""
try:
return self.__dict__[attr]
# Do this to match python default behavior
except KeyError:
raise AttributeError(attr)
def __getitem__(self, key):
"""Get item."""
return self.__dict__[key]
def __repr__(self):
"""Wrap the returned dict in PreserveKeysDottedDict() on output."""
return "{0}({1})".format(
type(self).__name__, super(PreserveKeysDottedDict, self).__repr__()
)
def __setattr__(self, key, value):
"""Set attr."""
self.__setitem__(key, value)
def __setitem__(self, key, value):
"""Set item."""
super(PreserveKeysDottedDict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def _parse_input_(self, input_item):
"""Parse the input item if dict into the dotted_dict constructor."""
for key, value in input_item.items():
if isinstance(value, dict):
value = PreserveKeysDottedDict(**{str(k): v for k, v in value.items()})
if isinstance(value, list):
_list = []
for item in value:
if isinstance(item, dict):
_list.append(PreserveKeysDottedDict(item))
else:
_list.append(item)
value = _list
self.__setitem__(key, value)
def copy(self):
"""Ensure copy object is PreserveKeysDottedDict, not dict."""
return type(self)(self)
def to_dict(self):
"""Recursive conversion back to dict."""
out = dict(self)
for key, value in out.items():
if value is self:
out[key] = out
elif hasattr(value, "to_dict"):
out[key] = value.to_dict()
elif isinstance(value, list):
_list = []
for item in value:
if hasattr(item, "to_dict"):
_list.append(item.to_dict())
else:
_list.append(item)
out[key] = _list
return out
def create_bool_dotted_dict(d_dict):
"""Create boolean Dotted Dict."""
if (type(d_dict) is DottedDict) or (type(d_dict) is dict):
d_dict = DottedDict(d_dict)
if type(d_dict) is str and is_json(d_dict):
d_dict = DottedDict(ujson.loads(d_dict))
if type(d_dict) is DottedDict:
for k in d_dict:
if d_dict[k] == "True":
d_dict[k] = True
elif d_dict[k] == "False":
d_dict[k] = False
elif (
(type(d_dict[k]) is DottedDict)
or (type(d_dict[k]) is dict)
or (type(d_dict[k]) is str and is_json(d_dict[k]))
):
d_dict[k] = create_bool_dotted_dict(d_dict[k])
elif type(d_dict[k]) is list:
for i in range(len(d_dict[k])):
d_dict[k][i] = create_bool_dotted_dict(d_dict[k][i])
return d_dict
def is_number(s):
"""Return True if string is a number."""
try:
float(s)
return True
except ValueError:
return False
def is_json(value):
"""Return True if the value parses as JSON (excluding bare numbers)."""
# ujson will parse a bare number string as valid JSON; we don't want to treat those as JSON
if is_number(value):
return False
try:
ujson.loads(value)
except ValueError:
return False
return True
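# Illustrative sketch (not part of the original module): DottedDict allows attribute
# access to keys, and create_bool_dotted_dict recursively converts "True"/"False" strings.
# The config keys and values below are hypothetical.
def _example_dotted_dict():
    cfg = create_bool_dotted_dict({"run_config": {"eval_only": "False", "topk": 5}})
    assert cfg.run_config.eval_only is False
    assert cfg.run_config.topk == 5
    return cfg.to_dict()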
| bootleg-master | bootleg/utils/classes/dotted_dict.py |
"""Nested vocab tries."""
import itertools
import logging
import os
from pathlib import Path
from typing import Any, Callable, Dict, List, Set, Tuple, Union
import marisa_trie
import numpy as np
import ujson
from numba import njit
from tqdm.auto import tqdm
from bootleg.utils.utils import dump_json_file, load_json_file
numba_logger = logging.getLogger("numba")
numba_logger.setLevel(logging.WARNING)
def flatten(arr):
"""Flatten array."""
return [item for sublist in arr for item in sublist]
@njit
def index(array, item):
"""Return index of item in array."""
for idx, val in np.ndenumerate(array):
if val == item:
# ndenumerate returns a tuple index; we have a 1D array and only care about the first value
return idx[0]
return None
def get_cand_with_score(
max_value: int, value: List[Tuple[str, int]], vocabulary: marisa_trie
):
"""Get the candidate keys and scores as a flattened list of numerical values (vocab ids interleaved with scores)."""
assert type(value) is list
if len(value) > 0:
assert all(type(v[0]) is str for v in value)
assert all((type(v[1]) is float) or (type(v[1]) is int) for v in value)
new_value = flatten([[vocabulary[p[0]], p[1]] for p in value])[: (2 * max_value)]
assert -1 not in new_value
overflowed = len(new_value) == (2 * max_value)
new_value.extend([-1] * (2 * max_value - len(new_value)))
return tuple(new_value), overflowed
def get_key_value_pair(
max_value: int,
value: List[Tuple[str, int]],
key_vocabulary: marisa_trie,
value_vocabulary: marisa_trie,
):
"""Get the key value pairs as list of ints."""
new_value = flatten(
[[key_vocabulary[p[0]], value_vocabulary[p[1]]] for p in value]
)[: (2 * max_value)]
assert -1 not in new_value
overflowed = len(new_value) == (2 * max_value)
new_value.extend([-1] * (2 * max_value - len(new_value)))
return tuple(new_value), overflowed
def inverse_qid_cand_with_score(value: List[int], itos: Callable[[int], str]):
"""Return entity candidate and prob score from numerical values."""
assert len(value) % 2 == 0
new_value = []
for i in range(0, len(value), 2):
# -1 values are only added at the end as padding
if value[i] == -1:
break
new_value.append([itos(value[i]), value[i + 1]])
return new_value
def inverse_key_value_pair(
value: List[int], key_itos: Callable[[int], str], value_itos: Callable[[int], str]
):
"""Return list of key value pairs from numerical values."""
assert len(value) % 2 == 0
new_value = []
for i in range(0, len(value), 2):
# -1 values are only added at the end as padding
if value[i] == -1:
break
new_value.append([key_itos(value[i]), value_itos(value[i + 1])])
return new_value
class VocabularyTrie:
"""String (vocabulary) to int trie.
This is basically a marisa trie except that we maintain the original indexes given in the input dict.
This helps keep indexes the same even if underlying trie is different.
"""
def __init__(
self,
load_dir: str = None,
input_dict: Dict[str, int] = None,
) -> None:
"""Vocab trie initializer."""
# One long integer
self._get_fmt_string = lambda x: "<'l'"
if load_dir is not None:
self.load(load_dir)
self._loaded_from_dir = load_dir
else:
self._stoi: marisa_trie = marisa_trie.Trie(input_dict.keys())
# Array from internal trie id to external id from dict
self._itoexti: np.array = (np.ones(len(input_dict)) * -1).astype(int)
# Keep track external ids to prevent duplicates
extis: set = set()
self._max_id = next(iter(input_dict.values()))
for k, exti in input_dict.items():
i = self._stoi[k]
self._itoexti[i] = exti
self._max_id = max(self._max_id, exti)
if exti in extis:
raise ValueError(f"All ids must be unique. {exti} is a duplicate.")
self._loaded_from_dir = None
def dump(self, save_dir):
"""Dump."""
save_dir = Path(save_dir)
save_dir.mkdir(parents=True, exist_ok=True)
dump_json_file(
filename=(save_dir / "config.json"),
contents={"max_id": self._max_id},
)
self._stoi.save(str(save_dir / "vocabulary_trie.marisa"))
np.save(str(save_dir / "itoexti.npy"), self._itoexti)
def load(self, load_dir):
"""Load."""
load_dir = Path(load_dir)
self._max_id = ujson.load(open(load_dir / "config.json"))["max_id"]
self._stoi = marisa_trie.Trie().mmap(str(load_dir / "vocabulary_trie.marisa"))
self._itoexti = np.load(str(load_dir / "itoexti.npy")).astype(int)
def to_dict(self):
"""Convert to dictionary."""
res_dict = {}
for key in self.keys():
res_dict[key] = self.get_value(key)
return res_dict
def get_value(self, key):
"""Get value for key."""
i_value = self._stoi[key]
ext_value = int(self._itoexti[i_value])
return ext_value
def get_key(self, value):
"""Get key for value."""
i_value = index(self._itoexti, value)
if i_value is None:
raise KeyError(f"{value} not in Trie")
return self._stoi.restore_key(i_value)
def keys(self):
"""Get keys."""
return self._stoi.keys()
def is_key_in_trie(self, key):
"""Return if key in trie."""
return key in self._stoi
def is_value_in_trie(self, value):
"""Return if value in trie."""
try:
self.get_key(value)
return True
except KeyError:
return False
def get_max_id(self):
"""Get max id."""
return self._max_id
def __getitem__(self, item):
"""Get item."""
return self.get_value(item)
def __len__(self):
"""Get length."""
return len(self.keys())
def __contains__(self, key):
"""Contain key or not."""
return self.is_key_in_trie(key)
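# Illustrative sketch (not part of the original module): VocabularyTrie preserves the
# external ids given in the input dict even though the underlying marisa trie reorders
# its keys internally. The QIDs and ids below are hypothetical.
def _example_vocabulary_trie():
    trie = VocabularyTrie(input_dict={"Q91": 5, "Q28895": 6})
    assert trie["Q91"] == 5
    assert "Q123" not in trie
    return trie.get_max_id()  # -> 6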
class TwoLayerVocabularyScoreTrie:
"""TwoLayerVocabularyScoreTrie.
This creates a record trie from a string to a list of string candidates. The candidates are either a plain
list of string items or a list of [string item, float score] pairs.
"""
def __init__(
self,
load_dir: str = None,
input_dict: Dict[str, Any] = None,
vocabulary: Union[Dict[str, Any], Set[str]] = None,
max_value: int = None,
) -> None:
"""Paired vocab initializer."""
self._get_fmt_string = lambda x: f"<{'lf'*x}"
if load_dir is not None:
self.load(load_dir)
self._loaded_from_dir = load_dir
else:
if max_value is None:
raise ValueError("max_value cannot be None when creating trie")
self._max_value = max_value
if isinstance(vocabulary, dict):
vocabulary = set(vocabulary.keys())
self._stoi: marisa_trie = marisa_trie.Trie(vocabulary)
self._itos: Callable[[int], str] = lambda x: self._stoi.restore_key(x)
self._record_trie = self.build_trie(input_dict, self._max_value)
self._loaded_from_dir = None
def dump(self, save_dir):
"""Dump."""
# memmapped files behave badly if you try to overwrite them in memory,
# which is what we'd be doing if load_dir == save_dir
if self._loaded_from_dir is None or self._loaded_from_dir != save_dir:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
dump_json_file(
filename=os.path.join(save_dir, "max_value.json"),
contents=self._max_value,
)
self._stoi.save(os.path.join(save_dir, "vocabulary_trie.marisa"))
self._record_trie.save(os.path.join(save_dir, "record_trie.marisa"))
def load(self, load_dir):
"""Load."""
self._max_value = load_json_file(
filename=os.path.join(load_dir, "max_value.json")
)
self._stoi = marisa_trie.Trie().mmap(
os.path.join(load_dir, "vocabulary_trie.marisa")
)
self._itos = lambda x: self._stoi.restore_key(x)
self._record_trie = marisa_trie.RecordTrie(
self._get_fmt_string(self._max_value)
).mmap(os.path.join(load_dir, "record_trie.marisa"))
def to_dict(self, keep_score=True):
"""Convert to dictionary."""
res_dict = {}
for key in self.keys():
res_dict[key] = self.get_value(key, keep_score)
return res_dict
def build_trie(self, input_dict: Dict[str, Any], max_value: int):
"""Build trie."""
all_values = []
all_keys = sorted(list(input_dict.keys()))
total_overflow = 0
for key in tqdm(all_keys, desc="Creating trie"):
# Extract the QID candidate
cand_list = input_dict[key]
# If the scores are not in the candidate list, set them as default 0.0
if len(cand_list) > 0 and not isinstance(cand_list[0], list):
cand_list = [[c, 0.0] for c in cand_list]
new_value, overflow = get_cand_with_score(
max_value=max_value, value=cand_list, vocabulary=self._stoi
)
total_overflow += overflow
all_values.append(new_value)
trie = marisa_trie.RecordTrie(
self._get_fmt_string(max_value), zip(all_keys, all_values)
)
print(
f"There were {total_overflow / len(all_keys):.2%} of items that lost information because max_connections"
f" was too small."
)
return trie
def get_value(self, key, keep_score=True):
"""Get value for key."""
record_trie = self._record_trie
assert key in record_trie
value = record_trie[key]
# Record trie allows keys to have multiple values and returns a list of values for each key.
# As we make the value for each key a list already (to control order/not have to sort again),
# we need to assert there is only a single value
assert len(value) == 1
value = value[0]
return_value = inverse_qid_cand_with_score(value=value, itos=self._itos)
if not keep_score:
return_value = [x[0] for x in return_value]
assert len(return_value) <= self._max_value
return return_value
def keys(self):
"""Get keys."""
return self._record_trie.keys()
def vocab_keys(self):
"""Get vocab keys."""
return self._stoi.keys()
def is_key_in_trie(self, key):
"""Return if key in trie."""
return key in self._record_trie
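# Illustrative sketch (not part of the original module): mapping each alias to scored
# candidates. The aliases, QIDs, and scores below are hypothetical.
def _example_two_layer_trie():
    alias2qids = {"lincoln": [["Q91", 12.0], ["Q28895", 1.0]]}
    trie = TwoLayerVocabularyScoreTrie(
        input_dict=alias2qids,
        vocabulary={"Q91", "Q28895"},
        max_value=2,
    )
    assert trie.get_value("lincoln", keep_score=False) == ["Q91", "Q28895"]
    return trie.get_value("lincoln")  # -> [["Q91", 12.0], ["Q28895", 1.0]]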
class ThreeLayerVocabularyTrie:
"""ThreeLayerVocabularyTrie.
This creates a dict from query -> key -> list of values but
saves it as a trie with query -> flattened lower-level dict.
Note that max_value is the maximum number of values for each possible key.
"""
def __init__(
self,
load_dir: str = None,
input_dict: Dict[str, Any] = None,
key_vocabulary: Union[Dict[str, Any], Set[str]] = None,
value_vocabulary: Union[Dict[str, Any], Set[str]] = None,
max_value: int = None,
) -> None:
"""Dict vocab initializer."""
self._get_fmt_string = lambda x: f"<{'ll'*x}"
if load_dir is not None:
self.load(load_dir)
self._loaded_from_dir = load_dir
else:
if max_value is None:
raise ValueError("max_value cannot be None when creating trie")
if isinstance(key_vocabulary, dict):
key_vocabulary = set(key_vocabulary.keys())
if isinstance(value_vocabulary, dict):
value_vocabulary = set(value_vocabulary.keys())
self._max_value = (
max_value * 2
) # Add a buffer to try to keep all connections - it's imperfect
self._key_stoi: marisa_trie = marisa_trie.Trie(key_vocabulary)
self._key_itos: Callable[[int], str] = lambda x: self._key_stoi.restore_key(
x
)
self._value_stoi: marisa_trie = marisa_trie.Trie(value_vocabulary)
self._value_itos: Callable[
[int], str
] = lambda x: self._value_stoi.restore_key(x)
self._record_trie = self.build_trie(input_dict, self._max_value)
self._loaded_from_dir = None
def dump(self, save_dir):
"""Dump."""
# memmapped files behave badly if you try to overwrite them in memory,
# which is what we'd be doing if load_dir == save_dir
if self._loaded_from_dir is None or self._loaded_from_dir != save_dir:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
dump_json_file(
filename=os.path.join(save_dir, "max_value.json"),
contents=self._max_value,
)
self._key_stoi.save(os.path.join(save_dir, "key_vocabulary_trie.marisa"))
self._value_stoi.save(
os.path.join(save_dir, "value_vocabulary_trie.marisa")
)
self._record_trie.save(os.path.join(save_dir, "record_trie.marisa"))
def load(self, load_dir):
"""Load."""
self._max_value = load_json_file(
filename=os.path.join(load_dir, "max_value.json")
)
self._key_stoi = marisa_trie.Trie().mmap(
os.path.join(load_dir, "key_vocabulary_trie.marisa")
)
self._key_itos = lambda x: self._key_stoi.restore_key(x)
self._value_stoi = marisa_trie.Trie().mmap(
os.path.join(load_dir, "value_vocabulary_trie.marisa")
)
self._value_itos = lambda x: self._value_stoi.restore_key(x)
self._record_trie = marisa_trie.RecordTrie(
self._get_fmt_string(self._max_value)
).mmap(os.path.join(load_dir, "record_trie.marisa"))
def to_dict(self, keep_score=True):
"""Convert to dictionary."""
res_dict = {}
for key in self.keys():
res_dict[key] = self.get_value(key)
return res_dict
def build_trie(self, input_dict: Dict[str, Any], max_value: int):
"""Build trie."""
all_values = []
all_keys = sorted(list(input_dict.keys()))
total_overflow = 0
for key in tqdm(all_keys, desc="Prepping trie data"):
# Extract the QID candidate
cand_list = [
[key2, val]
for key2, values in input_dict[key].items()
for val in values
]
# If the scores are not in the candidate list, set them as default 0.0
new_value, overflow = get_key_value_pair(
max_value=max_value,
value=cand_list,
key_vocabulary=self._key_stoi,
value_vocabulary=self._value_stoi,
)
total_overflow += overflow
all_values.append(new_value)
print(
f"Creating trie with {len(all_keys)} values. This can take a few minutes."
)
trie = marisa_trie.RecordTrie(
self._get_fmt_string(max_value), zip(all_keys, all_values)
)
print(
f"There were {total_overflow / len(all_keys):.2%} of items that lost information because max_connections"
f" was too small."
)
return trie
def get_value(self, key):
"""Get value for query as dict of key -> values."""
assert key in self._record_trie
flattened_value = self._record_trie[key]
# Record trie allows keys to have multiple values and returns a list of values for each key.
# As we make the value for each key a list already (to control order/not have to sort again),
# we need to assert there is only a single value
assert len(flattened_value) == 1
flattened_value = flattened_value[0]
flattened_return_value = inverse_key_value_pair(
value=flattened_value, key_itos=self._key_itos, value_itos=self._value_itos
)
assert len(flattened_return_value) <= self._max_value
return_dict = {}
for k, grped_v in itertools.groupby(flattened_return_value, key=lambda x: x[0]):
return_dict[k] = list(map(lambda x: x[1], grped_v))
return return_dict
def keys(self):
"""Get keys."""
return self._record_trie.keys()
def key_vocab_keys(self):
"""Get key vocab keys."""
return self._key_stoi.keys()
def value_vocab_keys(self):
"""Get value vocab keys."""
return self._value_stoi.keys()
def is_key_in_trie(self, key):
"""Return if key in trie."""
return key in self._record_trie
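# Illustrative sketch (not part of the original module): query -> key -> values, e.g. an
# entity -> relation -> objects mapping. All identifiers below are hypothetical.
def _example_three_layer_trie():
    ent2rel2objs = {"Q91": {"occupation": ["politician", "lawyer"]}}
    trie = ThreeLayerVocabularyTrie(
        input_dict=ent2rel2objs,
        key_vocabulary={"occupation"},
        value_vocabulary={"politician", "lawyer"},
        max_value=4,
    )
    return trie.get_value("Q91")  # -> {"occupation": ["politician", "lawyer"]}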
| bootleg-master | bootleg/utils/classes/nested_vocab_tries.py |
"""
JSON with comments class.
An example of how to remove comments and trailing commas from JSON before
parsing. You only need the two functions below, `remove_comments()` and
`remove_trailing_commas()` to accomplish this. This script serves as an
example of how to use them but feel free to just copy & paste them into your
own code/projects. Usage:: json_cleaner.py some_file.json Alternatively, you
can pipe JSON into this script and it'll clean it up:: cat some_file.json |
json_cleaner.py Why would you do this? So you can have human-generated .json
files (say, for configuration) that include comments and, really, who wants to
deal with catching all those trailing commas that might be present? Here's an
example of a file that will be successfully cleaned up and JSON-parseable:
.. code-block:: javascript
{
// A comment! You normally can't put these in JSON
"testing": {
"foo": "bar", // <-- A trailing comma! No worries.
}, // <-- Another one!
/*
This style of comments will also be safely removed before parsing
*/
}
FYI: This script will also pretty-print the JSON after it's cleaned up (if
using it from the command line) with an indentation level of 4 (that is, four
spaces).
"""
__version__ = "1.0.0"
__version_info__ = (1, 0, 0)
__license__ = "Unlicense"
__author__ = "Dan McDougall <[email protected]>"
import re
def remove_comments(json_like):
r"""Remove C-style comments from *json_like* and returns the result.
Example::
>>> test_json = '''\
{
"foo": "bar", // This is a single-line comment
"baz": "blah" /* Multi-line
Comment */
}'''
>>> remove_comments('{"foo":"bar","baz":"blah",}')
'{\n "foo":"bar",\n "baz":"blah"\n}'
"""
comments_re = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE,
)
def replacer(match):
s = match.group(0)
if s[0] == "/":
return ""
return s
return comments_re.sub(replacer, json_like)
def remove_trailing_commas(json_like):
"""Remove trailing commas from *json_like* and returns the result.
Example::
>>> remove_trailing_commas('{"foo":"bar","baz":["blah",],}')
'{"foo":"bar","baz":["blah"]}'
"""
trailing_object_commas_re = re.compile(
r'(,)\s*}(?=([^"\\]*(\\.|"([^"\\]*\\.)*[^"\\]*"))*[^"]*$)'
)
trailing_array_commas_re = re.compile(
r'(,)\s*\](?=([^"\\]*(\\.|"([^"\\]*\\.)*[^"\\]*"))*[^"]*$)'
)
# Fix objects {} first
objects_fixed = trailing_object_commas_re.sub("}", json_like)
# Now fix arrays/lists [] and return the result
return trailing_array_commas_re.sub("]", objects_fixed)
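# Illustrative sketch (not part of the original module): chaining both cleaners before
# parsing a commented config string. The config content below is hypothetical.
def _example_clean_json():
    import ujson
    raw = '{"model": "bert", /* backbone */ "layers": [1, 2,],}'
    cleaned = remove_trailing_commas(remove_comments(raw))
    return ujson.loads(cleaned)  # -> {"model": "bert", "layers": [1, 2]}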
| bootleg-master | bootleg/utils/classes/comment_json.py |
"""Emmental dataset and dataloader."""
import logging
from typing import Any, Dict, Optional, Tuple, Union
from emmental import EmmentalDataset
from torch import Tensor
logger = logging.getLogger(__name__)
class RangedEmmentalDataset(EmmentalDataset):
"""
RangedEmmentalDataset dataset.
An advanced dataset class to handle that the input data contains multiple fields
and the output data contains multiple label sets.
Args:
name: The name of the dataset.
X_dict: The feature dict where key is the feature name and value is the
feature.
Y_dict: The label dict where key is the label name and value is
the label, defaults to None.
uid: The unique id key in the X_dict, defaults to None.
data_range: The range of data to select.
"""
def __init__(
self,
name: str,
X_dict: Dict[str, Any],
Y_dict: Optional[Dict[str, Tensor]] = None,
uid: Optional[str] = None,
data_range: Optional[list] = None,
) -> None:
"""Initialize RangedEmmentalDataset."""
super().__init__(name, X_dict, Y_dict, uid)
if data_range is not None:
self.data_range = data_range
else:
self.data_range = list(range(len(next(iter(self.X_dict.values())))))
def __getitem__(
self, index: int
) -> Union[Tuple[Dict[str, Any], Dict[str, Tensor]], Dict[str, Any]]:
"""Get item by index after taking range into account.
Args:
index: The index of the item.
Returns:
Tuple of x_dict and y_dict
"""
return super().__getitem__(self.data_range[index])
def __len__(self) -> int:
"""Total number of items in the dataset."""
return len(self.data_range)
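# Illustrative sketch (not part of the original module): restricting a dataset to a
# subset of indices without copying the underlying tensors. Field names are hypothetical.
#
#   full = RangedEmmentalDataset("ned", X_dict=X_dict, Y_dict=Y_dict, uid="guids")
#   head = RangedEmmentalDataset(
#       "ned", X_dict=X_dict, Y_dict=Y_dict, uid="guids", data_range=list(range(100))
#   )
#   len(head)  # -> 100; head[i] returns the full dataset's item at data_range[i]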
| bootleg-master | bootleg/utils/classes/emmental_data.py |
"""
Bootleg parser utils.
Parses a Bootleg input config into a DottedDict of config values (with
defaults filled in) for running a model.
"""
import argparse
import fileinput
import os
import ujson
import bootleg.utils.classes.comment_json as comment_json
from bootleg.utils.classes.dotted_dict import DottedDict, create_bool_dotted_dict
from bootleg.utils.parser.bootleg_args import config_args
from bootleg.utils.parser.emm_parse_args import (
parse_args as emm_parse_args,
parse_args_to_config as emm_parse_args_to_config,
)
from bootleg.utils.utils import load_yaml_file
def or_none(default):
"""Return a conversion function that maps "none" to None and otherwise casts to the type of default."""
def func(x):
# Convert "none" to proper None object
if x.lower() == "none":
return None
# If default is None (and x is not None), return x without conversion as str
elif default is None:
return str(x)
# Treat bools separately as bool("False") is true
elif isinstance(default, bool):
if x.lower() == "false":
return False
return True
# Otherwise, default has non-None type; convert x to that type
else:
return type(default)(x)
return func
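# Behaviour sketch (added for illustration) of the converters produced above:
#     or_none(5)("none")      -> None
#     or_none(5)("7")         -> 7
#     or_none(True)("false")  -> False   (a plain bool("false") would be True)
#     or_none(None)("abc")    -> "abc"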
def is_number(s):
"""Return True is string is a number."""
try:
float(s)
return True
except ValueError:
return False
def is_json(value):
"""Return True if json."""
# ujson is weird in that a string of a number is a dictionary; we don't want this
if is_number(value):
return False
try:
ujson.loads(value)
except ValueError:
return False
return True
def recursive_keys(dictionary):
"""Recursively yields all keys of dict."""
for key, value in dictionary.items():
if type(value) is dict:
yield key
yield from recursive_keys(value)
else:
yield key
def merge_configs(config_l, config_r, new_config=None):
"""Merge two dotted dict configs."""
if new_config is None:
new_config = {}
for k in config_l:
# If unique to config_l or the same in both configs, add
if k not in config_r or config_l[k] == config_r[k]:
new_config[k] = config_l[k]
# If not unique and different, then they must be dictionaries (that we can recursively merge)
else:
assert type(config_l[k]) in [dict, DottedDict] and type(config_r[k]) in [
dict,
DottedDict,
], f"You have two conflicting values for key {k}: {config_l[k]} vs {config_r[k]}"
new_config[k] = merge_configs(config_l[k], config_r[k])
for k in config_r:
# If unique to config_r or the same in both configs, add
if k not in config_l or config_l[k] == config_r[k]:
new_config[k] = config_r[k]
return new_config
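# Example (added for illustration): nested dicts are merged recursively, while
# conflicting non-dict values trigger the assertion above.
#     merge_configs({"a": 1, "b": {"c": 2}}, {"b": {"d": 3}, "e": 4})
#     -> {"a": 1, "b": {"c": 2, "d": 3}, "e": 4}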
def add_nested_flags_from_config(parser, config_dict, parser_hierarchy, prefix):
"""
Add flags from config file, keeping the hierarchy the same.
When a lower level is needed, parser.add_argument_group is called.
Note, we append the parent key to the --param option (via prefix parameter).
Args:
parser: arg parser to add options to
config_dict: raw config dictionary
        parser_hierarchy: Dict to add parser hierarchy to
prefix: prefix to add to arg parser
"""
for param in config_dict:
if isinstance(config_dict[param], dict):
parser_hierarchy[param] = {}
temp = parser.add_argument_group(f"Bootleg specific {param.split('_')[0]}")
add_nested_flags_from_config(
temp, config_dict[param], parser_hierarchy[param], f"{prefix}{param}."
)
else:
default, description = config_dict[param]
try:
if isinstance(default, str) and is_json(default):
parser.add_argument(
f"--{prefix}{param}",
type=ujson.loads,
default=default,
help=description,
)
elif isinstance(default, list):
if len(default) > 0:
# pass a list as argument
parser.add_argument(
f"--{prefix}{param}",
action="append",
type=type(default[0]),
default=default,
help=description,
)
else:
parser.add_argument(
f"--{prefix}{param}",
action="append",
default=default,
help=description,
)
parser_hierarchy["_global"] = parser
else:
# pass
parser.add_argument(
f"--{prefix}{param}",
type=or_none(default),
default=default,
help=description,
)
parser_hierarchy["_global"] = parser
except argparse.ArgumentError:
print(
f"Could not add flag for param {param} because it was already present."
)
return
def flatten_nested_args_for_parser(args, new_args, groups, prefix):
"""Flatten all parameters to be passed as a single list to arg parse."""
for key in args:
if isinstance(args[key], dict):
if key in groups:
new_args = flatten_nested_args_for_parser(
args[key], new_args, groups, f"{prefix}{key}."
)
else:
new_args.append(f"--{prefix}{key}")
# print("HERE2", vars(args))
if isinstance(args, dict):
new_args.append(f"{ujson.dumps(args[key])}")
else:
new_args.append(f"{ujson.dumps(vars(args)[key])}")
elif isinstance(args[key], list):
for v in args[key]:
new_args.append(f"--{prefix}{key}")
if isinstance(v, dict):
new_args.append(f"{ujson.dumps(v)}")
else:
new_args.append(f"{v}")
else:
new_args.append(f"--{prefix}{key}")
new_args.append(f"{args[key]}")
return new_args
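# Illustration (added; not part of Bootleg): flatten_nested_args_for_parser
# turns a nested config into the flat ["--key", "value", ...] list that
# argparse consumes. For a made-up config such as
#     {"data_config": {"max_seq_len": 64}}
# with "data_config" listed in ``groups``, the result is
#     ["--data_config.max_seq_len", "64"]
# Keys that are dicts but *not* in ``groups`` are json-dumped as a single value.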
def reconstructed_nested_args(args, names, parser_hierarchy, prefix):
"""Reconstruct the arguments and pass them to the necessary subparsers."""
for key, sub_parser in parser_hierarchy.items():
if isinstance(sub_parser, dict):
names[key] = {}
reconstructed_nested_args(args, names[key], sub_parser, f"{prefix}{key}.")
else:
sub_options = [action.dest for action in sub_parser._group_actions]
sub_names = {
name: value
for (name, value) in args._get_kwargs()
if name in sub_options
}
temp = argparse.Namespace(**sub_names)
# remove the prefix from the key
for k, v in temp.__dict__.items():
names[k.replace(f"{prefix}", "")] = v
return
def load_commented_json_file(file):
"""Load commented json file."""
json_out = ""
for line in fileinput.input(file): # Read it all in
json_out += line
almost_json = comment_json.remove_comments(json_out) # Remove comments
proper_json = comment_json.remove_trailing_commas(
almost_json
) # Remove trailing commas
validated = ujson.loads(proper_json) # We now have parseable JSON!
return validated
def get_boot_config(config, parser_hierarchy=None, parser=None, unknown=None):
"""
Return a parsed Bootleg config from config.
Config can be a path to a config file or an already loaded dictionary.
The high level work flow
    1. Reads Bootleg default config (config_args) and adds params to an arg parser,
flattening all hierarchical values into "." values
       E.g., data_config -> word_embedding -> layers becomes --data_config.word_embedding.layers
2. Flattens the given config values into the "." format
3. Adds any unknown values from the first arg parser that parses the config script.
       Allows the user to pass --data_config.word_embedding.layers on the command line to overwrite values in the file
4. Parses the flattened args w.r.t the arg parser
5. Reconstruct the args back into their hierarchical form
Args:
config: model specific config
parser_hierarchy: Dict of hierarchy of config (or None)
parser: arg parser (or None)
unknown: unknown arg values passed from command line to be added to config and overwrite values in file
"""
if unknown is None:
unknown = []
if parser_hierarchy is None:
parser_hierarchy = {}
if parser is None:
parser = argparse.ArgumentParser()
add_nested_flags_from_config(parser, config_args, parser_hierarchy, prefix="")
if type(config) is str:
assert os.path.splitext(config)[1] in [
".json",
".yaml",
], "We only accept json or yaml ending for configs"
if os.path.splitext(config)[1] == ".json":
params = load_commented_json_file(config)
else:
params = load_yaml_file(config)
else:
assert (
type(config) is dict
), "We only support loading configs that are paths to json/yaml files or preloaded configs."
params = config
all_keys = list(recursive_keys(parser_hierarchy))
new_params = flatten_nested_args_for_parser(params, [], groups=all_keys, prefix="")
# update with new args
# unknown must have ["--arg1", "value1", "--arg2", "value2"] as we don't have any action_true args
assert len(unknown) % 2 == 0
assert all(
unknown[idx].startswith(("-", "--")) for idx in range(0, len(unknown), 2)
)
for idx in range(1, len(unknown), 2):
# allow passing -1 for emmental.device argument
assert not unknown[idx].startswith(("-", "--")) or (
unknown[idx - 1] == "--emmental.device" and unknown[idx] == "-1"
)
for idx in range(0, len(unknown), 2):
arg = unknown[idx]
# If override one you already have in json
if arg in new_params:
idx2 = new_params.index(arg)
new_params[idx2 : idx2 + 2] = unknown[idx : idx + 2]
        # If overriding one that is in bootleg_args.py but not in the json
else:
new_params.extend(unknown[idx : idx + 2])
args = parser.parse_args(new_params)
top_names = {}
reconstructed_nested_args(args, top_names, parser_hierarchy, prefix="")
# final_args = argparse.Namespace(**top_names)
final_args = create_bool_dotted_dict(top_names)
# turn_to_dotdicts(final_args)
return final_args
def parse_boot_and_emm_args(config_script, unknown=None):
"""
Merge the Emmental config with the Bootleg config.
As we have an emmental: ... level in our config for emmental commands,
we need to parse those with the Emmental parser and then merge the Bootleg only config values
with the Emmental ones.
Args:
config_script: config script for Bootleg and Emmental args
unknown: unknown arg values passed from command line to overwrite file values
Returns: parsed merged Bootleg and Emmental config
"""
if unknown is None:
unknown = []
config_parser = argparse.ArgumentParser(
description="Bootleg Config",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Modified parse_args to have 'emmental.group' prefixes. This represents a hierarchy in our parser
config_parser, parser_hierarchy = emm_parse_args(parser=config_parser)
# Add Bootleg args and parse
all_args = get_boot_config(config_script, parser_hierarchy, config_parser, unknown)
# These have emmental -> config group -> arg structure for emmental.
    # Must remove that hierarchy to convert to internal Emmental hierarchy
emm_args = {}
for k, v in all_args["emmental"].items():
emm_args[k] = v
del all_args["emmental"]
# create and add Emmental hierarchy
config = emm_parse_args_to_config(create_bool_dotted_dict(emm_args))
    # Merge configs back (merge works on dicts so must convert to dict first)
config = create_bool_dotted_dict(merge_configs(all_args, config))
return config
| bootleg-master | bootleg/utils/parser/parser_utils.py |
"""Parser init."""
| bootleg-master | bootleg/utils/parser/__init__.py |
"""Overrides the Emmental parse_args."""
import argparse
from argparse import ArgumentParser
from typing import Any, Dict, Optional, Tuple
from emmental.utils.utils import (
nullable_float,
nullable_int,
nullable_string,
str2bool,
str2dict,
)
from bootleg.utils.classes.dotted_dict import DottedDict, create_bool_dotted_dict
def parse_args(parser: Optional[ArgumentParser] = None) -> Tuple[ArgumentParser, Dict]:
"""Parse args.
Overrides the default Emmental parser to add the "emmental."
level to the parser so we can parse it correctly with the Bootleg config.
Args:
parser: Argument parser object, defaults to None.
Returns:
The updated argument parser object.
"""
if parser is None:
parser = argparse.ArgumentParser(
"Emmental configuration",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_hierarchy = {"emmental": {}}
# Load meta configuration
meta_config = parser.add_argument_group("Meta configuration")
meta_config.add_argument(
"--emmental.seed",
type=nullable_int,
default=1234,
help="Random seed for all numpy/torch/cuda operations in model and learning",
)
meta_config.add_argument(
"--emmental.verbose",
type=str2bool,
default=True,
help="Whether to print the log information",
)
meta_config.add_argument(
"--emmental.log_path",
type=str,
default="logs",
help="Directory to save running log",
)
meta_config.add_argument(
"--emmental.use_exact_log_path",
type=str2bool,
default=False,
help="Whether to use the exact log directory",
)
parser_hierarchy["emmental"]["_global_meta"] = meta_config
# Load data configuration
data_config = parser.add_argument_group("Data configuration")
data_config.add_argument(
"--emmental.min_data_len", type=int, default=0, help="Minimal data length"
)
data_config.add_argument(
"--emmental.max_data_len",
type=int,
default=0,
help="Maximal data length (0 for no max_len)",
)
parser_hierarchy["emmental"]["_global_data"] = data_config
# Load model configuration
model_config = parser.add_argument_group("Model configuration")
model_config.add_argument(
"--emmental.model_path",
type=nullable_string,
default=None,
help="Path to pretrained model",
)
model_config.add_argument(
"--emmental.device",
type=int,
default=0,
help="Which device to use (-1 for cpu or gpu id (e.g., 0 for cuda:0))",
)
model_config.add_argument(
"--emmental.dataparallel",
type=str2bool,
default=False,
help="Whether to use dataparallel or not",
)
model_config.add_argument(
"--emmental.distributed_backend",
type=str,
default="nccl",
choices=["nccl", "gloo"],
help="Which backend to use for distributed training.",
)
parser_hierarchy["emmental"]["_global_model"] = model_config
# Learning configuration
learner_config = parser.add_argument_group("Learning configuration")
learner_config.add_argument(
"--emmental.optimizer_path",
type=nullable_string,
default=None,
help="Path to optimizer state",
)
learner_config.add_argument(
"--emmental.scheduler_path",
type=nullable_string,
default=None,
help="Path to lr scheduler state",
)
learner_config.add_argument(
"--emmental.fp16",
type=str2bool,
default=False,
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex)"
"instead of 32-bit",
)
learner_config.add_argument(
"--emmental.fp16_opt_level",
type=str,
default="O1",
help="Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
learner_config.add_argument(
"--emmental.local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
learner_config.add_argument(
"--emmental.epochs_learned", type=int, default=0, help="Learning epochs learned"
)
learner_config.add_argument(
"--emmental.n_epochs",
type=int,
default=1,
help="Total number of learning epochs",
)
learner_config.add_argument(
"--emmental.steps_learned", type=int, default=0, help="Learning steps learned"
)
learner_config.add_argument(
"--emmental.n_steps",
type=int,
default=None,
help="Total number of learning steps",
)
learner_config.add_argument(
"--emmental.skip_learned_data",
type=str2bool,
default=False,
help="Iterate through dataloader when steps or epochs learned is true",
)
learner_config.add_argument(
"--emmental.train_split",
nargs="+",
type=str,
default=["train"],
help="The split for training",
)
learner_config.add_argument(
"--emmental.valid_split",
nargs="+",
type=str,
default=["dev"],
help="The split for validation",
)
learner_config.add_argument(
"--emmental.test_split",
nargs="+",
type=str,
default=["test"],
help="The split for testing",
)
learner_config.add_argument(
"--emmental.ignore_index",
type=nullable_int,
default=None,
help="The ignore index, uses for masking samples",
)
learner_config.add_argument(
"--emmental.online_eval",
type=str2bool,
default=False,
help="Whether to perform online evaluation",
)
parser_hierarchy["emmental"]["_global_learner"] = learner_config
# Optimizer configuration
optimizer_config = parser.add_argument_group("Optimizer configuration")
optimizer_config.add_argument(
"--emmental.optimizer",
type=nullable_string,
default="adamw",
choices=[
"asgd",
"adadelta",
"adagrad",
"adam",
"adamw",
"adamax",
"lbfgs",
"rms_prop",
"r_prop",
"sgd",
"sparse_adam",
"bert_adam",
None,
],
help="The optimizer to use",
)
optimizer_config.add_argument(
"--emmental.lr", type=float, default=1e-3, help="Learing rate"
)
optimizer_config.add_argument(
"--emmental.l2", type=float, default=0.0, help="l2 regularization"
)
optimizer_config.add_argument(
"--emmental.grad_clip",
type=nullable_float,
default=None,
help="Gradient clipping",
)
optimizer_config.add_argument(
"--emmental.gradient_accumulation_steps",
type=int,
default=1,
help="Gradient accumulation steps",
)
# ASGD config
optimizer_config.add_argument(
"--emmental.asgd_lambd", type=float, default=0.0001, help="ASGD lambd"
)
optimizer_config.add_argument(
"--emmental.asgd_alpha", type=float, default=0.75, help="ASGD alpha"
)
optimizer_config.add_argument(
"--emmental.asgd_t0", type=float, default=1000000.0, help="ASGD t0"
)
# Adadelta config
optimizer_config.add_argument(
"--emmental.adadelta_rho", type=float, default=0.9, help="Adadelta rho"
)
optimizer_config.add_argument(
"--emmental.adadelta_eps", type=float, default=0.000001, help="Adadelta eps"
)
# Adagrad config
optimizer_config.add_argument(
"--emmental.adagrad_lr_decay", type=float, default=0, help="Adagrad lr_decay"
)
optimizer_config.add_argument(
"--emmental.adagrad_initial_accumulator_value",
type=float,
default=0,
help="Adagrad initial accumulator value",
)
optimizer_config.add_argument(
"--emmental.adagrad_eps", type=float, default=0.0000000001, help="Adagrad eps"
)
# Adam config
optimizer_config.add_argument(
"--emmental.adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="Adam betas",
)
optimizer_config.add_argument(
"--emmental.adam_eps", type=float, default=1e-6, help="Adam eps"
)
optimizer_config.add_argument(
"--emmental.adam_amsgrad",
type=str2bool,
default=False,
help="Whether to use the AMSGrad variant of adam",
)
# AdamW config
optimizer_config.add_argument(
"--emmental.adamw_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="AdamW betas",
)
optimizer_config.add_argument(
"--emmental.adamw_eps", type=float, default=1e-6, help="AdamW eps"
)
optimizer_config.add_argument(
"--emmental.adamw_amsgrad",
type=str2bool,
default=False,
help="Whether to use the AMSGrad variant of AdamW",
)
# Adamax config
optimizer_config.add_argument(
"--emmental.adamax_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="Adamax betas",
)
optimizer_config.add_argument(
"--emmental.adamax_eps", type=float, default=1e-6, help="Adamax eps"
)
# LBFGS config
optimizer_config.add_argument(
"--emmental.lbfgs_max_iter", type=int, default=20, help="LBFGS max iter"
)
optimizer_config.add_argument(
"--emmental.lbfgs_max_eval",
type=nullable_int,
default=None,
help="LBFGS max eval",
)
optimizer_config.add_argument(
"--emmental.lbfgs_tolerance_grad",
type=float,
default=1e-07,
help="LBFGS tolerance grad",
)
optimizer_config.add_argument(
"--emmental.lbfgs_tolerance_change",
type=float,
default=1e-09,
help="LBFGS tolerance change",
)
optimizer_config.add_argument(
"--emmental.lbfgs_history_size",
type=int,
default=100,
help="LBFGS history size",
)
optimizer_config.add_argument(
"--emmental.lbfgs_line_search_fn",
type=nullable_string,
default=None,
help="LBFGS line search fn",
)
# RMSprop config
optimizer_config.add_argument(
"--emmental.rms_prop_alpha", type=float, default=0.99, help="RMSprop alpha"
)
optimizer_config.add_argument(
"--emmental.rms_prop_eps", type=float, default=1e-08, help="RMSprop eps"
)
optimizer_config.add_argument(
"--emmental.rms_prop_momentum", type=float, default=0, help="RMSprop momentum"
)
optimizer_config.add_argument(
"--emmental.rms_prop_centered",
type=str2bool,
default=False,
help="RMSprop centered",
)
# Rprop config
optimizer_config.add_argument(
"--emmental.r_prop_etas",
nargs="+",
type=float,
default=(0.5, 1.2),
help="Rprop etas",
)
optimizer_config.add_argument(
"--emmental.r_prop_step_sizes",
nargs="+",
type=float,
default=(1e-06, 50),
help="Rprop step sizes",
)
# SGD config
optimizer_config.add_argument(
"--emmental.sgd_momentum", type=float, default=0, help="SGD momentum"
)
optimizer_config.add_argument(
"--emmental.sgd_dampening", type=float, default=0, help="SGD dampening"
)
optimizer_config.add_argument(
"--emmental.sgd_nesterov", type=str2bool, default=False, help="SGD nesterov"
)
# SparseAdam config
optimizer_config.add_argument(
"--emmental.sparse_adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="SparseAdam betas",
)
optimizer_config.add_argument(
"--emmental.sparse_adam_eps", type=float, default=1e-06, help="SparseAdam eps"
)
# BertAdam config
optimizer_config.add_argument(
"--emmental.bert_adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="BertAdam betas",
)
optimizer_config.add_argument(
"--emmental.bert_adam_eps", type=float, default=1e-06, help="BertAdam eps"
)
parser_hierarchy["emmental"]["_global_optimizer"] = optimizer_config
# Scheduler configuration
scheduler_config = parser.add_argument_group("Scheduler configuration")
scheduler_config.add_argument(
"--emmental.lr_scheduler",
type=nullable_string,
default=None,
choices=[
"linear",
"exponential",
"plateau",
"step",
"multi_step",
"cyclic",
"one_cycle",
"cosine_annealing",
],
help="Learning rate scheduler",
)
scheduler_config.add_argument(
"--emmental.lr_scheduler_step_unit",
type=str,
default="batch",
choices=["batch", "epoch"],
help="Learning rate scheduler step unit",
)
scheduler_config.add_argument(
"--emmental.lr_scheduler_step_freq",
type=int,
default=1,
help="Learning rate scheduler step freq",
)
scheduler_config.add_argument(
"--emmental.warmup_steps", type=float, default=None, help="Warm up steps"
)
scheduler_config.add_argument(
"--emmental.warmup_unit",
type=str,
default="batch",
choices=["batch", "epoch"],
help="Warm up unit",
)
scheduler_config.add_argument(
"--emmental.warmup_percentage",
type=float,
default=None,
help="Warm up percentage",
)
scheduler_config.add_argument(
"--emmental.min_lr", type=float, default=0.0, help="Minimum learning rate"
)
scheduler_config.add_argument(
"--emmental.reset_state",
type=str2bool,
default=False,
help="Whether reset the state of the optimizer when lr changes",
)
scheduler_config.add_argument(
"--emmental.exponential_lr_scheduler_gamma",
type=float,
default=0.9,
help="Gamma for exponential lr scheduler",
)
# ReduceLROnPlateau lr scheduler config
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_metric",
type=str,
default="model/train/all/loss",
help="Metric of plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_mode",
type=str,
default="min",
choices=["min", "max"],
help="Mode of plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_factor",
type=float,
default=0.1,
help="Factor of plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_patience",
type=int,
default=10,
help="Patience for plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_threshold",
type=float,
default=0.0001,
help="Threshold of plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_threshold_mode",
type=str,
default="rel",
choices=["rel", "abs"],
help="Threshold mode of plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_cooldown",
type=int,
default=0,
help="Cooldown of plateau lr scheduler",
)
scheduler_config.add_argument(
"--emmental.plateau_lr_scheduler_eps",
type=float,
default=0.00000001,
help="Eps of plateau lr scheduler",
)
# Step lr scheduler config
scheduler_config.add_argument(
"--emmental.step_lr_scheduler_step_size",
type=int,
default=1,
help="Period of learning rate decay",
)
scheduler_config.add_argument(
"--emmental.step_lr_scheduler_gamma",
type=float,
default=0.1,
help="Multiplicative factor of learning rate decay",
)
scheduler_config.add_argument(
"--emmental.step_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
scheduler_config.add_argument(
"--emmental.multi_step_lr_scheduler_milestones",
nargs="+",
type=int,
default=[1000],
help="List of epoch indices. Must be increasing.",
)
scheduler_config.add_argument(
"--emmental.multi_step_lr_scheduler_gamma",
type=float,
default=0.1,
help="Multiplicative factor of learning rate decay",
)
scheduler_config.add_argument(
"--emmental.multi_step_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
# Cyclic lr scheduler config
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_base_lr",
nargs="+",
type=float,
default=0.001,
help="Base lr of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_max_lr",
nargs="+",
type=float,
default=0.1,
help="Max lr of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_step_size_up",
type=int,
default=2000,
help="Step size up of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_step_size_down",
type=nullable_int,
default=None,
help="Step size down of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_mode",
type=nullable_string,
default="triangular",
help="Mode of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_gamma",
type=float,
default=1.0,
help="Gamma of cyclic lr scheduler",
)
# TODO: support cyclic_lr_scheduler_scale_fn
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_scale_mode",
type=str,
default="cycle",
choices=["cycle", "iterations"],
help="Scale mode of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_cycle_momentum",
type=str2bool,
default=True,
help="Cycle momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_base_momentum",
nargs="+",
type=float,
default=0.8,
help="Base momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_max_momentum",
nargs="+",
type=float,
default=0.9,
help="Max momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cyclic_lr_scheduler_last_epoch",
type=int,
default=-1,
help="Last epoch of cyclic lr scheduler",
)
# One cycle lr scheduler config
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_max_lr",
nargs="+",
type=float,
default=0.1,
help="Max lr of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_pct_start",
type=float,
default=0.3,
help="Percentage start of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_anneal_strategy",
type=str,
default="cos",
choices=["cos", "linear"],
help="Anneal strategyr of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_cycle_momentum",
type=str2bool,
default=True,
help="Cycle momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_base_momentum",
nargs="+",
type=float,
default=0.85,
help="Base momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_max_momentum",
nargs="+",
type=float,
default=0.95,
help="Max momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_div_factor",
type=float,
default=25,
help="Div factor of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_final_div_factor",
type=float,
default=1e4,
help="Final div factor of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.one_cycle_lr_scheduler_last_epoch",
type=int,
default=-1,
help="Last epoch of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--emmental.cosine_annealing_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
scheduler_config.add_argument(
"--emmental.task_scheduler",
type=str,
default="round_robin",
# choices=["sequential", "round_robin", "mixed"],
help="Task scheduler",
)
scheduler_config.add_argument(
"--emmental.sequential_scheduler_fillup",
type=str2bool,
default=False,
help="Whether fillup in sequential scheduler",
)
scheduler_config.add_argument(
"--emmental.round_robin_scheduler_fillup",
type=str2bool,
default=False,
help="whether fillup in round robin scheduler",
)
scheduler_config.add_argument(
"--emmental.mixed_scheduler_fillup",
type=str2bool,
default=False,
help="whether fillup in mixed scheduler scheduler",
)
parser_hierarchy["emmental"]["_global_scheduler"] = scheduler_config
# Logging configuration
logging_config = parser.add_argument_group("Logging configuration")
logging_config.add_argument(
"--emmental.counter_unit",
type=str,
default="epoch",
choices=["epoch", "batch"],
help="Logging unit (epoch, batch)",
)
logging_config.add_argument(
"--emmental.evaluation_freq",
type=float,
default=1,
help="Logging evaluation frequency",
)
logging_config.add_argument(
"--emmental.writer",
type=str,
default="tensorboard",
choices=["json", "tensorboard", "wandb"],
help="The writer format (json, tensorboard, wandb)",
)
logging_config.add_argument(
"--emmental.write_loss_per_step",
        type=str2bool,
default=False,
help="Whether to log loss per step",
)
logging_config.add_argument(
"--emmental.wandb_project_name",
type=nullable_string,
default=None,
help="Wandb project name",
)
logging_config.add_argument(
"--emmental.wandb_run_name",
type=nullable_string,
default=None,
help="Wandb run name",
)
logging_config.add_argument(
"--emmental.wandb_watch_model",
        type=str2bool,
        default=False,
        help="Whether to use wandb to watch the model",
)
logging_config.add_argument(
"--emmental.wandb_model_watch_freq",
type=nullable_int,
default=None,
help="Wandb model watch frequency",
)
logging_config.add_argument(
"--emmental.checkpointing",
type=str2bool,
default=True,
help="Whether to checkpoint the model",
)
logging_config.add_argument(
"--emmental.checkpoint_path", type=str, default=None, help="Checkpointing path"
)
logging_config.add_argument(
"--emmental.checkpoint_freq",
type=int,
default=1,
help="Checkpointing every k logging time",
)
logging_config.add_argument(
"--emmental.checkpoint_metric",
type=str2dict,
default={"model/train/all/loss": "min"},
help=(
"Checkpointing metric (metric_name:mode), "
"e.g., `model/train/all/loss:min`"
),
)
logging_config.add_argument(
"--emmental.checkpoint_task_metrics",
type=str2dict,
default=None,
help=(
"Task specific checkpointing metric "
"(metric_name1:mode1,metric_name2:mode2)"
),
)
logging_config.add_argument(
"--emmental.checkpoint_runway",
type=float,
default=0,
help="Checkpointing runway (no checkpointing before k checkpointing unit)",
)
logging_config.add_argument(
"--emmental.checkpoint_all",
type=str2bool,
default=True,
help="Whether to checkpoint all checkpoints",
)
logging_config.add_argument(
"--emmental.clear_intermediate_checkpoints",
type=str2bool,
default=False,
help="Whether to clear intermediate checkpoints",
)
logging_config.add_argument(
"--emmental.clear_all_checkpoints",
type=str2bool,
default=False,
help="Whether to clear all checkpoints",
)
parser_hierarchy["emmental"]["_global_logging"] = logging_config
return parser, parser_hierarchy
def parse_args_to_config(args: DottedDict) -> Dict[str, Any]:
"""Parse the Emmental arguments to config dict.
Args:
args: parsed namespace from argument parser.
Returns: Emmental config dict.
"""
config = {
"meta_config": {
"seed": args.seed,
"verbose": args.verbose,
"log_path": args.log_path,
"use_exact_log_path": args.use_exact_log_path,
},
"data_config": {
"min_data_len": args.min_data_len,
"max_data_len": args.max_data_len,
},
"model_config": {
"model_path": args.model_path,
"device": args.device,
"dataparallel": args.dataparallel,
"distributed_backend": args.distributed_backend,
},
"learner_config": {
"optimizer_path": args.optimizer_path,
"scheduler_path": args.scheduler_path,
"fp16": args.fp16,
"fp16_opt_level": args.fp16_opt_level,
"local_rank": args.local_rank,
"epochs_learned": args.epochs_learned,
"n_epochs": args.n_epochs,
"steps_learned": args.steps_learned,
"n_steps": args.n_steps,
"skip_learned_data": args.skip_learned_data,
"train_split": args.train_split,
"valid_split": args.valid_split,
"test_split": args.test_split,
"ignore_index": args.ignore_index,
"online_eval": args.online_eval,
"optimizer_config": {
"optimizer": args.optimizer,
"lr": args.lr,
"l2": args.l2,
"grad_clip": args.grad_clip,
"gradient_accumulation_steps": args.gradient_accumulation_steps,
"asgd_config": {
"lambd": args.asgd_lambd,
"alpha": args.asgd_alpha,
"t0": args.asgd_t0,
},
"adadelta_config": {"rho": args.adadelta_rho, "eps": args.adadelta_eps},
"adagrad_config": {
"lr_decay": args.adagrad_lr_decay,
"initial_accumulator_value": args.adagrad_initial_accumulator_value,
"eps": args.adagrad_eps,
},
"adam_config": {
"betas": args.adam_betas,
"amsgrad": args.adam_amsgrad,
"eps": args.adam_eps,
},
"adamw_config": {
"betas": args.adamw_betas,
"amsgrad": args.adamw_amsgrad,
"eps": args.adamw_eps,
},
"adamax_config": {"betas": args.adamax_betas, "eps": args.adamax_eps},
"lbfgs_config": {
"max_iter": args.lbfgs_max_iter,
"max_eval": args.lbfgs_max_eval,
"tolerance_grad": args.lbfgs_tolerance_grad,
"tolerance_change": args.lbfgs_tolerance_change,
"history_size": args.lbfgs_history_size,
"line_search_fn": args.lbfgs_line_search_fn,
},
"rms_prop_config": {
"alpha": args.rms_prop_alpha,
"eps": args.rms_prop_eps,
"momentum": args.rms_prop_momentum,
"centered": args.rms_prop_centered,
},
"r_prop_config": {
"etas": args.r_prop_etas,
"step_sizes": args.r_prop_step_sizes,
},
"sgd_config": {
"momentum": args.sgd_momentum,
"dampening": args.sgd_dampening,
"nesterov": args.sgd_nesterov,
},
"sparse_adam_config": {
"betas": args.sparse_adam_betas,
"eps": args.sparse_adam_eps,
},
"bert_adam_config": {
"betas": args.bert_adam_betas,
"eps": args.bert_adam_eps,
},
},
"lr_scheduler_config": {
"lr_scheduler": args.lr_scheduler,
"lr_scheduler_step_unit": args.lr_scheduler_step_unit,
"lr_scheduler_step_freq": args.lr_scheduler_step_freq,
"warmup_steps": args.warmup_steps,
"warmup_unit": args.warmup_unit,
"warmup_percentage": args.warmup_percentage,
"min_lr": args.min_lr,
"reset_state": args.reset_state,
"exponential_config": {"gamma": args.exponential_lr_scheduler_gamma},
"plateau_config": {
"metric": args.plateau_lr_scheduler_metric,
"mode": args.plateau_lr_scheduler_mode,
"factor": args.plateau_lr_scheduler_factor,
"patience": args.plateau_lr_scheduler_patience,
"threshold": args.plateau_lr_scheduler_threshold,
"threshold_mode": args.plateau_lr_scheduler_threshold_mode,
"cooldown": args.plateau_lr_scheduler_cooldown,
"eps": args.plateau_lr_scheduler_eps,
},
"step_config": {
"step_size": args.step_lr_scheduler_step_size,
"gamma": args.step_lr_scheduler_gamma,
"last_epoch": args.step_lr_scheduler_last_epoch,
},
"multi_step_config": {
"milestones": args.multi_step_lr_scheduler_milestones,
"gamma": args.multi_step_lr_scheduler_gamma,
"last_epoch": args.multi_step_lr_scheduler_last_epoch,
},
"cyclic_config": {
"base_lr": args.cyclic_lr_scheduler_base_lr,
"max_lr": args.cyclic_lr_scheduler_max_lr,
"step_size_up": args.cyclic_lr_scheduler_step_size_up,
"step_size_down": args.cyclic_lr_scheduler_step_size_down,
"mode": args.cyclic_lr_scheduler_mode,
"gamma": args.cyclic_lr_scheduler_gamma,
"scale_fn": None,
"scale_mode": args.cyclic_lr_scheduler_scale_mode,
"cycle_momentum": args.cyclic_lr_scheduler_cycle_momentum,
"base_momentum": args.cyclic_lr_scheduler_base_momentum,
"max_momentum": args.cyclic_lr_scheduler_max_momentum,
"last_epoch": args.cyclic_lr_scheduler_last_epoch,
},
"one_cycle_config": {
"max_lr": args.one_cycle_lr_scheduler_max_lr,
"pct_start": args.one_cycle_lr_scheduler_pct_start,
"anneal_strategy": args.one_cycle_lr_scheduler_anneal_strategy,
"cycle_momentum": args.one_cycle_lr_scheduler_cycle_momentum,
"base_momentum": args.one_cycle_lr_scheduler_base_momentum,
"max_momentum": args.one_cycle_lr_scheduler_max_momentum,
"div_factor": args.one_cycle_lr_scheduler_div_factor,
"final_div_factor": args.one_cycle_lr_scheduler_final_div_factor,
"last_epoch": args.one_cycle_lr_scheduler_last_epoch,
},
"cosine_annealing_config": {
"last_epoch": args.cosine_annealing_lr_scheduler_last_epoch
},
},
"task_scheduler_config": {
"task_scheduler": args.task_scheduler,
"sequential_scheduler_config": {
"fillup": args.sequential_scheduler_fillup
},
"round_robin_scheduler_config": {
"fillup": args.round_robin_scheduler_fillup
},
"mixed_scheduler_config": {"fillup": args.mixed_scheduler_fillup},
},
},
"logging_config": {
"counter_unit": args.counter_unit,
"evaluation_freq": args.evaluation_freq,
"writer_config": {
"verbose": True,
"writer": args.writer,
"write_loss_per_step": args.write_loss_per_step,
"wandb_project_name": args.wandb_project_name,
"wandb_run_name": args.wandb_run_name,
"wandb_watch_model": args.wandb_watch_model,
"wandb_model_watch_freq": args.wandb_model_watch_freq,
},
"checkpointing": args.checkpointing,
"checkpointer_config": {
"checkpoint_path": args.checkpoint_path,
"checkpoint_freq": args.checkpoint_freq,
"checkpoint_metric": args.checkpoint_metric,
"checkpoint_task_metrics": args.checkpoint_task_metrics,
"checkpoint_runway": args.checkpoint_runway,
"checkpoint_all": args.checkpoint_all,
"clear_intermediate_checkpoints": args.clear_intermediate_checkpoints,
"clear_all_checkpoints": args.clear_all_checkpoints,
},
},
}
return create_bool_dotted_dict(config)
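# Small usage sketch (added; not part of Emmental or Bootleg): build the
# "emmental."-prefixed parser defined above and parse a couple of flags. The
# dotted option names land in the namespace under their dotted keys.
if __name__ == "__main__":
    emm_parser, _hierarchy = parse_args()
    ns = emm_parser.parse_args(["--emmental.n_epochs", "3", "--emmental.lr", "5e-5"])
    print(vars(ns)["emmental.n_epochs"], vars(ns)["emmental.lr"])  # 3 5e-05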
| bootleg-master | bootleg/utils/parser/emm_parse_args.py |
"""Bootleg default configuration parameters.
In the json file, everything is a string or number. In this python file,
if the default is a boolean, it will be parsed as such. If the default
is a dictionary, True and False strings will become booleans. Otherwise
they will stay string.
"""
import multiprocessing
config_args = {
"run_config": {
"spawn_method": (
"forkserver",
"multiprocessing spawn method. forkserver will save memory but have slower startup costs.",
),
"eval_batch_size": (128, "batch size for eval"),
"dump_preds_accumulation_steps": (
1000,
"number of eval steps to accumulate the output tensors for before saving results to file",
),
"dump_preds_num_data_splits": (
1,
"number of chunks to split the input file; helps with OOM issues",
),
"overwrite_eval_dumps": (False, "overwrite dumped eval data"),
"dataloader_threads": (16, "data loader threads to feed gpus"),
"log_level": ("info", "logging level"),
"dataset_threads": (
int(multiprocessing.cpu_count() * 0.9),
"data set threads for prepping data",
),
"result_label_file": (
"bootleg_labels.jsonl",
"file name to save predicted entities in",
),
"result_emb_file": (
"bootleg_embs.npy",
"file name to save contextualized embs in",
),
},
# Parameters for hyperparameter tuning
"train_config": {
"batch_size": (32, "batch size"),
},
"model_config": {
"hidden_size": (300, "hidden dimension for the embeddings before scoring"),
"normalize": (False, "normalize embeddings before dot product"),
"temperature": (1.0, "temperature for softmax in loss"),
},
"data_config": {
"eval_slices": ([], "slices for evaluation"),
"train_in_candidates": (
True,
"Train in candidates (if False, this means we include NIL entity)",
),
"data_dir": ("data", "where training, testing, and dev data is stored"),
"data_prep_dir": (
"prep",
"directory where data prep files are saved inside data_dir",
),
"entity_dir": (
"entity_data",
"where entity profile information and prepped embedding data is stored",
),
"entity_prep_dir": (
"prep",
"directory where prepped embedding data is saved inside entity_dir",
),
"entity_map_dir": (
"entity_mappings",
"directory where entity json mappings are saved inside entity_dir",
),
"alias_cand_map": (
"alias2qids",
"name of alias candidate map file, should be saved in entity_dir/entity_map_dir",
),
"alias_idx_map": (
"alias2id",
"name of alias index map file, should be saved in entity_dir/entity_map_dir",
),
"qid_cnt_map": (
"qid2cnt.json",
"name of alias index map file, should be saved in data_dir",
),
"max_seq_len": (128, "max token length sentences"),
"max_seq_window_len": (64, "max window around an entity"),
"max_ent_len": (128, "max token length for entire encoded entity"),
"context_mask_perc": (
0.0,
"mask percent for context tokens in addition to tail masking",
),
"popularity_mask": (
True,
"whether to use popularity masking for training in the entity and context encoders",
),
"overwrite_preprocessed_data": (False, "overwrite preprocessed data"),
"print_examples_prep": (True, "whether to print examples during prep or not"),
"use_entity_desc": (True, "whether to use entity descriptions or not"),
"entity_type_data": {
"use_entity_types": (False, "whether to use entity type data"),
"type_symbols_dir": (
"type_mappings/wiki",
"directory to type symbols inside entity_dir",
),
"max_ent_type_len": (20, "max WORD length for type sequence"),
},
"entity_kg_data": {
"use_entity_kg": (False, "whether to use entity type data"),
"kg_symbols_dir": (
"kg_mappings",
"directory to kg symbols inside entity_dir",
),
"max_ent_kg_len": (60, "max WORD length for kg sequence"),
},
"train_dataset": {
"file": ("train.jsonl", ""),
"use_weak_label": (True, "Use weakly labeled mentions"),
},
"dev_dataset": {
"file": ("dev.jsonl", ""),
"use_weak_label": (True, "Use weakly labeled mentions"),
},
"test_dataset": {
"file": ("test.jsonl", ""),
"use_weak_label": (True, "Use weakly labeled mentions"),
},
"word_embedding": {
"bert_model": ("bert-base-uncased", ""),
"context_layers": (12, ""),
"entity_layers": (12, ""),
"cache_dir": (
"pretrained_bert_models",
"Directory where word embeddings are cached",
),
},
},
}
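# Illustration (added): every leaf above is a (default value, help string)
# tuple. When parsed by bootleg.utils.parser.parser_utils, a nested key such
# as config_args["data_config"]["max_seq_len"] becomes the dotted flag
# --data_config.max_seq_len, which can also be passed on the command line
# (e.g. ``--data_config.max_seq_len 64``) to override the json/yaml value.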
| bootleg-master | bootleg/utils/parser/bootleg_args.py |
"""
Compute statistics over data.
Helper file for computing various statistics over our data such as mention
frequency, mention text frequency in the data (even if not labeled as an
anchor), ...
etc.
"""
import argparse
import logging
import multiprocessing
import os
import time
from collections import Counter
import marisa_trie
import nltk
import numpy as np
import ujson
import ujson as json
from tqdm.auto import tqdm
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
from bootleg.utils.utils import get_lnrm
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
def parse_args():
"""Parse args."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default="data/", help="Data dir for training data"
)
parser.add_argument(
"--save_dir", type=str, default="data/", help="Data dir for saving stats"
)
parser.add_argument("--train_file", type=str, default="train.jsonl")
parser.add_argument(
"--entity_symbols_dir",
type=str,
default="entity_db/entity_mappings",
help="Path to entities inside data_dir",
)
parser.add_argument("--lower", action="store_true", help="Lower aliases")
parser.add_argument("--strip", action="store_true", help="Strip punc aliases")
parser.add_argument(
"--num_workers", type=int, help="Number of workers to parallelize", default=2
)
args = parser.parse_args()
return args
def compute_histograms(save_dir, entity_symbols):
"""Compute histogram."""
al_counts = Counter()
for al in entity_symbols.get_all_aliases():
num_entities = len(entity_symbols.get_qid_cands(al))
al_counts.update([num_entities])
utils.dump_json_file(
filename=os.path.join(save_dir, "candidate_counts.json"), contents=al_counts
)
return
def get_num_lines(input_src):
"""Get number of lines."""
# get number of lines
num_lines = 0
with open(input_src, "r", encoding="utf-8") as in_file:
try:
for line in in_file:
num_lines += 1
except Exception as e:
logging.error("ERROR READING IN TRAINING DATA")
logging.error(e)
return []
return num_lines
def chunk_text_data(input_src, chunk_files, chunk_size, num_lines):
"""Chunk text data."""
logging.info(f"Reading in {input_src}")
start = time.time()
# write out chunks as text data
chunk_id = 0
num_lines_in_chunk = 0
# keep track of what files are written
out_file = open(chunk_files[chunk_id], "w")
with open(input_src, "r", encoding="utf-8") as in_file:
for i, line in enumerate(in_file):
out_file.write(line)
num_lines_in_chunk += 1
# move on to new chunk when it hits chunk size
if num_lines_in_chunk == chunk_size:
chunk_id += 1
# reset number of lines in chunk and open new file if not at end
num_lines_in_chunk = 0
out_file.close()
if i < (num_lines - 1):
out_file = open(chunk_files[chunk_id], "w")
out_file.close()
logging.info(f"Wrote out data chunks in {round(time.time() - start, 2)}s")
def compute_occurrences_single(args, max_alias_len=6):
"""Compute statistics single process."""
data_file, aliases_file, lower, strip = args
num_lines = sum(1 for _ in open(data_file))
all_aliases = ujson.load(open(aliases_file))
all_aliases = marisa_trie.Trie(all_aliases)
# entity histogram
ent_occurrences = Counter()
# alias histogram
alias_occurrences = Counter()
    # alias text occurrences
alias_text_occurrences = Counter()
# number of aliases per sentence
alias_pair_occurrences = Counter()
# alias|entity histogram
alias_entity_pair = Counter()
with open(data_file, "r") as in_file:
for line in tqdm(in_file, total=num_lines):
line = json.loads(line.strip())
for n in range(max_alias_len + 1, 0, -1):
grams = nltk.ngrams(line["sentence"].split(), n)
for gram_words in grams:
gram_attempt = get_lnrm(" ".join(gram_words), lower, strip)
if gram_attempt in all_aliases:
alias_text_occurrences[gram_attempt] += 1
# Get aliases in wikipedia _before_ the swapping - these represent the true textual aliases
aliases = line["unswap_aliases"]
qids = line["qids"]
for qid, alias in zip(qids, aliases):
ent_occurrences[qid] += 1
alias_occurrences[alias] += 1
alias_entity_pair[alias + "|" + qid] += 1
alias_pair_occurrences[len(aliases)] += 1
results = {
"ent_occurrences": ent_occurrences,
"alias_occurrences": alias_occurrences,
"alias_text_occurrences": alias_text_occurrences,
"alias_pair_occurrences": alias_pair_occurrences,
"alias_entity_pair": alias_entity_pair,
}
return results
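# Clarifying note (added): ``alias_occurrences`` counts aliases that are
# actually labeled in the data, whereas ``alias_text_occurrences`` counts
# every n-gram of the sentence (lengths 1 through max_alias_len + 1) whose
# normalized form matches a known alias, whether or not it was labeled.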
def compute_occurrences(save_dir, data_file, entity_dump, lower, strip, num_workers=8):
"""Compute statistics."""
all_aliases = entity_dump.get_all_aliases()
chunk_file_path = os.path.join(save_dir, "tmp")
all_aliases_f = os.path.join(chunk_file_path, "all_aliases.json")
utils.ensure_dir(chunk_file_path)
ujson.dump(all_aliases, open(all_aliases_f, "w"), ensure_ascii=False)
# divide up data into chunks
num_lines = get_num_lines(data_file)
num_processes = min(num_workers, int(multiprocessing.cpu_count()))
logging.info(f"Using {num_processes} workers...")
chunk_size = int(np.ceil(num_lines / (num_processes)))
utils.ensure_dir(chunk_file_path)
chunk_infiles = [
os.path.join(f"{chunk_file_path}", f"data_chunk_{chunk_id}_in.jsonl")
for chunk_id in range(num_processes)
]
chunk_text_data(data_file, chunk_infiles, chunk_size, num_lines)
pool = multiprocessing.Pool(processes=num_processes)
subprocess_args = [
[chunk_infiles[i], all_aliases_f, lower, strip] for i in range(num_processes)
]
results = pool.map(compute_occurrences_single, subprocess_args)
pool.close()
pool.join()
logging.info("Finished collecting counts")
logging.info("Merging counts....")
# merge counters together
ent_occurrences = Counter()
# alias histogram
alias_occurrences = Counter()
    # alias text occurrences
alias_text_occurrences = Counter()
# number of aliases per sentence
alias_pair_occurrences = Counter()
# alias|entity histogram
alias_entity_pair = Counter()
for result_set in tqdm(results, desc="Merging"):
ent_occurrences += result_set["ent_occurrences"]
alias_occurrences += result_set["alias_occurrences"]
alias_text_occurrences += result_set["alias_text_occurrences"]
alias_pair_occurrences += result_set["alias_pair_occurrences"]
alias_entity_pair += result_set["alias_entity_pair"]
# save counters
utils.dump_json_file(
filename=os.path.join(save_dir, "entity_count.json"), contents=ent_occurrences
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_counts.json"), contents=alias_occurrences
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_text_counts.json"),
contents=alias_text_occurrences,
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_pair_occurrences.json"),
contents=alias_pair_occurrences,
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_entity_counts.json"),
contents=alias_entity_pair,
)
def main():
"""Run."""
args = parse_args()
logging.info(json.dumps(vars(args), indent=4))
entity_symbols = EntitySymbols.load_from_cache(
load_dir=os.path.join(args.data_dir, args.entity_symbols_dir)
)
train_file = os.path.join(args.data_dir, args.train_file)
save_dir = os.path.join(args.save_dir, "stats")
logging.info(f"Will save data to {save_dir}")
utils.ensure_dir(save_dir)
# compute_histograms(save_dir, entity_symbols)
compute_occurrences(
save_dir,
train_file,
entity_symbols,
args.lower,
args.strip,
num_workers=args.num_workers,
)
if __name__ == "__main__":
main()
| bootleg-master | bootleg/utils/preprocessing/compute_statistics.py |
"""Preprocessing init."""
| bootleg-master | bootleg/utils/preprocessing/__init__.py |
"""
Sample eval data.
This will sample a jsonl train or eval data based on the slices in the data.
This is useful for subsampling a smaller eval dataset.
The output of this file is a file with a subset of sentences from the
input file, sampled such that for each slice in --args.slice, a minimum
of args.min_sample_size mentions are in the slice (if possible). Once
that is satisfied, we sample to get approximately --args.sample_perc of
mentions from each slice.
"""
import argparse
import multiprocessing
import os
import random
import shutil
from collections import defaultdict
import numpy as np
import ujson
from tqdm.auto import tqdm
from bootleg.utils import utils
FINAL_COUNTS_PREFIX = "final_counts"
FINAL_SLICE_TO_SENT_PREFIX = "final_slice_to_sent"
FINAL_SENT_TO_SLICE_PREFIX = "final_sent_to_slices"
def parse_args():
"""Parse args."""
parser = argparse.ArgumentParser()
parser.add_argument("--file", type=str, default="merged.jsonl")
parser.add_argument(
"--data_dir",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/wiki_title_0122",
help="Where files saved",
)
parser.add_argument(
"--out_file_name",
type=str,
default="merged_sample.jsonl",
help="Where files saved",
)
parser.add_argument(
"--sample_perc", type=float, default=0.005, help="Perc of each slice to sample"
)
parser.add_argument(
"--min_sample_size",
type=int,
default=5000,
help="Min number of mentions per slice",
)
parser.add_argument(
"--slice",
default=[],
action="append",
required=True,
help="Slices to consider when sampling",
)
args = parser.parse_args()
return args
def get_slice_stats(num_processes, file):
"""Get true anchor slice counts."""
pool = multiprocessing.Pool(processes=num_processes)
final_counts = defaultdict(int)
final_slice_to_sent = defaultdict(set)
final_sent_to_slices = defaultdict(lambda: defaultdict(int))
temp_out_dir = os.path.join(os.path.dirname(file), "_temp")
os.mkdir(temp_out_dir)
all_lines = [li for li in open(file)]
num_lines = len(all_lines)
chunk_size = int(np.ceil(num_lines / num_processes))
line_chunks = [
all_lines[i : i + chunk_size] for i in range(0, num_lines, chunk_size)
]
input_args = [
[i, line_chunks[i], i * chunk_size, temp_out_dir]
for i in range(len(line_chunks))
]
for i in tqdm(
pool.imap_unordered(get_slice_stats_hlp, input_args, chunksize=1),
total=len(line_chunks),
desc="Gathering slice counts",
):
cnt_res = utils.load_json_file(
os.path.join(temp_out_dir, f"{FINAL_COUNTS_PREFIX}_{i}.json")
)
sent_to_slices = utils.load_json_file(
os.path.join(temp_out_dir, f"{FINAL_SENT_TO_SLICE_PREFIX}_{i}.json")
)
slice_to_sent = utils.load_json_file(
os.path.join(temp_out_dir, f"{FINAL_SLICE_TO_SENT_PREFIX}_{i}.json")
)
for k in cnt_res:
final_counts[k] += cnt_res[k]
for k in slice_to_sent:
final_slice_to_sent[k].update(set(slice_to_sent[k]))
for k in sent_to_slices:
final_sent_to_slices[k].update(sent_to_slices[k])
pool.close()
pool.join()
shutil.rmtree(temp_out_dir)
return dict(final_counts), dict(final_slice_to_sent), dict(final_sent_to_slices)
def get_slice_stats_hlp(args):
"""Get slice count helper."""
i, lines, offset, temp_out_dir = args
res = defaultdict(int) # slice -> cnt
slice_to_sent = defaultdict(set) # slice -> sent_idx (for sampling)
sent_to_slices = defaultdict(
lambda: defaultdict(int)
) # sent_idx -> slice -> cnt (for sampling)
for line in tqdm(lines, total=len(lines), desc=f"Processing lines for {i}"):
line = ujson.loads(line)
slices = line.get("slices", {})
anchors = line["gold"]
for slice_name in slices:
for al_str in slices[slice_name]:
if anchors[int(al_str)] is True and slices[slice_name][al_str] > 0.5:
res[slice_name] += 1
slice_to_sent[slice_name].add(int(line["sent_idx_unq"]))
sent_to_slices[int(line["sent_idx_unq"])].update(res)
utils.dump_json_file(
os.path.join(temp_out_dir, f"{FINAL_COUNTS_PREFIX}_{i}.json"), res
)
utils.dump_json_file(
os.path.join(temp_out_dir, f"{FINAL_SENT_TO_SLICE_PREFIX}_{i}.json"),
sent_to_slices,
)
# Turn into list for dumping
for slice_name in slice_to_sent:
slice_to_sent[slice_name] = list(slice_to_sent[slice_name])
utils.dump_json_file(
os.path.join(temp_out_dir, f"{FINAL_SLICE_TO_SENT_PREFIX}_{i}.json"),
slice_to_sent,
)
return i
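# For reference (added comment): each input line is a json object that looks
# roughly like
#     {"sent_idx_unq": 12, "gold": [true, false],
#      "slices": {"slice_A": {"0": 1.0, "1": 0.0}}}
# An alias is counted toward a slice only when it is a gold anchor and its
# slice score is above 0.5.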
def main():
"""Run."""
args = parse_args()
print(ujson.dumps(vars(args), indent=4))
num_processes = int(0.8 * multiprocessing.cpu_count())
in_file = os.path.join(args.data_dir, args.file)
print(f"Getting slice counts from {in_file}")
slice_counts, slice_to_sents, sent_to_slices = get_slice_stats(
num_processes, in_file
)
print("****SLICE COUNTS*****")
print(ujson.dumps(slice_counts, indent=4))
desired_slices = args.slice
final_sentences = set()
new_counts = defaultdict(int)
for sl_name in desired_slices:
cur_count = new_counts[sl_name]
sample_size = max(
min(args.min_sample_size - cur_count, len(slice_to_sents[sl_name])),
min(
int(args.sample_perc * slice_counts[sl_name]) - cur_count,
len(slice_to_sents[sl_name]),
),
0,
)
if sample_size > 0:
sents_to_add = random.sample(list(slice_to_sents[sl_name]), k=sample_size)
final_sentences.update(sents_to_add)
new_counts = defaultdict(int)
for sent_id in final_sentences:
for sl_name2 in sent_to_slices.get(sent_id, {}):
new_counts[sl_name2] += sent_to_slices.get(sent_id, {}).get(
sl_name2, 0
)
out_file = os.path.join(args.data_dir, args.out_file_name)
print(f"Outputting results to {out_file}")
num_lines = sum([1 for _ in open(in_file)])
final_cnt = 0
final_slice_cnts = defaultdict(int)
with open(out_file, "w") as out_f:
for line in tqdm(
[ujson.loads(li.strip()) for li in open(in_file)],
desc="Writing out file",
total=num_lines,
):
if int(line["sent_idx_unq"]) in final_sentences:
out_f.write(ujson.dumps(line, ensure_ascii=False) + "\n")
for sl_name in line.get("slices", {}):
for al_idx in line["slices"][sl_name]:
if (
line["slices"][sl_name][al_idx] > 0.5
and line["gold"][int(al_idx)] is True
):
final_slice_cnts[sl_name] += 1
final_cnt += 1
print(f"Wrote out {final_cnt} lines to {out_file}")
print("****FINAL SLICE COUNTS*****")
print(ujson.dumps(final_slice_cnts, indent=4))
if __name__ == "__main__":
main()
| bootleg-master | bootleg/utils/preprocessing/sample_eval_data.py |
"""
Convert word spans to character spans.
Helper script that reads a jsonl data file, converts each example's
word-level mention spans into character-level spans, and writes the
result back to the file under the "char_spans" key.
"""
import argparse
import multiprocessing
import os
import shutil
import tempfile
from collections import defaultdict
from pathlib import Path
import ujson
from tqdm.auto import tqdm
def parse_args():
"""Parse args."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--file",
type=str,
default="train.jsonl",
)
args = parser.parse_args()
return args
def get_char_spans(spans, text):
"""
Get character spans instead of default word spans.
Args:
spans: word spans
text: text
Returns: character spans
"""
word_i = 0
prev_is_space = True
char2word = {}
word2char = defaultdict(list)
for char_i, c in enumerate(text):
if c.isspace():
if not prev_is_space:
word_i += 1
prev_is_space = True
else:
prev_is_space = False
char2word[char_i] = word_i
word2char[word_i].append(char_i)
char_spans = []
for span in spans:
char_l = min(word2char[span[0]])
char_r = max(word2char[span[1] - 1]) + 1
char_spans.append([char_l, char_r])
return char_spans
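# Example (added for illustration): for the text "Barack Obama was born",
# the word span [0, 2] ("Barack Obama") maps to the character span [0, 12]:
#     get_char_spans([[0, 2]], "Barack Obama was born")  ->  [[0, 12]]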
def convert_char_spans(num_processes, file):
"""Add char spans to jsonl file."""
pool = multiprocessing.Pool(processes=num_processes)
num_lines = sum([1 for _ in open(file)])
temp_file = Path(tempfile.gettempdir()) / "_convert_char_spans.jsonl"
with open(file) as in_f, open(temp_file, "wb") as out_f:
for res in tqdm(
pool.imap_unordered(convert_char_spans_helper, in_f, chunksize=100),
total=num_lines,
desc="Adding char spans",
):
out_f.write(bytes(res, encoding="utf-8"))
out_f.seek(0)
pool.close()
pool.join()
shutil.copy(temp_file, file)
os.remove(temp_file)
return
def convert_char_spans_helper(line):
"""Get char spans helper.
Parses line, adds char spans, and dumps it back again
"""
line = ujson.loads(line)
line["char_spans"] = get_char_spans(line["spans"], line["sentence"])
to_write = ujson.dumps(line) + "\n"
return to_write
def main():
"""Run."""
args = parse_args()
print(ujson.dumps(vars(args), indent=4))
num_processes = int(0.8 * multiprocessing.cpu_count())
print(f"Getting slice counts from {args.file}")
convert_char_spans(num_processes, args.file)
if __name__ == "__main__":
main()
| bootleg-master | bootleg/utils/preprocessing/convert_to_char_spans.py |
"""
Compute QID counts.
Helper function that computes a dictionary of QID -> count in training data.
If a QID is not in this dictionary, it has a count of zero.
"""
import argparse
import multiprocessing
from collections import defaultdict
import ujson
from tqdm.auto import tqdm
from bootleg.utils import utils
def parse_args():
"""Parse args."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--train_file",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/wiki_title_0114/train.jsonl",
)
parser.add_argument(
"--out_file",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/wiki_title_0114/train_qidcnt.json",
help="Regularization of each qid",
)
args = parser.parse_args()
return args
def get_counts(num_processes, file):
"""Get true anchor slice counts."""
pool = multiprocessing.Pool(processes=num_processes)
num_lines = sum(1 for _ in open(file))
qid_cnts = defaultdict(int)
for res in tqdm(
pool.imap_unordered(get_counts_hlp, open(file), chunksize=1000),
total=num_lines,
desc="Gathering counts",
):
for qid in res:
qid_cnts[qid] += res[qid]
pool.close()
pool.join()
return qid_cnts
def get_counts_hlp(line):
"""Get count helper."""
res = defaultdict(int) # qid -> cnt
line = ujson.loads(line)
for qid in line["qids"]:
res[qid] += 1
return res
def main():
"""Run."""
args = parse_args()
print(ujson.dumps(vars(args), indent=4))
num_processes = int(0.8 * multiprocessing.cpu_count())
print(f"Getting slice counts from {args.train_file}")
qid_cnts = get_counts(num_processes, args.train_file)
utils.dump_json_file(args.out_file, qid_cnts)
if __name__ == "__main__":
main()
| bootleg-master | bootleg/utils/preprocessing/get_train_qid_counts.py |
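A minimal sketch of the per-line counting helper above; the import path is assumed from the file_path column and the input line is illustrative.
import ujson
from bootleg.utils.preprocessing.get_train_qid_counts import get_counts_hlp
# Each training line is a JSON object with a "qids" list; the helper counts occurrences per QID.
line = ujson.dumps({"qids": ["Q76", "Q76", "Q30"]})
print(dict(get_counts_hlp(line)))  # expected: {'Q76': 2, 'Q30': 1}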
"""BootlegAnnotator."""
import logging
import os
import tarfile
import urllib
from pathlib import Path
from typing import Any, Dict, Union
import emmental
import numpy as np
import torch
from emmental.model import EmmentalModel
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from bootleg.dataset import extract_context, get_entity_string
from bootleg.end2end.annotator_utils import DownloadProgressBar
from bootleg.end2end.extract_mentions import MENTION_EXTRACTOR_OPTIONS
from bootleg.symbols.constants import PAD_ID
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.symbols.kg_symbols import KGSymbols
from bootleg.symbols.type_symbols import TypeSymbols
from bootleg.task_config import NED_TASK
from bootleg.tasks import ned_task
from bootleg.utils import data_utils
from bootleg.utils.model_utils import get_max_candidates
from bootleg.utils.parser.parser_utils import parse_boot_and_emm_args
from bootleg.utils.utils import load_yaml_file
logger = logging.getLogger(__name__)
BOOTLEG_MODEL_PATHS = {
"bootleg_uncased": "https://bootleg-ned-data.s3-us-west-1.amazonaws.com/models/latest/bootleg_uncased.tar.gz",
}
def get_default_cache():
"""Get default cache directory for saving Bootleg data."""
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME",
os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"),
)
)
return Path(torch_cache_home) / "bootleg"
def create_config(model_path, data_path, model_name):
"""Create Bootleg config.
Args:
model_path: model directory
data_path: data directory
model_name: model name
Returns: updated config
"""
config_file = model_path / model_name / "bootleg_config.yaml"
config_args = load_yaml_file(config_file)
# set the model checkpoint path
config_args["emmental"]["model_path"] = str(
model_path / model_name / "bootleg_wiki.pth"
)
# set the path for the entity db and candidate map
config_args["data_config"]["entity_dir"] = str(data_path / "entity_db")
config_args["data_config"]["alias_cand_map"] = "alias2qids"
# set the embedding paths
config_args["data_config"]["word_embedding"]["cache_dir"] = str(
data_path / "pretrained_bert_models"
)
# set log path
config_args["emmental"]["log_path"] = str(data_path / "log_dir")
config_args = parse_boot_and_emm_args(config_args)
return config_args
def create_sources(model_path, data_path, model_name):
"""Download Bootleg data and saves in log dir.
Args:
model_path: model directory
data_path: data directory
model_name: model name to download
"""
download_path = BOOTLEG_MODEL_PATHS[model_name]
if not (model_path / model_name).exists():
print(
f"{model_path / model_name} not found. Downloading from {download_path}.."
)
urllib.request.urlretrieve(
download_path,
filename=str(model_path / f"{model_name}.tar.gz"),
reporthook=DownloadProgressBar(),
)
print("Downloaded. Decompressing...")
tar = tarfile.open(str(model_path / f"{model_name}.tar.gz"), "r:gz")
tar.extractall(model_path)
tar.close()
if not (data_path / "entity_db").exists():
print(f"{data_path / 'entity_db'} not found. Downloading..")
urllib.request.urlretrieve(
"https://bootleg-ned-data.s3-us-west-1.amazonaws.com/data/latest/entity_db.tar.gz",
filename=str(data_path / "entity_db.tar.gz"),
reporthook=DownloadProgressBar(),
)
print("Downloaded. Decompressing...")
tar = tarfile.open(str(data_path / "entity_db.tar.gz"), "r:gz")
tar.extractall(data_path)
tar.close()
class BootlegAnnotator(object):
"""
Bootleg on-the-fly annotator.
BootlegAnnotator class: convenient wrapper of preprocessing and model
eval to allow for annotating single sentences at a time for quick
experimentation, e.g. in notebooks.
Args:
config: model config or path to config (default None)
device: model device, -1 for CPU (default None)
min_alias_len: minimum alias length (default 1)
max_alias_len: maximum alias length (default 6)
threshold: probability threshold (default 0.0)
cache_dir: cache directory (default None)
model_name: model name (default None)
entity_emb_file: entity embedding file (default None)
return_embs: whether to return entity embeddings or not (default False)
return_ctx_embs: whether to return context embeddings or not (default False)
extract_method: mention extraction method
verbose: verbose boolean (default False)
"""
def __init__(
self,
config: Union[str, Dict[str, Any]] = None,
device: int = None,
min_alias_len: int = 1,
max_alias_len: int = 6,
threshold: float = 0.0,
cache_dir: str = None,
model_name: str = None,
entity_emb_file: str = None,
return_embs: bool = False,
return_ctx_embs: bool = False,
extract_method: str = "spacy",
verbose: bool = False,
):
"""Bootleg annotator initializer."""
self.min_alias_len = min_alias_len
self.max_alias_len = max_alias_len
self.verbose = verbose
self.threshold = threshold
self.return_embs = return_embs
self.return_ctx_embs = return_ctx_embs
self.entity_emb_file = entity_emb_file
self.extract_method = extract_method
if self.entity_emb_file is not None:
assert Path(
self.entity_emb_file
).exists(), f"{self.entity_emb_file} must exist."
if not cache_dir:
self.cache_dir = get_default_cache()
self.model_path = self.cache_dir / "models"
self.data_path = self.cache_dir / "data"
else:
self.cache_dir = Path(cache_dir)
self.model_path = self.cache_dir / "models"
self.data_path = self.cache_dir / "data"
if not model_name:
model_name = "bootleg_uncased"
assert model_name in {
"bootleg_uncased",
}, f"model_name must be bootleg_uncased. You have {model_name}."
if not config:
self.cache_dir.mkdir(parents=True, exist_ok=True)
self.model_path.mkdir(parents=True, exist_ok=True)
self.data_path.mkdir(parents=True, exist_ok=True)
create_sources(self.model_path, self.data_path, model_name)
self.config = create_config(self.model_path, self.data_path, model_name)
else:
if isinstance(config, str):
config = load_yaml_file(config)
if "emmental" in config:
config = parse_boot_and_emm_args(config)
self.config = config
# Ensure some of the critical annotator args are the correct type
self.config.run_config.eval_batch_size = int(
self.config.run_config.eval_batch_size
)
self.config.data_config.max_seq_len = int(
self.config.data_config.max_seq_len
)
self.config.data_config.train_in_candidates = bool(
self.config.data_config.train_in_candidates
)
if entity_emb_file is not None:
assert Path(entity_emb_file).exists(), f"{entity_emb_file} does not exist"
if not device:
device = 0 if torch.cuda.is_available() else -1
if self.verbose:
self.config["run_config"]["log_level"] = "DEBUG"
else:
self.config["run_config"]["log_level"] = "INFO"
self.torch_device = (
torch.device(device) if device != -1 else torch.device("cpu")
)
self.config.model_config.device = device
log_level = logging.getLevelName(self.config["run_config"]["log_level"].upper())
emmental.init(
log_dir=self.config["meta_config"]["log_path"],
config=self.config,
use_exact_log_path=self.config["meta_config"]["use_exact_log_path"],
level=log_level,
)
logger.debug("Reading entity database")
self.entity_db = EntitySymbols.load_from_cache(
os.path.join(
self.config.data_config.entity_dir,
self.config.data_config.entity_map_dir,
),
alias_cand_map_dir=self.config.data_config.alias_cand_map,
alias_idx_dir=self.config.data_config.alias_idx_map,
)
self.all_aliases_trie = self.entity_db.get_all_alias_vocabtrie()
add_entity_type = self.config.data_config.entity_type_data.use_entity_types
self.type_symbols = None
        # If we do not have self.entity_emb_file, we need to generate the entity encoder input from metadata
if add_entity_type and self.entity_emb_file is None:
logger.debug("Reading entity type database")
self.type_symbols = TypeSymbols.load_from_cache(
os.path.join(
self.config.data_config.entity_dir,
self.config.data_config.entity_type_data.type_symbols_dir,
)
)
add_entity_kg = self.config.data_config.entity_kg_data.use_entity_kg
self.kg_symbols = None
        # If we do not have self.entity_emb_file, we need to generate the entity encoder input from metadata
if add_entity_kg and self.entity_emb_file is None:
logger.debug("Reading entity kg database")
self.kg_symbols = KGSymbols.load_from_cache(
os.path.join(
self.config.data_config.entity_dir,
self.config.data_config.entity_kg_data.kg_symbols_dir,
)
)
logger.debug("Reading word tokenizers")
self.tokenizer = AutoTokenizer.from_pretrained(
self.config.data_config.word_embedding.bert_model,
do_lower_case=True
if "uncased" in self.config.data_config.word_embedding.bert_model
else False,
cache_dir=self.config.data_config.word_embedding.cache_dir,
)
data_utils.add_special_tokens(self.tokenizer)
# Create tasks
self.task_to_label_dict = {NED_TASK: None}
# Create tasks
self.model = EmmentalModel(name="Bootleg")
task_to_add = ned_task.create_task(
self.config,
use_batch_cands=False,
len_context_tok=len(self.tokenizer),
entity_emb_file=self.entity_emb_file,
)
# As we manually keep track of the aliases for scoring, we only need the embeddings as action outputs
task_to_add.action_outputs = (
[("entity_encoder", 0), ("context_encoder", 0)]
if self.entity_emb_file is None
else [("entity_encoder_static", 0), ("context_encoder", 0)]
)
self.model.add_task(task_to_add)
logger.debug("Loading model")
# Load the best model from the pretrained model
assert (
self.config["model_config"]["model_path"] is not None
), "Must have a model to load in the model_path for the BootlegAnnotator"
self.model.load(self.config["model_config"]["model_path"])
self.model.eval()
def extract_mentions(self, text):
"""Mention extraction wrapper.
Args:
text: text to extract mentions from
Returns: JSON object of sentence to be used in eval
"""
found_aliases, found_spans, found_char_spans = MENTION_EXTRACTOR_OPTIONS[
self.extract_method
](text, self.all_aliases_trie, self.min_alias_len, self.max_alias_len)
return {
"sentence": text,
"aliases": found_aliases,
"char_spans": found_char_spans,
"cands": [self.entity_db.get_qid_cands(al) for al in found_aliases],
# we don't know the true QID
"qids": ["Q-1" for i in range(len(found_aliases))],
"gold": [True for i in range(len(found_aliases))],
}
def set_threshold(self, value):
"""Set threshold.
Args:
value: threshold value
"""
self.threshold = value
def label_mentions(
self,
text_list=None,
extracted_examples=None,
):
"""Extract mentions and runs disambiguation.
If user provides extracted_examples, we will ignore text_list.
Args:
text_list: list of text to disambiguate (or single string) (can be None if extracted_examples is not None)
            extracted_examples: List of Dicts with keys "sentence", "aliases", "char_spans", "cands" (QIDs) (optional)
Returns: Dict of
* ``qids``: final predicted QIDs,
* ``probs``: final predicted probs,
* ``titles``: final predicted titles,
* ``cands``: all entity candidates,
* ``cand_probs``: probabilities of all candidates,
* ``char_spans``: final extracted char spans,
* ``aliases``: final extracted aliases,
* ``embs``: final entity contextualized embeddings (if return_embs is True)
* ``cand_embs``: final candidate entity contextualized embeddings (if return_embs is True)
"""
# Check inputs are sane
do_extract_mentions = True
if extracted_examples is not None:
do_extract_mentions = False
assert (
type(extracted_examples) is list
), "Must provide a list of Dics for extracted_examples"
check_ex = extracted_examples[0]
assert (
len(
{"sentence", "aliases", "char_spans", "cands"}.intersection(
check_ex.keys()
)
)
== 4
), (
f"You must have keys of sentence, aliases, char_spans, and cands for extracted_examples. You have"
f"{extracted_examples[0].keys()}"
)
else:
assert (
text_list is not None
), "If you do not provide extracted_examples you must provide text_list"
if text_list is None:
assert (
extracted_examples is not None
), "If you do not provide text_list you must provide extracted_exampels"
else:
if type(text_list) is str:
text_list = [text_list]
else:
assert (
type(text_list) is list
and len(text_list) > 0
and type(text_list[0]) is str
), "We only accept inputs of strings and lists of strings"
# Get number of examples
if extracted_examples is not None:
num_exs = len(extracted_examples)
else:
num_exs = len(text_list)
ebs = int(self.config.run_config.eval_batch_size)
total_start_exs = 0
total_final_exs = 0
dropped_by_thresh = 0
batch_example_qid_cands = []
batch_example_eid_cands = []
batch_example_true_entities = []
batch_word_indices = []
batch_word_token_types = []
batch_word_attention = []
batch_ent_indices = []
batch_ent_token_types = []
batch_ent_attention = []
batch_char_spans_arr = []
batch_example_aliases = []
batch_idx_unq = []
for idx_unq in tqdm(
range(num_exs),
desc="Prepping data",
total=num_exs,
disable=not self.verbose,
):
if do_extract_mentions:
sample = self.extract_mentions(text_list[idx_unq])
else:
sample = extracted_examples[idx_unq]
# Add the unk qids and gold values
sample["qids"] = ["Q-1" for _ in range(len(sample["aliases"]))]
sample["gold"] = [True for _ in range(len(sample["aliases"]))]
total_start_exs += len(sample["aliases"])
for men_idx in range(len(sample["aliases"])):
# ====================================================
# GENERATE TEXT INPUTS
# ====================================================
inputs = self.get_sentence_tokens(sample, men_idx)
# ====================================================
# GENERATE CANDIDATE INPUTS
# ====================================================
example_qid_cands = [
"-1"
for _ in range(
get_max_candidates(self.entity_db, self.config.data_config)
)
]
example_eid_cands = [
-1
for _ in range(
get_max_candidates(self.entity_db, self.config.data_config)
)
]
# generate indexes into alias table.
alias_qids = np.array(sample["cands"][men_idx])
# first entry is the non candidate class (NC and eid 0) - used when train in cands is false
# if we train in candidates, this gets overwritten
example_qid_cands[0] = "NC"
example_qid_cands[
(not self.config.data_config.train_in_candidates) : len(alias_qids)
+ (not self.config.data_config.train_in_candidates)
] = sample["cands"][men_idx]
example_eid_cands[0] = 0
example_eid_cands[
(not self.config.data_config.train_in_candidates) : len(alias_qids)
+ (not self.config.data_config.train_in_candidates)
] = [self.entity_db.get_eid(q) for q in sample["cands"][men_idx]]
if not sample["qids"][men_idx] in alias_qids:
# assert not data_args.train_in_candidates
if not self.config.data_config.train_in_candidates:
# set class label to be "not in candidate set"
true_entity_idx = 0
else:
true_entity_idx = -2
else:
# Here we are getting the correct class label for training.
# Our training is "which of the max_entities entity candidates is the right one
# (class labels 1 to max_entities) or is it none of these (class label 0)".
# + (not discard_noncandidate_entities) is to ensure label 0 is
# reserved for "not in candidate set" class
true_entity_idx = np.nonzero(alias_qids == sample["qids"][men_idx])[
0
][0] + (not self.config.data_config.train_in_candidates)
# Get candidate tokens
example_cand_input_ids = []
example_cand_token_type_ids = []
example_cand_attention_mask = []
if self.entity_emb_file is None:
entity_tokens = [
self.get_entity_tokens(cand_qid)
for cand_qid in example_qid_cands
]
example_cand_input_ids = [
ent_toks["input_ids"] for ent_toks in entity_tokens
]
example_cand_token_type_ids = [
ent_toks["token_type_ids"] for ent_toks in entity_tokens
]
example_cand_attention_mask = [
ent_toks["attention_mask"] for ent_toks in entity_tokens
]
# ====================================================
# ACCUMULATE
# ====================================================
batch_example_qid_cands.append(example_qid_cands)
batch_example_eid_cands.append(example_eid_cands)
batch_example_true_entities.append(true_entity_idx)
batch_word_indices.append(inputs["input_ids"])
batch_word_token_types.append(inputs["token_type_ids"])
batch_word_attention.append(inputs["attention_mask"])
batch_ent_indices.append(example_cand_input_ids)
batch_ent_token_types.append(example_cand_token_type_ids)
batch_ent_attention.append(example_cand_attention_mask)
batch_example_aliases.append(sample["aliases"][men_idx])
batch_char_spans_arr.append(sample["char_spans"][men_idx])
batch_idx_unq.append(idx_unq)
batch_example_eid_cands = torch.tensor(batch_example_eid_cands).long()
batch_example_true_entities = torch.tensor(batch_example_true_entities)
final_pred_cands = [[] for _ in range(num_exs)]
final_all_cands = [[] for _ in range(num_exs)]
final_cand_probs = [[] for _ in range(num_exs)]
final_pred_probs = [[] for _ in range(num_exs)]
final_entity_embs = [[] for _ in range(num_exs)]
final_ctx_embs = [[] for _ in range(num_exs)]
final_entity_cand_embs = [[] for _ in range(num_exs)]
final_titles = [[] for _ in range(num_exs)]
final_char_spans = [[] for _ in range(num_exs)]
final_aliases = [[] for _ in range(num_exs)]
for b_i in tqdm(
range(0, len(batch_word_indices), ebs),
desc="Evaluating model",
disable=not self.verbose,
):
x_dict = self.get_forward_batch(
input_ids=torch.tensor(batch_word_indices)[b_i : b_i + ebs],
token_type_ids=torch.tensor(batch_word_token_types)[b_i : b_i + ebs],
attention_mask=torch.tensor(batch_word_attention)[b_i : b_i + ebs],
entity_token_ids=torch.tensor(batch_ent_indices)[b_i : b_i + ebs],
entity_type_ids=torch.tensor(batch_ent_token_types)[b_i : b_i + ebs],
entity_attention_mask=torch.tensor(batch_ent_attention)[
b_i : b_i + ebs
],
entity_cand_eid=batch_example_eid_cands[b_i : b_i + ebs],
generate_entity_inputs=(self.entity_emb_file is None),
)
x_dict["guid"] = torch.arange(b_i, b_i + ebs, device=self.torch_device)
with torch.no_grad():
res = self.model( # type: ignore
uids=x_dict["guid"],
X_dict=x_dict,
Y_dict=None,
task_to_label_dict=self.task_to_label_dict,
return_loss=False,
return_probs=True,
return_action_outputs=self.return_embs or self.return_ctx_embs,
)
del x_dict
if not self.return_embs and not self.return_ctx_embs:
(uid_bdict, _, prob_bdict, _) = res
output_embs = None
output_ctx_embs = None
else:
(uid_bdict, _, prob_bdict, _, out_bdict) = res
if self.return_embs:
output_embs = out_bdict[NED_TASK][
"entity_encoder_0"
if (self.entity_emb_file is None)
else "entity_encoder_static_0"
]
else:
output_embs = None
if self.return_ctx_embs:
output_ctx_embs = out_bdict[NED_TASK]["context_encoder_0"]
else:
output_ctx_embs = None
# ====================================================
# EVALUATE MODEL OUTPUTS
# ====================================================
# recover predictions
probs = prob_bdict[NED_TASK]
max_probs = probs.max(1)
max_probs_indices = probs.argmax(1)
for ex_i in range(probs.shape[0]):
idx_unq = batch_idx_unq[b_i + ex_i]
entity_cands = batch_example_qid_cands[b_i + ex_i]
# batch size is 1 so we can reshape
probs_ex = probs[ex_i].tolist()
true_entity_pos_idx = batch_example_true_entities[b_i + ex_i]
if true_entity_pos_idx != PAD_ID:
pred_idx = max_probs_indices[ex_i]
pred_prob = max_probs[ex_i].item()
pred_qid = entity_cands[pred_idx]
if pred_prob > self.threshold:
final_all_cands[idx_unq].append(entity_cands)
final_cand_probs[idx_unq].append(probs_ex)
final_pred_cands[idx_unq].append(pred_qid)
final_pred_probs[idx_unq].append(pred_prob)
if self.return_embs:
final_entity_embs[idx_unq].append(
output_embs[ex_i][pred_idx]
)
final_entity_cand_embs[idx_unq].append(output_embs[ex_i])
if self.return_ctx_embs:
final_ctx_embs[idx_unq].append(output_ctx_embs[ex_i])
final_aliases[idx_unq].append(batch_example_aliases[b_i + ex_i])
final_char_spans[idx_unq].append(
batch_char_spans_arr[b_i + ex_i]
)
final_titles[idx_unq].append(
self.entity_db.get_title(pred_qid)
if pred_qid != "NC"
else "NC"
)
total_final_exs += 1
else:
dropped_by_thresh += 1
assert total_final_exs + dropped_by_thresh == total_start_exs, (
f"Something went wrong and we have predicted fewer mentions than extracted. "
f"Start {total_start_exs}, Out {total_final_exs}, No cand {dropped_by_thresh}"
)
res_dict = {
"qids": final_pred_cands,
"probs": final_pred_probs,
"titles": final_titles,
"cands": final_all_cands,
"cand_probs": final_cand_probs,
"char_spans": final_char_spans,
"aliases": final_aliases,
}
if self.return_embs:
res_dict["embs"] = final_entity_embs
res_dict["cand_embs"] = final_entity_cand_embs
if self.return_ctx_embs:
res_dict["ctx_embs"] = final_ctx_embs
return res_dict
def get_sentence_tokens(self, sample, men_idx):
"""
Get context tokens.
Args:
sample: Dict sample after extraction
men_idx: mention index to select
Returns: Dict of tokenized outputs
"""
span = sample["char_spans"][men_idx]
context = extract_context(
span,
sample["sentence"],
self.config.data_config.max_seq_window_len,
self.tokenizer,
)
inputs = self.tokenizer(
context,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=self.config.data_config.max_seq_len,
return_overflowing_tokens=False,
)
return inputs
def get_entity_tokens(self, qid):
"""
Get entity tokens.
Args:
qid: entity QID
Returns:
Dict of input tokens for forward pass.
"""
constants = {
"max_ent_len": self.config.data_config.max_ent_len,
"max_ent_type_len": self.config.data_config.entity_type_data.max_ent_type_len,
"max_ent_kg_len": self.config.data_config.entity_kg_data.max_ent_kg_len,
"use_types": self.config.data_config.entity_type_data.use_entity_types,
"use_kg": self.config.data_config.entity_kg_data.use_entity_kg,
"use_desc": self.config.data_config.use_entity_desc,
}
ent_str, title_spans, over_type_len, over_kg_len = get_entity_string(
qid,
constants,
self.entity_db,
self.kg_symbols,
self.type_symbols,
)
inputs = self.tokenizer(
ent_str,
padding="max_length",
add_special_tokens=True,
truncation=True,
max_length=constants["max_ent_len"],
)
return inputs
def get_forward_batch(
self,
input_ids,
token_type_ids,
attention_mask,
entity_token_ids,
entity_type_ids,
entity_attention_mask,
entity_cand_eid,
generate_entity_inputs,
):
"""Generate emmental batch.
Args:
input_ids: word token ids
token_type_ids: word token type ids
            attention_mask: word attention mask
entity_token_ids: entity token ids
entity_type_ids: entity type ids
entity_attention_mask: entity attention mask
entity_cand_eid: entity candidate eids
generate_entity_inputs: whether to generate entity id inputs
Returns: X_dict for emmental
"""
entity_cand_eval_mask = entity_cand_eid == -1
entity_cand_eid_noneg = torch.where(
entity_cand_eid >= 0,
entity_cand_eid,
(
torch.ones_like(entity_cand_eid, dtype=torch.long)
* (self.entity_db.num_entities_with_pad_and_nocand - 1)
),
)
X_dict = {
"guids": [],
"input_ids": input_ids.to(self.torch_device),
"token_type_ids": token_type_ids.to(self.torch_device),
"attention_mask": attention_mask.to(self.torch_device),
"entity_cand_eid": entity_cand_eid_noneg.to(self.torch_device),
"entity_cand_eval_mask": entity_cand_eval_mask.to(self.torch_device),
}
if generate_entity_inputs:
X_dict["entity_cand_input_ids"] = entity_token_ids.to(self.torch_device)
X_dict["entity_cand_token_type_ids"] = entity_type_ids.to(self.torch_device)
X_dict["entity_cand_attention_mask"] = entity_attention_mask.to(
self.torch_device
)
return X_dict
| bootleg-master | bootleg/end2end/bootleg_annotator.py |
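A minimal, hedged usage sketch for the annotator above: the constructor arguments and the keys of the returned dict come from the class and label_mentions docstrings. On first use the default code path downloads the pretrained model and entity_db into the cache, so this is a heavy call; the sentence and printed output are illustrative only.
from bootleg.end2end.bootleg_annotator import BootlegAnnotator
ann = BootlegAnnotator(device=-1, return_embs=False, verbose=False)  # device=-1 forces CPU
res = ann.label_mentions("Lincoln is the capital of Nebraska")
# res maps "qids", "probs", "titles", "cands", "cand_probs", "char_spans", "aliases"
# to one list per input sentence.
print(res["titles"])
print(res["qids"])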
"""End2End init."""
| bootleg-master | bootleg/end2end/__init__.py |
"""Annotator utils."""
import progressbar
class DownloadProgressBar:
"""Progress bar."""
def __init__(self):
"""Progress bar initializer."""
self.pbar = None
def __call__(self, block_num, block_size, total_size):
"""Call."""
if not self.pbar:
self.pbar = progressbar.ProgressBar(
maxval=total_size if total_size > 0 else 1e-2
)
self.pbar.start()
downloaded = block_num * block_size
if downloaded < total_size:
self.pbar.update(downloaded)
else:
self.pbar.finish()
| bootleg-master | bootleg/end2end/annotator_utils.py |
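A minimal sketch of how the progress bar above is typically wired into urllib downloads, mirroring create_sources in bootleg_annotator.py; the URL and filename are placeholders.
import urllib.request
from bootleg.end2end.annotator_utils import DownloadProgressBar
urllib.request.urlretrieve(
    "https://example.com/archive.tar.gz",  # placeholder URL
    filename="archive.tar.gz",
    reporthook=DownloadProgressBar(),  # urlretrieve calls this with (block_num, block_size, total_size)
)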
"""
Extract mentions.
This file takes in a jsonlines file with sentences
and extracts aliases and spans using a pre-computed alias table.
"""
import argparse
import logging
import multiprocessing
import os
import time
import jsonlines
import numpy as np
from tqdm.auto import tqdm
from bootleg.symbols.constants import ANCHOR_KEY
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils.classes.nested_vocab_tries import VocabularyTrie
from bootleg.utils.mention_extractor_utils import (
ngram_spacy_extract_aliases,
spacy_extract_aliases,
)
logger = logging.getLogger(__name__)
MENTION_EXTRACTOR_OPTIONS = {
"ngram_spacy": ngram_spacy_extract_aliases,
"spacy": spacy_extract_aliases,
}
def parse_args():
"""Generate args."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--in_file", type=str, required=True, help="File to extract mentions from"
)
parser.add_argument(
"--out_file",
type=str,
required=True,
help="File to write extracted mentions to",
)
parser.add_argument(
"--entity_db_dir", type=str, required=True, help="Path to entity db"
)
parser.add_argument(
"--extract_method",
type=str,
choices=list(MENTION_EXTRACTOR_OPTIONS.keys()),
default="ngram_spacy",
)
parser.add_argument("--min_alias_len", type=int, default=1)
parser.add_argument("--max_alias_len", type=int, default=6)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--num_chunks", type=int, default=8)
parser.add_argument("--verbose", action="store_true")
return parser.parse_args()
def create_out_line(sent_obj, final_aliases, final_spans, found_char_spans):
"""Create JSON output line.
Args:
sent_obj: input sentence JSON
final_aliases: list of final aliases
final_spans: list of final spans
found_char_spans: list of final char spans
Returns: JSON object
"""
sent_obj["aliases"] = final_aliases
sent_obj["spans"] = final_spans
sent_obj["char_spans"] = found_char_spans
# we don't know the true QID (or even if there is one) at this stage
# we assign to the most popular candidate for now so models w/o NIL can also evaluate this data
sent_obj["qids"] = ["Q-1"] * len(final_aliases)
# global alias2qids
# sent_obj["qids"] = [alias2qids[alias][0] for alias in final_aliases]
sent_obj[ANCHOR_KEY] = [True] * len(final_aliases)
return sent_obj
def chunk_text_data(input_src, chunk_files, chunk_size, num_lines):
"""Chunk text input file into chunk_size chunks.
Args:
input_src: input file
chunk_files: list of chunk file names
chunk_size: chunk size in number of lines
num_lines: total number of lines
"""
logger.debug(f"Reading in {input_src}")
start = time.time()
# write out chunks as text data
chunk_id = 0
num_lines_in_chunk = 0
# keep track of what files are written
out_file = open(chunk_files[chunk_id], "w")
with open(input_src, "r", encoding="utf-8") as in_file:
for i, line in enumerate(in_file):
out_file.write(line)
num_lines_in_chunk += 1
# move on to new chunk when it hits chunk size
if num_lines_in_chunk == chunk_size:
chunk_id += 1
# reset number of lines in chunk and open new file if not at end
num_lines_in_chunk = 0
out_file.close()
if i < (num_lines - 1):
out_file = open(chunk_files[chunk_id], "w")
out_file.close()
logger.debug(f"Wrote out data chunks in {round(time.time() - start, 2)}s")
def subprocess(args):
"""
    Extract mentions in a single process over one chunk.
Args:
args: subprocess args
"""
in_file = args["in_file"]
out_file = args["out_file"]
    extract_method = args["extract_method"]
min_alias_len = args["min_alias_len"]
max_alias_len = args["max_alias_len"]
verbose = args["verbose"]
all_aliases = VocabularyTrie(load_dir=args["all_aliases_trie_f"])
num_lines = sum(1 for _ in open(in_file))
with jsonlines.open(in_file) as f_in, jsonlines.open(out_file, "w") as f_out:
for line in tqdm(
f_in, total=num_lines, disable=not verbose, desc="Processing data"
):
found_aliases, found_spans, found_char_spans = MENTION_EXTRACTOR_OPTIONS[
                extract_method
](line["sentence"], all_aliases, min_alias_len, max_alias_len)
f_out.write(
create_out_line(line, found_aliases, found_spans, found_char_spans)
)
def merge_files(chunk_outfiles, out_filepath):
"""Merge output files.
Args:
chunk_outfiles: list of chunk files
out_filepath: final output file path
"""
sent_idx_unq = 0
with jsonlines.open(out_filepath, "w") as f_out:
for file in chunk_outfiles:
with jsonlines.open(file) as f_in:
for line in f_in:
if "sent_idx_unq" not in line:
line["sent_idx_unq"] = sent_idx_unq
f_out.write(line)
sent_idx_unq += 1
def extract_mentions(
in_filepath,
out_filepath,
entity_db_dir,
extract_method="ngram_spacy",
min_alias_len=1,
max_alias_len=6,
num_workers=8,
num_chunks=None,
verbose=False,
):
"""Extract mentions from file.
Args:
in_filepath: input file
out_filepath: output file
entity_db_dir: path to entity db
extract_method: mention extraction method
min_alias_len: minimum alias length (in words)
max_alias_len: maximum alias length (in words)
num_workers: number of multiprocessing workers
num_chunks: number of subchunks to feed to workers
verbose: verbose boolean
"""
assert os.path.exists(in_filepath), f"{in_filepath} does not exist"
entity_symbols: EntitySymbols = EntitySymbols.load_from_cache(entity_db_dir)
all_aliases_trie: VocabularyTrie = entity_symbols.get_all_alias_vocabtrie()
if num_chunks is None:
num_chunks = num_workers
start_time = time.time()
# multiprocessing
if num_workers > 1:
prep_dir = os.path.join(os.path.dirname(out_filepath), "prep")
os.makedirs(prep_dir, exist_ok=True)
all_aliases_trie_f = os.path.join(prep_dir, "mention_extract_alias.marisa")
all_aliases_trie.dump(all_aliases_trie_f)
# chunk file for multiprocessing
num_lines = sum([1 for _ in open(in_filepath)])
num_processes = min(num_workers, int(multiprocessing.cpu_count()))
num_chunks = min(num_lines, num_chunks)
logger.debug(f"Using {num_processes} workers...")
chunk_size = int(np.ceil(num_lines / num_chunks))
chunk_file_path = os.path.join(prep_dir, "data_chunk")
chunk_infiles = [
f"{chunk_file_path}_{chunk_id}_in.jsonl" for chunk_id in range(num_chunks)
]
chunk_text_data(in_filepath, chunk_infiles, chunk_size, num_lines)
logger.debug("Calling subprocess...")
# call subprocesses on chunks
pool = multiprocessing.Pool(processes=num_processes)
chunk_outfiles = [
f"{chunk_file_path}_{chunk_id}_out.jsonl" for chunk_id in range(num_chunks)
]
subprocess_args = [
{
"in_file": chunk_infiles[i],
"out_file": chunk_outfiles[i],
"extract_method": extract_method,
"min_alias_len": min_alias_len,
"max_alias_len": max_alias_len,
"all_aliases_trie_f": all_aliases_trie_f,
"verbose": verbose,
}
for i in range(num_chunks)
]
pool.map(subprocess, subprocess_args)
pool.close()
pool.join()
logger.debug("Merging files...")
# write all chunks back in single file
merge_files(chunk_outfiles, out_filepath)
logger.debug("Removing temporary files...")
# clean up and remove chunked files
for file in chunk_infiles:
try:
os.remove(file)
except PermissionError:
pass
for file in chunk_outfiles:
try:
os.remove(file)
except PermissionError:
pass
try:
os.remove(all_aliases_trie_f)
except PermissionError:
pass
# single process
else:
logger.debug("Using 1 worker...")
with jsonlines.open(in_filepath, "r") as in_file, jsonlines.open(
out_filepath, "w"
) as out_file:
sent_idx_unq = 0
for line in in_file:
(
found_aliases,
found_spans,
found_char_spans,
) = MENTION_EXTRACTOR_OPTIONS[extract_method](
line["sentence"], all_aliases_trie, min_alias_len, max_alias_len
)
new_line = create_out_line(
line, found_aliases, found_spans, found_char_spans
)
if "sent_idx_unq" not in new_line:
new_line["sent_idx_unq"] = sent_idx_unq
sent_idx_unq += 1
out_file.write(new_line)
logger.debug(
f"Finished in {time.time() - start_time} seconds. Wrote out to {out_filepath}"
)
def main():
"""Run."""
args = parse_args()
in_file = args.in_file
out_file = args.out_file
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
print(args)
extract_mentions(
in_file,
out_file,
entity_db_dir=args.entity_db_dir,
min_alias_len=args.min_alias_len,
max_alias_len=args.max_alias_len,
num_workers=args.num_workers,
num_chunks=args.num_chunks,
verbose=args.verbose,
)
if __name__ == "__main__":
main()
| bootleg-master | bootleg/end2end/extract_mentions.py |
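A minimal usage sketch for the extraction entry point above; the file paths are placeholders, and entity_db_dir is assumed to point at a saved EntitySymbols directory (the directory passed to EntitySymbols.load_from_cache).
from bootleg.end2end.extract_mentions import extract_mentions
extract_mentions(
    in_filepath="sentences.jsonl",                 # one {"sentence": ...} object per line
    out_filepath="sentences_with_mentions.jsonl",  # gets aliases/spans/char_spans added
    entity_db_dir="data/entity_db/entity_mappings",  # placeholder path to saved EntitySymbols
    extract_method="ngram_spacy",
    min_alias_len=1,
    max_alias_len=6,
    num_workers=1,  # single-process path; >1 chunks the file and uses a multiprocessing pool
)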
"""KG symbols class."""
import copy
import os
import re
from typing import Dict, List, Optional, Set, Union
from tqdm.auto import tqdm
from bootleg.symbols.constants import edit_op
from bootleg.utils import utils
from bootleg.utils.classes.nested_vocab_tries import ThreeLayerVocabularyTrie
def _convert_to_trie(qid2relations, max_connections):
all_relations = set()
all_qids = set()
qid2relations_filt = {}
for q, rel_dict in qid2relations.items():
qid2relations_filt[q] = {}
for rel, tail_qs in rel_dict.items():
all_qids.update(set(tail_qs))
all_relations.add(rel)
qid2relations_filt[q][rel] = tail_qs[:max_connections]
qid2relations_trie = ThreeLayerVocabularyTrie(
input_dict=qid2relations_filt,
key_vocabulary=all_relations,
value_vocabulary=all_qids,
max_value=max_connections,
)
return qid2relations_trie
class KGSymbols:
"""KG Symbols class for managing KG metadata."""
def __init__(
self,
qid2relations: Union[Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie],
max_connections: Optional[int] = 50,
edit_mode: Optional[bool] = False,
verbose: Optional[bool] = False,
):
"""KG initializer.
        max_connections is the maximum number of connections stored for any single relation.
max_connections * 2 is the max number of connections across all relations for a
given entity (see ThreeLayerVocabularyTrie).
"""
self.max_connections = max_connections
self.edit_mode = edit_mode
self.verbose = verbose
if self.edit_mode:
self._load_edit_mode(
qid2relations,
)
else:
self._load_non_edit_mode(
qid2relations,
)
def _load_edit_mode(
self,
qid2relations: Union[Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie],
):
"""Load relations in edit mode."""
if isinstance(qid2relations, ThreeLayerVocabularyTrie):
self._qid2relations: Union[
Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie
] = qid2relations.to_dict()
else:
self._qid2relations: Union[
Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie
] = {
head_qid: {
rel: tail_qids[: self.max_connections]
for rel, tail_qids in rel_dict.items()
}
for head_qid, rel_dict in qid2relations.items()
}
self._obj2head: Union[Dict[str, set], None] = {}
self._all_relations: Union[Set[str], None] = set()
for qid in tqdm(
self._qid2relations,
total=len(self._qid2relations),
desc="Checking relations and building edit mode objs",
disable=not self.verbose,
):
for rel in self._qid2relations[qid]:
self._all_relations.add(rel)
for qid2 in self._qid2relations[qid][rel]:
if qid2 not in self._obj2head:
self._obj2head[qid2] = set()
self._obj2head[qid2].add(qid)
def _load_non_edit_mode(
self,
qid2relations: Union[Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie],
):
"""Load relations in not edit mode."""
if isinstance(qid2relations, dict):
self._qid2relations: Union[
Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie
] = _convert_to_trie(qid2relations, self.max_connections)
else:
self._qid2relations: Union[
Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie
] = qid2relations
self._all_relations: Union[Set[str], None] = None
self._obj2head: Union[Dict[str, set], None] = None
def save(self, save_dir, prefix=""):
"""Dump the kg symbols.
Args:
save_dir: directory string to save
            prefix: prefix to add to the beginning of the file
"""
utils.ensure_dir(str(save_dir))
utils.dump_json_file(
filename=os.path.join(save_dir, "config.json"),
contents={
"max_connections": self.max_connections,
},
)
if isinstance(self._qid2relations, dict):
qid2relations = _convert_to_trie(self._qid2relations, self.max_connections)
qid2relations.dump(os.path.join(save_dir, f"{prefix}qid2relations"))
else:
self._qid2relations.dump(os.path.join(save_dir, f"{prefix}qid2relations"))
@classmethod
def load_from_cache(cls, load_dir, prefix="", edit_mode=False, verbose=False):
"""Load type symbols from load_dir.
Args:
load_dir: directory to load from
            prefix: prefix to add to the beginning of the file
edit_mode: edit mode
verbose: verbose flag
        Returns: KGSymbols
"""
config = utils.load_json_file(filename=os.path.join(load_dir, "config.json"))
max_connections = config["max_connections"]
# For backwards compatibility, check if trie directory exists, otherwise load from json
rel_load_dir = os.path.join(load_dir, f"{prefix}qid2relations")
if not os.path.exists(rel_load_dir):
qid2relations: Union[
Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie
] = utils.load_json_file(
filename=os.path.join(load_dir, f"{prefix}qid2relations.json")
)
            # Make sure the relation is _not_ a PID. The user should provide a qid2relations dict whose relations are already translated to readable names
first_qid = next(iter(qid2relations.keys()))
first_rel = next(iter(qid2relations[first_qid].keys()))
if re.match("^P[0-9]+$", first_rel):
raise ValueError(
"Your qid2relations dict has a relation as a PID identifier. Please replace "
"with human readable strings for training. "
"See https://www.wikidata.org/wiki/Wikidata:Database_reports/List_of_properties/all"
)
else:
qid2relations: Union[
Dict[str, Dict[str, List[str]]], ThreeLayerVocabularyTrie
] = ThreeLayerVocabularyTrie(
load_dir=rel_load_dir, max_value=max_connections
)
return cls(qid2relations, max_connections, edit_mode, verbose)
def get_qid2relations_dict(self):
"""Return a dictionary form of the relation to qid mappings object.
Returns: Dict of relation to head qid to list of tail qids
"""
if isinstance(self._qid2relations, dict):
return copy.deepcopy(self._qid2relations)
else:
return self._qid2relations.to_dict()
def get_all_relations(self):
"""Get all relations in our KG mapping.
Returns: Set
"""
if isinstance(self._qid2relations, dict):
return self._all_relations
else:
return set(self._qid2relations.key_vocab_keys())
def get_relations_between(self, qid1, qid2):
"""Check if two QIDs are connected in KG and returns the relations between then.
Args:
qid1: QID one
qid2: QID two
        Returns: set of relation strings (possibly empty)
"""
rel_dict = {}
if isinstance(self._qid2relations, dict):
rel_dict = self._qid2relations.get(qid1, {})
else:
if self._qid2relations.is_key_in_trie(qid1):
rel_dict = self._qid2relations.get_value(qid1)
rels = set()
for rel, tail_qids in rel_dict.items():
if qid2 in set(tail_qids):
rels.add(rel)
return rels
def get_relations_tails_for_qid(self, qid):
"""Get dict of relation to tail qids for given qid.
Args:
qid: QID
Returns: Dict relation to list of tail qids for that relation
"""
if isinstance(self._qid2relations, dict):
return self._qid2relations.get(qid, {})
else:
rel_dict = {}
if self._qid2relations.is_key_in_trie(qid):
rel_dict = self._qid2relations.get_value(qid)
return rel_dict
# ============================================================
# EDIT MODE OPERATIONS
# ============================================================
@edit_op
def add_relation(self, qid, relation, qid2):
"""Add a relationship triple to our mapping.
        If the QID already has the maximum number of connections through ``relation``,
        the last tail QID is removed and replaced by ``qid2``.
Args:
qid: head entity QID
relation: relation
            qid2: tail entity QID
"""
if relation not in self._all_relations:
self._all_relations.add(relation)
if relation not in self._qid2relations[qid]:
self._qid2relations[qid][relation] = []
# Check if qid2 already in that relation
if qid2 in self._qid2relations[qid][relation]:
return
if len(self._qid2relations[qid][relation]) >= self.max_connections:
qid_to_remove = self._qid2relations[qid][relation][-1]
self.remove_relation(qid, relation, qid_to_remove)
assert len(self._qid2relations[qid][relation]) < self.max_connections, (
f"Something went wrong and we still have more that {self.max_connections} "
f"relations when removing {qid}, {relation}, {qid2}"
)
self._qid2relations[qid][relation].append(qid2)
if qid2 not in self._obj2head:
self._obj2head[qid2] = set()
self._obj2head[qid2].add(qid)
return
@edit_op
def remove_relation(self, qid, relation, qid2):
"""Remove a relation triple from our mapping.
Args:
qid: head entity QID
relation: relation
qid2: tail entity QID
"""
if relation not in self._qid2relations[qid]:
return
if qid2 not in self._qid2relations[qid][relation]:
return
self._qid2relations[qid][relation].remove(qid2)
self._obj2head[qid2].remove(qid)
# If no connections, remove relation
if len(self._qid2relations[qid][relation]) <= 0:
del self._qid2relations[qid][relation]
if len(self._obj2head[qid2]) <= 0:
del self._obj2head[qid2]
return
@edit_op
def add_entity(self, qid, relation_dict):
"""Add a new entity to our relation mapping.
Args:
qid: QID
relation_dict: dictionary of relation -> list of connected other_qids by relation
"""
if qid in self._qid2relations:
raise ValueError(f"{qid} is already in kg symbols")
for relation in relation_dict:
if relation not in self._all_relations:
self._all_relations.add(relation)
self._qid2relations[qid] = relation_dict.copy()
for rel in self._qid2relations[qid]:
self._qid2relations[qid][rel] = self._qid2relations[qid][rel][
: self.max_connections
]
# Use self._qid2relations[qid] rather than relation_dict as the former is limited by max connections
for rel in self._qid2relations[qid]:
for obj_qid in self._qid2relations[qid][rel]:
if obj_qid not in self._obj2head:
self._obj2head[obj_qid] = set()
self._obj2head[obj_qid].add(qid)
return
@edit_op
def reidentify_entity(self, old_qid, new_qid):
"""Rename ``old_qid`` to ``new_qid``.
Args:
old_qid: old QID
new_qid: new QID
"""
if old_qid not in self._qid2relations or new_qid in self._qid2relations:
raise ValueError(
f"Either old qid {old_qid} is not in kg symbols or new qid {new_qid} is already in kg symbols"
)
        # Update all object qids (i.e., subject-object pairs where the object is the old qid)
for subj_qid in self._obj2head.get(old_qid, {}):
for rel in self._qid2relations[subj_qid]:
if old_qid in self._qid2relations[subj_qid][rel]:
for j in range(len(self._qid2relations[subj_qid][rel])):
if self._qid2relations[subj_qid][rel][j] == old_qid:
self._qid2relations[subj_qid][rel][j] = new_qid
# Update all subject qids - take the set union in case a subject has the same object with different relations
for obj_qid in set().union(
*[
set(self._qid2relations[old_qid][rel])
for rel in self._qid2relations[old_qid]
]
):
            # May get a cyclic relationship and the obj qid will already have been transformed
if obj_qid == new_qid:
obj_qid = old_qid
assert (
old_qid in self._obj2head[obj_qid]
), f"{old_qid} {obj_qid} {self._obj2head[obj_qid]}"
self._obj2head[obj_qid].remove(old_qid)
self._obj2head[obj_qid].add(new_qid)
# Update qid2relations and the object2head mappings
self._qid2relations[new_qid] = self._qid2relations[old_qid]
del self._qid2relations[old_qid]
if old_qid in self._obj2head:
self._obj2head[new_qid] = self._obj2head[old_qid]
del self._obj2head[old_qid]
@edit_op
def prune_to_entities(self, entities_to_keep):
"""Remove all entities except those in ``entities_to_keep``.
Args:
entities_to_keep: Set of entities to keep
"""
# Update qid2relations
self._qid2relations = {
k: v for k, v in self._qid2relations.items() if k in entities_to_keep
}
new_obj2head = {}
# Update all object qids
for qid in self._qid2relations:
for rel in list(self._qid2relations[qid].keys()):
filtered_object_ents = [
j for j in self._qid2relations[qid][rel] if j in entities_to_keep
][: self.max_connections]
                # Keep the relation only if at least one object remains
if len(filtered_object_ents) > 0:
self._qid2relations[qid][rel] = filtered_object_ents
for obj_qid in filtered_object_ents:
if obj_qid not in new_obj2head:
new_obj2head[obj_qid] = set()
new_obj2head[obj_qid].add(qid)
else:
del self._qid2relations[qid][rel]
self._obj2head = new_obj2head
| bootleg-master | bootleg/symbols/kg_symbols.py |
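A minimal sketch of building KG symbols from an in-memory dict and querying them; edit_mode=True is used so the plain-dict code path shown in this file is exercised, and the QIDs and relation name are illustrative.
from bootleg.symbols.kg_symbols import KGSymbols
qid2relations = {"Q1": {"sibling": ["Q2", "Q3"]}, "Q2": {"sibling": ["Q1"]}}
kg = KGSymbols(qid2relations, max_connections=2, edit_mode=True)
print(kg.get_relations_between("Q1", "Q2"))      # expected: {'sibling'}
print(kg.get_relations_tails_for_qid("Q1"))      # expected: {'sibling': ['Q2', 'Q3']}
kg.add_relation("Q1", "sibling", "Q4")           # at max_connections, the last tail (Q3) is evicted
print(kg.get_relations_tails_for_qid("Q1"))      # expected: {'sibling': ['Q2', 'Q4']}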
"""Entity profile."""
import logging
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import ujson
from pydantic import BaseModel, ValidationError
from tqdm.auto import tqdm
from bootleg.symbols.constants import check_qid_exists, edit_op
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.symbols.kg_symbols import KGSymbols
from bootleg.symbols.type_symbols import TypeSymbols
from bootleg.utils.utils import get_lnrm
logger = logging.getLogger(__name__)
ENTITY_SUBFOLDER = "entity_mappings"
TYPE_SUBFOLDER = "type_mappings"
KG_SUBFOLDER = "kg_mappings"
REQUIRED_KEYS = ["entity_id", "mentions"]
OTHER_KEYS = ["title", "types", "relations"]
class EntityObj(BaseModel):
"""Base entity object class to check types."""
entity_id: str
mentions: List[Tuple[str, float]]
title: str
description: str
types: Optional[Dict[str, List[str]]]
relations: Optional[List[Dict[str, str]]]
class EntityProfile:
"""Entity Profile object to handle and manage entity, type, and KG metadata."""
def __init__(
self,
entity_symbols,
type_systems=None,
kg_symbols=None,
edit_mode=False,
verbose=False,
):
"""Entity profile initializer."""
self.edit_mode = edit_mode
self.verbose = verbose
self._entity_symbols = entity_symbols
self._type_systems = type_systems
self._kg_symbols = kg_symbols
def save(self, save_dir):
"""Save the profile.
Args:
save_dir: save directory
"""
save_dir = Path(save_dir)
self._entity_symbols.save(save_dir / ENTITY_SUBFOLDER)
for type_sys in self._type_systems:
self._type_systems[type_sys].save(save_dir / TYPE_SUBFOLDER / type_sys)
if self._kg_symbols is not None:
self._kg_symbols.save(save_dir / KG_SUBFOLDER)
@classmethod
def load_from_cache(
cls,
load_dir,
edit_mode=False,
verbose=False,
no_kg=False,
no_type=False,
type_systems_to_load=None,
):
"""Load a pre-saved profile.
Args:
load_dir: load directory
edit_mode: edit mode flag, default False
verbose: verbose flag, default False
no_kg: load kg or not flag, default False
no_type: load types or not flag, default False. If True, this will ignore type_systems_to_load.
type_systems_to_load: list of type systems to load, default is None which means all types systems
Returns: entity profile object
"""
# Check type system input
load_dir = Path(load_dir)
type_subfolder = load_dir / TYPE_SUBFOLDER
if type_systems_to_load is not None:
if not isinstance(type_systems_to_load, list):
raise ValueError(
f"`type_systems` must be a list of subfolders in {type_subfolder}"
)
for sys in type_systems_to_load:
if sys not in list([p.name for p in type_subfolder.iterdir()]):
raise ValueError(
f"`type_systems` must be a list of subfolders in {type_subfolder}. {sys} is not one."
)
if verbose:
print("Loading Entity Symbols")
entity_symbols = EntitySymbols.load_from_cache(
load_dir / ENTITY_SUBFOLDER,
edit_mode=edit_mode,
verbose=verbose,
)
if no_type:
print(
"Not loading type information. We will act as if there is no types associated with any entity "
"and will not modify the types in any way, even if calling `add`."
)
type_sys_dict = {}
for fold in type_subfolder.iterdir():
if (
(not no_type)
and (type_systems_to_load is None or fold.name in type_systems_to_load)
and (fold.is_dir())
):
if verbose:
print(f"Loading Type Symbols from {fold}")
type_sys_dict[fold.name] = TypeSymbols.load_from_cache(
type_subfolder / fold.name,
edit_mode=edit_mode,
verbose=verbose,
)
if verbose:
print("Loading KG Symbols")
if no_kg:
print(
"Not loading KG information. We will act as if there is no KG connections between entities. "
"We will not modify the KG information in any way, even if calling `add`."
)
kg_symbols = None
if not no_kg:
kg_symbols = KGSymbols.load_from_cache(
load_dir / KG_SUBFOLDER,
edit_mode=edit_mode,
verbose=verbose,
)
return cls(entity_symbols, type_sys_dict, kg_symbols, edit_mode, verbose)
@classmethod
def load_from_jsonl(
cls,
profile_file,
max_candidates=30,
max_types=10,
max_kg_connections=100,
edit_mode=False,
):
"""Load an entity profile from the raw jsonl file.
Each line is a JSON object with entity metadata.
Example object::
{
"entity_id": "C000",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
}
Args:
profile_file: file where jsonl data lives
max_candidates: maximum entity candidates
max_types: maximum types per entity
max_kg_connections: maximum KG connections per entity
edit_mode: edit mode
Returns: entity profile object
"""
(
qid2title,
qid2desc,
alias2qids,
type_systems,
qid2relations,
) = cls._read_profile_file(profile_file)
entity_symbols = EntitySymbols(
alias2qids=alias2qids,
qid2title=qid2title,
qid2desc=qid2desc,
max_candidates=max_candidates,
edit_mode=edit_mode,
)
all_type_symbols = {
ty_name: TypeSymbols(
qid2typenames=type_map, max_types=max_types, edit_mode=edit_mode
)
for ty_name, type_map in type_systems.items()
}
kg_symbols = KGSymbols(
qid2relations, max_connections=max_kg_connections, edit_mode=edit_mode
)
return cls(entity_symbols, all_type_symbols, kg_symbols, edit_mode)
@classmethod
def _read_profile_file(cls, profile_file):
"""Read profile data helper.
Args:
profile_file: file where jsonl data lives
        Returns: Dicts of qid2title, qid2desc, alias2qids, type_systems, qid2relations
"""
qid2title: Dict[str, str] = {}
qid2desc: Dict[str, str] = {}
alias2qids: Dict[str, list] = {}
type_systems: Dict[str, Dict[str, List[str]]] = {}
qid2relations: Dict[str, Dict[str, List[str]]] = {}
num_lines = sum(1 for _ in open(profile_file))
with open(profile_file, "r") as in_f:
for line in tqdm(in_f, total=num_lines, desc="Reading profile"):
line = ujson.loads(line)
# Check keys and schema
assert all(
k in line.keys() for k in REQUIRED_KEYS
), f"A key from {REQUIRED_KEYS} was not in {line}"
try:
# Asserts the types are correct
ent = EntityObj(
entity_id=line["entity_id"],
mentions=line["mentions"],
title=line.get("title", line["entity_id"]),
description=line.get("description", ""),
types=line.get("types", {}),
relations=line.get("relations", []),
)
except ValidationError as e:
print(e.json())
raise e
if ent.entity_id in qid2title:
raise ValueError(f"{ent.entity_id} is already in our dump")
qid2title[ent.entity_id] = ent.title
qid2desc[ent.entity_id] = ent.description
# For each [mention, score] value, create a value of mention -> [qid, score] in the alias2qid dict
for men_pair in ent.mentions:
# Lower case mentions for mention extraction
new_men = get_lnrm(men_pair[0])
if new_men not in alias2qids:
alias2qids[new_men] = []
alias2qids[new_men].append([ent.entity_id, men_pair[1]])
# Add type systems of type_sys -> QID -> list of type names
for type_sys in ent.types:
if type_sys not in type_systems:
type_systems[type_sys] = {}
type_systems[type_sys][ent.entity_id] = ent.types[type_sys]
# Add kg relations QID -> relation -> list of object QIDs
for rel_pair in ent.relations:
if "relation" not in rel_pair or "object" not in rel_pair:
raise ValueError(
"For each value in relations, it must be a JSON with keys relation and object"
)
if ent.entity_id not in qid2relations:
qid2relations[ent.entity_id] = {}
if rel_pair["relation"] not in qid2relations[ent.entity_id]:
qid2relations[ent.entity_id][rel_pair["relation"]] = []
qid2relations[ent.entity_id][rel_pair["relation"]].append(
rel_pair["object"]
)
# Sort mentions based on score, highest first
for al in list(alias2qids.keys()):
alias2qids[al] = sorted(alias2qids[al], key=lambda x: x[1], reverse=True)
        # Add all qids to the type systems with empty values
        # This isn't strictly required but can make the sets cleaner as they'll have consistent keys
for qid in qid2title:
for type_sys in type_systems:
if qid not in type_systems[type_sys]:
type_systems[type_sys][qid] = []
return qid2title, qid2desc, alias2qids, type_systems, qid2relations
# To quickly get the mention scores, the object must be in edit mode
@edit_op
def save_to_jsonl(self, profile_file):
"""Dump the entity dump to jsonl format.
Args:
profile_file: file to save the data
"""
with open(profile_file, "w") as out_f:
for qid in tqdm(self.get_all_qids(), disable=not self.verbose):
mentions = self.get_mentions_with_scores(qid)
title = self.get_title(qid)
desc = self.get_desc(qid)
ent_type_sys = {}
for type_sys in self._type_systems:
types = self.get_types(qid, type_sys)
if len(types) > 0:
ent_type_sys[type_sys] = types
relations = []
all_connections = self.get_relations_tails_for_qid(qid)
for rel in all_connections:
for qid2 in all_connections[rel]:
relations.append({"relation": rel, "object": qid2})
ent_obj = {
"entity_id": qid,
"mentions": mentions,
"title": title,
}
# Add description if nonempty
if len(desc) > 0:
ent_obj["description"] = desc
if len(ent_type_sys) > 0:
ent_obj["types"] = ent_type_sys
if len(relations) > 0:
ent_obj["relations"] = relations
out_f.write(ujson.dumps(ent_obj, ensure_ascii=False) + "\n")
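    # Example (as comments): a minimal usage sketch assuming a jsonl profile in the format
    # documented in load_from_jsonl; the file names and QID are placeholders taken from that
    # docstring example, and outputs are illustrative only.
    #   profile = EntityProfile.load_from_jsonl("entity_profile.jsonl", edit_mode=True)
    #   print(profile.get_title("C000"))      # "Dog" for the example object above
    #   print(profile.get_qid_cands("dog"))   # candidates sorted by mention score
    #   profile.save_to_jsonl("entity_profile_roundtrip.jsonl")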
# ============================================================
# GETTERS
# ============================================================
def qid_exists(self, qid):
"""Check if QID exists.
Args:
qid: entity QID
Returns: Boolean
"""
return self._entity_symbols.qid_exists(qid)
def mention_exists(self, mention):
"""Check if mention exists.
Args:
mention: mention
Returns: Boolean
"""
return self._entity_symbols.alias_exists(mention)
def get_all_qids(self):
"""Return all entity QIDs.
Returns: List of strings
"""
return self._entity_symbols.get_all_qids()
def get_all_mentions(self):
"""Return list of all mentions.
Returns: List of strings
"""
return self._entity_symbols.get_all_aliases()
def get_all_typesystems(self):
"""Return list of all type systems.
Returns: List of strings
"""
return list(self._type_systems.keys())
def get_all_types(self, type_system):
"""Return list of all type names for a type system.
Args:
type_system: type system
Returns: List of strings
"""
if type_system not in self._type_systems:
raise ValueError(
f"The type system {type_system} is not one of {self._type_systems.keys()}"
)
return self._type_systems[type_system].get_all_types()
def get_type_typeid(self, type, type_system):
"""Get the type type id for the type of the ``type_system`` system.
Args:
type: type
type_system: type system
Returns: type id
"""
if type_system not in self._type_systems:
raise ValueError(
f"The type system {type_system} is not one of {self._type_systems.keys()}"
)
return self._type_systems[type_system].get_type_typeid(type)
@check_qid_exists
def get_title(self, qid):
"""Get the title of an entity QID.
Args:
qid: entity QID
Returns: string
"""
return self._entity_symbols.get_title(qid)
@check_qid_exists
def get_desc(self, qid):
"""Get the description of an entity QID.
Args:
qid: entity QID
Returns: string
"""
return self._entity_symbols.get_desc(qid)
@check_qid_exists
def get_eid(self, qid):
"""Get the entity EID (internal number) of an entity QID.
Args:
qid: entity QID
Returns: integer
"""
return self._entity_symbols.get_eid(qid)
def get_qid_cands(self, mention):
"""Get the entity QID candidates of the mention.
Args:
mention: mention
Returns: List of QIDs
"""
return self._entity_symbols.get_qid_cands(mention)
def get_qid_count_cands(self, mention):
"""Get the entity QID candidates with their scores of the mention.
Args:
mention: mention
Returns: List of tuples [QID, score]
"""
return self._entity_symbols.get_qid_count_cands(mention)
@property
def num_entities_with_pad_and_nocand(self):
"""Get the number of entities including a PAD and UNK entity.
Returns: integer
"""
return self._entity_symbols.num_entities_with_pad_and_nocand
@check_qid_exists
def get_types(self, qid, type_system):
"""Get the type names associated with the given QID for the ``type_system`` system.
Args:
qid: QID
type_system: type system
Returns: list of typename strings
"""
if type_system not in self._type_systems:
raise ValueError(
f"The type system {type_system} is not one of {self._type_systems.keys()}"
)
return self._type_systems[type_system].get_types(qid)
@check_qid_exists
def get_relations_between(self, qid, qid2):
"""Check if two QIDs are connected in KG and returns their relation.
Args:
qid: QID one
qid2: QID two
        Returns: set of relation strings, or None if no KG symbols are loaded
"""
if self._kg_symbols is None:
return None
return self._kg_symbols.get_relations_between(qid, qid2)
@check_qid_exists
def get_relations_tails_for_qid(self, qid):
"""Get dict of relation to tail qids for given qid.
Args:
qid: QID
Returns: Dict relation to list of tail qids for that relation
"""
if self._kg_symbols is None:
return None
return self._kg_symbols.get_relations_tails_for_qid(qid)
# ============================================================
# EDIT MODE OPERATIONS
# ============================================================
# GETTERS
# get_mentions is in edit mode due to needing the qid->mention dict
@edit_op
@check_qid_exists
def get_mentions(self, qid):
"""Get the mentions for the QID.
Args:
qid: QID
Returns: List of mentions
"""
return self._entity_symbols.get_mentions(qid)
@edit_op
@check_qid_exists
def get_mentions_with_scores(self, qid):
"""Get the mentions with thier scores associated with the QID.
Args:
qid: QID
Returns: List of tuples [mention, score]
"""
return self._entity_symbols.get_mentions_with_scores(qid)
@edit_op
def get_entities_of_type(self, typename, type_system):
"""Get all entities of type ``typename`` for type system ``type_system``.
Args:
typename: type name
type_system: type system
Returns: List of QIDs
"""
if type_system not in self._type_systems:
raise ValueError(
f"The type system {type_system} is not one of {self._type_systems.keys()}"
)
return self._type_systems[type_system].get_entities_of_type(typename)
# UPDATES
@edit_op
def add_entity(self, entity_obj):
"""Add entity to our dump.
Args:
entity_obj: JSON object of entity metadata
"""
if (
type(entity_obj) is not dict
or "entity_id" not in entity_obj
or "mentions" not in entity_obj
):
raise ValueError(
"The input to update_entity needs to be a dictionary with an entity_id key and mentions key as "
"you are replacing the entity information in bulk."
)
try:
ent = EntityObj(
entity_id=entity_obj["entity_id"],
mentions=entity_obj["mentions"],
title=entity_obj.get("title", entity_obj["entity_id"]),
description=entity_obj.get("description", ""),
types=entity_obj.get("types", {}),
relations=entity_obj.get("relations", []),
)
except ValidationError as e:
print(e.json())
raise e
# We assume this is a new entity
if self._entity_symbols.qid_exists(ent.entity_id):
raise ValueError(
f"The entity {ent.entity_id} already exists. Please call update_entity instead."
)
# Add type systems of type_sys -> QID -> list of type names
for type_sys in ent.types:
if type_sys not in self._type_systems:
raise ValueError(
f"Error {entity_obj}. When adding a new entity, you must use the same type system. "
f"We don't support new type systems."
)
# Add kg relations relation -> list of object QIDs
parsed_rels = {}
for rel_pair in ent.relations:
if "relation" not in rel_pair or "object" not in rel_pair:
raise ValueError(
"For each value in relations, it must be a JSON with keys relation and object"
)
if rel_pair["relation"] not in parsed_rels:
parsed_rels[rel_pair["relation"]] = []
parsed_rels[rel_pair["relation"]].append(rel_pair["object"])
# Lower case mentions for mention extraction
mentions = [[get_lnrm(men[0]), men[1]] for men in ent.mentions]
self._entity_symbols.add_entity(
ent.entity_id, mentions, ent.title, ent.description
)
for type_sys in self._type_systems:
self._type_systems[type_sys].add_entity(
ent.entity_id, ent.types.get(type_sys, [])
)
if self._kg_symbols is not None:
self._kg_symbols.add_entity(ent.entity_id, parsed_rels)
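# A minimal usage sketch (hedged; assumes an edit-mode profile instance `ep`,
# and the QID, mentions, type system, and relation below are hypothetical
# placeholders rather than values guaranteed to exist in a dump):
#
#   ep.add_entity({
#       "entity_id": "Q999999",
#       "mentions": [["new alias", 1.0]],
#       "title": "New Entity",
#       "description": "A hypothetical entity.",
#       "types": {"wiki": ["organization"]},
#       "relations": [{"relation": "P31", "object": "Q43229"}],
#   })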
@edit_op
@check_qid_exists
def reidentify_entity(self, qid, new_qid):
"""Rename ``qid`` to ``new_qid``.
Args:
qid: old QID
new_qid: new QID
"""
# We assume this is a new entity
if self._entity_symbols.qid_exists(new_qid):
raise ValueError(
f"The entity {new_qid} already exists. Please call update_entity instead."
)
self._entity_symbols.reidentify_entity(qid, new_qid)
for type_sys in self._type_systems:
self._type_systems[type_sys].reidentify_entity(qid, new_qid)
if self._kg_symbols is not None:
self._kg_symbols.reidentify_entity(qid, new_qid)
@edit_op
def update_entity(self, entity_obj):
"""Update the metadata associated with the entity.
The entity must already be in our dump to be updated.
Args:
entity_obj: JSON of entity metadata.
"""
if (
type(entity_obj) is not dict
or "entity_id" not in entity_obj
or "mentions" not in entity_obj
):
raise ValueError(
"The input to update_entity needs to be a dictionary with an entity_id key and mentions key as "
"you are replacing the entity information in bulk."
)
if not self._entity_symbols.qid_exists(entity_obj["entity_id"]):
raise ValueError(f"The entity {entity_obj['entity_id']} is not in our dump")
try:
ent = EntityObj(
entity_id=entity_obj["entity_id"],
mentions=entity_obj["mentions"],
title=entity_obj.get("title", entity_obj["entity_id"]),
description=entity_obj.get("description", ""),
types=entity_obj.get("types", {}),
relations=entity_obj.get("relations", []),
)
except ValidationError as e:
print(e.json())
raise e
# Update mentions
for men in list(self.get_mentions(ent.entity_id)):
self._entity_symbols.remove_mention(ent.entity_id, men)
for men in ent.mentions:
# Lower case mentions for mention extraction
men = [get_lnrm(men[0]), men[1]]
self._entity_symbols.add_mention(ent.entity_id, *men)
# Update title
self._entity_symbols.set_title(ent.entity_id, ent.title)
# Update types
for type_sys in self._type_systems:
for typename in self._type_systems[type_sys].get_types(ent.entity_id):
self._type_systems[type_sys].remove_type(ent.entity_id, typename)
for type_sys in ent.types:
for typename in ent.types[type_sys]:
self._type_systems[type_sys].add_type(ent.entity_id, typename)
# Update KG
if self._kg_symbols is not None:
for rel, qid2_list in list(
self._kg_symbols.get_relations_tails_for_qid(ent.entity_id).items()
):
for qid2 in qid2_list:
self._kg_symbols.remove_relation(ent.entity_id, rel, qid2)
for rel_pair in ent.relations:
self._kg_symbols.add_relation(
ent.entity_id, rel_pair["relation"], rel_pair["object"]
)
@edit_op
def prune_to_entities(self, entities_to_keep):
"""Remove all entities except those in ``entities_to_keep``.
Args:
entities_to_keep: List or Set of entities to keep
"""
entities_to_keep = set(entities_to_keep)
# Check that all entities to keep actually exist
for qid in entities_to_keep:
if not self.qid_exists(qid):
raise ValueError(
f"The entity {qid} does not exist in our dump and cannot be kept."
)
if self.verbose:
print("Pruning entity data")
self._entity_symbols.prune_to_entities(entities_to_keep)
for type_sys in self._type_systems:
if self.verbose:
print(f"Pruning {type_sys} data")
self._type_systems[type_sys].prune_to_entities(entities_to_keep)
if self.verbose:
print("Pruning kg data")
if self._kg_symbols is not None:
self._kg_symbols.prune_to_entities(entities_to_keep)
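# Sketch (assumptions flagged): pruning to an allowlist of QIDs shrinks the
# entity, type, and KG data in one call. The QIDs are illustrative.
#
#   ep.prune_to_entities({"Q42", "Q5"})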
@edit_op
@check_qid_exists
def add_type(self, qid, type, type_system):
"""Add type to QID in for the given type system.
Args:
qid: QID
type: type name
type_system: type system
"""
if type_system not in self._type_systems:
raise ValueError(
f"The type system {type_system} is not one of {self._type_systems.keys()}"
)
self._type_systems[type_system].add_type(qid, type)
@edit_op
@check_qid_exists
def add_relation(self, qid, relation, qid2):
"""Add the relation triple.
Args:
qid: head QID
relation: relation
qid2: tail QID
"""
if self._kg_symbols is not None:
self._kg_symbols.add_relation(qid, relation, qid2)
@edit_op
@check_qid_exists
def add_mention(self, qid: str, mention: str, score: float):
"""Add the mention with its score to the QID.
Args:
qid: QID
mention: mention
score: score
"""
self._entity_symbols.add_mention(qid, mention, score)
@edit_op
@check_qid_exists
def remove_type(self, qid, type, type_system):
"""Remove the type from QID in the given type system.
Args:
qid: QID
type: type to remove
type_system: type system
"""
if type_system not in self._type_systems:
raise ValueError(
f"The type system {type_system} is not one of {self._type_systems.keys()}"
)
self._type_systems[type_system].remove_type(qid, type)
@edit_op
@check_qid_exists
def remove_relation(self, qid, relation, qid2):
"""Remove the relation triple.
Args:
qid: head QID
relation: relation
qid2: tail QID
"""
if self._kg_symbols is not None:
self._kg_symbols.remove_relation(qid, relation, qid2)
@edit_op
@check_qid_exists
def remove_mention(self, qid, mention):
"""Remove the mention from being associated with the QID.
Args:
qid: QID
mention: mention
"""
self._entity_symbols.remove_mention(qid, mention)
| bootleg-master | bootleg/symbols/entity_profile.py |