code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def accumulate(self):
"""
Accumulate metric results and calculate mAP
"""
mAP = 0.
valid_cnt = 0
eval_results = []
for score_pos, count in zip(self.class_score_poss,
self.class_gt_counts):
if count == 0: continue
if len(score_pos) == 0:
valid_cnt += 1
continue
accum_tp_list, accum_fp_list = \
self._get_tp_fp_accum(score_pos)
precision = []
recall = []
for ac_tp, ac_fp in zip(accum_tp_list, accum_fp_list):
precision.append(float(ac_tp) / (ac_tp + ac_fp))
recall.append(float(ac_tp) / count)
one_class_ap = 0.0
if self.map_type == '11point':
max_precisions = [0.] * 11
start_idx = len(precision) - 1
for j in range(10, -1, -1):
for i in range(start_idx, -1, -1):
if recall[i] < float(j) / 10.:
start_idx = i
if j > 0:
max_precisions[j - 1] = max_precisions[j]
break
else:
if max_precisions[j] < precision[i]:
max_precisions[j] = precision[i]
one_class_ap = sum(max_precisions) / 11.
mAP += one_class_ap
valid_cnt += 1
elif self.map_type == 'integral':
import math
prev_recall = 0.
for i in range(len(precision)):
recall_gap = math.fabs(recall[i] - prev_recall)
if recall_gap > 1e-6:
one_class_ap += precision[i] * recall_gap
prev_recall = recall[i]
mAP += one_class_ap
valid_cnt += 1
else:
logger.error("Unspported mAP type {}".format(self.map_type))
sys.exit(1)
eval_results.append({
'class': self.classes[valid_cnt - 1],
'ap': one_class_ap,
'precision': precision,
'recall': recall,
})
self.eval_results = eval_results
self.mAP = mAP / float(valid_cnt) if valid_cnt > 0 else mAP |
Accumulate metric results and calculate mAP
| accumulate | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | Apache-2.0 |
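Below is a small self-contained sketch (toy precision/recall numbers, not from the original code) of the two mAP modes handled by accumulate: '11point' takes the maximum precision at the recall thresholds 0.0, 0.1, ..., 1.0, while 'integral' sums precision times the recall gap along the raw curve.

# Toy precision/recall curve for illustration only.
precision = [1.0, 0.5, 0.67, 0.5]
recall = [0.25, 0.25, 0.5, 0.5]

# '11point' mode: max precision at each recall threshold j/10, j = 0..10.
max_precisions = [max([p for p, r in zip(precision, recall) if r >= j / 10.], default=0.)
                  for j in range(11)]
ap_11point = sum(max_precisions) / 11.

# 'integral' mode: sum precision * recall gap over the curve.
ap_integral, prev_recall = 0., 0.
for p, r in zip(precision, recall):
    gap = abs(r - prev_recall)
    if gap > 1e-6:
        ap_integral += p * gap
        prev_recall = r
print(ap_11point, ap_integral)
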
def _get_tp_fp_accum(self, score_pos_list):
"""
Calculate accumulating true/false positive results from
[score, pos] records
"""
sorted_list = sorted(score_pos_list, key=lambda s: s[0], reverse=True)
accum_tp = 0
accum_fp = 0
accum_tp_list = []
accum_fp_list = []
for (score, pos) in sorted_list:
accum_tp += int(pos)
accum_tp_list.append(accum_tp)
accum_fp += 1 - int(pos)
accum_fp_list.append(accum_fp)
return accum_tp_list, accum_fp_list |
Calculate accumulating true/false positive results from
[score, pos] records
| _get_tp_fp_accum | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | Apache-2.0 |
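A worked example (made-up scores) of what _get_tp_fp_accum computes: records are sorted by score, then true/false positives are accumulated in that order.

# Toy [score, pos] records; pos == 1 marks a true positive.
score_pos = [(0.9, 1), (0.75, 0), (0.8, 1), (0.6, 1)]
accum_tp, accum_fp = 0, 0
accum_tp_list, accum_fp_list = [], []
for score, pos in sorted(score_pos, key=lambda s: s[0], reverse=True):
    accum_tp += int(pos)
    accum_fp += 1 - int(pos)
    accum_tp_list.append(accum_tp)
    accum_fp_list.append(accum_fp)
print(accum_tp_list)  # [1, 2, 2, 3]
print(accum_fp_list)  # [0, 0, 1, 1]
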
def ap_per_class(tp, conf, pred_cls, target_cls):
"""
Computes the average precision, given the recall and precision curves.
Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
Args:
tp (list): True positives.
conf (list): Objectness value from 0-1.
pred_cls (list): Predicted object classes.
target_cls (list): Target object classes.
"""
tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(
pred_cls), np.array(target_cls)
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
# Create Precision-Recall curve and compute AP for each class
ap, p, r = [], [], []
for c in unique_classes:
i = pred_cls == c
n_gt = sum(target_cls == c) # Number of ground truth objects
n_p = sum(i) # Number of predicted objects
if (n_p == 0) and (n_gt == 0):
continue
elif (n_p == 0) or (n_gt == 0):
ap.append(0)
r.append(0)
p.append(0)
else:
# Accumulate FPs and TPs
fpc = np.cumsum(1 - tp[i])
tpc = np.cumsum(tp[i])
# Recall
recall_curve = tpc / (n_gt + 1e-16)
r.append(tpc[-1] / (n_gt + 1e-16))
# Precision
precision_curve = tpc / (tpc + fpc)
p.append(tpc[-1] / (tpc[-1] + fpc[-1]))
# AP from recall-precision curve
ap.append(compute_ap(recall_curve, precision_curve))
return np.array(ap), unique_classes.astype('int32'), np.array(r), np.array(
p) |
Computes the average precision, given the recall and precision curves.
Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
Args:
tp (list): True positives.
conf (list): Objectness value from 0-1.
pred_cls (list): Predicted object classes.
target_cls (list): Target object classes.
| ap_per_class | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | Apache-2.0 |
def compute_ap(recall, precision):
"""
Computes the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Args:
recall (list): The recall curve.
precision (list): The precision curve.
Returns:
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap |
Computes the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Args:
recall (list): The recall curve.
precision (list): The precision curve.
Returns:
The average precision as computed in py-faster-rcnn.
| compute_ap | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py | Apache-2.0 |
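The same steps as compute_ap, walked through on a toy curve (values invented for illustration): pad with sentinels, enforce a monotonically non-increasing precision envelope, then integrate over the points where recall changes.

import numpy as np

recall = np.array([0.1, 0.4, 0.4, 0.8])
precision = np.array([1.0, 0.8, 0.6, 0.5])

mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# precision envelope: each point takes the max precision to its right
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# sum (delta recall) * precision where recall changes value
idx = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
print(ap)
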
def __init__(self,
width,
num_joints,
backbone='HRNet',
loss='KeyPointMSELoss',
post_process='HRNetPostProcess',
flip_perm=None,
flip=True,
shift_heatmap=True,
use_dark=True):
"""
HRNet network, see https://arxiv.org/abs/1902.09212
Args:
backbone (nn.Layer): backbone instance
post_process (object): `HRNetPostProcess` instance
flip_perm (list): The left-right joints exchange order list
use_dark(bool): Whether to use DARK in post processing
"""
super(TopDownHRNet, self).__init__()
self.backbone = backbone
self.post_process = HRNetPostProcess(use_dark)
self.loss = loss
self.flip_perm = flip_perm
self.flip = flip
self.final_conv = Conv2d(width, num_joints, 1, 1, 0, bias=True)
self.shift_heatmap = shift_heatmap
self.deploy = False |
HRNet network, see https://arxiv.org/abs/1902.09212
Args:
backbone (nn.Layer): backbone instance
post_process (object): `HRNetPostProcess` instance
flip_perm (list): The left-right joints exchange order list
use_dark(bool): Whether to use DARK in post processing
| __init__ | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | Apache-2.0 |
def get_max_preds(self, heatmaps):
'''get predictions from score maps
Args:
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints
'''
assert isinstance(heatmaps,
np.ndarray), 'heatmaps should be numpy.ndarray'
assert heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
width = heatmaps.shape[3]
heatmaps_reshaped = heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals | get predictions from score maps
Args:
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints
| get_max_preds | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | Apache-2.0 |
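A minimal numpy sketch (toy heatmap, not part of the original module) of the argmax decoding done by get_max_preds: the flat argmax index is converted back to (x, y) with a modulo/division by the heatmap width.

import numpy as np

heatmaps = np.zeros((1, 1, 4, 4), dtype=np.float32)  # [batch, joints, h, w]
heatmaps[0, 0, 2, 3] = 0.9  # peak at row y=2, column x=3

batch, joints, _, width = heatmaps.shape
flat = heatmaps.reshape((batch, joints, -1))
idx = np.argmax(flat, 2).reshape((batch, joints, 1))
maxvals = np.amax(flat, 2).reshape((batch, joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % width            # x
preds[:, :, 1] = np.floor(preds[:, :, 1] / width)  # y
print(preds[0, 0], maxvals[0, 0])  # [3. 2.] [0.9]
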
def dark_postprocess(self, hm, coords, kernelsize):
'''DARK postprocessing, Zhang et al. Distribution-Aware Coordinate
Representation for Human Pose Estimation (CVPR 2020).
'''
hm = self.gaussian_blur(hm, kernelsize)
hm = np.maximum(hm, 1e-10)
hm = np.log(hm)
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
coords[n, p] = self.dark_parse(hm[n][p], coords[n][p])
return coords | DARK postprocessing, Zhang et al. Distribution-Aware Coordinate
Representation for Human Pose Estimation (CVPR 2020).
| dark_postprocess | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | Apache-2.0 |
def get_final_preds(self, heatmaps, center, scale, kernelsize=3):
"""the highest heatvalue location with a quarter offset in the
direction from the highest response to the second highest response.
Args:
heatmaps (numpy.ndarray): The predicted heatmaps
center (numpy.ndarray): The boxes center
scale (numpy.ndarray): The scale factor
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints
"""
coords, maxvals = self.get_max_preds(heatmaps)
heatmap_height = heatmaps.shape[2]
heatmap_width = heatmaps.shape[3]
if self.use_dark:
coords = self.dark_postprocess(heatmaps, coords, kernelsize)
else:
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
hm = heatmaps[n][p]
px = int(math.floor(coords[n][p][0] + 0.5))
py = int(math.floor(coords[n][p][1] + 0.5))
if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
diff = np.array([
hm[py][px + 1] - hm[py][px - 1],
hm[py + 1][px] - hm[py - 1][px]
])
coords[n][p] += np.sign(diff) * .25
preds = coords.copy()
# Transform back
for i in range(coords.shape[0]):
preds[i] = transform_preds(coords[i], center[i], scale[i],
[heatmap_width, heatmap_height])
return preds, maxvals | the highest heatvalue location with a quarter offset in the
direction from the highest response to the second highest response.
Args:
heatmaps (numpy.ndarray): The predicted heatmaps
center (numpy.ndarray): The boxes center
scale (numpy.ndarray): The scale factor
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints
| get_final_preds | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py | Apache-2.0 |
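When DARK is disabled, the coordinate is nudged a quarter pixel toward the stronger neighbour, as in the branch above. A standalone sketch with a toy heatmap:

import numpy as np

hm = np.zeros((5, 5), dtype=np.float32)
hm[2, 2] = 1.0   # integer argmax at (x=2, y=2)
hm[2, 3] = 0.6   # right neighbour stronger than left
hm[1, 2] = 0.4   # neighbour above stronger than below

px, py = 2, 2
diff = np.array([hm[py][px + 1] - hm[py][px - 1],
                 hm[py + 1][px] - hm[py - 1][px]])
coord = np.array([2.0, 2.0]) + np.sign(diff) * .25
print(coord)  # [2.25 1.75]
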
def __init__(self, use_target_weight=True, loss_scale=0.5):
"""
KeyPointMSELoss layer
Args:
use_target_weight (bool): whether to use target weight
"""
super(KeyPointMSELoss, self).__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_scale = loss_scale |
KeyPointMSELoss layer
Args:
use_target_weight (bool): whether to use target weight
| __init__ | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/models/loss.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/loss.py | Apache-2.0 |
def check_gpu(use_gpu):
"""
Log error and exit when set use_gpu=true in paddlepaddle
cpu version.
"""
err = "Config use_gpu cannot be set as true while you are " \
"using paddlepaddle cpu version ! \nPlease try: \n" \
"\t1. Install paddlepaddle-gpu to run model on GPU \n" \
"\t2. Set use_gpu as false in config file to run " \
"model on CPU"
try:
if use_gpu and not paddle.is_compiled_with_cuda():
logger.error(err)
sys.exit(1)
except Exception as e:
pass |
Log error and exit when set use_gpu=true in paddlepaddle
cpu version.
| check_gpu | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py | Apache-2.0 |
def check_version(version='2.0'):
"""
Log error and exit when the installed version of paddlepaddle is
not satisfied.
"""
err = "PaddlePaddle version {} or higher is required, " \
"or a suitable develop version is satisfied as well. \n" \
"Please make sure the version is good with your code.".format(version)
version_installed = [
fluid_version.major, fluid_version.minor, fluid_version.patch,
fluid_version.rc
]
if version_installed == ['0', '0', '0', '0']:
return
version_split = version.split('.')
length = min(len(version_installed), len(version_split))
for i in six.moves.range(length):
if version_installed[i] > version_split[i]:
return
if version_installed[i] < version_split[i]:
raise Exception(err) |
Log error and exit when the installed version of paddlepaddle is
not satisfied.
| check_version | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py | Apache-2.0 |
def check_config(cfg):
"""
Check the correctness of the configuration file. Log error and exit
when Config is not compliant.
"""
err = "'{}' not specified in config file. Please set it in config file."
check_list = ['architecture', 'num_classes']
try:
for var in check_list:
if not var in cfg:
logger.error(err.format(var))
sys.exit(1)
except Exception as e:
pass
if 'log_iter' not in cfg:
cfg.log_iter = 20
return cfg |
Check the correctness of the configuration file. Log error and exit
when Config is not compliant.
| check_config | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py | Apache-2.0 |
def is_url(path):
"""
Whether path is URL.
Args:
path (string): URL string or not.
"""
return path.startswith('http://') \
or path.startswith('https://') \
or path.startswith('ppdet://') |
Whether path is URL.
Args:
path (string): URL string or not.
| is_url | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py | Apache-2.0 |
def match_state_dict(model_state_dict, weight_state_dict):
"""
Match between the model state dict and pretrained weight state dict.
Return the matched state dict.
The method assumes that every name in the pretrained weight state dict is
a suffix of some name in the model state dict, once the prefix 'backbone.'
in the pretrained weight keys is stripped. This yields the candidates for
each model key, and the candidate with the longest matched length is chosen
as the final match. For example, if the model state dict has the key
'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weights have
the keys 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight', then
'res2.res2a.branch2a.conv.weight' is matched to the model key.
"""
model_keys = sorted(model_state_dict.keys())
weight_keys = sorted(weight_state_dict.keys())
def match(a, b):
if a.startswith('backbone.res5'):
# In Faster RCNN, the res5 pretrained weights have the prefix 'backbone.';
# however, the corresponding model weights have a different prefix,
# 'bbox_head.'.
b = b[9:]
return a == b or a.endswith("." + b)
match_matrix = np.zeros([len(model_keys), len(weight_keys)])
for i, m_k in enumerate(model_keys):
for j, w_k in enumerate(weight_keys):
if match(m_k, w_k):
match_matrix[i, j] = len(w_k)
max_id = match_matrix.argmax(1)
max_len = match_matrix.max(1)
max_id[max_len == 0] = -1
not_load_weight_name = []
for match_idx in range(len(max_id)):
if match_idx < len(weight_keys) and max_id[match_idx] == -1:
not_load_weight_name.append(weight_keys[match_idx])
if len(not_load_weight_name) > 0:
logger.info('{} in pretrained weight is not used in the model, '
'and it will not be loaded'.format(not_load_weight_name))
matched_keys = {}
result_state_dict = {}
for model_id, weight_id in enumerate(max_id):
if weight_id == -1:
continue
model_key = model_keys[model_id]
weight_key = weight_keys[weight_id]
weight_value = weight_state_dict[weight_key]
model_value_shape = list(model_state_dict[model_key].shape)
if list(weight_value.shape) != model_value_shape:
logger.info(
'The shape {} in pretrained weight {} is unmatched with '
'the shape {} in model {}. And the weight {} will not be '
'loaded'.format(weight_value.shape, weight_key,
model_value_shape, model_key, weight_key))
continue
assert model_key not in result_state_dict
result_state_dict[model_key] = weight_value
if weight_key in matched_keys:
raise ValueError('Ambiguous weight {} loaded, it matches at least '
'{} and {} in the model'.format(
weight_key, model_key, matched_keys[
weight_key]))
matched_keys[weight_key] = model_key
return result_state_dict |
Match between the model state dict and pretrained weight state dict.
Return the matched state dict.
The method assumes that every name in the pretrained weight state dict is
a suffix of some name in the model state dict, once the prefix 'backbone.'
in the pretrained weight keys is stripped. This yields the candidates for
each model key, and the candidate with the longest matched length is chosen
as the final match. For example, if the model state dict has the key
'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weights have
the keys 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight', then
'res2.res2a.branch2a.conv.weight' is matched to the model key.
| match_state_dict | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py | Apache-2.0 |
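A toy illustration (hypothetical key names, numpy arrays standing in for tensors) of the suffix matching plus shape check that match_state_dict performs; the real function additionally resolves ambiguity by picking the longest matching key.

import numpy as np

model_state = {'backbone.res2.conv.weight': np.zeros((64, 3, 3, 3))}
weight_state = {'res2.conv.weight': np.zeros((64, 3, 3, 3)),
                'fc.weight': np.zeros((1000, 64))}

matched = {}
for m_k, m_v in model_state.items():
    for w_k, w_v in weight_state.items():
        if (m_k == w_k or m_k.endswith('.' + w_k)) and w_v.shape == m_v.shape:
            matched[m_k] = w_k
print(matched)  # {'backbone.res2.conv.weight': 'res2.conv.weight'}
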
def save_model(model, optimizer, save_dir, save_name, last_epoch):
"""
save model into disk.
Args:
model (paddle.nn.Layer): the Layer instance to save parameters.
optimizer (paddle.optimizer.Optimizer): the Optimizer instance to
save optimizer states.
save_dir (str): the directory to be saved.
save_name (str): the path to be saved.
last_epoch (int): the epoch index.
"""
if paddle.distributed.get_rank() != 0:
return
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, save_name)
if isinstance(model, nn.Layer):
paddle.save(model.state_dict(), save_path + ".pdparams")
else:
assert isinstance(model,
dict), 'model is not an instance of nn.Layer or dict'
paddle.save(model, save_path + ".pdparams")
state_dict = optimizer.state_dict()
state_dict['last_epoch'] = last_epoch
paddle.save(state_dict, save_path + ".pdopt")
logger.info("Save checkpoint: {}".format(save_dir)) |
save model into disk.
Args:
model (paddle.nn.Layer): the Layer instance to save parameters.
optimizer (paddle.optimizer.Optimizer): the Optimizer instance to
save optimizer states.
save_dir (str): the directory to be saved.
save_name (str): the path to be saved.
last_epoch (int): the epoch index.
| save_model | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py | Apache-2.0 |
def get_weights_path(url):
"""Get weights path from WEIGHTS_HOME, if not exists,
download it from url.
"""
url = parse_url(url)
path, _ = get_path(url, WEIGHTS_HOME)
return path | Get weights path from WEIGHTS_HOME, if not exists,
download it from url.
| get_weights_path | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def get_config_path(url):
"""Get weights path from CONFIGS_HOME, if not exists,
download it from url.
"""
url = parse_url(url)
path = map_path(url, CONFIGS_HOME, path_depth=2)
if os.path.isfile(path):
return path
# config file not found, try download
# 1. clear configs directory
if osp.isdir(CONFIGS_HOME):
shutil.rmtree(CONFIGS_HOME)
# 2. get url
try:
from ppdet import __version__ as version
except ImportError:
version = None
cfg_url = "ppdet://configs/{}/configs.tar".format(version) \
if version else "ppdet://configs/configs.tar"
cfg_url = parse_url(cfg_url)
# 3. download and decompress
cfg_fullname = _download_dist(cfg_url, osp.dirname(CONFIGS_HOME))
_decompress_dist(cfg_fullname)
# 4. check config file existing
if os.path.isfile(path):
return path
else:
logger.error("Get config {} failed after download, please contact us on " \
"https://github.com/PaddlePaddle/PaddleDetection/issues".format(path))
sys.exit(1) | Get weights path from CONFIGS_HOME, if not exists,
download it from url.
| get_config_path | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def get_dataset_path(path, annotation, image_dir):
"""
If path exists, return path.
Otherwise, get dataset path from DATASET_HOME, if not exists,
download it.
"""
if _dataset_exists(path, annotation, image_dir):
return path
logger.info(
"Dataset {} is not valid for reason above, try searching {} or "
"downloading dataset...".format(osp.realpath(path), DATASET_HOME))
data_name = os.path.split(path.strip().lower())[-1]
for name, dataset in DATASETS.items():
if data_name == name:
logger.debug("Parse dataset_dir {} as dataset "
"{}".format(path, name))
if name == 'objects365':
raise NotImplementedError(
"Dataset {} is not valid for download automatically. "
"Please apply and download the dataset from "
"https://www.objects365.org/download.html".format(name))
data_dir = osp.join(DATASET_HOME, name)
if name == 'mot':
if osp.exists(path) or osp.exists(data_dir):
return data_dir
else:
raise NotImplementedError(
"Dataset {} is not valid for download automatically. "
"Please apply and download the dataset following docs/tutorials/PrepareMOTDataSet.md".
format(name))
if name == "spine_coco":
if _dataset_exists(data_dir, annotation, image_dir):
return data_dir
# For voc, only check dir VOCdevkit/VOC2012, VOCdevkit/VOC2007
if name in ['voc', 'fruit', 'roadsign_voc']:
exists = True
for sub_dir in dataset[1]:
check_dir = osp.join(data_dir, sub_dir)
if osp.exists(check_dir):
logger.info("Found {}".format(check_dir))
else:
exists = False
if exists:
return data_dir
# voc exist is checked above, voc is not exist here
check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'
for url, md5sum in dataset[0]:
get_path(url, data_dir, md5sum, check_exist)
# voc should create list after download
if name == 'voc':
create_voc_list(data_dir)
return data_dir
# not match any dataset in DATASETS
raise ValueError(
"Dataset {} is not valid and cannot parse dataset type "
"'{}' for automaticly downloading, which only supports "
"'voc' , 'coco', 'wider_face', 'fruit', 'roadsign_voc' and 'mot' currently".
format(path, osp.split(path)[-1])) |
If path exists, return path.
Otherwise, get dataset path from DATASET_HOME, if not exists,
download it.
| get_dataset_path | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def get_path(url, root_dir, md5sum=None, check_exist=True):
""" Download from given url to root_dir.
if the file or directory specified by url exists under
root_dir, return the path directly, otherwise download
from url and decompress it, return the path.
url (str): download url
root_dir (str): root dir for downloading, it should be
WEIGHTS_HOME or DATASET_HOME
md5sum (str): md5 sum of download package
"""
# parse path after download to decompress under root_dir
fullpath = map_path(url, root_dir)
# For some zip files, the decompressed directory name differs
# from the zip file name; rename it using the following map
decompress_name_map = {
"VOCtrainval_11-May-2012": "VOCdevkit/VOC2012",
"VOCtrainval_06-Nov-2007": "VOCdevkit/VOC2007",
"VOCtest_06-Nov-2007": "VOCdevkit/VOC2007",
"annotations_trainval": "annotations"
}
for k, v in decompress_name_map.items():
if fullpath.find(k) >= 0:
fullpath = osp.join(osp.split(fullpath)[0], v)
if osp.exists(fullpath) and check_exist:
if not osp.isfile(fullpath) or \
_check_exist_file_md5(fullpath, md5sum, url):
logger.debug("Found {}".format(fullpath))
return fullpath, True
else:
os.remove(fullpath)
fullname = _download_dist(url, root_dir, md5sum)
# the new weights format ('.pdparams') and yaml files do not
# need to be decompressed
if osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:
_decompress_dist(fullname)
return fullpath, False | Download from given url to root_dir.
if the file or directory specified by url exists under
root_dir, return the path directly, otherwise download
from url and decompress it, return the path.
url (str): download url
root_dir (str): root dir for downloading, it should be
WEIGHTS_HOME or DATASET_HOME
md5sum (str): md5 sum of download package
| get_path | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def _dataset_exists(path, annotation, image_dir):
"""
Check if user define dataset exists
"""
if not osp.exists(path):
logger.warning("Config dataset_dir {} is not exits, "
"dataset config is not valid".format(path))
return False
if annotation:
annotation_path = osp.join(path, annotation)
if not osp.isfile(annotation_path):
logger.warning("Config annotation {} is not a "
"file, dataset config is not "
"valid".format(annotation_path))
return False
if image_dir:
image_path = osp.join(path, image_dir)
if not osp.isdir(image_path):
logger.warning("Config image_dir {} is not a "
"directory, dataset config is not "
"valid".format(image_path))
return False
return True |
Check if user define dataset exists
| _dataset_exists | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def _download(url, path, md5sum=None):
"""
Download from url, save to path.
url (str): download url
path (str): download to given path
"""
if not osp.exists(path):
os.makedirs(path)
fname = osp.split(url)[-1]
fullname = osp.join(path, fname)
retry_cnt = 0
while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,
url)):
if retry_cnt < DOWNLOAD_RETRY_LIMIT:
retry_cnt += 1
else:
raise RuntimeError("Download from {} failed. "
"Retry limit reached".format(url))
logger.info("Downloading {} from {}".format(fname, url))
# NOTE: windows path join may incur \, which is invalid in url
if sys.platform == "win32":
url = url.replace('\\', '/')
req = requests.get(url, stream=True)
if req.status_code != 200:
raise RuntimeError("Downloading from {} failed with code "
"{}!".format(url, req.status_code))
# To guard against an interrupted download, download to
# tmp_fullname first, then move tmp_fullname to fullname
# after the download has finished
tmp_fullname = fullname + "_tmp"
total_size = req.headers.get('content-length')
with open(tmp_fullname, 'wb') as f:
if total_size:
for chunk in tqdm.tqdm(
req.iter_content(chunk_size=1024),
total=(int(total_size) + 1023) // 1024,
unit='KB'):
f.write(chunk)
else:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
shutil.move(tmp_fullname, fullname)
return fullname |
Download from url, save to path.
url (str): download url
path (str): download to given path
| _download | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def _decompress(fname):
"""
Decompress for zip and tar file
"""
logger.info("Decompressing {}...".format(fname))
# To guard against interrupted decompression,
# decompress into the fpath_tmp directory first; if decompression
# succeeds, move the decompressed files to fpath, then delete
# fpath_tmp and remove the downloaded compressed file.
fpath = osp.split(fname)[0]
fpath_tmp = osp.join(fpath, 'tmp')
if osp.isdir(fpath_tmp):
shutil.rmtree(fpath_tmp)
os.makedirs(fpath_tmp)
if fname.find('tar') >= 0:
with tarfile.open(fname) as tf:
tf.extractall(path=fpath_tmp)
elif fname.find('zip') >= 0:
with zipfile.ZipFile(fname) as zf:
zf.extractall(path=fpath_tmp)
elif fname.find('.txt') >= 0:
return
else:
raise TypeError("Unsupport compress file type {}".format(fname))
for f in os.listdir(fpath_tmp):
src_dir = osp.join(fpath_tmp, f)
dst_dir = osp.join(fpath, f)
_move_and_merge_tree(src_dir, dst_dir)
shutil.rmtree(fpath_tmp)
os.remove(fname) |
Decompress for zip and tar file
| _decompress | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def _move_and_merge_tree(src, dst):
"""
Move src directory to dst, if dst is already exists,
merge src to dst
"""
if not osp.exists(dst):
shutil.move(src, dst)
elif osp.isfile(src):
shutil.move(src, dst)
else:
for fp in os.listdir(src):
src_fp = osp.join(src, fp)
dst_fp = osp.join(dst, fp)
if osp.isdir(src_fp):
if osp.isdir(dst_fp):
_move_and_merge_tree(src_fp, dst_fp)
else:
shutil.move(src_fp, dst_fp)
elif osp.isfile(src_fp) and \
not osp.isfile(dst_fp):
shutil.move(src_fp, dst_fp) |
Move src directory to dst, if dst is already exists,
merge src to dst
| _move_and_merge_tree | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py | Apache-2.0 |
def get_affine_transform(center,
input_size,
rot,
output_size,
shift=(0., 0.),
inv=False):
"""Get the affine transform matrix, given the center/scale/rot/output_size.
Args:
center (np.ndarray[2, ]): Center of the bounding box (x, y).
input_size (np.ndarray[2, ]): Size of input feature (width, height).
rot (float): Rotation angle (degree).
output_size (np.ndarray[2, ]): Size of the destination heatmaps.
shift (0-100%): Shift translation ratio wrt the width/height.
Default (0., 0.).
inv (bool): Option to inverse the affine transform direction.
(inv=False: src->dst or inv=True: dst->src)
Returns:
np.ndarray: The transform matrix.
"""
assert len(center) == 2
assert len(output_size) == 2
assert len(shift) == 2
if not isinstance(input_size, (np.ndarray, list)):
input_size = np.array([input_size, input_size], dtype=np.float32)
scale_tmp = input_size
shift = np.array(shift)
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = rotate_point([0., src_w * -0.5], rot_rad)
dst_dir = np.array([0., dst_w * -0.5])
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
src[2, :] = _get_3rd_point(src[0, :], src[1, :])
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans | Get the affine transform matrix, given the center/scale/rot/output_size.
Args:
center (np.ndarray[2, ]): Center of the bounding box (x, y).
input_size (np.ndarray[2, ]): Size of input feature (width, height).
rot (float): Rotation angle (degree).
output_size (np.ndarray[2, ]): Size of the destination heatmaps.
shift (0-100%): Shift translation ratio wrt the width/height.
Default (0., 0.).
inv (bool): Option to inverse the affine transform direction.
(inv=False: src->dst or inv=True: dst->src)
Returns:
np.ndarray: The transform matrix.
| get_affine_transform | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
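For intuition, the three src/dst point pairs built above for center=(100, 100), input_size=128, rot=0, output_size=(192, 256) and shift=(0, 0) are the ones below; passing them to cv2.getAffineTransform yields the 2x3 matrix that maps the box center to the heatmap center (this sketch assumes OpenCV and NumPy are installed).

import numpy as np
import cv2

# src/dst triplets as constructed by get_affine_transform for the settings above.
src = np.float32([[100., 100.], [100., 36.], [36., 36.]])
dst = np.float32([[96., 128.], [96., 32.], [0., 32.]])
trans = cv2.getAffineTransform(src, dst)

# Apply to the box center using homogeneous coordinates.
print(trans.dot(np.array([100., 100., 1.])))  # [ 96. 128.]
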
def get_warp_matrix(theta, size_input, size_dst, size_target):
"""Calculate the transformation matrix under the constraint of unbiased.
Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
Data Processing for Human Pose Estimation (CVPR 2020).
Args:
theta (float): Rotation angle in degrees.
size_input (np.ndarray): Size of input image [w, h].
size_dst (np.ndarray): Size of output image [w, h].
size_target (np.ndarray): Size of ROI in input plane [w, h].
Returns:
matrix (np.ndarray): A matrix for transformation.
"""
theta = np.deg2rad(theta)
matrix = np.zeros((2, 3), dtype=np.float32)
scale_x = size_dst[0] / size_target[0]
scale_y = size_dst[1] / size_target[1]
matrix[0, 0] = np.cos(theta) * scale_x
matrix[0, 1] = -np.sin(theta) * scale_x
matrix[0, 2] = scale_x * (
-0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
np.sin(theta) + 0.5 * size_target[0])
matrix[1, 0] = np.sin(theta) * scale_y
matrix[1, 1] = np.cos(theta) * scale_y
matrix[1, 2] = scale_y * (
-0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
np.cos(theta) + 0.5 * size_target[1])
return matrix | Calculate the transformation matrix under the constraint of unbiased.
Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
Data Processing for Human Pose Estimation (CVPR 2020).
Args:
theta (float): Rotation angle in degrees.
size_input (np.ndarray): Size of input image [w, h].
size_dst (np.ndarray): Size of output image [w, h].
size_target (np.ndarray): Size of ROI in input plane [w, h].
Returns:
matrix (np.ndarray): A matrix for transformation.
| get_warp_matrix | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
def _get_3rd_point(a, b):
"""To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): point(x,y)
b (np.ndarray): point(x,y)
Returns:
np.ndarray: The 3rd point.
"""
assert len(
a) == 2, 'input of _get_3rd_point should be point with length of 2'
assert len(
b) == 2, 'input of _get_3rd_point should be point with length of 2'
direction = a - b
third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
return third_pt | To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): point(x,y)
b (np.ndarray): point(x,y)
Returns:
np.ndarray: The 3rd point.
| _get_3rd_point | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
def rotate_point(pt, angle_rad):
"""Rotate a point by an angle.
Args:
pt (list[float]): 2 dimensional point to be rotated
angle_rad (float): rotation angle by radian
Returns:
list[float]: Rotated point.
"""
assert len(pt) == 2
sn, cs = np.sin(angle_rad), np.cos(angle_rad)
new_x = pt[0] * cs - pt[1] * sn
new_y = pt[0] * sn + pt[1] * cs
rotated_pt = [new_x, new_y]
return rotated_pt | Rotate a point by an angle.
Args:
pt (list[float]): 2 dimensional point to be rotated
angle_rad (float): rotation angle by radian
Returns:
list[float]: Rotated point.
| rotate_point | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
def warp_affine_joints(joints, mat):
"""Apply affine transformation defined by the transform matrix on the
joints.
Args:
joints (np.ndarray[..., 2]): Origin coordinate of joints.
mat (np.ndarray[3, 2]): The affine matrix.
Returns:
matrix (np.ndarray[..., 2]): Result coordinate of joints.
"""
joints = np.array(joints)
shape = joints.shape
joints = joints.reshape(-1, 2)
return np.dot(np.concatenate(
(joints, joints[:, 0:1] * 0 + 1), axis=1),
mat.T).reshape(shape) | Apply affine transformation defined by the transform matrix on the
joints.
Args:
joints (np.ndarray[..., 2]): Origin coordinate of joints.
mat (np.ndarray[3, 2]): The affine matrix.
Returns:
matrix (np.ndarray[..., 2]): Result coordinate of joints.
| warp_affine_joints | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
"""greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
Args:
kpts_db (list): The predicted keypoints within the image
thresh (float): The threshold to select the boxes
sigmas (np.array): The variance to calculate the oks iou
Default: None
in_vis_thre (float): The threshold to select the high confidence boxes
Default: None
Return:
keep (list): indexes to keep
"""
if len(kpts_db) == 0:
return []
scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
kpts = np.array(
[kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],
sigmas, in_vis_thre)
inds = np.where(oks_ovr <= thresh)[0]
order = order[inds + 1]
return keep | greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
Args:
kpts_db (list): The predicted keypoints within the image
thresh (float): The threshold to select the boxes
sigmas (np.array): The variance to calculate the oks iou
Default: None
in_vis_thre (float): The threshold to select the high confidence boxes
Default: None
Return:
keep (list): indexes to keep
| oks_nms | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
def soft_oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
"""greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
Args:
kpts_db (list): The predicted keypoints within the image
thresh (float): The threshold to select the boxes
sigmas (np.array): The variance to calculate the oks iou
Default: None
in_vis_thre (float): The threshold to select the high confidence boxes
Default: None
Return:
keep (list): indexes to keep
"""
if len(kpts_db) == 0:
return []
scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
kpts = np.array(
[kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])
order = scores.argsort()[::-1]
scores = scores[order]
# max_dets = order.size
max_dets = 20
keep = np.zeros(max_dets, dtype=np.intp)
keep_cnt = 0
while order.size > 0 and keep_cnt < max_dets:
i = order[0]
oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],
sigmas, in_vis_thre)
order = order[1:]
scores = rescore(oks_ovr, scores[1:], thresh)
tmp = scores.argsort()[::-1]
order = order[tmp]
scores = scores[tmp]
keep[keep_cnt] = i
keep_cnt += 1
keep = keep[:keep_cnt]
return keep | greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
Args:
kpts_db (list): The predicted keypoints within the image
thresh (float): The threshold to select the boxes
sigmas (np.array): The variance to calculate the oks iou
Default: None
in_vis_thre (float): The threshold to select the high confidence boxes
Default: None
Return:
keep (list): indexes to keep
| soft_oks_nms | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py | Apache-2.0 |
def setup_logger(name="ppdet", output=None):
"""
Initialize logger and set its verbosity level to INFO.
Args:
output (str): a file name or a directory to save log. If None, will not save log file.
If ends with ".txt" or ".log", assumed to be a file name.
Otherwise, logs will be saved to `output/log.txt`.
name (str): the root module name of this logger
Returns:
logging.Logger: a logger
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
logger.setLevel(logging.INFO)
logger.propagate = False
formatter = logging.Formatter(
"[%(asctime)s] %(name)s %(levelname)s: %(message)s",
datefmt="%m/%d %H:%M:%S")
# stdout logging: master only
local_rank = dist.get_rank()
if local_rank == 0:
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# file logging: all workers
if output is not None:
if output.endswith(".txt") or output.endswith(".log"):
filename = output
else:
filename = os.path.join(output, "log.txt")
if local_rank > 0:
filename = filename + ".rank{}".format(local_rank)
os.makedirs(os.path.dirname(filename))
fh = logging.FileHandler(filename, mode='a')
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter())
logger.addHandler(fh)
logger_initialized.append(name)
return logger |
Initialize logger and set its verbosity level to INFO.
Args:
output (str): a file name or a directory to save log. If None, will not save log file.
If ends with ".txt" or ".log", assumed to be a file name.
Otherwise, logs will be saved to `output/log.txt`.
name (str): the root module name of this logger
Returns:
logging.Logger: a logger
| setup_logger | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/logger.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/logger.py | Apache-2.0 |
def colormap(rgb=False):
"""
Get colormap
The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py
"""
color_list = np.array([
0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
]).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
if not rgb:
color_list = color_list[:, ::-1]
return color_list |
Get colormap
The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py
| colormap | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/visualizer.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/visualizer.py | Apache-2.0 |
def load_config(file_path):
"""
Load config from file.
Args:
file_path (str): Path of the config file to be loaded.
Returns: global config
"""
_, ext = os.path.splitext(file_path)
assert ext in ['.yml', '.yaml'], "only support yaml files for now"
# load config from file and merge into global config
with open(file_path) as f:
cfg = yaml.load(f, Loader=yaml.Loader)
cfg['filename'] = os.path.splitext(os.path.split(file_path)[-1])[0]
merge_config(cfg)
return global_config |
Load config from file.
Args:
file_path (str): Path of the config file to be loaded.
Returns: global config
| load_config | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | Apache-2.0 |
def dict_merge(dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
Args:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
Returns: dct
"""
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict) and
isinstance(merge_dct[k], collectionsAbc.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
return dct | Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
Args:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
Returns: dct
| dict_merge | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | Apache-2.0 |
def merge_config(config, another_cfg=None):
"""
Merge config into global config or another_cfg.
Args:
config (dict): Config to be merged.
Returns: global config
"""
global global_config
dct = another_cfg or global_config
return dict_merge(dct, config) |
Merge config into global config or another_cfg.
Args:
config (dict): Config to be merged.
Returns: global config
| merge_config | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | Apache-2.0 |
def register(cls):
"""
Register a given module class.
Args:
cls (type): Module class to be registered.
Returns: cls
"""
if cls.__name__ in global_config:
raise ValueError("Module class already registered: {}".format(
cls.__name__))
if hasattr(cls, '__op__'):
cls = make_partial(cls)
global_config[cls.__name__] = extract_schema(cls)
return cls |
Register a given module class.
Args:
cls (type): Module class to be registered.
Returns: cls
| register | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | Apache-2.0 |
def create(cls_or_name, **kwargs):
"""
Create an instance of given module class.
Args:
cls_or_name (type or str): Class of which to create instance.
Returns: instance of type `cls_or_name`
"""
assert type(cls_or_name) in [type, str
], "should be a class or name of a class"
name = type(cls_or_name) == str and cls_or_name or cls_or_name.__name__
assert name in global_config and \
isinstance(global_config[name], SchemaDict), \
"the module {} is not registered".format(name)
config = global_config[name]
cls = getattr(config.pymodule, name)
cls_kwargs = {}
cls_kwargs.update(global_config[name])
# parse `shared` annotation of registered modules
if getattr(config, 'shared', None):
for k in config.shared:
target_key = config[k]
shared_conf = config.schema[k].default
assert isinstance(shared_conf, SharedConfig)
if target_key is not None and not isinstance(target_key,
SharedConfig):
continue # value is given for the module
elif shared_conf.key in global_config:
# `key` is present in config
cls_kwargs[k] = global_config[shared_conf.key]
else:
cls_kwargs[k] = shared_conf.default_value
# parse `inject` annotation of registered modules
if getattr(cls, 'from_config', None):
cls_kwargs.update(cls.from_config(config, **kwargs))
if getattr(config, 'inject', None):
for k in config.inject:
target_key = config[k]
# optional dependency
if target_key is None:
continue
if isinstance(target_key, dict) or hasattr(target_key, '__dict__'):
if 'name' not in target_key.keys():
continue
inject_name = str(target_key['name'])
if inject_name not in global_config:
raise ValueError(
"Missing injection name {} and check it's name in cfg file".
format(k))
target = global_config[inject_name]
for i, v in target_key.items():
if i == 'name':
continue
target[i] = v
if isinstance(target, SchemaDict):
cls_kwargs[k] = create(inject_name)
elif isinstance(target_key, str):
if target_key not in global_config:
raise ValueError("Missing injection config:", target_key)
target = global_config[target_key]
if isinstance(target, SchemaDict):
cls_kwargs[k] = create(target_key)
elif hasattr(target, '__dict__'): # serialized object
cls_kwargs[k] = target
else:
raise ValueError("Unsupported injection type:", target_key)
# prevent modification of global config values of reference types
# (e.g., list, dict) from within the created module instances
#kwargs = copy.deepcopy(kwargs)
return cls(**cls_kwargs) |
Create an instance of given module class.
Args:
cls_or_name (type or str): Class of which to create instance.
Returns: instance of type `cls_or_name`
| create | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py | Apache-2.0 |
def extract_schema(cls):
"""
Extract schema from a given class
Args:
cls (type): Class from which to extract.
Returns:
schema (SchemaDict): Extracted schema.
"""
ctor = cls.__init__
# python 2 compatibility
if hasattr(inspect, 'getfullargspec'):
argspec = inspect.getfullargspec(ctor)
annotations = argspec.annotations
has_kwargs = argspec.varkw is not None
else:
argspec = inspect.getfullargspec(ctor)
# python 2 type hinting workaround, see pep-3107
# however, since `typeguard` does not support python 2, type checking
# is still python 3 only for now
annotations = getattr(ctor, '__annotations__', {})
has_kwargs = argspec.varkw is not None
names = [arg for arg in argspec.args if arg != 'self']
defaults = argspec.defaults
num_defaults = argspec.defaults is not None and len(argspec.defaults) or 0
num_required = len(names) - num_defaults
docs = cls.__doc__
if docs is None and getattr(cls, '__category__', None) == 'op':
docs = cls.__call__.__doc__
try:
docstring = doc_parse(docs)
except Exception:
docstring = None
if docstring is None:
comments = {}
else:
comments = {}
for p in docstring.params:
match_obj = re.match('^([a-zA-Z_]+[a-zA-Z_0-9]*).*', p.arg_name)
if match_obj is not None:
comments[match_obj.group(1)] = p.description
schema = SchemaDict()
schema.name = cls.__name__
schema.doc = ""
if docs is not None:
start_pos = docs[0] == '\n' and 1 or 0
schema.doc = docs[start_pos:].split("\n")[0].strip()
# XXX handle paddle's weird doc convention
if '**' == schema.doc[:2] and '**' == schema.doc[-2:]:
schema.doc = schema.doc[2:-2].strip()
schema.category = hasattr(cls, '__category__') and getattr(
cls, '__category__') or 'module'
schema.strict = not has_kwargs
schema.pymodule = importlib.import_module(cls.__module__)
schema.inject = getattr(cls, '__inject__', [])
schema.shared = getattr(cls, '__shared__', [])
for idx, name in enumerate(names):
comment = name in comments and comments[name] or name
if name in schema.inject:
type_ = None
else:
type_ = name in annotations and annotations[name] or None
value_schema = SchemaValue(name, comment, type_)
if name in schema.shared:
assert idx >= num_required, "shared config must have default value"
default = defaults[idx - num_required]
value_schema.set_default(SharedConfig(name, default))
elif idx >= num_required:
default = defaults[idx - num_required]
value_schema.set_default(default)
schema.set_schema(name, value_schema)
return schema |
Extract schema from a given class
Args:
cls (type): Class from which to extract.
Returns:
schema (SchemaDict): Extracted schema.
| extract_schema | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/config/schema.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/config/schema.py | Apache-2.0 |
def serializable(cls):
"""
Add loader and dumper for given class, which must be
"trivially serializable"
Args:
cls: class to be serialized
Returns: cls
"""
yaml.add_constructor(u'!{}'.format(cls.__name__),
_make_python_constructor(cls))
yaml.add_representer(cls, _make_python_representer(cls))
return cls |
Add loader and dumper for given class, which must be
"trivially serializable"
Args:
cls: class to be serialized
Returns: cls
| serializable | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/lib/utils/config/yaml_helpers.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/config/yaml_helpers.py | Apache-2.0 |
def get_test_images(infer_dir, infer_img):
"""
Get image path list in TEST mode
"""
assert infer_img is not None or infer_dir is not None, \
"--infer_img or --infer_dir should be set"
assert infer_img is None or os.path.isfile(infer_img), \
"{} is not a file".format(infer_img)
assert infer_dir is None or os.path.isdir(infer_dir), \
"{} is not a directory".format(infer_dir)
# infer_img has a higher priority
if infer_img and os.path.isfile(infer_img):
return [infer_img]
images = set()
infer_dir = os.path.abspath(infer_dir)
assert os.path.isdir(infer_dir), \
"infer_dir {} is not a directory".format(infer_dir)
exts = ['jpg', 'jpeg', 'png', 'bmp']
exts += [ext.upper() for ext in exts]
for ext in exts:
images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
images = list(images)
assert len(images) > 0, "no image found in {}".format(infer_dir)
logger.info("Found {} inference images in total.".format(len(images)))
return images |
Get image path list in TEST mode
| get_test_images | python | PaddlePaddle/models | tutorials/pp-series/HRNet-Keypoint/tools/infer.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/tools/infer.py | Apache-2.0 |
def get_args(add_help=True):
"""get_args
Parse all args using argparse lib
Args:
add_help: Whether to add -h option on args
Returns:
An object which contains many parameters used for inference.
"""
import argparse
parser = argparse.ArgumentParser(
description='PaddlePaddle Args', add_help=add_help)
args = parser.parse_args()
return args | get_args
Parse all args using argparse lib
Args:
add_help: Whether to add -h option on args
Returns:
An object which contains many parameters used for inference.
| get_args | python | PaddlePaddle/models | tutorials/tipc/train_infer_python/template/code/export_model.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/tipc/train_infer_python/template/code/export_model.py | Apache-2.0 |
def export(args):
"""export
export inference model using jit.save
Args:
args: Parameters generated using argparser.
Returns: None
"""
model = build_model(args)
# decorate model with jit.save
model = paddle.jit.to_static(
model,
input_spec=[
InputSpec(
shape=[None, 3, args.img_size, args.img_size], dtype='float32')
])
# save inference model
paddle.jit.save(model, os.path.join(args.save_inference_dir, "inference"))
print(f"inference model is saved in {args.save_inference_dir}") | export
export inference model using jit.save
Args:
args: Parameters generated using argparser.
Returns: None
| export | python | PaddlePaddle/models | tutorials/tipc/train_infer_python/template/code/export_model.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/tipc/train_infer_python/template/code/export_model.py | Apache-2.0 |
def infer_main(args):
"""infer_main
Main inference function.
Args:
args: Parameters generated using argparser.
Returns:
class_id: Class index of the input.
        prob: Probability of the input.
"""
# init inference engine
inference_engine = InferenceEngine(args)
# init benchmark log
if args.benchmark:
import auto_log
autolog = auto_log.AutoLogger(
model_name="example",
batch_size=args.batch_size,
inference_config=inference_engine.config,
gpu_ids="auto" if args.use_gpu else None)
# enable benchmark
if args.benchmark:
autolog.times.start()
# preprocess
img = inference_engine.preprocess(args.img_path)
if args.benchmark:
autolog.times.stamp()
output = inference_engine.run(img)
if args.benchmark:
autolog.times.stamp()
# postprocess
class_id, prob = inference_engine.postprocess(output)
if args.benchmark:
autolog.times.stamp()
autolog.times.end(stamp=True)
autolog.report()
return class_id, prob | infer_main
Main inference function.
Args:
args: Parameters generated using argparser.
Returns:
class_id: Class index of the input.
        prob: Probability of the input.
| infer_main | python | PaddlePaddle/models | tutorials/tipc/train_infer_python/template/code/infer.py | https://github.com/PaddlePaddle/models/blob/master/tutorials/tipc/train_infer_python/template/code/infer.py | Apache-2.0 |
def pytest_configure(config):
"""Pytest configuration hook to help reproduce test segfaults
Sets and outputs rng seeds.
The segfault-debug procedure on a module called test_module.py is:
1. run "pytest --verbose test_module.py". A seg-faulting output might be:
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... ok
test_module.test2 ... Illegal instruction (core dumped)
2. Copy the module-starting seed into the next command, then run:
MXNET_MODULE_SEED=4018804151 pytest --log-level=DEBUG --verbose test_module.py
Output might be:
[WARNING] **** module-level seed is set: all tests running deterministically ****
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
ok
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
3. Copy the segfaulting-test seed into the command:
MXNET_TEST_SEED=1435005594 pytest --log-level=DEBUG --verbose test_module.py:test2
Output might be:
[INFO] np, mx and python random seeds = 2481884723
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
    4. Finally reproduce the segfault directly under gdb (might need additional os packages)
by editing the bottom of test_module.py to be
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
test2()
MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
    5. When finished debugging the segfault, remember to unset any exported MXNET_ seed
variables in the environment to return to non-deterministic testing (a good thing).
"""
config.addinivalue_line("markers", "slow: mark test as slow to run")
module_seed_str = os.getenv('MXNET_MODULE_SEED')
if module_seed_str is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
else:
seed = int(module_seed_str)
logging.warning('*** module-level seed is set: '
'all tests running deterministically ***')
print('Setting module np/mx/python random seeds, '
'use MXNET_MODULE_SEED={} to reproduce.'.format(seed))
np.random.seed(seed)
mx.npx.random.seed(seed)
random.seed(seed)
# The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with
# the 'with_seed()' decoration. Inform the user of this once here at the module level.
if os.getenv('MXNET_TEST_SEED') is not None:
logging.warning('*** test-level seed set: all "@with_seed()" '
'tests run deterministically ***') | Pytest configuration hook to help reproduce test segfaults
Sets and outputs rng seeds.
The segfault-debug procedure on a module called test_module.py is:
1. run "pytest --verbose test_module.py". A seg-faulting output might be:
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... ok
test_module.test2 ... Illegal instruction (core dumped)
2. Copy the module-starting seed into the next command, then run:
MXNET_MODULE_SEED=4018804151 pytest --log-level=DEBUG --verbose test_module.py
Output might be:
[WARNING] **** module-level seed is set: all tests running deterministically ****
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
ok
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
3. Copy the segfaulting-test seed into the command:
MXNET_TEST_SEED=1435005594 pytest --log-level=DEBUG --verbose test_module.py:test2
Output might be:
[INFO] np, mx and python random seeds = 2481884723
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
    4. Finally reproduce the segfault directly under gdb (might need additional os packages)
by editing the bottom of test_module.py to be
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
test2()
MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
    5. When finished debugging the segfault, remember to unset any exported MXNET_ seed
variables in the environment to return to non-deterministic testing (a good thing).
| pytest_configure | python | dmlc/gluon-nlp | conftest.py | https://github.com/dmlc/gluon-nlp/blob/master/conftest.py | Apache-2.0 |
def pytest_runtest_makereport(item, call):
"""Make test outcome available to fixture.
https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep) | Make test outcome available to fixture.
https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
| pytest_runtest_makereport | python | dmlc/gluon-nlp | conftest.py | https://github.com/dmlc/gluon-nlp/blob/master/conftest.py | Apache-2.0 |
def function_scope_seed(request):
"""A function scope fixture that manages rng seeds.
This fixture automatically initializes the python, numpy and mxnet random
number generators randomly on every test run.
def test_ok_with_random_data():
...
To fix the seed used for a test case mark the test function with the
desired seed:
@pytest.mark.seed(1)
def test_not_ok_with_random_data():
'''This testcase actually works.'''
assert 17 == random.randint(0, 100)
When a test fails, the fixture outputs the seed used. The user can then set
the environment variable MXNET_TEST_SEED to the value reported, then rerun
the test with:
pytest --verbose -s <test_module_name.py> -k <failing_test>
To run a test repeatedly, install pytest-repeat and add the --count argument:
pip install pytest-repeat
pytest --verbose -s <test_module_name.py> -k <failing_test> --count 1000
"""
seed = request.node.get_closest_marker('seed')
env_seed_str = os.getenv('MXNET_TEST_SEED')
if seed is not None:
seed = seed.args[0]
assert isinstance(seed, int)
elif env_seed_str is not None:
seed = int(env_seed_str)
else:
seed = np.random.randint(0, np.iinfo(np.int32).max)
post_test_state = np.random.get_state()
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
seed_message = ('np/mx/python random seeds are set to '
'{}, use MXNET_TEST_SEED={} to reproduce.')
seed_message = seed_message.format(seed, seed)
# Always log seed on DEBUG log level. This makes sure we can find out the
# value of the seed even if the test case causes a segfault and subsequent
# teardown code is not run.
logging.debug(seed_message)
yield # run the test
if request.node.rep_setup.failed:
logging.info("Setting up a test failed: {}", request.node.nodeid)
elif request.node.rep_call.outcome == 'failed':
# Either request.node.rep_setup.failed or request.node.rep_setup.passed
# should be True
assert request.node.rep_setup.passed
# On failure also log seed on INFO log level
logging.info(seed_message)
np.random.set_state(post_test_state) | A function scope fixture that manages rng seeds.
This fixture automatically initializes the python, numpy and mxnet random
number generators randomly on every test run.
def test_ok_with_random_data():
...
To fix the seed used for a test case mark the test function with the
desired seed:
@pytest.mark.seed(1)
def test_not_ok_with_random_data():
'''This testcase actually works.'''
assert 17 == random.randint(0, 100)
When a test fails, the fixture outputs the seed used. The user can then set
the environment variable MXNET_TEST_SEED to the value reported, then rerun
the test with:
pytest --verbose -s <test_module_name.py> -k <failing_test>
To run a test repeatedly, install pytest-repeat and add the --count argument:
pip install pytest-repeat
pytest --verbose -s <test_module_name.py> -k <failing_test> --count 1000
| function_scope_seed | python | dmlc/gluon-nlp | conftest.py | https://github.com/dmlc/gluon-nlp/blob/master/conftest.py | Apache-2.0 |
def predict_extended(original_feature,
chunked_features,
results,
n_best_size,
max_answer_length=64,
start_top_n=5,
end_top_n=5):
"""Get prediction results for SQuAD.
Start Logits: (B, N_start)
End Logits: (B, N_start, N_end)
Parameters
----------
original_feature:
The original SquadFeature before chunked
chunked_features
List of ChunkFeatures
results
List of model predictions for span start and span end.
n_best_size
Best N results written to file
max_answer_length
Maximum length of the answer tokens.
start_top_n
Number of start-position candidates
end_top_n
Number of end-position candidates
Returns
-------
not_answerable_score
Model's estimate that the question is not answerable.
prediction
The final prediction.
nbest_json
n-best predictions with their probabilities.
"""
    not_answerable_score = 1000000  # Score for not-answerable. We set it to a large positive value.
    # If one chunk votes for answerable, we will treat the context as answerable.
    # Thus, the overall not_answerable_score = min(chunk_not_answerable_score)
all_start_idx = []
all_end_idx = []
all_pred_score = []
context_length = len(original_feature.context_token_ids)
token_max_context_score = _np.full((len(chunked_features), context_length),
-_np.inf,
dtype=_np.float32)
for i, chunked_feature in enumerate(chunked_features):
chunk_start = chunked_feature.chunk_start
chunk_length = chunked_feature.chunk_length
for j in range(chunk_start, chunk_start + chunk_length):
# This is a heuristic score
# TODO investigate the impact
token_max_context_score[i, j] = min(j - chunk_start,
chunk_start + chunk_length - 1 - j) \
+ 0.01 * chunk_length
token_max_chunk_id = token_max_context_score.argmax(axis=0)
for chunk_id, (result, chunk_feature) in enumerate(zip(results, chunked_features)):
# We use the log-likelihood as the not answerable score.
# Thus, a high score indicates that the answer is not answerable
cur_not_answerable_score = float(result.answerable_logits[1])
not_answerable_score = min(not_answerable_score, cur_not_answerable_score)
# Calculate the start_logits + end_logits as the overall score
context_offset = chunk_feature.context_offset
chunk_start = chunk_feature.chunk_start
chunk_length = chunk_feature.chunk_length
for i in range(start_top_n):
for j in range(end_top_n):
pred_score = result.start_top_logits[i] + result.end_top_logits[i, j]
start_index = result.start_top_index[i]
end_index = result.end_top_index[i, j]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the answer span is in the query tokens or out of
# the chunk. We throw out all invalid predictions.
if not (context_offset <= start_index < context_offset + chunk_length) or \
not (context_offset <= end_index < context_offset + chunk_length) or \
end_index < start_index:
continue
pred_answer_length = end_index - start_index + 1
if pred_answer_length > max_answer_length:
continue
start_idx = int(start_index - context_offset + chunk_start)
end_idx = int(end_index - context_offset + chunk_start)
if token_max_chunk_id[start_idx] != chunk_id:
continue
all_start_idx.append(start_idx)
all_end_idx.append(end_idx)
all_pred_score.append(pred_score)
sorted_start_end_score = sorted(zip(all_start_idx, all_end_idx, all_pred_score),
key=lambda args: args[-1], reverse=True)
nbest = []
context_text = original_feature.context_text
context_token_offsets = original_feature.context_token_offsets
seen_predictions = set()
for start_idx, end_idx, pred_score in sorted_start_end_score:
if len(seen_predictions) >= n_best_size:
break
pred_answer = context_text[context_token_offsets[start_idx][0]:
context_token_offsets[end_idx][1]]
seen_predictions.add(pred_answer)
nbest.append((pred_answer, pred_score))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if len(nbest) == 0:
nbest.append(('', float('-inf')))
all_scores = _np.array([ele[1] for ele in nbest], dtype=_np.float32)
probs = _np.exp(all_scores) / _np.sum(_np.exp(all_scores))
nbest_json = []
for i, (entry, prob) in enumerate(zip(nbest, probs)):
output = collections.OrderedDict()
output['text'] = entry[0]
output['probability'] = float(prob)
nbest_json.append(output)
assert len(nbest_json) >= 1
return not_answerable_score, nbest[0][0], nbest_json | Get prediction results for SQuAD.
Start Logits: (B, N_start)
End Logits: (B, N_start, N_end)
Parameters
----------
original_feature:
The original SquadFeature before chunked
chunked_features
List of ChunkFeatures
results
List of model predictions for span start and span end.
n_best_size
Best N results written to file
max_answer_length
Maximum length of the answer tokens.
start_top_n
Number of start-position candidates
end_top_n
Number of end-position candidates
Returns
-------
not_answerable_score
Model's estimate that the question is not answerable.
prediction
The final prediction.
nbest_json
n-best predictions with their probabilities.
| predict_extended | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
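A small numpy sketch of the max-context voting heuristic used above: each context token is claimed by the chunk in which it sits most centrally, so overlapping chunks do not both propose spans anchored at the same token (the chunk boundaries below are made up):
import numpy as np

context_length = 10
chunks = [(0, 6), (4, 6)]          # hypothetical (chunk_start, chunk_length), overlapping on tokens 4-5
score = np.full((len(chunks), context_length), -np.inf, dtype=np.float32)
for i, (start, length) in enumerate(chunks):
    for j in range(start, start + length):
        # distance to the nearer chunk boundary, plus a small bonus for longer chunks
        score[i, j] = min(j - start, start + length - 1 - j) + 0.01 * length
token_max_chunk_id = score.argmax(axis=0)
print(token_max_chunk_id)          # [0 0 0 0 0 1 1 1 1 1]: each overlapped token goes to the chunk where it is deeper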
def get_end_logits(self, contextual_embedding, start_positions, p_mask):
"""
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
start_positions
Shape (batch_size, N)
We process multiple candidates simultaneously
p_mask
Shape (batch_size, sequence_length)
Returns
-------
end_logits
Shape (batch_size, N, sequence_length)
"""
# Select the features at the start_positions
# start_feature will have shape (batch_size, N, C)
start_features = select_vectors_by_position(contextual_embedding, start_positions)
# Concatenate the start_feature and the contextual_embedding
contextual_embedding = np.expand_dims(contextual_embedding, axis=1) # (B, 1, T, C)
start_features = np.expand_dims(start_features, axis=2) # (B, N, 1, C)
concat_features = np.concatenate([npx.broadcast_like(start_features,
contextual_embedding, 2, 2),
npx.broadcast_like(contextual_embedding,
start_features, 1, 1)],
axis=-1) # (B, N, T, 2C)
end_scores = self.end_scores(concat_features)
end_scores = np.squeeze(end_scores, -1)
end_logits = masked_logsoftmax(end_scores, mask=np.expand_dims(p_mask, axis=1),
axis=-1)
return end_logits |
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
start_positions
Shape (batch_size, N)
We process multiple candidates simultaneously
p_mask
Shape (batch_size, sequence_length)
Returns
-------
end_logits
Shape (batch_size, N, sequence_length)
| get_end_logits | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
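A plain-numpy shape sketch of the broadcast-and-concatenate step above; B, N, T, C are made-up sizes and np.broadcast_to stands in for npx.broadcast_like:
import numpy as np

B, N, T, C = 2, 5, 7, 4                      # batch, start candidates, tokens, channels
contextual = np.zeros((B, 1, T, C))          # contextual_embedding after expand_dims(axis=1)
start_feat = np.zeros((B, N, 1, C))          # start_features after expand_dims(axis=2)
concat = np.concatenate([np.broadcast_to(start_feat, (B, N, T, C)),
                         np.broadcast_to(contextual, (B, N, T, C))], axis=-1)
print(concat.shape)                          # (2, 5, 7, 8) -> (B, N, T, 2C), then scored down to (B, N, T)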
def get_answerable_logits(self, contextual_embedding, p_mask):
"""Get the answerable logits.
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
p_mask
Shape (batch_size, sequence_length)
Mask the sequence.
0 --> Denote that the element is masked,
1 --> Denote that the element is not masked
Returns
-------
answerable_logits
Shape (batch_size, 2)
"""
# Shape (batch_size, sequence_length)
start_scores = np.squeeze(self.start_scores(contextual_embedding), -1)
start_score_weights = masked_softmax(start_scores, p_mask, axis=-1)
start_agg_feature = npx.batch_dot(np.expand_dims(start_score_weights, axis=1),
contextual_embedding)
start_agg_feature = np.squeeze(start_agg_feature, 1)
cls_feature = contextual_embedding[:, 0, :]
answerable_scores = self.answerable_scores(np.concatenate([start_agg_feature,
cls_feature], axis=-1))
answerable_logits = npx.log_softmax(answerable_scores, axis=-1)
return answerable_logits | Get the answerable logits.
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
p_mask
Shape (batch_size, sequence_length)
Mask the sequence.
0 --> Denote that the element is masked,
1 --> Denote that the element is not masked
Returns
-------
answerable_logits
Shape (batch_size, 2)
| get_answerable_logits | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
def forward(self, tokens, token_types, valid_length, p_mask, start_position):
"""
Parameters
----------
tokens
Shape (batch_size, sequence_length)
token_types
Shape (batch_size, sequence_length)
valid_length
Shape (batch_size,)
p_mask
Shape (batch_size, sequence_length)
start_position
Shape (batch_size,)
Returns
-------
start_logits
Shape (batch_size, sequence_length)
end_logits
Shape (batch_size, sequence_length)
answerable_logits
"""
if self.use_segmentation:
contextual_embeddings = self.backbone(tokens, token_types, valid_length)
else:
contextual_embeddings = self.backbone(tokens, valid_length)
start_logits = self.get_start_logits(contextual_embeddings, p_mask)
end_logits = self.get_end_logits(contextual_embeddings,
np.expand_dims(start_position, axis=1),
p_mask)
end_logits = np.squeeze(end_logits, axis=1)
answerable_logits = self.get_answerable_logits(contextual_embeddings, p_mask)
return start_logits, end_logits, answerable_logits |
Parameters
----------
tokens
Shape (batch_size, sequence_length)
token_types
Shape (batch_size, sequence_length)
valid_length
Shape (batch_size,)
p_mask
Shape (batch_size, sequence_length)
start_position
Shape (batch_size,)
Returns
-------
start_logits
Shape (batch_size, sequence_length)
end_logits
Shape (batch_size, sequence_length)
answerable_logits
| forward | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
def inference(self, tokens, token_types, valid_length, p_mask,
start_top_n: int = 5, end_top_n: int = 5):
"""Get the inference result with beam search
Parameters
----------
tokens
The input tokens. Shape (batch_size, sequence_length)
token_types
The input token types. Shape (batch_size, sequence_length)
valid_length
The valid length of the tokens. Shape (batch_size,)
p_mask
The mask which indicates that some tokens won't be used in the calculation.
Shape (batch_size, sequence_length)
start_top_n
The number of candidates to select for the start position.
end_top_n
The number of candidates to select for the end position.
Returns
-------
start_top_logits
The top start logits
Shape (batch_size, start_top_n)
start_top_index
Index of the top start logits
Shape (batch_size, start_top_n)
end_top_logits
The top end logits.
Shape (batch_size, start_top_n, end_top_n)
end_top_index
Index of the top end logits
Shape (batch_size, start_top_n, end_top_n)
answerable_logits
The answerable logits. Here 0 --> answerable and 1 --> not answerable.
Shape (batch_size, sequence_length, 2)
"""
# Shape (batch_size, sequence_length, C)
if self.use_segmentation:
contextual_embeddings = self.backbone(tokens, token_types, valid_length)
else:
contextual_embeddings = self.backbone(tokens, valid_length)
start_logits = self.get_start_logits(contextual_embeddings, p_mask)
# The shape of start_top_index will be (..., start_top_n)
start_top_logits, start_top_index = npx.topk(start_logits, k=start_top_n, axis=-1,
ret_typ='both')
end_logits = self.get_end_logits(contextual_embeddings, start_top_index, p_mask)
# Note that end_top_index and end_top_log_probs have shape (bsz, start_n_top, end_n_top)
# So that for each start position, there are end_n_top end positions on the third dim.
end_top_logits, end_top_index = npx.topk(end_logits, k=end_top_n, axis=-1,
ret_typ='both')
answerable_logits = self.get_answerable_logits(contextual_embeddings, p_mask)
return start_top_logits, start_top_index, end_top_logits, end_top_index, \
answerable_logits | Get the inference result with beam search
Parameters
----------
tokens
The input tokens. Shape (batch_size, sequence_length)
token_types
The input token types. Shape (batch_size, sequence_length)
valid_length
The valid length of the tokens. Shape (batch_size,)
p_mask
The mask which indicates that some tokens won't be used in the calculation.
Shape (batch_size, sequence_length)
start_top_n
The number of candidates to select for the start position.
end_top_n
The number of candidates to select for the end position.
Returns
-------
start_top_logits
The top start logits
Shape (batch_size, start_top_n)
start_top_index
Index of the top start logits
Shape (batch_size, start_top_n)
end_top_logits
The top end logits.
Shape (batch_size, start_top_n, end_top_n)
end_top_index
Index of the top end logits
Shape (batch_size, start_top_n, end_top_n)
answerable_logits
The answerable logits. Here 0 --> answerable and 1 --> not answerable.
Shape (batch_size, sequence_length, 2)
| inference | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
def get_chunks(self, doc_stride, max_chunk_length=None):
"""Get a sequence of chunks for the squad feature.
In reality, the document will be too long for the NLP model, and we will split it into
multiple chunks.
For example, consider the following
Doc: the man went to the store and bought a gallon of milk
We may divide it into four chunks:
Chunk 1: the man went to the
Chunk 2: to the store and bought
Chunk 3: and bought a gallon of
Chunk 4: gallon of milk
We will use our network to extract features for each chunk,
and do the aggregation afterwards. Here, one token may appear in multiple chunks.
We can vote the output based on some heuristic score functions.
Parameters
----------
doc_stride
The stride used when the context is too large and is split across several features.
max_chunk_length
The maximum size of the chunk
Returns
-------
ret
List of DocChunk objects
"""
doc_ptr = 0
max_chunk_length = max_chunk_length if max_chunk_length is not None else \
len(self.context_token_ids)
ret = []
while doc_ptr < len(self.context_token_ids):
chunk_length = min(max_chunk_length, len(self.context_token_ids) - doc_ptr)
if self.gt_answer_text is None:
chunk_gt_start_pos = None
chunk_gt_end_pos = None
chunk_is_impossible = True
else:
if self.gt_start_pos is not None and self.gt_end_pos is not None and\
self.gt_start_pos >= doc_ptr and self.gt_end_pos < doc_ptr + chunk_length:
# The chunk contains the ground-truth annotation
chunk_gt_start_pos = self.gt_start_pos - doc_ptr
chunk_gt_end_pos = self.gt_end_pos - doc_ptr
chunk_is_impossible = False
else:
chunk_gt_start_pos = None
chunk_gt_end_pos = None
chunk_is_impossible = True
ret.append(DocChunk(start=doc_ptr,
length=chunk_length,
is_impossible=chunk_is_impossible,
gt_start_pos=chunk_gt_start_pos,
gt_end_pos=chunk_gt_end_pos))
if doc_ptr + chunk_length == len(self.context_token_ids):
break
doc_ptr += doc_stride
return ret | Get a sequence of chunks for the squad feature.
In reality, the document will be too long for the NLP model, and we will split it into
multiple chunks.
For example, consider the following
Doc: the man went to the store and bought a gallon of milk
We may divide it into four chunks:
Chunk 1: the man went to the
Chunk 2: to the store and bought
Chunk 3: and bought a gallon of
Chunk 4: gallon of milk
We will use our network to extract features for each chunk,
and do the aggregation afterwards. Here, one token may appear in multiple chunks.
We can vote the output based on some heuristic score functions.
Parameters
----------
doc_stride
The stride used when the context is too large and is split across several features.
max_chunk_length
The maximum size of the chunk
Returns
-------
ret
List of DocChunk objects
| get_chunks | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
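A toy run of the chunking loop above, assuming a 12-token context with max_chunk_length=5 and doc_stride=3 (all numbers are made up):
context_token_ids = list(range(12))
doc_stride, max_chunk_length = 3, 5
doc_ptr, chunks = 0, []
while doc_ptr < len(context_token_ids):
    chunk_length = min(max_chunk_length, len(context_token_ids) - doc_ptr)
    chunks.append((doc_ptr, chunk_length))
    if doc_ptr + chunk_length == len(context_token_ids):
        break
    doc_ptr += doc_stride
print(chunks)   # [(0, 5), (3, 5), (6, 5), (9, 3)] - consecutive chunks overlap by 2 tokens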
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace.
This is from the official evaluate-v2.0.py in SQuAD.
"""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | Lower text and remove punctuation, articles and extra whitespace.
This is from the official evaluate-v2.0.py in SQuAD.
| normalize_answer | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
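Hedged usage example of the normalization above, assuming the function as defined in the snippet:
print(normalize_answer("The  Quick, Brown Fox!"))                                    # quick brown fox
print(normalize_answer("a gallon of Milk") == normalize_answer("gallon of milk"))    # True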
def get_squad_examples_from_json(json_file: str, is_training: bool) -> List[SquadExample]:
"""
    Read the raw json file and convert all entries to examples.
Parameters
----------
json_file
The path to the json file
is_training
Whether or not training
Returns
-------
ret
List of SquadExample objects
"""
with open(json_file, 'r') as f:
data = json.load(f)
examples = []
for entry in tqdm(data['data']):
title = entry['title']
for paragraph in entry['paragraphs']:
context_text = paragraph['context']
for qa in paragraph['qas']:
qas_id = qa['id']
query_text = qa['question']
start_position = None
end_position = None
answer_text = None
answers = None
if "is_impossible" in qa:
is_impossible = qa["is_impossible"]
else:
is_impossible = False
if not is_impossible:
if is_training:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position = answer["answer_start"]
end_position = start_position + len(answer_text)
if context_text[start_position:end_position] != answer_text:
warnings.warn(
'Mismatch start/end and answer_text, start/end={}/{},'
' answer text={}. qas={}'
.format(start_position, end_position, answer_text, qas_id))
else:
answers = qa["answers"]
example = SquadExample(
qas_id=qas_id,
query_text=query_text,
context_text=context_text,
answer_text=answer_text,
start_position=start_position,
end_position=end_position,
title=title,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
return examples |
Read the raw json file and convert all entries to examples.
Parameters
----------
json_file
The path to the json file
is_training
Whether or not training
Returns
-------
ret
List of SquadExample objects
| get_squad_examples_from_json | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
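For reference, a minimal made-up example of the SQuAD-style json layout consumed above; only the keys the function actually reads are shown:
example_json = {
    "data": [{
        "title": "Milk",
        "paragraphs": [{
            "context": "The man bought a gallon of milk.",
            "qas": [{
                "id": "q1",
                "question": "What did the man buy?",
                "is_impossible": False,
                "answers": [{"text": "a gallon of milk", "answer_start": 15}],
            }],
        }],
    }]
}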
def convert_squad_example_to_feature(example: SquadExample,
tokenizer: BaseTokenizerWithVocab,
is_training: bool):
"""
Convert a SquadExample object to a SquadFeature object with the designated tokenizer.
    There are actually a few examples that cannot be converted properly with token-level
    tokenization, because the ground truth is given by the start position and the answer text,
    and some examples are annotated with wrong labels. Thus, the attributes unreliable_span and
    token_answer_mismatch are used to indicate these scenarios.
Parameters
----------
example
A single squad example
tokenizer
The trained tokenizer
is_training
Whether to deal with the training case
Returns
-------
feature
A SquadFeature
"""
context_text = example.context_text
answer_text = example.answer_text
query_text = example.query_text
context_token_ids, offsets = tokenizer.encode_with_offsets(context_text, int)
query_token_ids = tokenizer.encode(query_text, int)
gt_answer_text = answer_text
gt_span_start_pos, gt_span_end_pos = None, None
token_answer_mismatch = False
unreliable_span = False
np_offsets = _np.array(offsets)
if is_training and not example.is_impossible:
assert example.start_position >= 0 and example.end_position >= 0
# We convert the character-level offsets to token-level offsets
# Also, if the answer after tokenization + detokenization is not the same as the original
# answer, we try to localize the answer text and do a rematch
candidates = [(example.start_position, example.end_position)]
all_possible_start_pos = {example.start_position}
find_all_candidates = False
lower_idx, upper_idx = None, None
first_lower_idx, first_upper_idx = None, None
while len(candidates) > 0:
start_position, end_position = candidates.pop()
# Match the token offsets
token_start_ends = match_tokens_with_char_spans(np_offsets,
_np.array([[start_position,
end_position]]))
lower_idx = int(token_start_ends[0][0])
upper_idx = int(token_start_ends[0][1])
if not find_all_candidates:
first_lower_idx = lower_idx
first_upper_idx = upper_idx
# The new start pos and end_pos are the lower_idx and upper_idx
sliced_answer = context_text[offsets[lower_idx][0]:offsets[upper_idx][1]]
norm_sliced_answer = normalize_answer(sliced_answer)
norm_answer = normalize_answer(answer_text)
if norm_sliced_answer != norm_answer:
if not find_all_candidates:
# Try to find a better start+end of the answer and insert all positions to the
# candidates
find_all_candidates = True
pos = context_text.find(answer_text)
while pos != -1:
if pos not in all_possible_start_pos:
all_possible_start_pos.add(pos)
candidates.append((pos, pos + len(answer_text)))
pos = context_text.find(answer_text, pos + 1)
elif len(candidates) == 0:
token_answer_mismatch = True
lower_idx = first_lower_idx
upper_idx = first_upper_idx
if int_float_regex.match(answer_text):
# Find an integer/float and the sample won't be reliable.
# The span-based approach is not suitable for this scenario and we will
# set the unreliable span flag.
unreliable_span = True
else:
break
gt_span_start_pos = lower_idx
gt_span_end_pos = upper_idx
feature = SquadFeature(qas_id=example.qas_id,
query_token_ids=query_token_ids,
context_text=context_text,
context_token_ids=context_token_ids,
context_token_offsets=offsets,
is_impossible=example.is_impossible,
token_answer_mismatch=token_answer_mismatch,
unreliable_span=unreliable_span,
gt_answer_text=gt_answer_text,
gt_start_pos=gt_span_start_pos,
gt_end_pos=gt_span_end_pos)
return feature |
Convert a SquadExample object to a SquadFeature object with the designated tokenizer.
There are actually a few examples that cannot be converted properly with token-level
tokenization, because the ground truth is given by the start position and the answer text,
and some examples are annotated with wrong labels. Thus, the attributes unreliable_span and
token_answer_mismatch are used to indicate these scenarios.
Parameters
----------
example
A single squad example
tokenizer
The trained tokenizer
is_training
Whether to deal with the training case
Returns
-------
feature
A SquadFeature
| convert_squad_example_to_feature | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
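A toy sketch, in the spirit of match_tokens_with_char_spans used above, of how a character-level answer span is snapped onto token offsets; the offsets and answer span below are made up:
offsets = [(0, 3), (4, 7), (8, 14), (15, 16), (17, 23), (24, 26), (27, 31)]  # per-token char offsets
answer_start, answer_end = 15, 31            # characters covering a hypothetical answer "a gallon of milk"
lower_idx = max(i for i, (s, e) in enumerate(offsets) if s <= answer_start)
upper_idx = min(i for i, (s, e) in enumerate(offsets) if e >= answer_end)
print(lower_idx, upper_idx)                  # 3 6 -> the answer spans tokens 3..6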
def __init__(self, tokenizer, doc_stride, max_seq_length, max_query_length):
"""
Parameters
----------
tokenizer
The tokenizer
doc_stride
The stride to chunk the document
max_seq_length
Maximum length of the merged data
max_query_length
Maximum query length
"""
self._tokenizer = tokenizer
self._doc_stride = doc_stride
self._max_seq_length = max_seq_length
self._max_query_length = max_query_length
vocab = tokenizer.vocab
self.pad_id = vocab.pad_id
        # For the RoBERTa model, take the special token <s> as [CLS] and </s> as [SEP]
self.cls_id = vocab.bos_id if 'cls_token' not in vocab.special_token_keys else vocab.cls_id
self.sep_id = vocab.eos_id if 'sep_token' not in vocab.special_token_keys else vocab.sep_id
# TODO(sxjscience) Consider to combine the NamedTuple and batchify functionality.
self.ChunkFeature = collections.namedtuple('ChunkFeature',
['qas_id',
'data',
'valid_length',
'segment_ids',
'masks',
'is_impossible',
'gt_start',
'gt_end',
'context_offset',
'chunk_start',
'chunk_length'])
self.BatchifyFunction = bf.NamedTuple(self.ChunkFeature,
{'qas_id': bf.List(),
'data': bf.Pad(val=self.pad_id),
'valid_length': bf.Stack(),
'segment_ids': bf.Pad(),
'masks': bf.Pad(val=1),
'is_impossible': bf.Stack(),
'gt_start': bf.Stack(),
'gt_end': bf.Stack(),
'context_offset': bf.Stack(),
'chunk_start': bf.Stack(),
'chunk_length': bf.Stack()}) |
Parameters
----------
tokenizer
The tokenizer
doc_stride
The stride to chunk the document
max_seq_length
Maximum length of the merged data
max_query_length
Maximum query length
| __init__ | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
def process_sample(self, feature: SquadFeature):
"""Process the data to the following format.
Note that we mask all the special tokens except the CLS token. The reason for not masking
the CLS token is that if the question is not answerable, we will set the start and end to
be 0.
Merged: <CLS> Question <SEP> Context <SEP>
Segment IDs: 0 0 0 1 1
Mask: 0 1 1 0 1
Here, we need to emphasize that when mask = 1, the data are actually not masked!
Parameters
----------
feature
Tokenized SQuAD feature
Returns
-------
ret
Divide the feature into multiple chunks and extract the feature which contains
the following:
- data
The data that concatenates the query and the context + special tokens
- valid_length
The valid_length of the data
- segment_ids
We assign the query part as segment 0 and the context part as segment 1.
- masks
We mask all the special tokens. 1 --> not masked, 0 --> masked.
- is_impossible
Whether the provided context is impossible to answer or not.
- gt_start
The ground-truth start location of the span
- gt_end
The ground-truth end location of the span
- chunk_start
The start of the chunk
- chunk_length
The length of the chunk
"""
ret = []
truncated_query_ids = feature.query_token_ids[:self._max_query_length]
chunks = feature.get_chunks(
doc_stride=self._doc_stride,
max_chunk_length=self._max_seq_length - len(truncated_query_ids) - 3)
for chunk in chunks:
data = _np.array([self.cls_id] + truncated_query_ids + [self.sep_id] +
feature.context_token_ids[chunk.start:(chunk.start + chunk.length)] +
[self.sep_id], dtype=_np.int32)
valid_length = len(data)
segment_ids = _np.array([0] + [0] * len(truncated_query_ids) +
[0] + [1] * chunk.length + [1], dtype=_np.int32)
masks = _np.array([0] + [1] * len(truncated_query_ids) + [1] + [0] * chunk.length + [1],
dtype=_np.int32)
context_offset = len(truncated_query_ids) + 2
if chunk.gt_start_pos is None and chunk.gt_end_pos is None:
start_pos = 0
end_pos = 0
else:
# Here, we increase the start and end because we put query before context
start_pos = chunk.gt_start_pos + context_offset
end_pos = chunk.gt_end_pos + context_offset
chunk_feature = self.ChunkFeature(qas_id=feature.qas_id,
data=data,
valid_length=valid_length,
segment_ids=segment_ids,
masks=masks,
is_impossible=chunk.is_impossible,
gt_start=start_pos,
gt_end=end_pos,
context_offset=context_offset,
chunk_start=chunk.start,
chunk_length=chunk.length)
ret.append(chunk_feature)
return ret | Process the data to the following format.
Note that we mask all the special tokens except the CLS token. The reason for not masking
the CLS token is that if the question is not answerable, we will set the start and end to
be 0.
Merged: <CLS> Question <SEP> Context <SEP>
Segment IDs: 0 0 0 1 1
Mask: 0 1 1 0 1
Here, we need to emphasize that when mask = 1, the data are actually not masked!
Parameters
----------
feature
Tokenized SQuAD feature
Returns
-------
ret
Divide the feature into multiple chunks and extract the feature which contains
the following:
- data
The data that concatenates the query and the context + special tokens
- valid_length
The valid_length of the data
- segment_ids
We assign the query part as segment 0 and the context part as segment 1.
- masks
We mask all the special tokens. 1 --> not masked, 0 --> masked.
- is_impossible
Whether the provided context is impossible to answer or not.
- gt_start
The ground-truth start location of the span
- gt_end
The ground-truth end location of the span
- chunk_start
The start of the chunk
- chunk_length
The length of the chunk
| process_sample | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
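A toy rendering of the merged chunk layout described above, using made-up token ids (101/102 stand in for the [CLS]/[SEP] ids):
truncated_query_ids = [11, 12]            # hypothetical query token ids
context_chunk_ids = [21, 22, 23]          # hypothetical context token ids
cls_id, sep_id = 101, 102
data = [cls_id] + truncated_query_ids + [sep_id] + context_chunk_ids + [sep_id]
segment_ids = [0] * (len(truncated_query_ids) + 2) + [1] * (len(context_chunk_ids) + 1)
masks = [0] + [1] * len(truncated_query_ids) + [1] + [0] * len(context_chunk_ids) + [1]
context_offset = len(truncated_query_ids) + 2
print(data)            # [101, 11, 12, 102, 21, 22, 23, 102]
print(segment_ids)     # [0, 0, 0, 0, 1, 1, 1, 1]
print(masks)           # [0, 1, 1, 1, 0, 0, 0, 1]
print(context_offset)  # 4 - index of the first context token in `data`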
def get_train(self, features, skip_unreliable=True):
"""Get the training dataset
Parameters
----------
features
skip_unreliable
Whether to skip the unreliable spans in the training set
Returns
-------
train_dataset
num_token_answer_mismatch
num_unreliable
"""
train_dataset = []
num_token_answer_mismatch = 0
num_unreliable = 0
for feature in features:
if feature.token_answer_mismatch:
num_token_answer_mismatch += 1
if feature.unreliable_span:
num_unreliable += 1
if skip_unreliable and feature.unreliable_span:
# Skip when not reliable
continue
# Process the feature
chunk_features = self.process_sample(feature)
train_dataset.extend(chunk_features)
return train_dataset, num_token_answer_mismatch, num_unreliable | Get the training dataset
Parameters
----------
features
skip_unreliable
Whether to skip the unreliable spans in the training set
Returns
-------
train_dataset
num_token_answer_mismatch
num_unreliable
| get_train | python | dmlc/gluon-nlp | docs/tutorials/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py | Apache-2.0 |
def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
"""
    This function wraps another function into its own separate process.
In order to ensure accurate memory measurements it is important that the function
is executed in a separate process
Args:
- `func`: (`callable`): function() -> ...
generic function which will be executed in its own separate process
- `do_multi_processing`: (`bool`)
Whether to run function on separate process or not
"""
def multi_process_func(*args, **kwargs):
# run function in an individual
# process to get correct memory
def wrapper_func(queue: Queue, *args):
try:
result = func(*args)
except Exception as e:
logger.error(e)
print(e)
result = "N/A"
queue.put(result)
queue = Queue()
p = Process(target=wrapper_func, args=[queue] + list(args))
p.start()
result = queue.get()
p.join()
return result
if do_multi_processing:
logging.info("fFunction {func} is executed in its own process...")
return multi_process_func
else:
return func |
    This function wraps another function into its own separate process.
In order to ensure accurate memory measurements it is important that the function
is executed in a separate process
Args:
- `func`: (`callable`): function() -> ...
generic function which will be executed in its own separate process
- `do_multi_processing`: (`bool`)
Whether to run function on separate process or not
| separate_process_wrapper_fn | python | dmlc/gluon-nlp | scripts/benchmarks/benchmark_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py | Apache-2.0 |
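A hedged usage sketch of the wrapper above; the workload function is made up, and the sketch assumes a fork-based multiprocessing start method so the closure-based wrapper can run in the child process:
def measure_once():
    data = [0] * 1_000_000      # allocate something worth measuring
    return len(data)

wrapped = separate_process_wrapper_fn(measure_once, do_multi_processing=True)
print(wrapped())                # 1000000, computed in a child process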
def get_cpu_memory(process_id: int) -> int:
"""
measures current cpu memory usage of a given `process_id`
Args:
- `process_id`: (`int`)
process_id for which to measure memory
Returns
- `memory`: (`int`)
            consumed memory in Bytes
"""
process = psutil.Process(process_id)
try:
meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
memory = getattr(process, meminfo_attr)()[0]
except psutil.AccessDenied:
raise ValueError("Error with Psutil.")
return memory |
measures current cpu memory usage of a given `process_id`
Args:
- `process_id`: (`int`)
process_id for which to measure memory
Returns
- `memory`: (`int`)
            consumed memory in Bytes
| get_cpu_memory | python | dmlc/gluon-nlp | scripts/benchmarks/benchmark_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py | Apache-2.0 |
def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
"""
measures peak cpu memory consumption of a given `function`
running the function for at least interval seconds
and at most 20 * interval seconds.
This function is heavily inspired by: `memory_usage`
of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
Args:
- `function`: (`callable`): function() -> ...
            function without any arguments for which to measure the peak memory
        - `interval`: (`float`, `optional`, defaults to `0.5`)
            interval in seconds for which to measure the memory usage
- `device_idx`: (`int`, `optional`, defaults to `None`)
device id for which to measure gpu usage
Returns:
- `max_memory`: (`int`)
            consumed memory peak in Bytes
"""
if not is_psutil_available():
logger.warning(
"Psutil not installed, we won't log CPU memory usage. "
"Install Psutil (pip install psutil) to use CPU memory tracing."
)
max_memory = "N/A"
else:
while True:
# create child, parent connection
child_connection, parent_connection = Pipe()
# instantiate process
mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
mem_process.start()
# wait until we get memory
parent_connection.recv()
try:
# execute function
function()
# start parent connection
parent_connection.send(0)
# receive memory and num measurements
max_memory = parent_connection.recv()
num_measurements = parent_connection.recv()
except Exception:
# kill process in a clean way
parent = psutil.Process(os.getpid())
for child in parent.children(recursive=True):
os.kill(child.pid, SIGKILL)
mem_process.join(0)
raise RuntimeError("Process killed. Error in Process")
# run process at least 20 * interval or until it finishes
mem_process.join(20 * interval)
if (num_measurements > 4) or (interval < 1e-6):
break
# reduce interval
interval /= 10
return max_memory |
measures peak cpu memory consumption of a given `function`
running the function for at least interval seconds
and at most 20 * interval seconds.
This function is heavily inspired by: `memory_usage`
of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
Args:
- `function`: (`callable`): function() -> ...
            function without any arguments for which to measure the peak memory
        - `interval`: (`float`, `optional`, defaults to `0.5`)
            interval in seconds for which to measure the memory usage
- `device_idx`: (`int`, `optional`, defaults to `None`)
device id for which to measure gpu usage
Returns:
- `max_memory`: (`int`)
            consumed memory peak in Bytes
| measure_peak_memory_cpu | python | dmlc/gluon-nlp | scripts/benchmarks/benchmark_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py | Apache-2.0 |
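A hedged usage sketch of the peak-memory helper above; the workload function is made up, and the result can be "N/A" when psutil is not installed:
import numpy as np

def allocate():
    _ = np.ones((256, 1024, 1024), dtype=np.uint8)   # roughly 256 MiB transient allocation

peak_bytes = measure_peak_memory_cpu(allocate, interval=0.1)
if peak_bytes != "N/A":
    print("peak RSS during call: {:.1f} MiB".format(peak_bytes / 2**20))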
def traceit(frame, event, args):
""" Tracing method executed before running each line in a module or sub-module
Record memory allocated in a list with debugging information
"""
global _is_memory_tracing_enabled
if not _is_memory_tracing_enabled:
return traceit
# Filter events
if events_to_trace is not None:
if isinstance(events_to_trace, str) and event != events_to_trace:
return traceit
elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
return traceit
if "__name__" not in frame.f_globals:
return traceit
# Filter modules
name = frame.f_globals["__name__"]
if not isinstance(name, str):
return traceit
else:
# Filter whitelist of modules to trace
if modules_to_trace is not None:
if isinstance(modules_to_trace, str) and modules_to_trace not in name:
return traceit
elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
return traceit
# Filter blacklist of modules not to trace
if modules_not_to_trace is not None:
if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
return traceit
elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
return traceit
# Record current tracing state (file, location in file...)
lineno = frame.f_lineno
filename = frame.f_globals["__file__"]
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
line = linecache.getline(filename, lineno).rstrip()
traced_state = Frame(filename, name, lineno, event, line)
# Record current memory state (rss memory) and compute difference with previous memory state
cpu_mem = 0
if process is not None:
mem = process.memory_info()
cpu_mem = mem.rss
gpu_mem = 0
if log_gpu:
# Clear GPU caches
if is_mxnet_available():
for ctx in mx_all_contexts:
ctx.empty_cache()
if is_torch_available():
torch_empty_cache()
if is_tf_available():
tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
# Sum used memory for all GPUs
nvml.nvmlInit()
for i in devices:
handle = nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem += meminfo.used
nvml.nvmlShutdown()
mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
memory_trace.append(mem_state)
return traceit | Tracing method executed before running each line in a module or sub-module
Record memory allocated in a list with debugging information
| traceit | python | dmlc/gluon-nlp | scripts/benchmarks/benchmark_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py | Apache-2.0 |
def stop_memory_tracing(
memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
) -> Optional[MemorySummary]:
""" Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
Args:
- `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary
        - `ignore_released_memory` (boolean, default: True): if True we only sum memory increases to compute total memory
Return:
- None if `memory_trace` is None
- `MemorySummary` namedtuple otherwise with the fields:
- `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace`
            by subtracting the memory before executing each line from the memory after executing it.
- `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
obtained by summing repeated memory increase for a line if it's executed several times.
The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released)
- `total`: total memory increase during the full tracing as a `Memory` named tuple (see below).
            Lines with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
    The `Memory` named tuple has the fields
        - `bytes` (integer): number of bytes,
- `string` (string): same as human readable string (ex: "3.5MB")
`Frame` are namedtuple used to list the current frame state and have the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
`MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
- `frame` (`Frame`): the current frame (see above)
        - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
        - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
        - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
"""
global _is_memory_tracing_enabled
_is_memory_tracing_enabled = False
if memory_trace is not None and len(memory_trace) > 1:
memory_diff_trace = []
memory_curr_trace = []
cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
for ((frame, cpu_mem, gpu_mem), (next_frame, next_cpu_mem, next_gpu_mem),) in zip(
memory_trace[:-1], memory_trace[1:]
):
cpu_mem_inc = next_cpu_mem - cpu_mem
gpu_mem_inc = next_gpu_mem - gpu_mem
cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
memory_diff_trace.append(
MemoryState(
frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc),
)
)
memory_curr_trace.append(
MemoryState(
frame=frame,
cpu=Memory(next_cpu_mem),
gpu=Memory(next_gpu_mem),
cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
)
)
cumulative_memory_dict[frame][0] += cpu_mem_inc
cumulative_memory_dict[frame][1] += gpu_mem_inc
cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
cumulative_memory = sorted(
list(cumulative_memory_dict.items()), key=lambda x: x[1][2], reverse=True
) # order by the total CPU + GPU memory increase
cumulative_memory = list(
MemoryState(
frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc),
)
for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
)
memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)
if ignore_released_memory:
total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
else:
total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
total_memory = Memory(total_memory)
return MemorySummary(
sequential=memory_diff_trace, cumulative=cumulative_memory, current=memory_curr_trace, total=total_memory,
)
return None | Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
Args:
- `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary
        - `ignore_released_memory` (boolean, default: True): if True we only sum memory increases to compute total memory
Return:
- None if `memory_trace` is None
- `MemorySummary` namedtuple otherwise with the fields:
- `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace`
            by subtracting the memory before executing each line from the memory after executing it.
- `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
obtained by summing repeated memory increase for a line if it's executed several times.
The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released)
- `total`: total memory increase during the full tracing as a `Memory` named tuple (see below).
            Lines with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
    The `Memory` named tuple has the fields
        - `bytes` (integer): number of bytes,
- `string` (string): same as human readable string (ex: "3.5MB")
`Frame` are namedtuple used to list the current frame state and have the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
`MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
- `frame` (`Frame`): the current frame (see above)
        - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
        - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
        - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
| stop_memory_tracing | python | dmlc/gluon-nlp | scripts/benchmarks/benchmark_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py | Apache-2.0 |
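A hedged usage sketch, assuming the companion start_memory_tracing() from the same module installs the traceit hook shown earlier; run_model is a made-up workload:
trace = start_memory_tracing(modules_to_trace="transformers")   # start line-by-line tracing
run_model()                                                     # hypothetical workload
summary = stop_memory_tracing(trace)
print(summary.total.string)                                     # total memory increase, e.g. "1.2GB"
for state in summary.cumulative[:3]:                            # top-3 lines by CPU + GPU increase
    print(state.frame.filename, state.frame.line_number, state.cpu_gpu.string)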
def __init__(self, workloads, model_names, use_fp16=False,
repeat=3, use_gpu=True,
device_idx=0,
profile_inference=True,
profile_train=True,
env_print=True,
to_csv=False,
use_tvm=False,
instance_type=None,
layout='NT',
compute_layout='auto',
inference_out_csv_file='inference_time_memory.csv',
train_out_csv_file='train_time_memory.csv',
env_info_file='env_info.csv'):
"""
Parameters
----------
workloads
List of workloads to profile
model_names
List of model names to profile
use_fp16
Whether to use fp16
repeat
            The number of repeats
use_gpu
Whether to use GPU
device_idx
The GPU ID
profile_inference
Whether to profile inference
profile_train
Whether to profile training
env_print
Whether to print the environment
to_csv
Whether to dump to csv file
use_tvm
            Whether to use TVM to accelerate the inference
instance_type
            Type of the instance. This is only used when reporting the results.
layout
The input + output layout
compute_layout
The computation layout
inference_out_csv_file
train_out_csv_file
env_info_file
"""
        if not isinstance(workloads, list):
            workloads = [workloads]
        if not isinstance(model_names, (list, tuple)):
            model_names = [model_names]
        self._workloads = workloads
self._model_names = model_names
self._use_fp16 = use_fp16
self._repeat = repeat
self._use_gpu = use_gpu
self._device_idx = device_idx
self._environment_info = None
self._profile_inference = profile_inference
self._profile_train = profile_train
self._env_print = env_print
self._to_csv = to_csv
self._use_tvm = use_tvm
self._instance_type = instance_type
self._layout = layout
self._compute_layout = compute_layout
self._inference_out_csv_file = inference_out_csv_file
self._train_out_csv_file = train_out_csv_file
self._env_info_file = env_info_file |
Parameters
----------
workloads
List of workloads to profile
model_names
List of model names to profile
use_fp16
Whether to use fp16
repeat
        The number of repeats
use_gpu
Whether to use GPU
device_idx
The GPU ID
profile_inference
Whether to profile inference
profile_train
Whether to profile training
env_print
Whether to print the environment
to_csv
Whether to dump to csv file
use_tvm
        Whether to use TVM to accelerate the inference
instance_type
        Type of the instance. This is only used when reporting the results.
layout
The input + output layout
compute_layout
The computation layout
inference_out_csv_file
train_out_csv_file
env_info_file
| __init__ | python | dmlc/gluon-nlp | scripts/benchmarks/benchmark_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py | Apache-2.0 |
def get_network(model_name,
ctx_l,
checkpoint_path=None,
backbone_path=None,
task=None):
"""
    Get the network for fine-tuning the text classification task
"""
use_segmentation = 'roberta' not in model_name and 'xlmr' not in model_name
Model, cfg, tokenizer, download_params_path, _ = \
get_backbone(model_name, load_backbone=not backbone_path)
backbone = Model.from_cfg(cfg)
# Load local backbone parameters if backbone_path provided.
# Otherwise, download backbone parameters from gluon zoo.
backbone_params_path = backbone_path if backbone_path else download_params_path
if checkpoint_path is None:
backbone.load_parameters(backbone_params_path, ignore_extra=True,
ctx=ctx_l, cast_dtype=True)
num_params, num_fixed_params \
= count_parameters(deduplicate_param_dict(backbone.collect_params()))
logging.info(
            'Loading Backbone Model from {}, with total/fixed parameters={}/{}'.format(
backbone_params_path, num_params, num_fixed_params))
classify_net = TextPredictionNet(backbone, task.class_num)
if checkpoint_path is None:
# Ignore the UserWarning during initialization,
# There is no need to re-initialize the parameters of backbone
classify_net.initialize(ctx=ctx_l)
else:
classify_net.load_parameters(checkpoint_path, ctx=ctx_l, cast_dtype=True)
classify_net.hybridize()
return cfg, tokenizer, classify_net, use_segmentation |
Get the network that fine-tunes the Question Answering Task
| get_network | python | dmlc/gluon-nlp | scripts/classification/train_classification.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/classification/train_classification.py | Apache-2.0 |
def convert_tf_assets(tf_assets_dir, model_size, electra_path):
"""Convert the assets file including config, vocab and tokenizer model"""
file_names = os.listdir(tf_assets_dir)
vocab_path = None
for ele in file_names:
if ele.endswith('.txt'):
assert vocab_path is None
vocab_path = ele
assert vocab_path is not None
if vocab_path:
vocab_path = os.path.join(tf_assets_dir, vocab_path)
vocab_size = len(open(vocab_path, 'r', encoding='utf-8').readlines())
config_dict = get_dict_config(model_size, electra_path)
cfg = convert_tf_config(config_dict, vocab_size)
return cfg, vocab_path | Convert the assets file including config, vocab and tokenizer model | convert_tf_assets | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_electra.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_electra.py | Apache-2.0 |
def get_name_map(tf_names, convert_type='backbone'):
"""
Get the converting mapping between tensor names and mxnet names.
The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert,
but there is no guarantee that it can match to other tf models in case of
    some special variable_scope (tensorflow) and prefix (mxnet).
Redefined mapping is encouraged to adapt the personalization model.
Parameters
----------
tf_names
the parameters names of tensorflow model
convert_type
choices=['backbone', 'disc', 'gen']
Returns
-------
A dictionary with the following format:
{tf_names : mx_names}
"""
name_map = {}
for source_name in tf_names:
target_name = source_name
if convert_type == 'backbone':
if 'electra' not in source_name:
continue
elif convert_type == 'disc':
target_name = 'backbone_model.' + target_name
if 'generator' in source_name:
continue
elif convert_type == 'gen':
target_name = 'backbone_model.' + target_name
if 'generator' not in source_name:
continue
else:
raise NotImplementedError
# skip the qkv weights
if 'self/' in source_name:
name_map[source_name] = None
continue
for old, new in CONVERT_MAP:
target_name = target_name.replace(old, new)
name_map[source_name] = target_name
return name_map |
Get the converting mapping between tensor names and mxnet names.
The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert,
but there is no guarantee that it can match to other tf models in case of
some sepecial variable_scope (tensorflow) and prefix (mxnet).
Redefined mapping is encouraged to adapt the personalization model.
Parameters
----------
tf_names
the parameters names of tensorflow model
convert_type
choices=['backbone', 'disc', 'gen']
Returns
-------
A dictionary with the following format:
{tf_names : mx_names}
| get_name_map | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_electra.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_electra.py | Apache-2.0 |
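A minimal usage sketch for get_name_map above. The TensorFlow variable names are illustrative stand-ins rather than entries from a real checkpoint, and the import assumes convert_electra.py with its module-level CONVERT_MAP is on the path.

# Hypothetical example: map a few illustrative TF variable names to Gluon parameter names.
from convert_electra import get_name_map  # assumes the script shown above is importable

tf_names = [
    'electra/embeddings/word_embeddings',
    'electra/encoder/layer_0/attention/self/query/kernel',  # qkv weights are mapped to None
    'electra/encoder/layer_0/output/dense/kernel',
]
name_map = get_name_map(tf_names, convert_type='backbone')
for tf_name, mx_name in name_map.items():
    print(tf_name, '->', mx_name)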
def convert_qkv_weights(tf_prefix, mx_prefix):
"""
To convert the qkv weights with different prefix.
In tensorflow framework, the prefix of query/key/value for the albert model is
'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel',
and that for the bert model is 'bert/encoder/layer_{}/attention/self/key/bias'.
In gluonnlp framework, the prefix is slightly different as
'encoder.all_encoder_groups.0.attn_qkv.weight' for albert model and
'encoder.all_layers.{}.attn_qkv.weight' for bert model, as the
curly braces {} can be filled with the layer number.
"""
# Merge query_weight, key_weight, value_weight to mx_params
query_weight = tf_params[
'{}/query/kernel'.format(tf_prefix)]
key_weight = tf_params[
'{}/key/kernel'.format(tf_prefix)]
value_weight = tf_params[
'{}/value/kernel'.format(tf_prefix)]
mx_params['{}.attn_qkv.weight'.format(mx_prefix)].set_data(
np.concatenate([query_weight, key_weight, value_weight], axis=1).T)
# Merge query_bias, key_bias, value_bias to mx_params
query_bias = tf_params[
'{}/query/bias'.format(tf_prefix)]
key_bias = tf_params[
'{}/key/bias'.format(tf_prefix)]
value_bias = tf_params[
'{}/value/bias'.format(tf_prefix)]
mx_params['{}.attn_qkv.bias'.format(mx_prefix)].set_data(
np.concatenate([query_bias, key_bias, value_bias], axis=0)) |
To convert the qkv weights with different prefix.
In tensorflow framework, the prefix of query/key/value for the albert model is
'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel',
and that for the bert model is 'bert/encoder/layer_{}/attention/self/key/bias'.
In gluonnlp framework, the prefix is slightly different as
'encoder.all_encoder_groups.0.attn_qkv.weight' for albert model and
'encoder.all_layers.{}.attn_qkv.weight' for bert model, as the
curly braces {} can be filled with the layer number.
| convert_qkv_weights | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_electra.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_electra.py | Apache-2.0 |
def convert_tf_assets(tf_assets_dir):
"""Convert the assets file including config, vocab and tokenizer model"""
file_names = os.listdir(tf_assets_dir)
vocab_path = None
json_cfg_path = None
for ele in file_names:
if ele.endswith('.txt'):
assert vocab_path is None
vocab_path = ele
elif ele.endswith('.json'):
assert json_cfg_path is None
json_cfg_path = ele
assert vocab_path is not None and json_cfg_path is not None
vocab_path = os.path.join(tf_assets_dir, vocab_path)
vocab_size = len(open(vocab_path, 'r', encoding='utf-8').readlines())
json_cfg_path = os.path.join(tf_assets_dir, json_cfg_path)
cfg = convert_tf_config(json_cfg_path, vocab_size)
return cfg, json_cfg_path, vocab_path | Convert the assets file including config, vocab and tokenizer model | convert_tf_assets | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_mobilebert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_mobilebert.py | Apache-2.0 |
def get_name_map(tf_names, num_stacked_ffn):
"""
Get the converting mapping between tensor names and mxnet names.
The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert,
but there is no guarantee that it can match to other tf models in case of
    some special variable_scope (tensorflow) and prefix (mxnet).
Redefined mapping is encouraged to adapt the personalization model.
Parameters
----------
tf_names
the parameters names of tensorflow model
Returns
-------
A dictionary with the following format:
{tf_names : mx_names}
"""
name_map = {}
for source_name in tf_names:
target_name = source_name
ffn_idx = re.findall(r'ffn_layer_\d+', target_name)
if ffn_idx:
target_name = target_name.replace(ffn_idx[0], 'ffn_layers_xxx')
for old, new in CONVERT_MAP:
target_name = target_name.replace(old, new)
if ffn_idx:
target_name = target_name.replace('stacked_ffn.xxx', 'stacked_ffn.' + ffn_idx[0][10:])
if 'stacked_ffn.xxy' in target_name:
target_name = target_name.replace(
'stacked_ffn.xxy', 'stacked_ffn.' + str(num_stacked_ffn - 1))
name_map[source_name] = target_name
return name_map |
Get the converting mapping between tensor names and mxnet names.
The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert,
but there is no guarantee that it can match to other tf models in case of
some special variable_scope (tensorflow) and prefix (mxnet).
Redefined mapping is encouraged to adapt the personalization model.
Parameters
----------
tf_names
the parameters names of tensorflow model
Returns
-------
A dictionary with the following format:
{tf_names : mx_names}
| get_name_map | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_mobilebert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_mobilebert.py | Apache-2.0 |
def convert_tf_assets(tf_assets_dir, model_type):
"""Convert the assets file including config, vocab and tokenizer model"""
file_names = os.listdir(tf_assets_dir)
json_cfg_path = None
spm_model_path = None
vocab_path = None
for ele in file_names:
if ele.endswith('.model'):
assert spm_model_path is None
spm_model_path = ele
elif ele.endswith('.json'):
assert json_cfg_path is None
json_cfg_path = ele
elif ele.endswith('.txt'):
assert vocab_path is None
vocab_path = ele
assert json_cfg_path is not None and \
        (spm_model_path is not None or vocab_path is not None), "The json config file is missing, " \
        "or neither a sentencepiece model nor a vocabulary file exists in the assets directory"
json_cfg_path = os.path.join(tf_assets_dir, json_cfg_path)
if spm_model_path:
spm_model_path = os.path.join(tf_assets_dir, spm_model_path)
tokenizer = SentencepieceTokenizer(spm_model_path)
vocab_size = len(tokenizer.vocab)
elif vocab_path:
vocab_path = os.path.join(tf_assets_dir, vocab_path)
vocab_size = len(open(vocab_path, 'r', encoding='utf-8').readlines())
cfg = convert_tf_config(json_cfg_path, vocab_size, model_type)
return cfg, vocab_path, spm_model_path | Convert the assets file including config, vocab and tokenizer model | convert_tf_assets | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_tf_hub_model.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_tf_hub_model.py | Apache-2.0 |
def get_name_map(tf_names, is_TF1=True):
"""
Get the converting mapping between TF names and mxnet names.
The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert,
but there is no guarantee that it can match to other tf models in case of
some special variable_scope (tensorflow) and prefix (mxnet).
Redefined mapping is encouraged to adapt the personalization model.
Parameters
----------
tf_names
the parameters names of tensorflow model
is_TF1
whether load from TF1 Hub Modules
Returns
-------
A dictionary with the following format:
{tf_names : mx_names} or {tf_names : th_names}
"""
convert_map = CONVERT_MAP_TF1 if is_TF1 else CONVERT_MAP_TF2
if args.torch and is_TF1:
CONVERT_MAP_TF1.insert(10, ('embeddings/position_embeddings', 'token_pos_embed.weight'))
CONVERT_MAP_TF1.insert(-1, ('beta', 'bias'))
CONVERT_MAP_TF1.insert(-1, ('gamma', 'weight'))
elif is_TF1:
CONVERT_MAP_TF1.insert(10, ('embeddings/position_embeddings', 'token_pos_embed._embed.weight'))
elif args.torch:
CONVERT_MAP_TF2.insert(10, ('position_embedding/embeddings', 'token_pos_embed.weight'))
else:
CONVERT_MAP_TF2.insert(10, ('position_embedding/embeddings', 'token_pos_embed._embed.weight'))
name_map = {}
for source_name in tf_names:
target_name = source_name
# skip the qkv weights
if 'self/' in source_name:
name_map[source_name] = None
continue
if re.match(r'^transformer\/layer_[\d]+\/self_attention\/(key|value|query)\/(kernel|bias)$',
source_name) is not None:
name_map[source_name] = None
continue
for old, new in convert_map:
target_name = target_name.replace(old, new)
name_map[source_name] = target_name
return name_map |
Get the converting mapping between TF names and mxnet names.
The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert,
but there is no guarantee that it can match to other tf models in case of
some special variable_scope (tensorflow) and prefix (mxnet).
Redefined mapping is encouraged to adapt the personalization model.
Parameters
----------
tf_names
the parameters names of tensorflow model
is_TF1
whether load from TF1 Hub Modules
Returns
-------
A dictionary with the following format:
{tf_names : mx_names} or {tf_names : th_names}
| get_name_map | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_tf_hub_model.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_tf_hub_model.py | Apache-2.0 |
def convert_qkv_weights(tf_prefix, prefix, is_mlm):
"""
To convert the qkv weights with different prefix.
In tensorflow framework, the prefix of query/key/value for the albert model is
'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel',
and that for the bert model is 'bert/encoder/layer_{}/attention/self/key/bias'.
In gluonnlp framework, the prefix is slightly different as
'encoder.all_encoder_groups.0.attn_qkv.weight' for albert model and
'encoder.all_layers.{}.attn_qkv.weight' for bert model, as the
curly braces {} can be filled with the layer number.
"""
query_weight = tf_params['{}/query/kernel'.format(tf_prefix)]
key_weight = tf_params['{}/key/kernel'.format(tf_prefix)]
value_weight = tf_params['{}/value/kernel'.format(tf_prefix)]
query_bias = tf_params['{}/query/bias'.format(tf_prefix)]
key_bias = tf_params['{}/key/bias'.format(tf_prefix)]
value_bias = tf_params['{}/value/bias'.format(tf_prefix)]
if 'self_attention' in tf_prefix:
query_weight = query_weight.reshape((cfg.MODEL.units, -1))
key_weight = key_weight.reshape((cfg.MODEL.units, -1))
value_weight = value_weight.reshape((cfg.MODEL.units, -1))
query_bias = query_bias.reshape((-1, ))
key_bias = key_bias.reshape((-1, ))
value_bias = value_bias.reshape((-1, ))
# Merge query_weight, key_weight, value_weight to params
weight_name = 'encoder.{}.attn_qkv.weight'.format(prefix)
bias_name = 'encoder.{}.attn_qkv.bias'.format(prefix)
if is_mlm:
weight_name = 'backbone_model.' + weight_name
bias_name = 'backbone_model.' + bias_name
if args.torch:
params[weight_name].data = th.from_numpy(np.concatenate(
[query_weight, key_weight, value_weight], axis=1).T).contiguous()
else:
params[weight_name].set_data(
np.concatenate([query_weight, key_weight, value_weight], axis=1).T)
all_keys.remove(weight_name)
# Merge query_bias, key_bias, value_bias to params
if args.torch:
params[bias_name].data = th.from_numpy(
np.concatenate([query_bias, key_bias, value_bias], axis=0)).contiguous()
else:
params[bias_name].set_data(
np.concatenate([query_bias, key_bias, value_bias], axis=0))
all_keys.remove(bias_name) |
To convert the qkv weights with different prefix.
In tensorflow framework, the prefix of query/key/value for the albert model is
'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel',
and that for the bert model is 'bert/encoder/layer_{}/attention/self/key/bias'.
In gluonnlp framework, the prefix is slightly different as
'encoder.all_encoder_groups.0.attn_qkv.weight' for albert model and
'encoder.all_layers.{}.attn_qkv.weight' for bert model, as the
curly braces {} can be filled with the layer number.
| convert_qkv_weights | python | dmlc/gluon-nlp | scripts/conversion_toolkits/convert_tf_hub_model.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_tf_hub_model.py | Apache-2.0 |
def get_hash_and_size(obj, retries=5, algorithm='sha1', cache=None, save_path=None,
verify_ssl=True):
"""Fetch sha1 hash of all urls in the input obj"""
def _get_hash_and_size(obj, retries, algorithm, cache=None, save_path=None):
if isinstance(obj, str):
if obj.startswith('http://') or obj.startswith('https://'):
url = obj
hex_hash = None
file_size = None
if cache is not None and obj in cache:
return obj, cache[obj]
while retries + 1 > 0:
# Disable pyling too broad Exception
# pylint: disable=W0703
try:
if algorithm == 'sha1':
m = hashlib.sha1()
elif algorithm == 'sha256':
m = hashlib.sha256()
elif algorithm == 'md5':
m = hashlib.md5()
else:
raise NotImplementedError
print('Calculating hash of the file downloaded from {}...'.format(url))
start = time.time()
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError('Failed downloading url {}'.format(url))
f_size = 0
for chunk in r.iter_content(chunk_size=10240):
if chunk: # filter out keep-alive new chunks
m.update(chunk)
f_size += len(chunk)
hex_hash = m.hexdigest()
file_size = f_size
end = time.time()
print('{}={}, size={}, Time spent={}'.format(algorithm, hex_hash, file_size,
end - start))
if cache is None:
cache = OrderedDict()
cache[url] = (hex_hash, file_size)
if save_path is not None:
with open(save_path, 'a', encoding='utf-8') as of:
of.write('{} {} {}\n'.format(url, hex_hash, file_size))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
print('download failed due to {}, retrying, {} attempt{} left'
.format(repr(e), retries, 's' if retries > 1 else ''))
return obj, (hex_hash, file_size)
else:
return obj
elif isinstance(obj, tuple):
return tuple((_get_hash_and_size(ele, retries, algorithm, cache, save_path)
for ele in obj))
elif isinstance(obj, list):
return [_get_hash_and_size(ele, retries, algorithm, cache, save_path) for ele in obj]
elif isinstance(obj, dict):
return {k: _get_hash_and_size(v, retries, algorithm, cache, save_path)
for k, v in obj.items()}
else:
return obj
if cache is None:
cache = OrderedDict()
else:
cache = copy.deepcopy(cache)
if save_path is not None and os.path.exists(save_path):
with open(save_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
url, hex_hash, file_size = line.split()
cache[url] = (hex_hash, file_size)
_get_hash_and_size(obj, retries, algorithm, cache, save_path)
return obj, cache | Fetch sha1 hash of all urls in the input obj | get_hash_and_size | python | dmlc/gluon-nlp | scripts/datasets/update_download_stats.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/update_download_stats.py | Apache-2.0 |
def build_vocab(corpus_path_l: List, eos_token: Optional[str] = '<eos>') -> Vocab:
"""Build the default vocabulary used in datasets like
- wikitext2
- wikitext103
- text8
- enwiki8
    The strategy is to split on whitespace and store all tokens that appear.
    Also, the tokens will be sorted in descending order of their frequency.
Parameters
----------
corpus_path_l
The corpus path
eos_token
If it is not None, the eos_token will be added to the vocabulary.
Returns
-------
vocab
The vocabulary
"""
counter = Counter()
ntokens = 0
print('Build the default vocabulary used in benchmarks:')
for corpus_path in corpus_path_l:
with open(corpus_path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
line = line.strip()
tokens = line.split()
counter.update(tokens)
ntokens += len(tokens)
if eos_token is not None and eos_token in counter:
raise ValueError('eos_token is set to be "{}", which appears in the text. '
'Is it intended? You may choose another token as the eos_token.'
.format(eos_token))
vocab = Vocab(counter, unk_token=None, eos_token=eos_token)
print('Processed {} tokens, vocab={}'.format(ntokens, vocab))
return vocab | Build the default vocabulary used in datasets like
- wikitext2
- wikitext103
- text8
- enwiki8
The strategy is to split on whitespace and store all tokens that appear.
Also, the tokens will be sorted in descending order of their frequency.
Parameters
----------
corpus_path_l
The corpus path
eos_token
If it is not None, the eos_token will be added to the vocabulary.
Returns
-------
vocab
The vocabulary
| build_vocab | python | dmlc/gluon-nlp | scripts/datasets/language_modeling/prepare_lm.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/language_modeling/prepare_lm.py | Apache-2.0 |
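A small usage sketch for build_vocab: write a toy whitespace-tokenized corpus to a temporary file and build the vocabulary from it. It assumes the prepare_lm.py script above is importable and that gluonnlp (which provides Vocab) is installed.

# Illustrative only: build a vocabulary from a tiny two-line corpus.
import os
import tempfile
from prepare_lm import build_vocab  # assumes the script shown above is on the path

with tempfile.TemporaryDirectory() as tmp_dir:
    corpus_path = os.path.join(tmp_dir, 'toy.txt')
    with open(corpus_path, 'w', encoding='utf-8') as f:
        f.write('the cat sat on the mat\nthe dog barked\n')
    vocab = build_vocab([corpus_path], eos_token='<eos>')
    print(len(vocab))  # vocabulary size, including the appended eos token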
def parse_sgm(path_or_buffer: Union[str, IO[AnyStr]],
out_path_or_buffer: Optional[Union[str, IO[AnyStr]]] = None,
return_sentences=False,
clean_space=True) -> Optional[List[str]]:
"""Returns sentences from a single SGML file. This is compatible to the behavior of
`input-from-sgm.perl` in
https://github.com/moses-smt/mosesdecoder/blob/a89691fee395bb7eb6dfd51e368825f0578f437d/scripts/ems/support/input-from-sgm.perl
Parameters
----------
path_or_buffer
The source path to parse the file
out_path_or_buffer
The output path
return_sentences
Whether to return the parsed sentences
clean_space
Whether to clean the spaces in the sentence with the similar strategy in
input-from-sgm.perl.
Returns
-------
sentences
The list contains the parsed sentences in the input file.
If the return_sentences is False, return None.
"""
if out_path_or_buffer is None:
assert return_sentences, 'Must return sentences if the output path is not specified!'
if return_sentences:
sentences = []
else:
sentences = None
f_buffer = _get_buffer(path_or_buffer, 'r')
of_buffer = _get_buffer(out_path_or_buffer, 'w')
seg_re = re.compile(r'<seg.*?>(.*)</seg>.*?')
for line in f_buffer:
if isinstance(line, bytes):
line = line.decode('utf-8')
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1,\
'File content is not supported, unmatched line: {}'.format(line)
line = seg_match.groups()[0]
if clean_space:
line = _clean_space(line)
if of_buffer is not None:
of_buffer.write(line + '\n')
if sentences is not None:
sentences.append(line)
if of_buffer is not None:
of_buffer.close()
return sentences | Returns sentences from a single SGML file. This is compatible to the behavior of
`input-from-sgm.perl` in
https://github.com/moses-smt/mosesdecoder/blob/a89691fee395bb7eb6dfd51e368825f0578f437d/scripts/ems/support/input-from-sgm.perl
Parameters
----------
path_or_buffer
The source path to parse the file
out_path_or_buffer
The output path
return_sentences
Whether to return the parsed sentences
clean_space
Whether to clean the spaces in the sentence with the similar strategy in
input-from-sgm.perl.
Returns
-------
sentences
The list contains the parsed sentences in the input file.
If the return_sentences is False, return None.
| parse_sgm | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
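Because path_or_buffer accepts an already-open buffer, a quick hypothetical check of parse_sgm can run against an in-memory SGML fragment, assuming _get_buffer passes open buffers through unchanged (as the signature suggests) and that prepare_wmt.py is importable.

# Illustrative only: parse two <seg> entries from an in-memory SGML fragment.
import io
from prepare_wmt import parse_sgm  # assumes the script shown above is on the path

sgm = io.StringIO(
    '<srcset setid="toy" srclang="en">\n'
    '<doc docid="d1">\n'
    '<seg id="1">Hello , world !</seg>\n'
    '<seg id="2">A second sentence .</seg>\n'
    '</doc>\n'
    '</srcset>\n'
)
sentences = parse_sgm(sgm, return_sentences=True)
print(sentences)  # the two segment strings, with spaces normalized by _clean_space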
def concatenate_files(fname_l: List[str],
out_fname: Optional[str] = None,
chunk_size: int = 128 * 1024) -> str:
"""Concatenate multiple files into a single file. This is used to recover a large file that has
been split into multiple parts. E.g.,
UNv1.0.en-zh.tar.gz.00, UNv1.0.en-zh.tar.gz.01 --> UNv1.0.en-zh.tar.gz
Parameters
----------
fname_l
out_fname
chunk_size
Returns
-------
ret
"""
assert len(fname_l) > 1
ext_l = []
base_prefix, ext = os.path.splitext(fname_l[0])
ext_l.append(ext)
for i in range(1, len(fname_l)):
prefix, ext = os.path.splitext(fname_l[i])
ext_l.append(ext)
if prefix != base_prefix:
raise ValueError('Cannot concatenate the input files! The prefix does not match! '
'Find prefix={}, Expected prefix={}'.format(prefix, base_prefix))
fname_ext_l = sorted(zip(fname_l, ext_l), key=lambda ele: ele[1])
if out_fname is None:
out_fname = base_prefix
with open(out_fname, 'wb') as of:
for fname, _ in fname_ext_l:
with open(fname, 'rb') as infile:
for block in iter(functools.partial(infile.read, chunk_size), b''):
of.write(block)
return out_fname | Concatenate multiple files into a single file. This is used to recover a large file that has
been split into multiple parts. E.g.,
UNv1.0.en-zh.tar.gz.00, UNv1.0.en-zh.tar.gz.01 --> UNv1.0.en-zh.tar.gz
Parameters
----------
fname_l
out_fname
chunk_size
Returns
-------
ret
| concatenate_files | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
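A self-contained sketch of the part-file recovery performed by concatenate_files: split a dummy payload into two parts named after the UNv1.0 example in the docstring, then merge them back. The import of the script above is assumed.

# Illustrative only: split a dummy payload into two part files, then recover it.
import os
import tempfile
from prepare_wmt import concatenate_files  # assumes the script shown above is on the path

with tempfile.TemporaryDirectory() as tmp_dir:
    payload = os.urandom(300 * 1024)  # ~300 KB of dummy bytes
    part0 = os.path.join(tmp_dir, 'UNv1.0.en-zh.tar.gz.00')
    part1 = os.path.join(tmp_dir, 'UNv1.0.en-zh.tar.gz.01')
    with open(part0, 'wb') as f:
        f.write(payload[:200 * 1024])
    with open(part1, 'wb') as f:
        f.write(payload[200 * 1024:])
    merged = concatenate_files([part1, part0])  # order does not matter, parts are sorted by extension
    assert merged == os.path.join(tmp_dir, 'UNv1.0.en-zh.tar.gz')
    with open(merged, 'rb') as f:
        assert f.read() == payload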
def fetch_mono_dataset(selection: Union[str, List[str], List[List[str]]],
lang: str = 'de',
path: Optional[str] = _BASE_DATASET_PATH,
overwrite: bool = False) -> List[str]:
"""Fetch the monolingual dataset provided by WMT
Parameters
----------
selection
The selected datasets
lang
Language of the monolingual corpus
path
overwrite
Whether to overwrite the downloaded dataset
Returns
-------
src_corpus_paths
"""
base_url_info = _MONOLINGUAL_URLS
if isinstance(selection, str):
selection = [selection]
elif isinstance(selection, list):
if isinstance(selection[0], list):
corpus_paths = []
for ele in selection:
ele_corpus_paths =\
fetch_mono_dataset(ele, lang, path, overwrite)
corpus_paths.extend(ele_corpus_paths)
return corpus_paths
else:
raise NotImplementedError
for sel in selection:
base_url_info = base_url_info[sel]
# Check the pair is valid
available_lang = set(base_url_info.keys())
if 'url' in available_lang:
available_lang.remove('url')
if lang in available_lang:
matched_lang = '{}'.format(lang)
else:
raise ValueError('Unsupported lang, lang={}. All supported: {}'
.format(lang, available_lang))
save_dir_path = os.path.join(path, *(selection + [matched_lang]))
if not os.path.exists(save_dir_path):
os.makedirs(save_dir_path)
out_path = os.path.join(save_dir_path, lang + '.txt')
# Check for whether we can load the cached version
if os.path.exists(out_path) and not overwrite:
print('Found data in {}, skip:\n'
'\tSource: {}\n'.format(selection + [lang], out_path))
return [out_path]
lang_data_info = base_url_info[matched_lang]
if 'url' in lang_data_info:
url_l = lang_data_info['url']
else:
url_l = base_url_info['url']
# Download the data + Concatenate the file-parts (if necessary)
download_fname_l = []
if isinstance(url_l, str):
url_l = [url_l]
for url in url_l:
original_filename = url[url.rfind("/") + 1:]
sha1_hash = _URL_FILE_STATS[url]
if 'url' in lang_data_info:
save_path_l = [path] + selection + [matched_lang, original_filename]
else:
save_path_l = [path] + selection + [original_filename]
download_fname = _download_with_mirror(
url,
path=os.path.join(*save_path_l),
sha1_hash=sha1_hash
)
download_fname_l.append(download_fname)
if len(download_fname_l) > 1:
data_path = concatenate_files(download_fname_l)
else:
data_path = download_fname_l[0]
src_name = lang_data_info[lang]
print('Prepare data for {}\n'
'\tCompressed File: {}\n'
'\t{}: {}\n'.format(selection + [lang],
data_path,
lang, out_path))
extract_mono_corpus(data_path,
lang=lang,
name=src_name,
out_src_path=out_path)
return [out_path] | Fetch the monolingual dataset provided by WMT
Parameters
----------
selection
The selected datasets
lang
Language of the monolingual corpus
path
overwrite
Whether to overwrite the downloaded dataset
Returns
-------
src_corpus_paths
| fetch_mono_dataset | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
def download_mono_newscrawl(lang: str = 'de', path: str = _BASE_DATASET_PATH)\
-> List[str]:
"""Download the train dataset used for WMT2014
Parameters
----------
lang
path
Returns
-------
train_src_paths
"""
if lang == 'de':
train_src_paths =\
fetch_mono_dataset([['newscrawl', '2017'],
['newscrawl', '2018']],
lang=lang,
path=path)
else:
raise NotImplementedError
return train_src_paths | Download the train dataset used for WMT2014
Parameters
----------
lang
path
Returns
-------
train_src_paths
| download_mono_newscrawl | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
def download_wmt14_train(lang_pair: str = 'en-de', path: str = _BASE_DATASET_PATH)\
-> Tuple[List[str], List[str]]:
"""Download the train dataset used for WMT2014
Parameters
----------
lang_pair
path
Returns
-------
train_src_paths
train_tgt_paths
"""
if lang_pair == 'en-de' or lang_pair == 'de-en':
train_src_paths, train_tgt_paths =\
fetch_wmt_parallel_dataset([['europarl', 'v7'],
['commoncrawl', 'wmt13'],
['newscommentary', 'v9']], lang_pair, path=path)
else:
raise NotImplementedError
return train_src_paths, train_tgt_paths | Download the train dataset used for WMT2014
Parameters
----------
lang_pair
path
Returns
-------
train_src_paths
train_tgt_paths
| download_wmt14_train | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
def download_wmt16_train(lang_pair: str = 'en-de', path: str = _BASE_DATASET_PATH)\
-> Tuple[List[str], List[str]]:
"""Download the train dataset used for WMT2016
Parameters
----------
lang_pair
path
Returns
-------
train_src_paths
train_tgt_paths
"""
if lang_pair == 'en-de' or lang_pair == 'de-en':
train_src_paths, train_tgt_paths = \
fetch_wmt_parallel_dataset([['europarl', 'v7'],
['commoncrawl', 'wmt13'],
['newscommentary', 'v11']], lang_pair, path=path)
else:
raise NotImplementedError
return train_src_paths, train_tgt_paths | Download the train dataset used for WMT2016
Parameters
----------
lang_pair
path
Returns
-------
train_src_paths
train_tgt_paths
| download_wmt16_train | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
def download_wmt17_train(lang_pair: str = 'en-de', path: str = _BASE_DATASET_PATH)\
-> Tuple[List[str], List[str]]:
"""Download the train dataset used for WMT2017
Parameters
----------
lang_pair
path
Returns
-------
train_src_paths
train_tgt_paths
"""
if lang_pair == 'en-de' or lang_pair == 'de-en':
train_src_paths, train_tgt_paths = \
fetch_wmt_parallel_dataset([['europarl', 'v7'],
['commoncrawl', 'wmt13'],
['newscommentary', 'v12'],
['rapid', '2016']], lang_pair, path=path)
elif lang_pair == 'zh-en' or lang_pair == 'en-zh':
train_src_paths, train_tgt_paths = \
fetch_wmt_parallel_dataset([['newscommentary', 'v13'],
['uncorpus', 'v1'],
['cwmt']], lang_pair, path=path)
else:
raise NotImplementedError
return train_src_paths, train_tgt_paths | Download the train dataset used for WMT2017
Parameters
----------
lang_pair
path
Returns
-------
train_src_paths
train_tgt_paths
| download_wmt17_train | python | dmlc/gluon-nlp | scripts/datasets/machine_translation/prepare_wmt.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py | Apache-2.0 |
def extract_files(full_name, output_dir, shuffle=False):
"""
Extract the file and concatenate all the TXT files it archives
"""
if not full_name.endswith(".xz"):
return
file_prefix = re.split(r'\.|/', full_name)[-2]
file_prefix = file_prefix.replace('urlsf_subset', 'openwebtext-prepared-')
with open("{}.txt".format(os.path.join(output_dir, file_prefix)), "w") as fp:
with tarfile.open(full_name) as t:
txt_names = t.getnames()
if shuffle:
random.shuffle(txt_names)
for txt_name in txt_names:
f = t.extractfile(txt_name)
for line in f.readlines():
# skip empty line
line = line.strip()
if line:
fp.write(line.decode() + '\n')
# Two extra line break to mark the document separation
fp.write('\n') |
Extract the file and concatenate all the TXT files it archives
| extract_files | python | dmlc/gluon-nlp | scripts/datasets/pretrain_corpus/prepare_openwebtext.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/pretrain_corpus/prepare_openwebtext.py | Apache-2.0 |
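A hypothetical end-to-end check of extract_files: fabricate one tiny .xz tar shard whose name follows the urlsf_subset pattern that the prefix rewriting expects, then extract it. The import of the script above is assumed.

# Illustrative only: build a tiny OpenWebText-style .xz shard and extract it.
import os
import tarfile
import tempfile
from prepare_openwebtext import extract_files  # assumes the script shown above is on the path

with tempfile.TemporaryDirectory() as tmp_dir:
    doc = os.path.join(tmp_dir, '0001.txt')
    with open(doc, 'w', encoding='utf-8') as f:
        f.write('First line of a document.\nSecond line.\n')
    shard = os.path.join(tmp_dir, 'urlsf_subset00-1_data.xz')
    with tarfile.open(shard, 'w:xz') as t:
        t.add(doc, arcname='0001.txt')
    out_dir = os.path.join(tmp_dir, 'prepared')
    os.makedirs(out_dir)
    extract_files(shard, out_dir)
    print(os.listdir(out_dir))  # e.g. ['openwebtext-prepared-00-1_data.txt']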
def get_formatting_list(wiki_path, recursive=False):
"""
get formatting list of file names from extracted content
"""
filenames = []
for dirname in glob.glob(os.path.join(wiki_path, '*'), recursive=False):
for filename in glob.glob(os.path.join(dirname, 'wiki_*'), recursive=recursive):
filenames.append(filename)
return filenames |
get formatting list of file names from extracted content
| get_formatting_list | python | dmlc/gluon-nlp | scripts/datasets/pretrain_corpus/prepare_wikipedia.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/pretrain_corpus/prepare_wikipedia.py | Apache-2.0 |
def download_wikicorpus(lang, date, output):
"""
lang: the language code such as en, zh
date: string, the date of the Wikipedia with format of YYYYMMDD, or 'latest'.
"""
if not os.path.exists(output):
os.makedirs(output)
if lang not in __LANGUAGES_BANK:
raise ValueError('Unsupported language code')
language = lang.replace('-', '_')
output_file = os.path.join(output, 'download', language, date,
'wikicorpus.xml.bz2')
download(get_url(language, date), output_file)
return output_file |
lang: the language code such as en, zh
date: string, the date of the Wikipedia with format of YYYYMMDD, or 'latest'.
| download_wikicorpus | python | dmlc/gluon-nlp | scripts/datasets/pretrain_corpus/prepare_wikipedia.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/pretrain_corpus/prepare_wikipedia.py | Apache-2.0 |
def calculate_self_bleu4(sample_strs, num_bleu_samples):
"""Self-BLEU is calculated by computing the BLEU score of each generated document
using all other generations in the evaluation set as references.
"""
pool = Pool(processes=os.cpu_count())
return sum(tqdm(
pool.imap_unordered(
partial(bleu, sample_strs),
random.sample(range(len(sample_strs)), num_bleu_samples)),
total=num_bleu_samples)) / num_bleu_samples | Self-BLEU is calculated by computing the BLEU score of each generated document
using all other generations in the evaluation set as references.
| calculate_self_bleu4 | python | dmlc/gluon-nlp | scripts/generation/calculate_metrics.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/generation/calculate_metrics.py | Apache-2.0 |
def calculate_zipf_coefficient(sample_ids, tokenizer):
"""The Zipfian coefficient (R-squared) can be used to compare the distribution in a given
text to a theoretically perfect exponential curve.
"""
cnt = Counter()
for sample_id in sample_ids:
cnt.update(sample_id)
xs = np.arange(1, min(len(cnt), len(tokenizer.vocab)) + 1)
ys = np.array(sorted(cnt.values(), key=operator.neg)[:len(tokenizer.vocab)])
_, _, r, _, _ = stats.linregress(np.log(xs), np.log(ys))
return r ** 2 | The Zipfian coefficient (R-squared) can be used to compare the distribution in a given
text to a theoretically perfect exponential curve.
| calculate_zipf_coefficient | python | dmlc/gluon-nlp | scripts/generation/calculate_metrics.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/generation/calculate_metrics.py | Apache-2.0 |
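A rough sketch of calling calculate_zipf_coefficient. The real call site passes a GluonNLP tokenizer, but since only len(tokenizer.vocab) is used, a stand-in object with a vocab-sized list is enough for illustration; the import of calculate_metrics.py is assumed.

# Illustrative only: Zipf coefficient of synthetic samples drawn from a rank-1/r distribution.
import numpy as np
from types import SimpleNamespace
from calculate_metrics import calculate_zipf_coefficient  # assumes the script shown above is on the path

vocab_size = 1000
fake_tokenizer = SimpleNamespace(vocab=list(range(vocab_size)))  # only len(vocab) is consulted
rng = np.random.default_rng(0)
probs = 1.0 / np.arange(1, vocab_size + 1)  # low token ids are far more frequent than high ones
probs /= probs.sum()
sample_ids = [rng.choice(vocab_size, size=200, p=probs).tolist() for _ in range(20)]
print(calculate_zipf_coefficient(sample_ids, fake_tokenizer))  # typically close to 1 for Zipf-like data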
def calculate_repetition(sample_ids):
"""The repetition rate in generated samples.
"""
max_n = 90
n_repeated_examples = 0
for sample_id in sample_ids:
rev = list(reversed(sample_id))
last_n_repeats = [0 for _ in range(max_n)]
for n in range(1, max_n + 1):
n_repeat = 1
while len(rev[n*n_repeat:n*(n_repeat+1)]) == n and \
rev[n*n_repeat:n*(n_repeat+1)] == rev[:n]:
n_repeat += 1
last_n_repeats[n-1] = n_repeat
max_repeated_n = max(range(max_n), key=lambda x: last_n_repeats[x])
if last_n_repeats[max_repeated_n] > 1 and \
(max_repeated_n+1 >= 3 or last_n_repeats[max_repeated_n] > 50):
n_repeated_examples += 1
return n_repeated_examples / len(sample_ids) | The repetition rate in generated samples.
| calculate_repetition | python | dmlc/gluon-nlp | scripts/generation/calculate_metrics.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/generation/calculate_metrics.py | Apache-2.0 |
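A quick illustrative call to calculate_repetition with two synthetic generations, one of which ends in an obvious loop; the import of calculate_metrics.py is assumed.

# Illustrative only: one clean sample and one sample that repeats a 4-gram many times.
from calculate_metrics import calculate_repetition  # assumes the script shown above is on the path

clean = list(range(200))  # strictly increasing ids, no trailing repetition
looping = list(range(50)) + [7, 8, 9, 10] * 60  # ends by repeating a 4-token phrase 60 times
print(calculate_repetition([clean, looping]))  # 0.5: one of the two samples is flagged as repetitive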
def get_base_tokenizer(method, lang):
"""The base tokenization method
Parameters
----------
method
lang
Returns
-------
"""
if method == 'moses':
return tokenizers.create('moses', lang)
elif method == 'whitespace':
return tokenizers.create('whitespace')
elif method == 'no':
return None
else:
raise NotImplementedError | The base tokenization method
Parameters
----------
method
lang
Returns
-------
| get_base_tokenizer | python | dmlc/gluon-nlp | scripts/machine_translation/evaluate_transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/machine_translation/evaluate_transformer.py | Apache-2.0 |
def validation(model, data_loader, inference_model, sequence_sampler,
tgt_tokenizer, ctx_l):
"""Validate the model on the dataset
Parameters
----------
model : TransformerModel
The transformer model
data_loader : DataLoader
DataLoader
inference_model
The model for inference
sequence_sampler:
The sequence sampler for doing beam search
tgt_tokenizer
The target tokenizer
ctx_l : list
List of mx.ctx.Context
Returns
-------
avg_nll_loss : float
The average negative log-likelihood loss
ntokens : int
The total number of tokens
pred_sentences
The predicted sentences. Each element will be a numpy array.
pred_lengths
The length of the predicted sentences.
sentence_ids
IDs of the predicted sentences.
"""
avg_nll_loss = mx.np.array(0, dtype=np.float32, ctx=mx.cpu())
ntokens = 0
pred_sentences = []
sentence_ids = []
pred_lengths = []
for sample_data_l in grouper(data_loader, len(ctx_l)):
loss_l = []
ntokens += sum([ele[3].sum().asnumpy() - ele[0].shape[0] for ele in sample_data_l
if ele is not None])
for sample_data, ctx in zip(sample_data_l, ctx_l):
if sample_data is None:
continue
src_token_ids, tgt_token_ids, src_valid_length, tgt_valid_length, sample_ids = sample_data
src_token_ids = src_token_ids.as_in_ctx(ctx)
tgt_token_ids = tgt_token_ids.as_in_ctx(ctx)
src_valid_length = src_valid_length.as_in_ctx(ctx)
tgt_valid_length = tgt_valid_length.as_in_ctx(ctx)
if model.layout == 'NT':
tgt_pred = model(src_token_ids, src_valid_length, tgt_token_ids[:, :-1],
tgt_valid_length - 1)
tgt_labels = tgt_token_ids[:, 1:]
tgt_pred_logits = mx.npx.log_softmax(tgt_pred, axis=-1)
nll_loss = - mx.npx.pick(tgt_pred_logits, tgt_labels, axis=-1)
loss = mx.npx.sequence_mask(nll_loss,
sequence_length=tgt_valid_length - 1,
use_sequence_length=True,
axis=1)
loss_l.append(loss.sum())
elif model.layout == 'TN':
tgt_pred = model(src_token_ids.T, src_valid_length, tgt_token_ids.T[:-1, :],
tgt_valid_length - 1)
tgt_labels = tgt_token_ids.T[1:, :]
tgt_pred_logits = mx.npx.log_softmax(tgt_pred, axis=-1)
nll_loss = - mx.npx.pick(tgt_pred_logits, tgt_labels, axis=-1)
loss = mx.npx.sequence_mask(nll_loss,
sequence_length=tgt_valid_length - 1,
use_sequence_length=True,
axis=0)
loss_l.append(loss.sum())
init_input = mx.np.array(
[tgt_tokenizer.vocab.bos_id for _ in range(src_token_ids.shape[0])],
ctx=ctx)
# Perform beam search
if model.layout == 'NT':
states = inference_model.init_states(src_token_ids, src_valid_length)
elif model.layout == 'TN':
states = inference_model.init_states(src_token_ids.T, src_valid_length)
samples, scores, sample_valid_length = sequence_sampler(init_input, states,
src_valid_length)
samples = samples.asnumpy()
sample_valid_length = sample_valid_length.asnumpy()
for j in range(samples.shape[0]):
valid_length = sample_valid_length[j, 0]
# Ignore the BOS + EOS tokens
pred_sentences.append(samples[j, 0, 1:(valid_length - 1)])
pred_lengths.append(valid_length - 2)
sentence_ids.append(sample_ids.asnumpy())
avg_nll_loss += sum([loss.as_in_ctx(mx.cpu()) for loss in loss_l])
mx.npx.waitall()
avg_loss = avg_nll_loss.asnumpy() / ntokens
pred_lengths = np.array(pred_lengths)
sentence_ids = np.concatenate(sentence_ids, axis=0)
return avg_loss, ntokens, pred_sentences, pred_lengths, sentence_ids | Validate the model on the dataset
Parameters
----------
model : TransformerModel
The transformer model
data_loader : DataLoader
DataLoader
inference_model
The model for inference
sequence_sampler:
The sequence sampler for doing beam search
tgt_tokenizer
The target tokenizer
ctx_l : list
List of mx.ctx.Context
Returns
-------
avg_nll_loss : float
The average negative log-likelihood loss
ntokens : int
The total number of tokens
pred_sentences
The predicted sentences. Each element will be a numpy array.
pred_lengths
The length of the predicted sentences.
sentence_ids
IDs of the predicted sentences.
| validation | python | dmlc/gluon-nlp | scripts/machine_translation/train_transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/machine_translation/train_transformer.py | Apache-2.0 |
def tokenize_lines_to_ids(lines, tokenizer):
"""
Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup.
Parameters
----------
lines
Lines to be tokenized of the whole file
tokenizer
The trained tokenizer
Returns
-------
results
A list storing the valid tokenized lines
"""
results = []
# tag line delimiters or doc delimiters
for line in lines:
if not line:
break
line = line.strip()
# Single empty lines are used as line delimiters
# Double empty lines are used as document delimiters
if not line:
results.append([])
else:
token_ids = tokenizer.encode(line, int)
if token_ids:
results.append(token_ids)
return results |
Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup.
Parameters
----------
lines
Lines to be tokenized of the whole file
tokenizer
The trained tokenizer
Returns
-------
results
A list storing the valid tokenized lines
| tokenize_lines_to_ids | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
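The tokenizer argument only needs an encode(line, int) method that returns token ids, so a hypothetical whitespace stand-in is enough to show the contract; real runs pass a trained GluonNLP tokenizer. The import of pretraining_utils.py is assumed.

# Illustrative only: a whitespace stand-in tokenizer exposing encode(line, int).
from pretraining_utils import tokenize_lines_to_ids  # assumes the module shown above is on the path

class ToyTokenizer:
    def __init__(self, lines):
        words = sorted({w for line in lines for w in line.split()})
        self._word_to_id = {w: i for i, w in enumerate(words)}
    def encode(self, line, output_type=int):  # mirrors the call tokenizer.encode(line, int)
        return [self._word_to_id[w] for w in line.split()]

lines = ['hello world\n', '\n', 'hello again\n']  # the blank line acts as a line/document delimiter
results = tokenize_lines_to_ids(lines, ToyTokenizer(lines))
print(results)  # [[1, 2], [], [1, 0]] with this toy vocabulary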
def get_all_features(x):
"""
Get the feature data in numpy form.
Parameters
----------
x
List/tuple that contains:
- file_list
A list of text files
- output_file
The path to a output file that store the np_features
- tokenizer
The trained tokenizer
- max_seq_length
Maximum sequence length of the training features
- short_seq_prob
The probability of sampling sequences shorter than the max_seq_length.
Returns
-------
np_features
A tuple of (input_ids, segment_ids, valid_lengths),
in which each item is a list of numpy arrays.
"""
file_list, output_file, tokenizer, max_seq_length, short_seq_prob = x
all_features = []
for text_file in file_list:
features = process_a_text(text_file, tokenizer, max_seq_length, short_seq_prob)
all_features.extend(features)
np_features = convert_to_npz(all_features, output_file)
return np_features |
Get the feature data in numpy form.
Parameters
----------
x
List/tuple that contains:
- file_list
A list of text files
- output_file
The path to a output file that store the np_features
- tokenizer
The trained tokenizer
- max_seq_length
Maximum sequence length of the training features
- short_seq_prob
The probability of sampling sequences shorter than the max_seq_length.
Returns
-------
np_features
A tuple of (input_ids, segment_ids, valid_lengths),
in which each item is a list of numpy arrays.
| get_all_features | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
def process_a_text(text_file, tokenizer, max_seq_length, short_seq_prob=0.05):
"""
Create features from a single raw text file, in which one line is treated
as a sentence, and double blank lines represent document separators.
In this process, mxnet-unrelated features are generated, to easily convert
to features of a particular deep learning framework in subsequent steps
Parameters
----------
text_file
The path to a single text file
tokenizer
The trained tokenizer
max_seq_length
Maximum sequence length of the training features
short_seq_prob
The probability of sampling sequences shorter than the max_seq_length.
Returns
-------
features
A list of processed features from a single text file
"""
vocab = tokenizer.vocab
features = []
# TODO(zheyuye), support whole word masking
with io.open(text_file, 'r', encoding='utf-8') as reader:
lines = reader.readlines()
tokenized_lines = tokenize_lines_to_ids(lines, tokenizer)
target_seq_length = max_seq_length
current_sentences = []
current_length = 0
for tokenized_line in tokenized_lines:
current_sentences.append(tokenized_line)
current_length += len(tokenized_line)
# Create feature when meets the empty line or reaches the target length
if (not tokenized_line and current_length != 0) or (
current_length >= target_seq_length):
first_segment, second_segment = \
sentenceize(current_sentences, max_seq_length, target_seq_length)
input_id = [vocab.cls_id] + first_segment + [vocab.sep_id]
segment_id = [0] * len(input_id)
if second_segment:
input_id += second_segment + [vocab.sep_id]
segment_id += [1] * (len(second_segment) + 1)
# Padding with zeros for parallel storage
valid_length = len(input_id)
input_id += [0] * (max_seq_length - len(input_id))
segment_id += [0] * (max_seq_length - len(segment_id))
feature = PretrainFeature(input_id=input_id,
segment_id=segment_id,
valid_length=valid_length)
features.append(feature)
current_sentences = []
current_length = 0
# small chance for random-length instead of max_length-length feature
if random.random() < short_seq_prob:
target_seq_length = random.randint(5, max_seq_length)
else:
target_seq_length = max_seq_length
return features |
Create features from a single raw text file, in which one line is treated
as a sentence, and double blank lines represent document separators.
In this process, mxnet-unrelated features are generated, to easily convert
to features of a particular deep learning framework in subsequent steps
Parameters
----------
text_file
The path to a single text file
tokenizer
The trained tokenizer
max_seq_length
Maximum sequence length of the training features
short_seq_prob
The probability of sampling sequences shorter than the max_seq_length.
Returns
-------
features
A list of processed features from a single text file
| process_a_text | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
def convert_to_npz(all_features, output_file=None):
"""
Convert features to numpy array and store if output_file provided
Parameters
----------
all_features
A list of processed features.
output_file
The path to a output file that store the np_features.
Returns
-------
input_ids
A tuple of features
segment_ids
The segment ids
valid_lengths
The valid lengths
"""
input_ids = []
segment_ids = []
valid_lengths = []
for fea_index, feature in enumerate(all_features):
input_ids.append(np.ascontiguousarray(feature.input_id, dtype='int32'))
segment_ids.append(np.ascontiguousarray(feature.segment_id, dtype='int32'))
valid_lengths.append(feature.valid_length)
if fea_index < 1:
logging.debug('*** Example Feature ***')
logging.debug('Generated {}'.format(feature))
if output_file:
# The length numpy array are fixed to max_seq_length with zero padding
npz_outputs = collections.OrderedDict()
npz_outputs['input_ids'] = np.array(input_ids, dtype='int32')
npz_outputs['segment_ids'] = np.array(segment_ids, dtype='int32')
npz_outputs['valid_lengths'] = np.array(valid_lengths, dtype='int32')
np.savez_compressed(output_file, **npz_outputs)
logging.info("Saved {} features in {} ".format(len(all_features), output_file))
return input_ids, segment_ids, valid_lengths |
Convert features to numpy array and store if output_file provided
Parameters
----------
all_features
A list of processed features.
output_file
The path to a output file that store the np_features.
Returns
-------
input_ids
A tuple of features
segment_ids
The segment ids
valid_lengths
The valid lengths
| convert_to_npz | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
def sentenceize(current_sentences, max_seq_length, target_seq_length):
"""
Generate a pair of sentences based on a segmentation strategy
cloned from official electra model.
Parameters
----------
current_sentences
max_seq_length
Maximum sequence length of the training features
target_seq_length
Target sequence length of the training features
Returns
-------
first_segment
The first sentence of the pretraining sequence
second_segment
The second sentence of the pretraining sequence.
Could be None for diversity of training instances.
"""
# 10% chance to only produce one segment
if random.random() < 0.1:
first_segment_target_length = 100000
else:
# The reserved space for [CLS] and [SEP] tokens
first_segment_target_length = (target_seq_length - 3) // 2
first_segment = []
second_segment = []
for sentence in current_sentences:
if sentence:
# the sentence goes to the first segment if (1) the first segment is
# empty, (2) the sentence doesn't put the first segment over length or
# (3) 50% of the time when it does put the first segment over length
if (len(first_segment) == 0 or
len(first_segment) + len(sentence) < first_segment_target_length or
(len(second_segment) == 0 and
len(first_segment) < first_segment_target_length and
random.random() < 0.5)):
first_segment += sentence
else:
second_segment += sentence
# trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
first_segment = first_segment[:max_seq_length - 2]
second_segment = second_segment[:max(0, max_seq_length -
len(first_segment) - 3)]
return first_segment, second_segment |
Generate a pair of sentences based on a segmentation strategy
cloned from official electra model.
Parameters
----------
current_sentences
max_seq_length
Maximum sequence length of the training features
target_seq_length
Target sequence length of the training features
Returns
-------
first_segment
The first sentence of the pretraining sequence
second_segment
The second sentence of the pretraining sequence.
Could be None for diversity of training instances.
| sentenceize | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
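Because sentenceize operates on plain lists of token ids, a tiny hypothetical call shows the two-segment split directly; the import of pretraining_utils.py is assumed, and the seed only makes the internal 10%/50% random branches repeatable.

# Illustrative only: pack four short "sentences" of token ids into two segments.
import random
from pretraining_utils import sentenceize  # assumes the module shown above is on the path

random.seed(0)  # sentenceize draws random numbers internally; fix the seed for a repeatable sketch
current_sentences = [[11, 12, 13], [21, 22], [31, 32, 33, 34], [41]]
first_segment, second_segment = sentenceize(current_sentences, max_seq_length=16, target_seq_length=16)
print(first_segment, second_segment)  # together they hold at most 16 - 3 ids, leaving room for [CLS]/[SEP]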
def prepare_pretrain_npz_dataset(filename, allow_pickle=False):
"""Create dataset based on the numpy npz file"""
if isinstance(filename, (list, tuple)):
assert len(filename) == 1, \
'When .npy/.npz data file is loaded, len(filename) must be 1.' \
' Received len(filename)={}.'.format(len(filename))
filename = filename[0]
logging.debug('start to load file %s ...', filename)
return NumpyDataset(filename, allow_pickle=allow_pickle) | Create dataset based on the numpy npz file | prepare_pretrain_npz_dataset | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
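A small hypothetical round trip for prepare_pretrain_npz_dataset: write one compressed .npz shard in the same layout that convert_to_npz above produces, then load it back. It assumes gluonnlp is installed so that NumpyDataset is available, and that pretraining_utils.py is importable.

# Illustrative only: save a toy feature shard and reload it as a dataset.
import numpy as np
from pretraining_utils import prepare_pretrain_npz_dataset  # assumes the module shown above is on the path

np.savez_compressed(
    'toy-pretrain-record.npz',
    input_ids=np.zeros((4, 128), dtype='int32'),
    segment_ids=np.zeros((4, 128), dtype='int32'),
    valid_lengths=np.array([128, 96, 64, 32], dtype='int32'))
dataset = prepare_pretrain_npz_dataset(['toy-pretrain-record.npz'], allow_pickle=True)
print(len(dataset))  # expected: 4, one sample per row of the shard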
def prepare_pretrain_text_dataset(
filenames,
tokenizer,
max_seq_length,
short_seq_prob,
cached_file_path):
"""Create dataset based on the raw text files"""
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
if cached_file_path:
# generate a filename based on the input filename ensuring no crash.
# filename example: urlsf_subset00-130_data.txt
suffix = re.split(r'\.|/', filenames[0])[-2]
output_file = os.path.join(cached_file_path, "{}-pretrain-record.npz".format(suffix))
else:
output_file = None
np_features = get_all_features(
(filenames, output_file, tokenizer, max_seq_length, short_seq_prob))
return ArrayDataset(*np_features) | Create dataset based on the raw text files | prepare_pretrain_text_dataset | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
def prepare_pretrain_bucket_sampler(dataset, batch_size, shuffle=False, num_buckets=1):
"""Create data sampler based on the dataset"""
if isinstance(dataset, NumpyDataset):
lengths = dataset.get_field('valid_lengths')
else:
lengths = dataset.transform(lambda input_ids, segment_ids,
valid_lengths: valid_lengths, lazy=False)
sampler = FixedBucketSampler(lengths,
batch_size=batch_size,
num_buckets=num_buckets,
ratio=0,
shuffle=shuffle)
logging.debug('Sampler created for a new dataset:\n {}'.format(sampler))
return sampler | Create data sampler based on the dataset | prepare_pretrain_bucket_sampler | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
def get_pretrain_data_npz(data, batch_size, shuffle, num_buckets,
vocab, num_parts=1, part_idx=0,
num_dataset_workers=1, num_batch_workers=1,
circle_length=1, repeat=1,
dataset_cached=False,
num_max_dataset_cached=0):
"""Get a data iterator from pre-processed npz files.
Parameters
----------
data: str
The path to the dataset directory
batch_size : int
The batch size per GPU.
shuffle : bool
Whether to shuffle the data.
num_buckets : int
The number of buckets for the FixedBucketSampler for training.
vocab : Vocab
The vocabulary.
num_parts : int
The number of partitions for the dataset.
part_idx : int
The index of the partition to read.
num_dataset_workers : int
The number of worker processes for dataset construction.
num_batch_workers : int
        The number of worker processes for batch construction.
circle_length : int, default is 1
The number of files to be read for a single worker at the same time.
When circle_length is larger than 1, we merge circle_length files.
repeat : int, default is 1
The number of times that files are repeated.
dataset_cached : bool, default is False
Whether or not to cache last processed dataset.
Each processed dataset can only be cached for once.
When there is no new available processed dataset to be fetched,
we pop a cached processed dataset.
num_max_dataset_cached : int, default is 0
Maximum number of cached datasets. It is valid only if dataset_cached is True
"""
num_files = len(glob(data))
logging.info('%d files are found.', num_files)
assert num_files >= num_parts, \
'The number of text files must be no less than the number of ' \
'workers/partitions (%d). Only %d files at %s are found.' % (num_parts, num_files, data)
split_sampler = SplitSampler(num_files, num_parts=num_parts,
part_index=part_idx, repeat=repeat)
dataset_fn = prepare_pretrain_npz_dataset
sampler_fn = prepare_pretrain_bucket_sampler
dataset_params = {'allow_pickle': True}
sampler_params = {'batch_size': batch_size, 'shuffle': shuffle, 'num_buckets': num_buckets}
batchify_fn = bf.Tuple(
bf.Pad(val=vocab.pad_id), # input_ids
bf.Pad(val=0), # segment_ids
bf.Stack(), # valid_lengths
)
dataloader = DatasetLoader(data,
file_sampler=split_sampler,
dataset_fn=dataset_fn,
batch_sampler_fn=sampler_fn,
dataset_params=dataset_params,
batch_sampler_params=sampler_params,
batchify_fn=batchify_fn,
num_dataset_workers=num_dataset_workers,
num_batch_workers=num_batch_workers,
pin_memory=False,
circle_length=circle_length)
return dataloader | Get a data iterator from pre-processed npz files.
Parameters
----------
data: str
The path to the dataset directory
batch_size : int
The batch size per GPU.
shuffle : bool
Whether to shuffle the data.
num_buckets : int
The number of buckets for the FixedBucketSampler for training.
vocab : Vocab
The vocabulary.
num_parts : int
The number of partitions for the dataset.
part_idx : int
The index of the partition to read.
num_dataset_workers : int
The number of worker processes for dataset construction.
num_batch_workers : int
    The number of worker processes for batch construction.
circle_length : int, default is 1
The number of files to be read for a single worker at the same time.
When circle_length is larger than 1, we merge circle_length files.
repeat : int, default is 1
The number of times that files are repeated.
dataset_cached : bool, default is False
Whether or not to cache last processed dataset.
Each processed dataset can only be cached for once.
When there is no new available processed dataset to be fetched,
we pop a cached processed dataset.
num_max_dataset_cached : int, default is 0
Maximum number of cached datasets. It is valid only if dataset_cached is True
| get_pretrain_data_npz | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
def dynamic_masking(self, input_ids, valid_lengths):
# TODO(zheyuye), two additional flag `disallow_from_mask` and `already_masked`
# that control the masking status for each positions in the sequence.
"""
Generate masking positions on-the-fly instead of during preprocessing
Parameters
----------
input_ids
The batchified input_ids with shape (batch_size, max_seq_length)
valid_lengths
The batchified valid_lengths with shape (batch_size, )
Returns
------
masked_input_ids
The masked input sequence with 15% tokens are masked with [MASK]
shape (batch_size, max_seq_length)
length_masks
The masking matrix for the whole sequence that indicates the positions
are greater than valid_length.
shape (batch_size, max_seq_length)
unmasked_tokens
The original tokens that appear in the unmasked input sequence
shape (batch_size, num_masked_positions)
masked_positions
The masking positions in mx.np.ndarray with shape (batch_size, num_masked_positions)
shape (batch_size, num_masked_positions)
masked_lm_weights
The weight matrix containing 0 or 1 to mark the actual effect of masked positions
shape (batch_size, num_masked_positions)
"""
N = self._max_num_masked_position
# Only valid token without special token are allowed to mask
valid_candidates = np.ones_like(input_ids, dtype=np.bool)
ignore_tokens = [self.vocab.cls_id, self.vocab.sep_id, self.vocab.pad_id]
for ignore_token in ignore_tokens:
# TODO(zheyuye), Update when operation += supported
valid_candidates = valid_candidates * \
np.not_equal(input_ids, ignore_token)
valid_lengths = valid_lengths.astype(np.float32)
valid_candidates = valid_candidates.astype(np.float32)
num_masked_position = mxnp.maximum(
1, np.minimum(N, round(valid_lengths * self._mask_prob)))
# Get the masking probability of each position
sample_probs = self._proposal_distribution * valid_candidates
sample_probs /= mxnp.sum(sample_probs, axis=-1, keepdims=True)
sample_probs = npx.stop_gradient(sample_probs)
gumbels = mxnp.random.gumbel(np.zeros_like(sample_probs))
        # Following the instruction of the official repo to avoid duplicated positions
# with Top_k Sampling as https://github.com/google-research/electra/issues/41
masked_positions = npx.topk(
mxnp.log(sample_probs) + gumbels, k=N,
axis=-1, ret_typ='indices', dtype=np.int32)
masked_weights = npx.sequence_mask(
mxnp.ones_like(masked_positions),
sequence_length=num_masked_position,
use_sequence_length=True, axis=1, value=0)
masked_positions = masked_positions * masked_weights
length_masks = npx.sequence_mask(
mxnp.ones_like(input_ids, dtype=np.float32),
sequence_length=valid_lengths,
use_sequence_length=True, axis=1, value=0)
unmasked_tokens = select_vectors_by_position(
input_ids, masked_positions) * masked_weights
masked_weights = masked_weights.astype(np.float32)
replaced_positions = (
mxnp.random.uniform(
mxnp.zeros_like(masked_positions),
mxnp.ones_like(masked_positions)) < self._replace_prob) * masked_positions
# dealing with multiple zero values in replaced_positions which causes
# the [CLS] being replaced
filled = mxnp.where(
replaced_positions,
self.vocab.mask_id,
self.vocab.cls_id).astype(
np.int32)
# Masking token by replacing with [MASK]
masked_input_ids = update_vectors_by_position(input_ids, filled, replaced_positions)
        # Note: masked_positions is likely to contain multiple zero values if the number of masked
        # positions has not reached the maximum. However, this case hardly occurs since valid_length
        # is almost always equal to max_seq_length
masked_input = self.MaskedInput(input_ids=masked_input_ids,
masks=length_masks,
unmasked_tokens=unmasked_tokens,
masked_positions=masked_positions,
masked_weights=masked_weights)
return masked_input |
Generate masking positions on-the-fly instead of during preprocessing
Parameters
----------
input_ids
The batchified input_ids with shape (batch_size, max_seq_length)
valid_lengths
The batchified valid_lengths with shape (batch_size, )
Returns
------
masked_input_ids
The masked input sequence with 15% tokens are masked with [MASK]
shape (batch_size, max_seq_length)
length_masks
The masking matrix for the whole sequence that indicates the positions
are greater than valid_length.
shape (batch_size, max_seq_length)
unmasked_tokens
The original tokens that appear in the unmasked input sequence
shape (batch_size, num_masked_positions)
masked_positions
The masking positions in mx.np.ndarray with shape (batch_size, num_masked_positions)
shape (batch_size, num_masked_positions)
masked_lm_weights
The weight matrix containing 0 or 1 to mark the actual effect of masked positions
shape (batch_size, num_masked_positions)
| dynamic_masking | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |