python_code (stringlengths 0-992k) | repo_name (stringlengths 8-46) | file_path (stringlengths 5-162)
---|---|---|
# --------------------------------------------------------
# DIT: SELF-SUPERVISED PRE-TRAINING FOR DOCUMENT IMAGE TRANSFORMER
# Based on BEiT
# --------------------------------------------------------
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner
import webdataset as wds
from datasets import build_dataset
from engine_for_finetuning import train_one_epoch, evaluate
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
from scipy import interpolate
def get_args():
parser = argparse.ArgumentParser('BEiT fine-tuning and evaluation script for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=5, type=int)
parser.add_argument('--eval_freq', default=5, type=int)
# Model parameters
parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.add_argument('--qkv_bias', action='store_true')
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='EMA decay factor (default: 0.9999)')
parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='keep the EMA weights on CPU')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--layer_decay', type=float, default=0.9)
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic; default: "bicubic")')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling')
parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes', default=0, type=int,
help='number of classes (default 0 lets build_dataset fill it in)')
parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder', "rvlcdip", "rvlcdip_wds"],
type=str, help='dataset to train on')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed', action='store_true', default=False)
parser.add_argument('--zero_stage', default=0, type=int,
help='ZeRO optimizer stage (default: 0)')
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
except ImportError:
print("Please run 'pip install deepspeed==0.4.0'")
exit(0)
else:
ds_init = None
return parser.parse_args(), ds_init
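# A minimal example invocation (illustrative only; the paths, launcher flags and model
# name below are assumptions, not taken from this file):
#
#   python -m torch.distributed.launch --nproc_per_node=8 run_class_finetuning.py \
#       --model beit_base_patch16_224 --data_set rvlcdip --data_path /path/to/rvl-cdip \
#       --finetune /path/to/pretrained.pth --batch_size 64 --lr 5e-4 --epochs 30 \
#       --warmup_epochs 5 --layer_decay 0.75 --output_dir /path/to/output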
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if not isinstance(dataset_train, torch.utils.data.IterableDataset):
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if 'AMLT_OUTPUT_DIR' in os.environ:
args.log_dir = os.environ['AMLT_OUTPUT_DIR']
print(f'update log_dir to {args.log_dir}')
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
dataset_size_train = len(dataset_train)
if isinstance(dataset_train, torch.utils.data.IterableDataset):
dataset_train = dataset_train.batched(args.batch_size, partial=False)
data_loader_train = wds.WebLoader(
dataset_train, num_workers=args.num_workers, batch_size=None, shuffle=False, )
data_loader_train = data_loader_train.ddp_equalize(dataset_size_train // args.batch_size, with_length=True)
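# Note: ddp_equalize repeats/truncates the WebDataset stream so that every distributed
# rank yields the same number of batches per epoch (dataset_size_train // batch_size);
# the exact semantics depend on the installed webdataset version.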
else:
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
dataset_size_val = len(dataset_val)
if not isinstance(dataset_val, torch.utils.data.IterableDataset):
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
dataset_val = dataset_val.batched(args.batch_size, partial=False)
data_loader_val = wds.WebLoader(
dataset_val, num_workers=args.num_workers, batch_size=None, shuffle=False, )
data_loader_val = data_loader_val.ddp_equalize(dataset_size_val // args.batch_size, with_length=True)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
if "beit" not in args.model:
model = create_model(args.model, pretrained=False, num_classes=args.nb_classes, distilled=False)
else:
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
use_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.patch_size = patch_size
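# Worked example: with the defaults input_size=224 and a 16x16 patch embedding this
# gives window_size = (224 // 16, 224 // 16) = (14, 14), i.e. 196 patch tokens plus the
# [CLS] token inside the transformer.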
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=False)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
if getattr(model, "use_rel_pos_bias", False) and "rel_pos_bias.relative_position_bias_table" in checkpoint_model:
print("Expand the shared relative position embedding to each transformer block. ")
num_layers = model.get_num_layers()
rel_pos_bias = checkpoint_model["rel_pos_bias.relative_position_bias_table"]
for i in range(num_layers):
checkpoint_model["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()
checkpoint_model.pop("rel_pos_bias.relative_position_bias_table")
all_keys = list(checkpoint_model.keys())
for key in all_keys:
if "relative_position_index" in key:
checkpoint_model.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = checkpoint_model[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
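# The bisection below finds a ratio q such that src_size // 2 geometric steps of sizes
# 1, q, q^2, ... sum to roughly dst_size // 2. The source bias values are then placed on
# this non-uniform grid (dense near the centre, stretched at the extremes) before being
# resampled onto the uniform target grid with cubic interpolation.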
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.090307:
# q = 1.090307
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
print("Original positions = %s" % str(x))
print("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
checkpoint_model[key] = new_rel_pos_bias
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
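# Worked example: a checkpoint pre-trained at 224x224 with 16x16 patches stores a 14x14
# grid of position tokens; fine-tuning at 384x384 needs 24x24, so only the patch-token
# grid is resized bicubically while the [CLS] / extra tokens are copied unchanged.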
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = dataset_size_train // total_batch_size
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % dataset_size_train)
print("Number of training training per epoch = %d" % num_training_steps_per_epoch)
# num_layers = model_without_ddp.get_num_layers()
num_layers = len(model_without_ddp.blocks)
if args.layer_decay < 1.0:
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
skip_weight_decay_list = model.no_weight_decay()
if args.disable_weight_decay_on_rel_pos_bias:
for i in range(num_layers):
skip_weight_decay_list.add("blocks.%d.attn.relative_position_bias_table" % i)
if args.distributed:
torch.distributed.barrier()
if args.enable_deepspeed:
loss_scaler = None
optimizer_params = get_parameter_groups(
model, args.weight_decay, skip_weight_decay_list,
assigner.get_layer_id if assigner is not None else None,
assigner.get_scale if assigner is not None else None)
model, optimizer, _, _ = ds_init(
args=args, model=model, model_parameters=optimizer_params,
dist_init_required=not args.distributed,
)
print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps())
assert model.gradient_accumulation_steps() == args.update_freq
else:
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=skip_weight_decay_list,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler()
print("Use step level LR scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {dataset_size_val} test images: {test_stats['acc1']:.1f}%")
exit(0)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler = getattr(data_loader_train, "sampler", None)
if sampler is not None and hasattr(sampler, "set_epoch"):
sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer,
device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer, start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None and ((epoch + 1) % args.eval_freq == 0 or epoch + 1 == args.epochs):
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {dataset_size_val} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
# **{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts, ds_init = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts, ds_init)
| EXA-1-master | exa/models/unilm-master/dit/classification/run_class_finetuning.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Modified on torchvision code bases
# https://github.com/pytorch/vision
# --------------------------------------------------------
import os
import os.path
import random
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from PIL import Image
from torchvision.datasets.vision import VisionDataset
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def is_image_file(filename: str) -> bool:
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = path, class_index
instances.append(item)
return instances
class DatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
is_valid_file (callable, optional): A function that takes path of a file
and check if the file is a valid file (used to check for corrupt files)
both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> None:
super(DatasetFolder, self).__init__(root, transform=transform,
target_transform=target_transform)
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
while True:
try:
path, target = self.samples[index]
sample = self.loader(path)
break
except Exception as e:
print(e)
index = random.randint(0, len(self.samples) - 1)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class RvlcdipDatasetFolder(VisionDataset):
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
split: str = None,
dataset_size: Optional[int] = None
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.dataset_size = int(dataset_size) if dataset_size is not None else 42948004
classes = ["letter",
"form",
"email",
"handwritten",
"advertisement",
"scientific report",
"scientific publication",
"specification",
"file folder",
"news article",
"budget",
"invoice",
"presentation",
"questionnaire",
"resume",
"memo"]
class_to_idx = {c: i for i, c in enumerate(classes)}
with open(os.path.join(self.root, "labels", split + ".txt"), "r") as f:
labels = f.read().splitlines()
samples = [(line.split()[0], int(line.split()[1])) for line in labels]
try:
assert len(samples) > 0 and os.path.exists(os.path.join(self.root, "images", samples[0][0]))
except AssertionError:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
msg += "Expected first file: {}".format(os.path.join(self.root, "images", samples[0][0]))
raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
while True:
try:
path, target = self.samples[index]
sample = self.loader(os.path.join(self.root, "images", path))
break
except Exception as e:
print(e)
index = random.randint(0, len(self.samples) - 1)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
class RvlcdipImageFolder(RvlcdipDatasetFolder):
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
loader: Callable[[str], Any] = default_loader,
split: str = None,
dataset_size: Optional[int] = None
):
super().__init__(root, loader, IMG_EXTENSIONS if split is None else None,
transform=transform,
target_transform=target_transform,
split=split,
dataset_size=dataset_size)
self.imgs = self.samples
| EXA-1-master | exa/models/unilm-master/dit/classification/dataset_folder.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------
import datetime
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
try:
from torch import inf
except ImportError: # torch._six was removed in recent PyTorch releases
from torch._six import inf
# from modeling_discrete_vae import Dalle_VAE, DiscreteVAE, DiscreteVAE2, VQGanVAE, DiscreteVAEforBEiT
from torch.utils.tensorboard import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(log_dir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank,
timeout=datetime.timedelta(0, 7200)
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
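# A minimal sketch of how this scaler is typically driven in a training step (the names
# model, criterion, samples, targets and max_norm are illustrative, not defined here):
#
#   with torch.cuda.amp.autocast():
#       loss = criterion(model(samples), targets)
#   optimizer.zero_grad()
#   grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
#                           parameters=model.parameters(), update_grad=True)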
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
# assert len(schedule) == epochs * niter_per_ep
return schedule
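# Example: cosine_scheduler(5e-4, 1e-6, epochs=30, niter_per_ep=1000, warmup_epochs=5)
# returns 30 * 1000 per-iteration values; the first 5 * 1000 ramp linearly from 0 to
# 5e-4 and the remaining 25 * 1000 follow a cosine decay from 5e-4 down to 1e-6.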
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
if model_ema is not None:
client_state['model_ema'] = get_state_dict(model_ema)
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
# torch.amp
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if hasattr(args, 'model_ema') and args.model_ema:
_load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
# deepspeed, only support '--auto_resume'.
if args.auto_resume:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
print("Auto resume checkpoint: %d" % latest_ckpt)
_, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
args.start_epoch = client_states['epoch'] + 1
if model_ema is not None:
if args.model_ema:
_load_checkpoint_for_ema(model_ema, client_states['model_ema'])
def create_ds_config(args):
args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
with open(args.deepspeed_config, mode="w") as writer:
ds_config = {
"train_batch_size": args.batch_size * args.update_freq * get_world_size(),
"train_micro_batch_size_per_gpu": args.batch_size,
"steps_per_print": 1000,
"optimizer": {
"type": "Adam",
"adam_w_mode": True,
"params": {
"lr": args.lr,
"weight_decay": args.weight_decay,
"bias_correction": True,
"betas": [
0.9,
0.999
],
"eps": 1e-8
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
},
"zero_optimization": {
"stage": args.zero_stage
},
"amp": {
"enabled": False,
"opt_level": "O2"
}
}
if args.clip_grad is not None:
ds_config.update({'gradient_clipping': args.clip_grad})
writer.write(json.dumps(ds_config, indent=2))
| EXA-1-master | exa/models/unilm-master/dit/classification/utils.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
# dropout after fc1 is commented out here to match the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
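# Only q and v carry learnable biases; k is given a fixed all-zero bias, so a single
# fused qkv projection can be applied with one concatenated bias vector.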
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
use_mean_pooling=True, init_scale=0.001):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.mask_token, std=.02)
        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=.02)
        self.apply(self._init_weights)
        self.fix_init_weight()
        if isinstance(self.head, nn.Linear):
            self.head.weight.data.mul_(init_scale)
            self.head.bias.data.mul_(init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
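    # With use_mean_pooling=True the classifier consumes the fc-normed mean of the patch
    # tokens; otherwise it falls back to the (normed) [CLS] token, as in the original ViT.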
def forward_features(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x = blk(x, rel_pos_bias=rel_pos_bias)
x = self.norm(x)
if self.fc_norm is not None:
t = x[:, 1:, :]
return self.fc_norm(t.mean(1))
else:
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def beit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
| EXA-1-master | exa/models/unilm-master/dit/classification/modeling_finetune.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_vit(var_name, num_max_layer):
if var_name in ("cls_token", "mask_token", "pos_embed"):
return 0
elif var_name.startswith("patch_embed"):
return 0
elif var_name.startswith("rel_pos_bias"):
return num_max_layer - 1
elif var_name.startswith("blocks"):
layer_id = int(var_name.split('.')[1])
return layer_id + 1
else:
return num_max_layer - 1
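# Illustrative mapping for a 12-block ViT (num_max_layer = 14): "cls_token", "pos_embed" and
# "patch_embed.*" -> 0, "blocks.0.*" -> 1, ..., "blocks.11.*" -> 12, and every remaining
# parameter (rel_pos_bias, final norm, head, ...) -> 13.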
class LayerDecayValueAssigner(object):
def __init__(self, values):
self.values = values
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
return get_num_layer_for_vit(var_name, len(self.values))
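# Usage sketch (assumed, mirroring the fine-tuning script): with layer-wise decay rate d and
# num_layers = model.get_num_layers(), the assigner is typically built as
#   LayerDecayValueAssigner([d ** (num_layers + 1 - i) for i in range(num_layers + 2)])
# so shallow layers get small lr_scale values while the top layers and head approach 1.0.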
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
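# With layer-wise decay enabled the group names look like "layer_0_no_decay", "layer_0_decay",
# ..., "layer_13_no_decay" (otherwise just "decay"/"no_decay"); each group carries its own
# weight_decay plus an lr_scale that is applied when per-group learning rates are set later.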
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
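        # weight decay is now carried per parameter group, so the optimizer-level value is zeroed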
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False and "Invalid optimizer"
raise ValueError
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
| EXA-1-master | exa/models/unilm-master/dit/classification/optim_factory.py |
#!/usr/bin/env python
# --------------------------------------------------------------------------------
# MPViT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# --------------------------------------------------------------------------------
"""
Detection Training Script for MPViT.
"""
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.data.datasets import register_coco_instances
from ditod import MyTrainer, add_vit_config
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
# add_coat_config(cfg)
add_vit_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
register_coco_instances(
"funsd_train",
{},
"data/instances_training.json",
"data/imgs"
)
register_coco_instances(
"funsd_test",
{},
"data/instances_test.json",
"data/imgs"
)
cfg = setup(args)
if args.eval_only:
model = MyTrainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = MyTrainer.test(cfg, model)
return res
trainer = MyTrainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument("--debug", action="store_true", help="enable debug mode")
args = parser.parse_args()
print("Command Line Args:", args)
if args.debug:
import debugpy
print("Enabling attach starts.")
debugpy.listen(address=('0.0.0.0', 9310))
debugpy.wait_for_client()
print("Enabling attach ends.")
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
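    # Typical invocation (illustrative paths; the flags come from detectron2's default_argument_parser):
    #   python train_net.py --config-file configs/<your_config>.yaml --num-gpus 8 \
    #       MODEL.WEIGHTS <path/to/dit_checkpoint.pth> OUTPUT_DIR <output_dir>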
| EXA-1-master | exa/models/unilm-master/dit/text_detection/train_net.py |
"""
Mostly copy-paste from DINO and timm library:
https://github.com/facebookresearch/dino
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import warnings
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import trunc_normal_, drop_path, to_2tuple
from functools import partial
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
q, k, v = self.qkv(x).reshape(B, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.window_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches_w, self.num_patches_h = self.window_size
self.num_patches = self.window_size[0] * self.window_size[1]
self.img_size = img_size
self.patch_size = patch_size
self.proj = nn.Conv2d(in_chans, embed_dim,
kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x)
return x
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(
1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class ViT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
model_name='vit_base_patch16_224',
img_size=384,
patch_size=16,
in_chans=3,
embed_dim=1024,
depth=24,
num_heads=16,
num_classes=19,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.1,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
norm_cfg=None,
pos_embed_interp=False,
random_init=False,
align_corners=False,
use_checkpoint=False,
num_extra_tokens=1,
out_features=None,
**kwargs,
):
super(ViT, self).__init__()
self.model_name = model_name
self.img_size = img_size
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.depth = depth
self.num_heads = num_heads
self.num_classes = num_classes
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.drop_rate = drop_rate
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.hybrid_backbone = hybrid_backbone
self.norm_layer = norm_layer
self.norm_cfg = norm_cfg
self.pos_embed_interp = pos_embed_interp
self.random_init = random_init
self.align_corners = align_corners
self.use_checkpoint = use_checkpoint
self.num_extra_tokens = num_extra_tokens
self.out_features = out_features
self.out_indices = [int(name[5:]) for name in out_features]
# self.num_stages = self.depth
# self.out_indices = tuple(range(self.num_stages))
if self.hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
self.hybrid_backbone, img_size=self.img_size, in_chans=self.in_chans, embed_dim=self.embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim)
self.num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
if self.num_extra_tokens == 2:
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(
1, self.num_patches + self.num_extra_tokens, self.embed_dim))
self.pos_drop = nn.Dropout(p=self.drop_rate)
# self.num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate,
self.depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=self.embed_dim, num_heads=self.num_heads, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
qk_scale=self.qk_scale,
drop=self.drop_rate, attn_drop=self.attn_drop_rate, drop_path=dpr[i], norm_layer=self.norm_layer)
for i in range(self.depth)])
# NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here
# self.repr = nn.Linear(embed_dim, representation_size)
# self.repr_act = nn.Tanh()
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
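        # The four FPN heads turn the single stride-16 (or stride-8) ViT feature map into a
        # pyramid at strides 4/8/16/32, which is what detectron2-style FPN necks expect.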
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
if self.num_extra_tokens==2:
            trunc_normal_(self.dist_token, std=.02)
self.apply(self._init_weights)
# self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
'''
def init_weights(self):
logger = get_root_logger()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}")
load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger)
'''
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def _conv_filter(self, state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
def to_2D(self, x):
n, hw, c = x.shape
h = w = int(math.sqrt(hw))
x = x.transpose(1, 2).reshape(n, c, h, w)
return x
def to_1D(self, x):
n, c, h, w = x.shape
x = x.reshape(n, c, -1).transpose(1, 2)
return x
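    # Shape sketch (illustrative): a 224x224 pre-training grid gives N = 196 patch positions;
    # feeding a 512x768 image with patch_size 16 yields a 32x48 grid, so the 14x14 block of
    # positional embeddings below is bicubically resized to 32x48 before the cls/dist
    # embeddings are concatenated back in front.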
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - self.num_extra_tokens
N = self.pos_embed.shape[1] - self.num_extra_tokens
if npatch == N and w == h:
return self.pos_embed
class_ORdist_pos_embed = self.pos_embed[:, 0:self.num_extra_tokens]
patch_pos_embed = self.pos_embed[:, self.num_extra_tokens:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size[0]
h0 = h // self.patch_embed.patch_size[1]
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_ORdist_pos_embed, patch_pos_embed), dim=1)
def prepare_tokens(self, x, mask=None):
B, nc, w, h = x.shape
# patch linear embedding
x = self.patch_embed(x)
# mask image modeling
if mask is not None:
x = self.mask_model(x, mask)
x = x.flatten(2).transpose(1, 2)
# add the [CLS] token to the embed patch tokens
all_tokens = [self.cls_token.expand(B, -1, -1)]
if self.num_extra_tokens == 2:
dist_tokens = self.dist_token.expand(B, -1, -1)
all_tokens.append(dist_tokens)
all_tokens.append(x)
x = torch.cat(all_tokens, dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward_features(self, x):
# print(f"==========shape of x is {x.shape}==========")
B, _, H, W = x.shape
Hp, Wp = H // self.patch_size, W // self.patch_size
x = self.prepare_tokens(x)
features = []
for i, blk in enumerate(self.blocks):
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if i in self.out_indices:
xp = x[:, self.num_extra_tokens:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
feat_out = {}
for name, value in zip(self.out_features, features):
feat_out[name] = value
return feat_out
def forward(self, x):
x = self.forward_features(x)
return x
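# The two factories below differ mainly in num_extra_tokens: DeiT-style checkpoints carry a
# [CLS] token plus a distillation token (2 extra tokens), while MAE-style checkpoints carry
# only the [CLS] token.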
def deit_base_patch16(pretrained=False, **kwargs):
model = ViT(
patch_size=16,
drop_rate=0.,
embed_dim=768,
depth=12,
num_heads=12,
num_classes=1000,
mlp_ratio=4.,
qkv_bias=True,
use_checkpoint=True,
num_extra_tokens=2,
**kwargs)
model.default_cfg = _cfg()
return model
def mae_base_patch16(pretrained=False, **kwargs):
model = ViT(
patch_size=16,
drop_rate=0.,
embed_dim=768,
depth=12,
num_heads=12,
num_classes=1000,
mlp_ratio=4.,
qkv_bias=True,
use_checkpoint=True,
num_extra_tokens=1,
**kwargs)
model.default_cfg = _cfg()
    return model
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/deit.py
import os
import json
import copy
import itertools
from collections import OrderedDict
import detectron2.utils.comm as comm
from detectron2.evaluation import COCOEvaluator
from .concern.icdar2015_eval.detection.iou import DetectionIoUEvaluator
class FUNSDEvaluator(COCOEvaluator):
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
self._logger.warning("[evaluating...]The evaluator may take long time")
id2img = {}
gt = {}
with open('data/instances_test.json', 'r',
encoding='utf-8') as fr:
data = json.load(fr)
for img in data['images']:
id = img['id']
name = os.path.basename(img['file_name'])[:-len('.jpg')]
assert id not in id2img.keys()
id2img[id] = name
assert len(id2img) == len(data['images'])
img2id, id2bbox = {}, {}
for i in range(len(data['images'])):
key = os.path.basename(data['images'][i]['file_name'][:-len('.png')])
assert key not in img2id.keys()
img2id[key] = data['images'][i]['id']
for i in range(len(data['annotations'])):
img_id = data['annotations'][i]['image_id']
if img_id not in id2bbox.keys():
id2bbox[img_id] = []
x0, y0, w, h = data['annotations'][i]['bbox']
x1, y1 = x0 + w, y0 + h
line = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
id2bbox[img_id].append(
{
'points': line,
'text': 1234,
'ignore': False,
}
)
for key, val in img2id.items():
assert key not in gt.keys()
gt[key] = id2bbox[val]
self._results = OrderedDict()
evaluator = DetectionIoUEvaluator()
        for step in range(3, 10):
            thr = step * 0.1
self._results[thr] = {}
total_prediction = {}
for cur_pred in predictions:
assert cur_pred['image_id'] in id2img.keys()
id = id2img[cur_pred['image_id']]
if id not in total_prediction.keys(): total_prediction[id] = []
for cur_inst in cur_pred['instances']:
x0, y0, w, h = cur_inst['bbox']
cur_score = cur_inst['score']
if cur_score < thr:
continue
x1, y1 = x0 + w, y0 + h
x0, x1 = int(x0 + 0.5), int(x1 + 0.5)
y0, y1 = int(y0 + 0.5), int(y1 + 0.5)
min_x, max_x = min([x0, x1]), max([x0, x1])
min_y, max_y = min([y0, y1]), max([y0, y1])
pred_line = [min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]
pred_line_str = ','.join(list(map(str, pred_line)))
total_prediction[id].append(pred_line_str)
final_gt = []
final_res = []
for key, _ in gt.items():
final_gt.append(copy.deepcopy(gt[key]))
cur_res = []
pred = total_prediction[key]
for i in range(len(pred)):
line = list(map(int, pred[i].split(',')))
line = [(line[0], line[1]), (line[2], line[3]), (line[4], line[5]), (line[6], line[7])]
cur_res.append(
{
'points': line,
'text': 1234,
'ignore': False,
}
)
final_res.append(cur_res)
results = []
for cur_gt, pred in zip(final_gt, final_res):
results.append(evaluator.evaluate_image(cur_gt, pred))
metrics = evaluator.combine_results(results)
for key, val in metrics.items():
self._results["{:.1f}_{}".format(thr, key)] = val
return copy.deepcopy(self._results)
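# The returned OrderedDict is keyed as "<score_threshold>_<metric>" for each metric reported by
# DetectionIoUEvaluator.combine_results(), with one set per confidence threshold from 0.3 to 0.9.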
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/funsd_evaluation.py |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
Status/TODO:
* Models updated to be compatible with official impl. Args added to support backward compat for old PyTorch weights.
* Weights ported from official jax impl for 384x384 base and small models, 16x16 and 32x32 patches.
* Trained (supervised on ImageNet-1k) my custom 'small' patch model to 77.9, 'base' to 79.4 top-1 with this code.
* Hopefully find time and GPUs for SSL or unsupervised pretraining on OpenImages w/ ImageNet fine-tune in future.
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020 Ross Wightman
"""
import warnings
import math
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
        # dropout after fc1 is commented out to match the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.0)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def cogview_attn(self, attention_scores, alpha=32):
        '''
        PB-Relax (Precision Bottleneck Relaxation) from CogView, Section 2.4 of
        https://arxiv.org/pdf/2105.13290.pdf,
        "Stabilization of training: Precision Bottleneck Relaxation (PB-Relax)".
        A drop-in replacement for the original nn.Softmax(dim=-1)(attention_scores).
        It is slightly slower and introduces a small numerical bias; compare against the standard
        softmax with torch.allclose(standard_attention_probs, cogview_attention_probs, atol=1e-08).
        The smaller the atol (e.g., 1e-08), the closer the match.
        '''
scaled_attention_scores = attention_scores / alpha
max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
# max_value = scaled_attention_scores.amax(dim=(-2, -1)).unsqueeze(-1).unsqueeze(-1)
new_attention_scores = (scaled_attention_scores - max_value) * alpha
return nn.Softmax(dim=-1)(new_attention_scores)
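    # Note: subtracting the (scaled) per-row maximum only shifts each row's logits by a
    # constant, so PB-Relax is mathematically equivalent to a plain softmax while keeping the
    # intermediate attention scores small enough to avoid fp16 overflow.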
def forward(self, x, rel_pos_bias=None, training_window_size=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
if training_window_size == self.window_size:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
else:
training_window_size = tuple(training_window_size.tolist())
new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3
                # new_num_relative_distance covers every possible relative offset, plus the
                # three extra entries for cls-to-cls, token-to-cls and cls-to-token
new_relative_position_bias_table = F.interpolate(
self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads,
2 * self.window_size[0] - 1,
2 * self.window_size[1] - 1),
size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic',
align_corners=False)
new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads,
new_num_relative_distance - 3).permute(
1, 0)
new_relative_position_bias_table = torch.cat(
[new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(training_window_size[0])
coords_w = torch.arange(training_window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += training_window_size[1] - 1
relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1
relative_position_index = \
torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2,
dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = new_num_relative_distance - 3
relative_position_index[0:, 0] = new_num_relative_distance - 2
relative_position_index[0, 0] = new_num_relative_distance - 1
relative_position_bias = \
new_relative_position_bias_table[relative_position_index.view(-1)].view(
training_window_size[0] * training_window_size[1] + 1,
training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
# attn = self.cogview_attn(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None, training_window_size=None):
if self.gamma_1 is None:
x = x + self.drop_path(
self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, training_window_size=training_window_size))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias,
training_window_size=training_window_size))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches_w = self.patch_shape[0]
self.num_patches_h = self.patch_shape[1]
# the so-called patch_shape is the patch shape during pre-training
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, position_embedding=None, **kwargs):
# FIXME look at relaxing size constraints
# assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
if position_embedding is not None:
# interpolate the position embedding to the corresponding size
position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3,
1, 2)
position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic')
x = x + position_embedding
x = x.flatten(2).transpose(1, 2)
return x, (Hp, Wp)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=[224, 224], feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_heads = num_heads
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self, training_window_size):
if training_window_size == self.window_size:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
else:
training_window_size = tuple(training_window_size.tolist())
new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3
            # new_num_relative_distance covers every possible relative offset, plus the
            # three extra entries for cls-to-cls, token-to-cls and cls-to-token
new_relative_position_bias_table = F.interpolate(
self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads,
2 * self.window_size[0] - 1,
2 * self.window_size[1] - 1),
size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic',
align_corners=False)
new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads,
new_num_relative_distance - 3).permute(
1, 0)
new_relative_position_bias_table = torch.cat(
[new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(training_window_size[0])
coords_w = torch.arange(training_window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += training_window_size[1] - 1
relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1
relative_position_index = \
torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2,
dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = new_num_relative_distance - 3
relative_position_index[0:, 0] = new_num_relative_distance - 2
relative_position_index[0, 0] = new_num_relative_distance - 1
relative_position_bias = \
new_relative_position_bias_table[relative_position_index.view(-1)].view(
training_window_size[0] * training_window_size[1] + 1,
training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias
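# Unlike the fixed-resolution classification model, this detection backbone passes a
# training_window_size into the relative-position modules so the pre-trained bias table can be
# bicubically resized to whatever patch grid the current input image produces.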
class BEiT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
img_size=[224, 224],
patch_size=16,
in_chans=3,
num_classes=80,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=None,
init_values=None,
use_abs_pos_emb=False,
use_rel_pos_bias=False,
use_shared_rel_pos_bias=False,
use_checkpoint=True,
pretrained=None,
out_features=None,
):
super(BEiT, self).__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.use_checkpoint = use_checkpoint
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.out_features = out_features
self.out_indices = [int(name[5:]) for name in out_features]
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
self.use_shared_rel_pos_bias = use_shared_rel_pos_bias
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
# trunc_normal_(self.mask_token, std=.02)
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
# nn.SyncBatchNorm(embed_dim),
nn.BatchNorm2d(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
'''
def init_weights(self):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
logger = get_root_logger()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}")
load_checkpoint(self,
filename=self.init_cfg['checkpoint'],
strict=False,
logger=logger,
beit_spec_expand_rel_pos = self.use_rel_pos_bias,
)
'''
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward_features(self, x):
B, C, H, W = x.shape
x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None)
# Hp, Wp are HW for patches
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
if self.pos_embed is not None:
cls_tokens = cls_tokens + self.pos_embed[:, :1, :]
x = torch.cat((cls_tokens, x), dim=1)
x = self.pos_drop(x)
features = []
training_window_size = torch.tensor([Hp, Wp])
rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None
for i, blk in enumerate(self.blocks):
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size)
else:
x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size)
if i in self.out_indices:
xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
feat_out = {}
for name, value in zip(self.out_features, features):
feat_out[name] = value
return feat_out
def forward(self, x):
x = self.forward_features(x)
return x
def beit_base_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=None,
**kwargs)
model.default_cfg = _cfg()
return model
def beit_large_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=None,
**kwargs)
model.default_cfg = _cfg()
return model
def dit_base_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=0.1,
**kwargs)
model.default_cfg = _cfg()
return model
def dit_large_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=1e-5,
**kwargs)
model.default_cfg = _cfg()
return model
if __name__ == '__main__':
model = BEiT(use_checkpoint=True, use_shared_rel_pos_bias=True)
model = model.to("cuda:0")
input1 = torch.rand(2, 3, 512, 762).to("cuda:0")
input2 = torch.rand(2, 3, 800, 1200).to("cuda:0")
input3 = torch.rand(2, 3, 720, 1000).to("cuda:0")
output1 = model(input1)
output2 = model(input2)
output3 = model(input3)
print("all done")
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/beit.py |
from detectron2.config import CfgNode as CN
def add_vit_config(cfg):
"""
Add config for VIT.
"""
_C = cfg
_C.MODEL.VIT = CN()
    # ViT backbone model name.
    _C.MODEL.VIT.NAME = ""
    # Output features from the ViT backbone.
_C.MODEL.VIT.OUT_FEATURES = ["layer3", "layer5", "layer7", "layer11"]
_C.MODEL.VIT.IMG_SIZE = [224, 224]
_C.MODEL.VIT.POS_TYPE = "shared_rel"
_C.MODEL.VIT.DROP_PATH = 0.
_C.MODEL.VIT.MODEL_KWARGS = "{}"
_C.SOLVER.OPTIMIZER = "ADAMW"
_C.SOLVER.BACKBONE_MULTIPLIER = 1.0
_C.AUG = CN()
_C.AUG.DETR = False
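    # Illustrative YAML override (the NAME value is assumed from the factories in beit.py):
    #   MODEL:
    #     VIT:
    #       NAME: "dit_base_patch16"
    #       OUT_FEATURES: ["layer3", "layer5", "layer7", "layer11"]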
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/config.py |
from detectron2.checkpoint import DetectionCheckpointer
from typing import Any
import torch
import torch.nn as nn
#from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, \
# ObserverBase, FakeQuantizeBase
from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION
from torch import distributed as dist
from scipy import interpolate
import numpy as np
import torch.nn.functional as F
def append_prefix(k):
prefix = 'backbone.bottom_up.backbone.'
# return prefix + k if not k.startswith(prefix) else k
return k
def modify_ckpt_state(model, state_dict, logger=None):
# reshape absolute position embedding for Swin
if state_dict.get(append_prefix('absolute_pos_embed')) is not None:
absolute_pos_embed = state_dict[append_prefix('absolute_pos_embed')]
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.backbone.bottom_up.backbone.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H * W:
logger.warning("Error in loading absolute_pos_embed, pass")
else:
state_dict[append_prefix('absolute_pos_embed')] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
def get_dist_info():
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
rank, _ = get_dist_info()
all_keys = list(state_dict.keys())
for key in all_keys:
if "relative_position_index" in key:
state_dict.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = state_dict[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
if key not in model.state_dict():
continue
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.backbone.bottom_up.backbone.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
if rank == 0:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
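                # Binary-search the geometric growth factor q so that src_size // 2
                # geometrically spaced steps span the target half-width dst_size // 2.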
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.13492:
# q = 1.13492
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
if rank == 0:
print("x = {}".format(x))
print("dx = {}".format(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
state_dict[key] = new_rel_pos_bias
if append_prefix('pos_embed') in state_dict:
pos_embed_checkpoint = state_dict[append_prefix('pos_embed')]
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.backbone.bottom_up.backbone.patch_embed.num_patches
num_extra_tokens = model.backbone.bottom_up.backbone.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
# new_size = int(num_patches ** 0.5)
new_size_w = model.backbone.bottom_up.backbone.patch_embed.num_patches_w
new_size_h = model.backbone.bottom_up.backbone.patch_embed.num_patches_h
# class_token and dist_token are kept unchanged
if orig_size != new_size_h or orig_size != new_size_w:
if rank == 0:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size_w, new_size_h))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size_w, new_size_h), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
state_dict[append_prefix('pos_embed')] = new_pos_embed
# interpolate position bias table if needed
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
if table_key not in model.state_dict():
continue
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f"Error in loading {table_key}, pass")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2), mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
if append_prefix('rel_pos_bias.relative_position_bias_table') in state_dict and \
model.backbone.bottom_up.backbone.use_rel_pos_bias and \
not model.backbone.bottom_up.backbone.use_shared_rel_pos_bias and \
append_prefix('blocks.0.attn.relative_position_bias_table') not in state_dict:
logger.info("[BEIT] Expand the shared relative position embedding to each transformer block. ")
num_layers = model.backbone.bottom_up.backbone.get_num_layers()
rel_pos_bias = state_dict[append_prefix("rel_pos_bias.relative_position_bias_table")]
for i in range(num_layers):
state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()
state_dict.pop(append_prefix("rel_pos_bias.relative_position_bias_table"))
return state_dict
class MyDetectionCheckpointer(DetectionCheckpointer):
def _load_model(self, checkpoint: Any) -> _IncompatibleKeys:
"""
Load weights from a checkpoint.
Args:
checkpoint (Any): checkpoint contains the weights.
Returns:
``NamedTuple`` with ``missing_keys``, ``unexpected_keys``,
and ``incorrect_shapes`` fields:
* **missing_keys** is a list of str containing the missing keys
* **unexpected_keys** is a list of str containing the unexpected keys
* **incorrect_shapes** is a list of (key, shape in checkpoint, shape in model)
This is just like the return value of
:func:`torch.nn.Module.load_state_dict`, but with extra support
for ``incorrect_shapes``.
"""
checkpoint_state_dict = checkpoint.pop("model")
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# workaround https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.state_dict()
incorrect_shapes = []
# rename the para in checkpoint_state_dict
# some bug here, do not support re load
checkpoint_state_dict = {
append_prefix(k): checkpoint_state_dict[k]
for k in checkpoint_state_dict.keys()
}
checkpoint_state_dict = modify_ckpt_state(self.model, checkpoint_state_dict, logger=self.logger)
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
model_param = model_state_dict[k]
# Allow mismatch for uninitialized parameters
if TORCH_VERSION >= (1, 8) and isinstance(
model_param, nn.parameter.UninitializedParameter
):
continue
shape_model = tuple(model_param.shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
has_observer_base_classes = (
TORCH_VERSION >= (1, 8)
and hasattr(quantization, "ObserverBase")
and hasattr(quantization, "FakeQuantizeBase")
)
if has_observer_base_classes:
# Handle the special case of quantization per channel observers,
# where buffer shape mismatches are expected.
def _get_module_for_key(
model: torch.nn.Module, key: str
) -> torch.nn.Module:
# foo.bar.param_or_buffer_name -> [foo, bar]
key_parts = key.split(".")[:-1]
cur_module = model
for key_part in key_parts:
cur_module = getattr(cur_module, key_part)
return cur_module
cls_to_skip = (
ObserverBase,
FakeQuantizeBase,
)
target_module = _get_module_for_key(self.model, k)
if isinstance(target_module, cls_to_skip):
# Do not remove modules with expected shape mismatches
# them from the state_dict loading. They have special logic
# in _load_from_state_dict to handle the mismatches.
continue
incorrect_shapes.append((k, shape_checkpoint, shape_model))
checkpoint_state_dict.pop(k)
incompatible = self.model.load_state_dict(checkpoint_state_dict, strict=False)
return _IncompatibleKeys(
missing_keys=incompatible.missing_keys,
unexpected_keys=incompatible.unexpected_keys,
incorrect_shapes=incorrect_shapes,
)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/mycheckpointer.py |
# --------------------------------------------------------------------------------
# VIT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# CoaT: https://github.com/mlpc-ucsd/CoaT
# --------------------------------------------------------------------------------
import torch
from detectron2.layers import (
ShapeSpec,
)
from detectron2.modeling import Backbone, BACKBONE_REGISTRY, FPN
from detectron2.modeling.backbone.fpn import LastLevelP6P7, LastLevelMaxPool
from .beit import beit_base_patch16, dit_base_patch16, dit_large_patch16, beit_large_patch16
from .deit import deit_base_patch16, mae_base_patch16
__all__ = [
"build_vit_fpn_backbone",
]
class VIT_Backbone(Backbone):
"""
Implement VIT backbone.
"""
def __init__(self, name, out_features, drop_path, img_size, pos_type, model_kwargs):
super().__init__()
self._out_features = out_features
if 'base' in name:
self._out_feature_strides = {"layer3": 4, "layer5": 8, "layer7": 16, "layer11": 32}
else:
self._out_feature_strides = {"layer7": 4, "layer11": 8, "layer15": 16, "layer23": 32}
if name == 'beit_base_patch16':
model_func = beit_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == 'dit_base_patch16':
model_func = dit_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == "deit_base_patch16":
model_func = deit_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == "mae_base_patch16":
model_func = mae_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == "dit_large_patch16":
model_func = dit_large_patch16
self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024}
elif name == "beit_large_patch16":
model_func = beit_large_patch16
self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024}
else:
raise ValueError("Unsupported VIT name yet.")
if 'beit' in name or 'dit' in name:
if pos_type == "abs":
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
use_abs_pos_emb=True,
**model_kwargs)
elif pos_type == "shared_rel":
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
use_shared_rel_pos_bias=True,
**model_kwargs)
elif pos_type == "rel":
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
use_rel_pos_bias=True,
**model_kwargs)
else:
raise ValueError()
else:
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
**model_kwargs)
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert x.dim() == 4, f"VIT takes an input of shape (N, C, H, W). Got {x.shape} instead!"
return self.backbone.forward_features(x)
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def build_VIT_backbone(cfg):
"""
Create a VIT instance from config.
Args:
cfg: a detectron2 CfgNode
Returns:
A VIT backbone instance.
"""
# fmt: off
name = cfg.MODEL.VIT.NAME
out_features = cfg.MODEL.VIT.OUT_FEATURES
drop_path = cfg.MODEL.VIT.DROP_PATH
img_size = cfg.MODEL.VIT.IMG_SIZE
pos_type = cfg.MODEL.VIT.POS_TYPE
model_kwargs = eval(str(cfg.MODEL.VIT.MODEL_KWARGS).replace("`", ""))
return VIT_Backbone(name, out_features, drop_path, img_size, pos_type, model_kwargs)
@BACKBONE_REGISTRY.register()
def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Create a VIT w/ FPN backbone.
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_VIT_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
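# Usage sketch (editor's addition, not part of the original repo): a quick smoke test
# of the backbone wrapper on its own; the values below mirror the defaults declared
# in ditod/config.py and are illustrative only.
if __name__ == "__main__":
    backbone = VIT_Backbone(
        name="dit_base_patch16",
        out_features=["layer3", "layer5", "layer7", "layer11"],
        drop_path=0.0,
        img_size=[224, 224],
        pos_type="shared_rel",
        model_kwargs={},
    )
    print(backbone.output_shape())
    features = backbone(torch.rand(1, 3, 224, 224))
    print({name: tuple(feat.shape) for name, feat in features.items()})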
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/backbone.py |
# --------------------------------------------------------------------------------
# MPViT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------------------------------
from .config import add_vit_config
from .backbone import build_vit_fpn_backbone
from .dataset_mapper import DetrDatasetMapper
from .mycheckpointer import MyDetectionCheckpointer
from .funsd_evaluation import FUNSDEvaluator
from .mytrainer import MyTrainer
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# from https://github.com/facebookresearch/detr/blob/main/d2/detr/dataset_mapper.py
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
class DetrDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by DETR.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
self.mask_on = cfg.MODEL.MASK_ON
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/dataset_mapper.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase
from .mycheckpointer import MyDetectionCheckpointer
from typing import Any, Dict, List, Set
import itertools
from detectron2.solver.build import maybe_add_gradient_clipping
from .dataset_mapper import DetrDatasetMapper
from .funsd_evaluation import FUNSDEvaluator
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"MyTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `MyTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
Try select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
none = object()
p = OmegaConf.select(cfg, k, default=none)
if p is not none:
return p
return default
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
PathManager.mkdirs(output_dir)
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class MyTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = MyTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = MyTrainer.auto_scale_workers(cfg, comm.get_world_size())
self.cfg = cfg
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = MyDetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (eg. optimizer and scheduler) and update iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and need to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for key, value in model.named_parameters(recurse=True):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "backbone" in key:
lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return FullModelGradientClippingOptimizer if enable else optim
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
)
elif optimizer_type == "ADAMW":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
params, cfg.SOLVER.BASE_LR
)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
if cfg.AUG.DETR:
mapper = DetrDatasetMapper(cfg, is_train=True)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return FUNSDEvaluator(dataset_name, output_dir=output_folder)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warn(
"No evaluator found. Use `MyTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
When the config is defined for certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
* training steps and warmup steps are scaled inverse proportionally.
* learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
MyTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/mytrainer.py |
import importlib
from collections import OrderedDict
import anyconfig
import munch
class Config(object):
def __init__(self):
pass
def load(self, conf):
conf = anyconfig.load(conf)
return munch.munchify(conf)
def compile(self, conf, return_packages=False):
packages = conf.get('package', [])
defines = {}
for path in conf.get('import', []):
parent_conf = self.load(path)
parent_packages, parent_defines = self.compile(
parent_conf, return_packages=True)
packages.extend(parent_packages)
defines.update(parent_defines)
modules = []
for package in packages:
module = importlib.import_module(package)
modules.append(module)
if isinstance(conf['define'], dict):
conf['define'] = [conf['define']]
for define in conf['define']:
name = define.copy().pop('name')
if not isinstance(name, str):
raise RuntimeError('name must be str')
defines[name] = self.compile_conf(define, defines, modules)
if return_packages:
return packages, defines
else:
return defines
def compile_conf(self, conf, defines, modules):
if isinstance(conf, (int, float)):
return conf
elif isinstance(conf, str):
if conf.startswith('^'):
return defines[conf[1:]]
if conf.startswith('$'):
return {'class': self.find_class_in_modules(conf[1:], modules)}
return conf
elif isinstance(conf, dict):
if 'class' in conf:
conf['class'] = self.find_class_in_modules(
conf['class'], modules)
if 'base' in conf:
base = conf.copy().pop('base')
if not isinstance(base, str):
raise RuntimeError('base must be str')
conf = {
**defines[base],
**conf,
}
return {key: self.compile_conf(value, defines, modules) for key, value in conf.items()}
elif isinstance(conf, (list, tuple)):
return [self.compile_conf(value, defines, modules) for value in conf]
else:
return conf
def find_class_in_modules(self, cls, modules):
if not isinstance(cls, str):
raise RuntimeError('class name must be str')
if cls.find('.') != -1:
package, cls = cls.rsplit('.', 1)
module = importlib.import_module(package)
if hasattr(module, cls):
return module.__name__ + '.' + cls
for module in modules:
if hasattr(module, cls):
return module.__name__ + '.' + cls
raise RuntimeError('class not found ' + cls)
class State:
def __init__(self, autoload=True, default=None):
self.autoload = autoload
self.default = default
class StateMeta(type):
def __new__(mcs, name, bases, attrs):
current_states = []
for key, value in attrs.items():
if isinstance(value, State):
current_states.append((key, value))
current_states.sort(key=lambda x: x[0])
attrs['states'] = OrderedDict(current_states)
new_class = super(StateMeta, mcs).__new__(mcs, name, bases, attrs)
# Walk through the MRO
states = OrderedDict()
for base in reversed(new_class.__mro__):
if hasattr(base, 'states'):
states.update(base.states)
new_class.states = states
for key, value in states.items():
setattr(new_class, key, value.default)
return new_class
class Configurable(metaclass=StateMeta):
def __init__(self, *args, cmd={}, **kwargs):
self.load_all(cmd=cmd, **kwargs)
@staticmethod
def construct_class_from_config(args):
cls = Configurable.extract_class_from_args(args)
return cls(**args)
@staticmethod
def extract_class_from_args(args):
cls = args.copy().pop('class')
package, cls = cls.rsplit('.', 1)
module = importlib.import_module(package)
cls = getattr(module, cls)
return cls
def load_all(self, **kwargs):
for name, state in self.states.items():
if state.autoload:
self.load(name, **kwargs)
def load(self, state_name, **kwargs):
# FIXME: kwargs should be filtered
# Args passed from command line
cmd = kwargs.pop('cmd', dict())
if state_name in kwargs:
setattr(self, state_name, self.create_member_from_config(
(kwargs[state_name], cmd)))
else:
setattr(self, state_name, self.states[state_name].default)
def create_member_from_config(self, conf):
args, cmd = conf
if args is None or isinstance(args, (int, float, str)):
return args
elif isinstance(args, (list, tuple)):
return [self.create_member_from_config((subargs, cmd)) for subargs in args]
elif isinstance(args, dict):
if 'class' in args:
cls = self.extract_class_from_args(args)
return cls(**args, cmd=cmd)
return {key: self.create_member_from_config((subargs, cmd)) for key, subargs in args.items()}
else:
return args
def dump(self):
state = {}
state['class'] = self.__class__.__module__ + \
'.' + self.__class__.__name__
for name, value in self.states.items():
obj = getattr(self, name)
state[name] = self.dump_obj(obj)
return state
def dump_obj(self, obj):
if obj is None:
return None
elif hasattr(obj, 'dump'):
return obj.dump()
elif isinstance(obj, (int, float, str)):
return obj
elif isinstance(obj, (list, tuple)):
return [self.dump_obj(value) for value in obj]
elif isinstance(obj, dict):
return {key: self.dump_obj(value) for key, value in obj.items()}
else:
return str(obj)
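# Usage sketch (editor's addition, not part of the original repo): States declared on
# a Configurable subclass become per-instance attributes, filled either from the
# constructor kwargs or from their declared defaults.
if __name__ == '__main__':
    class DemoOptimizer(Configurable):
        lr = State(default=0.001)
        momentum = State(default=0.9)

    opt = DemoOptimizer(lr=0.01)
    print(opt.lr, opt.momentum)  # 0.01 0.9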
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/config.py |
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
return self
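# Usage sketch (editor's addition, not part of the original repo): the meter keeps a
# running average weighted by the per-update count n.
if __name__ == '__main__':
    meter = AverageMeter()
    for batch_loss, batch_size in [(0.5, 4), (0.3, 4), (0.1, 2)]:
        meter.update(batch_loss, n=batch_size)
    print(meter.avg)  # (0.5*4 + 0.3*4 + 0.1*2) / 10 = 0.34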
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/average_meter.py |
import os
import logging
import functools
import json
import time
from datetime import datetime
# SummaryWriter is required by Logger below; prefer tensorboardX but fall back to the
# writer bundled with PyTorch when tensorboardX is not installed.
try:
    from tensorboardX import SummaryWriter
except ImportError:
    from torch.utils.tensorboard import SummaryWriter
import yaml
import cv2
import numpy as np
from .config import Configurable, State
class Logger(Configurable):
SUMMARY_DIR_NAME = 'summaries'
VISUALIZE_NAME = 'visualize'
LOG_FILE_NAME = 'output.log'
ARGS_FILE_NAME = 'args.log'
METRICS_FILE_NAME = 'metrics.log'
database_dir = State(default='./outputs/')
log_dir = State(default='workspace')
verbose = State(default=False)
level = State(default='info')
log_interval = State(default=100)
def __init__(self, **kwargs):
self.load_all(**kwargs)
self._make_storage()
cmd = kwargs['cmd']
self.name = cmd['name']
self.log_dir = os.path.join(self.log_dir, self.name)
try:
self.verbose = cmd['verbose']
        except KeyError:
print('verbose:', self.verbose)
if self.verbose:
print('Initializing log dir for', self.log_dir)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.message_logger = self._init_message_logger()
summary_path = os.path.join(self.log_dir, self.SUMMARY_DIR_NAME)
self.tf_board_logger = SummaryWriter(summary_path)
self.metrics_writer = open(os.path.join(
self.log_dir, self.METRICS_FILE_NAME), 'at')
self.timestamp = time.time()
self.logged = -1
self.speed = None
self.eta_time = None
def _make_storage(self):
application = os.path.basename(os.getcwd())
storage_dir = os.path.join(
self.database_dir, self.log_dir, application)
if not os.path.exists(storage_dir):
os.makedirs(storage_dir)
if not os.path.exists(self.log_dir):
os.symlink(storage_dir, self.log_dir)
def save_dir(self, dir_name):
return os.path.join(self.log_dir, dir_name)
def _init_message_logger(self):
message_logger = logging.getLogger('messages')
message_logger.setLevel(
logging.DEBUG if self.verbose else logging.INFO)
formatter = logging.Formatter(
'[%(levelname)s] [%(asctime)s] %(message)s')
std_handler = logging.StreamHandler()
std_handler.setLevel(message_logger.level)
std_handler.setFormatter(formatter)
file_handler = logging.FileHandler(
os.path.join(self.log_dir, self.LOG_FILE_NAME))
file_handler.setLevel(message_logger.level)
file_handler.setFormatter(formatter)
message_logger.addHandler(std_handler)
message_logger.addHandler(file_handler)
return message_logger
def report_time(self, name: str):
if self.verbose:
self.info(name + " time :" + str(time.time() - self.timestamp))
self.timestamp = time.time()
def report_eta(self, steps, total, epoch):
self.logged = self.logged % total + 1
steps = steps % total
if self.eta_time is None:
self.eta_time = time.time()
speed = -1
else:
eta_time = time.time()
speed = eta_time - self.eta_time
if self.speed is not None:
speed = ((self.logged - 1) * self.speed + speed) / self.logged
self.speed = speed
self.eta_time = eta_time
seconds = (total - steps) * speed
hours = seconds // 3600
minutes = (seconds - (hours * 3600)) // 60
seconds = seconds % 60
print('%d/%d batches processed in epoch %d, ETA: %2d:%2d:%2d' %
(steps, total, epoch,
hours, minutes, seconds), end='\r')
def args(self, parameters=None):
if parameters is None:
with open(os.path.join(self.log_dir, self.ARGS_FILE_NAME), 'rt') as reader:
                return yaml.load(reader.read(), Loader=yaml.FullLoader)
with open(os.path.join(self.log_dir, self.ARGS_FILE_NAME), 'wt') as writer:
yaml.dump(parameters.dump(), writer)
def metrics(self, epoch, steps, metrics_dict):
results = {}
for name, a in metrics_dict.items():
results[name] = {'count': a.count, 'value': float(a.avg)}
self.add_scalar('metrics/' + name, a.avg, steps)
result_dict = {
str(datetime.now()): {
'epoch': epoch,
'steps': steps,
**results
}
}
string_result = yaml.dump(result_dict)
self.info(string_result)
self.metrics_writer.write(string_result)
self.metrics_writer.flush()
    def named_number(self, name, num=None, default=0):
        if num is None:
            # read back the value previously written by the branch below
            path = os.path.join(self.log_dir, name)
            if not os.path.exists(path):
                return default
            with open(path, 'r') as reader:
                return int(reader.read()) or default
        else:
            with open(os.path.join(self.log_dir, name), 'w') as writer:
                writer.write(str(num))
            return num
epoch = functools.partialmethod(named_number, 'epoch')
iter = functools.partialmethod(named_number, 'iter')
def message(self, level, content):
self.message_logger.__getattribute__(level)(content)
def images(self, prefix, image_dict, step):
for name, image in image_dict.items():
self.add_image(prefix + '/' + name, image, step, dataformats='HWC')
def merge_save_images(self, name, images):
for i, image in enumerate(images):
if i == 0:
result = image
else:
result = np.concatenate([result, image], 0)
cv2.imwrite(os.path.join(self.vis_dir(), name+'.jpg'), result)
def vis_dir(self):
vis_dir = os.path.join(self.log_dir, self.VISUALIZE_NAME)
if not os.path.exists(vis_dir):
os.mkdir(vis_dir)
return vis_dir
def save_image_dict(self, images, max_size=1024):
for file_name, image in images.items():
height, width = image.shape[:2]
if height > width:
actual_height = min(height, max_size)
actual_width = int(round(actual_height * width / height))
else:
actual_width = min(width, max_size)
actual_height = int(round(actual_width * height / width))
image = cv2.resize(image, (actual_width, actual_height))
cv2.imwrite(os.path.join(self.vis_dir(), file_name+'.jpg'), image)
def __getattr__(self, name):
message_levels = set(['debug', 'info', 'warning', 'error', 'critical'])
if name == '__setstate__':
raise AttributeError('haha')
if name in message_levels:
return functools.partial(self.message, name)
elif hasattr(self.__dict__.get('tf_board_logger'), name):
return self.tf_board_logger.__getattribute__(name)
else:
            raise AttributeError(name)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/log.py |
from PIL import Image
import cv2
import base64
import io
import numpy as np
def convert(data):
if isinstance(data, dict):
ndata = {}
for key, value in data.items():
nkey = key.decode()
if nkey == 'img':
img = Image.open(io.BytesIO(value))
img = img.convert('RGB')
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
nvalue = img
else:
nvalue = convert(value)
ndata[nkey] = nvalue
return ndata
elif isinstance(data, list):
return [convert(item) for item in data]
elif isinstance(data, bytes):
return data.decode()
else:
return data
def to_np(x):
return x.cpu().data.numpy()
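# Usage sketch (editor's addition, not part of the original repo): convert() decodes a
# record whose keys and values arrive as raw bytes; non-image fields become plain str.
if __name__ == '__main__':
    record = {b'filename': b'sample_0001.jpg', b'tags': [b'text', b'receipt']}
    print(convert(record))  # {'filename': 'sample_0001.jpg', 'tags': ['text', 'receipt']}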
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/convert.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : Zhaoyi Wan <[email protected]>
# Date : 21.11.2018
# Last Modified Date: 08.01.2019
# Last Modified By : Zhaoyi Wan <[email protected]>
from .log import Logger
from .average_meter import AverageMeter
from .visualizer import Visualize
from .box2seg import resize_with_coordinates, box2seg
from .convert import convert
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/__init__.py |
import os
class SignalMonitor(object):
def __init__(self, file_path):
self.file_path = file_path
def get_signal(self):
if self.file_path is None:
return None
if os.path.exists(self.file_path):
            with open(self.file_path) as f:
                data = f.read()
            os.remove(self.file_path)
            return data
else:
return None
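# Usage sketch (editor's addition, not part of the original repo): write a signal file,
# read it back once, and confirm that reading consumes it.
if __name__ == '__main__':
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'signal')
    with open(path, 'w') as f:
        f.write('stop')
    monitor = SignalMonitor(path)
    print(monitor.get_signal())  # stop
    print(monitor.get_signal())  # None (the file was removed by the first read)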
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/signal_monitor.py |
import cv2
import numpy as np
from scipy import interpolate
def intersection(x, p1, p2):
x1, y1 = p1
x2, y2 = p2
if x2 == x1:
return 0
k = (x - x1) / (x2 - x1)
return k * (y2 - y1) + y1
def midpoint(p1, p2, typed=float):
return [typed((p1[0] + p2[0]) / 2), typed((p1[1] + p2[1]) / 2)]
def resize_with_coordinates(image, width, height, coordinates):
original_height, original_width = image.shape[:2]
resized_image = cv2.resize(image, (width, height))
if coordinates is not None:
assert coordinates.ndim == 2
assert coordinates.shape[-1] == 2
rate_x = width / original_width
rate_y = height / original_height
coordinates = coordinates * (rate_x, rate_y)
return resized_image, coordinates
def box2seg(image, boxes, label):
height, width = image.shape[:2]
mask = np.zeros((height, width), dtype=np.float32)
seg = np.zeros((height, width), dtype=np.float32)
points = []
for box_index in range(boxes.shape[0]):
box = boxes[box_index, :, :] # 4x2
left_top = box[0]
right_top = box[1]
right_bottom = box[2]
left_bottom = box[3]
left = [(left_top[0] + left_bottom[0]) / 2, (left_top[1] + left_bottom[1]) / 2]
right = [(right_top[0] + right_bottom[0]) / 2, (right_top[1] + right_bottom[1]) / 2]
center = midpoint(left, right)
points.append(midpoint(left, center))
points.append(midpoint(right, center))
poly = np.array([midpoint(left_top, center),
midpoint(right_top, center),
midpoint(right_bottom, center),
midpoint(left_bottom, center)
])
seg = cv2.fillPoly(seg, [poly.reshape(4, 1, 2).astype(np.int32)], int(label[box_index]))
left_y = intersection(0, points[0], points[1])
right_y = intersection(width, points[-1], points[-2])
points.insert(0, [0, left_y])
points.append([width, right_y])
points = np.array(points)
f = interpolate.interp1d(points[:, 0], points[:, 1], fill_value='extrapolate')
xnew = np.arange(0, width, 1)
ynew = f(xnew).clip(0, height-1)
for x in range(width - 1):
mask[int(ynew[x]), x] = 1
return ynew.reshape(1, -1).round(), seg
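# Usage sketch (editor's addition, not part of the original repo): resize a dummy image
# and rescale two matching (x, y) points along with it.
if __name__ == '__main__':
    image = np.zeros((100, 200, 3), dtype=np.uint8)   # H=100, W=200
    points = np.array([[10.0, 20.0], [150.0, 80.0]])  # (x, y) pairs
    resized, scaled = resize_with_coordinates(image, width=400, height=50, coordinates=points)
    print(resized.shape, scaled)  # (50, 400, 3) [[ 20.  10.] [300.  40.]]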
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/box2seg.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : visualizer.py
# Author : Zhaoyi Wan <[email protected]>
# Date : 08.01.2019
# Last Modified Date: 02.12.2019
# Last Modified By : Minghui Liao
import torch
import numpy as np
import cv2
class Visualize:
@classmethod
def visualize(cls, x):
dimension = len(x.shape)
if dimension == 2:
pass
elif dimension == 3:
pass
@classmethod
def to_np(cls, x):
return x.cpu().data.numpy()
@classmethod
def visualize_weights(cls, tensor, format='HW', normalize=True):
if isinstance(tensor, torch.Tensor):
x = cls.to_np(tensor.permute(format.index('H'), format.index('W')))
else:
x = tensor.transpose(format.index('H'), format.index('W'))
if normalize:
x = (x - x.min()) / (x.max() - x.min())
# return np.tile(x * 255., (3, 1, 1)).swapaxes(0, 2).swapaxes(1, 0).astype(np.uint8)
return cv2.applyColorMap((x * 255).astype(np.uint8), cv2.COLORMAP_JET)
@classmethod
def visualize_points(cls, image, tensor, radius=5, normalized=True):
if isinstance(tensor, torch.Tensor):
points = cls.to_np(tensor)
else:
points = tensor
if normalized:
points = points * image.shape[:2][::-1]
for i in range(points.shape[0]):
            color = np.random.randint(
                0, 255, (3, ), dtype=np.uint8).astype(float)
image = cv2.circle(image,
tuple(points[i].astype(np.int32).tolist()),
radius, color, thickness=radius//2)
return image
@classmethod
def visualize_heatmap(cls, tensor, format='CHW'):
if isinstance(tensor, torch.Tensor):
x = cls.to_np(tensor.permute(format.index('H'),
format.index('W'), format.index('C')))
else:
x = tensor.transpose(
format.index('H'), format.index('W'), format.index('C'))
        canvas = np.zeros((x.shape[0], x.shape[1], 3), dtype=np.float64)
        for c in range(0, x.shape[-1]):
            color = np.random.randint(
                0, 255, (3, ), dtype=np.uint8).astype(np.float64)
canvas += np.tile(x[:, :, c], (3, 1, 1)
).swapaxes(0, 2).swapaxes(1, 0) * color
canvas = canvas.astype(np.uint8)
return canvas
@classmethod
def visualize_classes(cls, x):
canvas = np.zeros((x.shape[0], x.shape[1], 3), dtype=np.uint8)
for c in range(int(x.max())):
            color = np.random.randint(
                0, 255, (3, ), dtype=np.uint8).astype(np.float64)
canvas[np.where(x == c)] = color
return canvas
@classmethod
def visualize_grid(cls, x, y, stride=16, color=(0, 0, 255), canvas=None):
h, w = x.shape
if canvas is None:
canvas = np.zeros((h, w, 3), dtype=np.uint8)
# canvas = np.concatenate([canvas, canvas], axis=1)
i, j = 0, 0
while i < w:
j = 0
while j < h:
canvas = cv2.circle(canvas, (int(x[i, j] * w + 0.5), int(y[i, j] * h + 0.5)), radius=max(stride//4, 1), color=color, thickness=stride//8)
j += stride
i += stride
return canvas
@classmethod
def visualize_rect(cls, canvas, _rect, color=(0, 0, 255)):
rect = (_rect + 0.5).astype(np.int32)
return cv2.rectangle(canvas, (rect[0], rect[1]), (rect[2], rect[3]), color)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/visualizer.py |
#!/usr/bin/env mdl
import os
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
import time
import json
import select
import traceback
import socket
from multiprocessing import Process, Pipe
import gevent
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
from flask import Flask, request, render_template, abort
def log_important_msg(msg, *, padding=3):
msg_len = len(msg)
width = msg_len + padding * 2 + 2
print('#' * width)
print('#' + ' ' * (width - 2) + '#')
print('#' + ' ' * padding + msg + ' ' * padding + '#')
print('#' + ' ' * (width - 2) + '#')
print('#' * width)
def hint_url(url, port):
log_important_msg(
'The server is running at: {}'.format(url))
def _set_server(conn, name='webcv2', port=7788):
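    # Child-process entry point: serves a Flask app with gevent, streams the
    # (delay, image list) packages received over `conn` to the browser through
    # the /stream websocket, and sends the pressed key code back over `conn`.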
package = None
package_alive = False
app = Flask(name)
app.root_path = BASE_DIR
@app.route('/')
def index():
return render_template('index.html', title=name)
@app.route('/stream')
def stream():
def poll_ws(ws, delay):
return len(select.select([ws.stream.handler.rfile], [], [], delay / 1000.)[0]) > 0
if request.environ.get('wsgi.websocket'):
ws = request.environ['wsgi.websocket']
if ws is None:
abort(404)
else:
should_send = True
while not ws.closed:
                    nonlocal package
                    nonlocal package_alive
if conn.poll():
package = conn.recv()
package_alive = True
should_send = True
if not should_send:
continue
should_send = False
if package is None:
ws.send(None)
else:
delay, info_lst = package
ws.send(json.dumps((time.time(), package_alive, delay, info_lst)))
if package_alive:
if delay <= 0 or poll_ws(ws, delay):
message = ws.receive()
if ws.closed or message is None:
break
try:
if isinstance(message, bytes):
message = message.decode('utf8')
message = int(message)
except:
traceback.print_exc()
message = -1
else:
message = -1
conn.send(message)
package_alive = False
return ""
http_server = WSGIServer(('', port), app, handler_class=WebSocketHandler)
hint_url('http://{}:{}'.format(socket.getfqdn(), port), port)
http_server.serve_forever()
def get_server(name='webcv2', port=7788):
conn_server, conn_factory = Pipe()
p_server = Process(
target=_set_server,
args=(conn_server,),
kwargs=dict(
name=name, port=port,
),
)
p_server.daemon = True
p_server.start()
return p_server, conn_factory
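# Minimal usage sketch (an assumption, mirroring how manager.Manager.waitKey
# drives the pipe returned by get_server):
#   server, conn = get_server(port=7788)
#   conn.send([0, [("window title", base64_png_string)]])  # (delay, images)
#   key = conn.recv()                                       # key pressed in the browser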
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/webcv2/server.py |
#!/usr/bin/env mdl
class WebCV2:
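    # Drop-in proxy for cv2: attribute lookups are answered by the web manager
    # first (so imshow/waitKey render in the browser) and fall back to the real
    # cv2 module otherwise; sys.modules[__name__] is replaced with an instance below.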
def __init__(self):
import cv2
self._cv2 = cv2
from .manager import global_manager as gm
self._gm = gm
def __getattr__(self, name):
if hasattr(self._gm, name):
return getattr(self._gm, name)
elif hasattr(self._cv2, name):
return getattr(self._cv2, name)
else:
raise AttributeError
import sys
sys.modules[__name__] = WebCV2()
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/webcv2/__init__.py |
#!/usr/bin/env mdl
import socket
import base64
import cv2
import numpy as np
from collections import OrderedDict
from .server import get_server
def jpeg_encode(img):
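    # NOTE: despite its name, this encodes the image as PNG (lossless).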
return cv2.imencode('.png', img)[1]
def get_free_port(rng, low=2000, high=10000):
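    # Probes random ports in [low, high) until one can be bound; the probe socket
    # is closed before returning, so the port is only likely (not guaranteed)
    # to still be free when the server binds it.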
in_use = True
while in_use:
port = rng.randint(high - low) + low
in_use = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("0.0.0.0", port))
except socket.error as e:
if e.errno == 98: # port already in use
in_use = True
s.close()
return port
class Manager:
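    # Buffers images passed to imshow() in an ordered queue; waitKey() lazily
    # starts the web server on a free port, ships the queue to the browser and
    # returns the key code sent back.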
def __init__(self, img_encode_method=jpeg_encode, rng=None):
self._queue = OrderedDict()
self._server = None
self.img_encode_method = img_encode_method
if rng is None:
rng = np.random.RandomState(self.get_default_seed())
self.rng = rng
def get_default_seed(self):
return 0
def imshow(self, title, img):
data = self.img_encode_method(img)
data = base64.b64encode(data)
data = data.decode('utf8')
self._queue[title] = data
def waitKey(self, delay=0):
if self._server is None:
self.port = get_free_port(self.rng)
self._server, self._conn = get_server(port=self.port)
self._conn.send([delay, list(self._queue.items())])
# self._queue = OrderedDict()
return self._conn.recv()
global_manager = Manager()
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/webcv2/manager.py |
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/icdar2015_eval/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
from collections import namedtuple
import numpy as np
from shapely.geometry import Polygon
class DetectionMTWI2018Evaluator(object):
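    # DetEval-style evaluator for the MTWI 2018 text-detection task: ground-truth
    # and detected polygons are matched one-to-one, one-to-many and many-to-one
    # under area-recall / area-precision constraints.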
def __init__(
self,
area_recall_constraint=0.7, area_precision_constraint=0.7,
ev_param_ind_center_diff_thr=1,
):
self.area_recall_constraint = area_recall_constraint
self.area_precision_constraint = area_precision_constraint
self.ev_param_ind_center_diff_thr = ev_param_ind_center_diff_thr
def evaluate_image(self, gt, pred):
def get_union(pD,pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD,pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD,pG):
return Polygon(pD).intersection(Polygon(pG)).area
def one_to_one_match(row, col):
cont = 0
for j in range(len(recallMat[0])):
if recallMat[row,j] >= self.area_recall_constraint and precisionMat[row,j] >= self.area_precision_constraint:
cont = cont +1
if (cont != 1):
return False
cont = 0
for i in range(len(recallMat)):
if recallMat[i,col] >= self.area_recall_constraint and precisionMat[i,col] >= self.area_precision_constraint:
cont = cont +1
if (cont != 1):
return False
if recallMat[row,col] >= self.area_recall_constraint and precisionMat[row,col] >= self.area_precision_constraint:
return True
return False
def one_to_many_match(gtNum):
many_sum = 0
detRects = []
for detNum in range(len(recallMat[0])):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
if precisionMat[gtNum,detNum] >= self.area_precision_constraint:
many_sum += recallMat[gtNum,detNum]
detRects.append(detNum)
if round(many_sum,4) >= self.area_recall_constraint:
return True,detRects
else:
return False,[]
def many_to_one_match(detNum):
many_sum = 0
gtRects = []
for gtNum in range(len(recallMat)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
if recallMat[gtNum,detNum] >= self.area_recall_constraint:
many_sum += precisionMat[gtNum,detNum]
gtRects.append(gtNum)
if round(many_sum,4) >= self.area_precision_constraint:
return True,gtRects
else:
return False,[]
def center_distance(r1, r2):
return ((np.mean(r1, axis=0) - np.mean(r2, axis=0)) ** 2).sum() ** 0.5
def diag(r):
r = np.array(r)
return ((r[:, 0].max() - r[:, 0].min()) ** 2 + (r[:, 1].max() - r[:, 1].min()) ** 2) ** 0.5
perSampleMetrics = {}
recall = 0
precision = 0
hmean = 0
recallAccum = 0.
precisionAccum = 0.
gtRects = []
detRects = []
gtPolPoints = []
detPolPoints = []
gtDontCareRectsNum = []#Array of Ground Truth Rectangles' keys marked as don't Care
detDontCareRectsNum = []#Array of Detected Rectangles' matched with a don't Care GT
pairs = []
evaluationLog = ""
recallMat = np.empty([1,1])
precisionMat = np.empty([1,1])
for n in range(len(gt)):
points = gt[n]['points']
# transcription = gt[n]['text']
dontCare = gt[n]['ignore']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
gtRects.append(points)
gtPolPoints.append(points)
if dontCare:
gtDontCareRectsNum.append( len(gtRects)-1 )
evaluationLog += "GT rectangles: " + str(len(gtRects)) + (" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum)>0 else "\n")
for n in range(len(pred)):
points = pred[n]['points']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
detRect = points
detRects.append(detRect)
detPolPoints.append(points)
if len(gtDontCareRectsNum)>0 :
for dontCareRectNum in gtDontCareRectsNum:
dontCareRect = gtRects[dontCareRectNum]
intersected_area = get_intersection(dontCareRect,detRect)
rdDimensions = Polygon(detRect).area
if (rdDimensions==0) :
precision = 0
else:
precision= intersected_area / rdDimensions
if (precision > 0.5):
detDontCareRectsNum.append( len(detRects)-1 )
break
evaluationLog += "DET rectangles: " + str(len(detRects)) + (" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum)>0 else "\n")
if len(gtRects)==0:
recall = 1
precision = 0 if len(detRects)>0 else 1
if len(detRects)>0:
#Calculate recall and precision matrixs
outputShape=[len(gtRects),len(detRects)]
recallMat = np.empty(outputShape)
precisionMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtRects),np.int8)
detRectMat = np.zeros(len(detRects),np.int8)
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
rG = gtRects[gtNum]
rD = detRects[detNum]
intersected_area = get_intersection(rG,rD)
rgDimensions = Polygon(rG).area
rdDimensions = Polygon(rD).area
recallMat[gtNum,detNum] = 0 if rgDimensions==0 else intersected_area / rgDimensions
precisionMat[gtNum,detNum] = 0 if rdDimensions==0 else intersected_area / rdDimensions
# Find one-to-one matches
evaluationLog += "Find one-to-one matches\n"
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum :
match = one_to_one_match(gtNum, detNum)
if match is True :
                            # in DetEval we have to run an extra validation (center distance) before marking the pair as one-to-one
                            rG = gtRects[gtNum]
                            rD = detRects[detNum]
                            normDist = center_distance(rG, rD)
                            normDist /= diag(rG) + diag(rD)
                            normDist *= 2.0
if normDist < self.ev_param_ind_center_diff_thr:
gtRectMat[gtNum] = 1
detRectMat[detNum] = 1
recallAccum += 1.0
precisionAccum += 1.0
pairs.append({'gt':gtNum,'det':detNum,'type':'OO'})
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
else:
evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " normDist: " + str(normDist) + " \n"
# Find one-to-many matches
evaluationLog += "Find one-to-many matches\n"
for gtNum in range(len(gtRects)):
if gtNum not in gtDontCareRectsNum:
match,matchesDet = one_to_many_match(gtNum)
if match is True :
gtRectMat[gtNum] = 1
recallAccum += 1.0
precisionAccum += len(matchesDet) / (1 + math.log(len(matchesDet)))
pairs.append({'gt':gtNum,'det':matchesDet,'type': 'OO' if len(matchesDet)==1 else 'OM'})
for detNum in matchesDet :
detRectMat[detNum] = 1
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"
# Find many-to-one matches
evaluationLog += "Find many-to-one matches\n"
for detNum in range(len(detRects)):
if detNum not in detDontCareRectsNum:
match,matchesGt = many_to_one_match(detNum)
if match is True :
detRectMat[detNum] = 1
recallAccum += len(matchesGt) / (1 + math.log(len(matchesGt)))
precisionAccum += 1.0
pairs.append({'gt':matchesGt,'det':detNum,'type': 'OO' if len(matchesGt)==1 else 'MO'})
for gtNum in matchesGt :
gtRectMat[gtNum] = 1
evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"
numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
if numGtCare == 0:
recall = float(1)
precision = float(0) if len(detRects)>0 else float(1)
else:
recall = float(recallAccum) / numGtCare
precision = float(0) if (len(detRects) - len(detDontCareRectsNum))==0 else float(precisionAccum) / (len(detRects) - len(detDontCareRectsNum))
hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)
numGtCare = len(gtRects) - len(gtDontCareRectsNum)
numDetCare = len(detRects) - len(detDontCareRectsNum)
perSampleMetrics = {
'precision':precision,
'recall':recall,
'hmean':hmean,
'pairs':pairs,
'recallMat':[] if len(detRects)>100 else recallMat.tolist(),
'precisionMat':[] if len(detRects)>100 else precisionMat.tolist(),
'gtPolPoints':gtPolPoints,
'detPolPoints':detPolPoints,
'gtCare': numGtCare,
'detCare': numDetCare,
'gtDontCare':gtDontCareRectsNum,
'detDontCare':detDontCareRectsNum,
'recallAccum':recallAccum,
'precisionAccum':precisionAccum,
'evaluationLog': evaluationLog
}
return perSampleMetrics
def combine_results(self, results):
numGt = 0
numDet = 0
methodRecallSum = 0
methodPrecisionSum = 0
for result in results:
numGt += result['gtCare']
numDet += result['detCare']
methodRecallSum += result['recallAccum']
methodPrecisionSum += result['precisionAccum']
methodRecall = 0 if numGt==0 else methodRecallSum/numGt
methodPrecision = 0 if numDet==0 else methodPrecisionSum/numDet
methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)
methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean }
return methodMetrics
if __name__=='__main__':
    evaluator = DetectionMTWI2018Evaluator()
gts = [[{
'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
'text': 1234,
'ignore': False,
}, {
'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
'text': 5678,
'ignore': True,
}]]
preds = [[{
'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
'text': 123,
'ignore': False,
}]]
results = []
for gt, pred in zip(gts, preds):
results.append(evaluator.evaluate_image(gt, pred))
metrics = evaluator.combine_results(results)
print(metrics)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/icdar2015_eval/detection/mtwi2018.py |
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/icdar2015_eval/detection/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
from collections import namedtuple
import numpy as np
from shapely.geometry import Polygon
class DetectionICDAR2013Evaluator(object):
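    # ICDAR 2013 (DetEval) evaluator: same one-to-one / one-to-many / many-to-one
    # matching scheme as above, with configurable weights per match type (mtype_*).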
def __init__(
self,
area_recall_constraint=0.8, area_precision_constraint=0.4,
ev_param_ind_center_diff_thr=1,
mtype_oo_o=1.0, mtype_om_o=0.8, mtype_om_m=1.0
):
self.area_recall_constraint = area_recall_constraint
self.area_precision_constraint = area_precision_constraint
self.ev_param_ind_center_diff_thr = ev_param_ind_center_diff_thr
self.mtype_oo_o = mtype_oo_o
self.mtype_om_o = mtype_om_o
self.mtype_om_m = mtype_om_m
def evaluate_image(self, gt, pred):
def get_union(pD,pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD,pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD,pG):
return Polygon(pD).intersection(Polygon(pG)).area
def one_to_one_match(row, col):
cont = 0
for j in range(len(recallMat[0])):
if recallMat[row,j] >= self.area_recall_constraint and precisionMat[row,j] >= self.area_precision_constraint:
cont = cont +1
if (cont != 1):
return False
cont = 0
for i in range(len(recallMat)):
if recallMat[i,col] >= self.area_recall_constraint and precisionMat[i,col] >= self.area_precision_constraint:
cont = cont +1
if (cont != 1):
return False
if recallMat[row,col] >= self.area_recall_constraint and precisionMat[row,col] >= self.area_precision_constraint:
return True
return False
def one_to_many_match(gtNum):
many_sum = 0
detRects = []
for detNum in range(len(recallMat[0])):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
if precisionMat[gtNum,detNum] >= self.area_precision_constraint:
many_sum += recallMat[gtNum,detNum]
detRects.append(detNum)
if round(many_sum,4) >= self.area_recall_constraint:
return True,detRects
else:
return False,[]
def many_to_one_match(detNum):
many_sum = 0
gtRects = []
for gtNum in range(len(recallMat)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
if recallMat[gtNum,detNum] >= self.area_recall_constraint:
many_sum += precisionMat[gtNum,detNum]
gtRects.append(gtNum)
if round(many_sum,4) >= self.area_precision_constraint:
return True,gtRects
else:
return False,[]
def center_distance(r1, r2):
return ((np.mean(r1, axis=0) - np.mean(r2, axis=0)) ** 2).sum() ** 0.5
def diag(r):
r = np.array(r)
return ((r[:, 0].max() - r[:, 0].min()) ** 2 + (r[:, 1].max() - r[:, 1].min()) ** 2) ** 0.5
perSampleMetrics = {}
recall = 0
precision = 0
hmean = 0
recallAccum = 0.
precisionAccum = 0.
gtRects = []
detRects = []
gtPolPoints = []
detPolPoints = []
gtDontCareRectsNum = []#Array of Ground Truth Rectangles' keys marked as don't Care
detDontCareRectsNum = []#Array of Detected Rectangles' matched with a don't Care GT
pairs = []
evaluationLog = ""
recallMat = np.empty([1,1])
precisionMat = np.empty([1,1])
for n in range(len(gt)):
points = gt[n]['points']
# transcription = gt[n]['text']
dontCare = gt[n]['ignore']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
gtRects.append(points)
gtPolPoints.append(points)
if dontCare:
gtDontCareRectsNum.append( len(gtRects)-1 )
evaluationLog += "GT rectangles: " + str(len(gtRects)) + (" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum)>0 else "\n")
for n in range(len(pred)):
points = pred[n]['points']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
detRect = points
detRects.append(detRect)
detPolPoints.append(points)
if len(gtDontCareRectsNum)>0 :
for dontCareRectNum in gtDontCareRectsNum:
dontCareRect = gtRects[dontCareRectNum]
intersected_area = get_intersection(dontCareRect,detRect)
rdDimensions = Polygon(detRect).area
if (rdDimensions==0) :
precision = 0
else:
precision= intersected_area / rdDimensions
if (precision > self.area_precision_constraint):
detDontCareRectsNum.append( len(detRects)-1 )
break
evaluationLog += "DET rectangles: " + str(len(detRects)) + (" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum)>0 else "\n")
if len(gtRects)==0:
recall = 1
precision = 0 if len(detRects)>0 else 1
if len(detRects)>0:
#Calculate recall and precision matrixs
outputShape=[len(gtRects),len(detRects)]
recallMat = np.empty(outputShape)
precisionMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtRects),np.int8)
detRectMat = np.zeros(len(detRects),np.int8)
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
rG = gtRects[gtNum]
rD = detRects[detNum]
intersected_area = get_intersection(rG,rD)
rgDimensions = Polygon(rG).area
rdDimensions = Polygon(rD).area
recallMat[gtNum,detNum] = 0 if rgDimensions==0 else intersected_area / rgDimensions
precisionMat[gtNum,detNum] = 0 if rdDimensions==0 else intersected_area / rdDimensions
# Find one-to-one matches
evaluationLog += "Find one-to-one matches\n"
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum :
match = one_to_one_match(gtNum, detNum)
if match is True :
                            # in DetEval we have to run an extra validation (center distance) before marking the pair as one-to-one
                            rG = gtRects[gtNum]
                            rD = detRects[detNum]
                            normDist = center_distance(rG, rD)
                            normDist /= diag(rG) + diag(rD)
                            normDist *= 2.0
if normDist < self.ev_param_ind_center_diff_thr:
gtRectMat[gtNum] = 1
detRectMat[detNum] = 1
recallAccum += self.mtype_oo_o
precisionAccum += self.mtype_oo_o
pairs.append({'gt':gtNum,'det':detNum,'type':'OO'})
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
else:
evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " normDist: " + str(normDist) + " \n"
# Find one-to-many matches
evaluationLog += "Find one-to-many matches\n"
for gtNum in range(len(gtRects)):
if gtNum not in gtDontCareRectsNum:
match,matchesDet = one_to_many_match(gtNum)
if match is True :
evaluationLog += "num_overlaps_gt=" + str(num_overlaps_gt(gtNum))
gtRectMat[gtNum] = 1
recallAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o)
precisionAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o*len(matchesDet))
pairs.append({'gt':gtNum,'det':matchesDet,'type': 'OO' if len(matchesDet)==1 else 'OM'})
for detNum in matchesDet :
detRectMat[detNum] = 1
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"
# Find many-to-one matches
evaluationLog += "Find many-to-one matches\n"
for detNum in range(len(detRects)):
if detNum not in detDontCareRectsNum:
match,matchesGt = many_to_one_match(detNum)
if match is True :
detRectMat[detNum] = 1
recallAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m*len(matchesGt))
precisionAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m)
pairs.append({'gt':matchesGt,'det':detNum,'type': 'OO' if len(matchesGt)==1 else 'MO'})
for gtNum in matchesGt :
gtRectMat[gtNum] = 1
evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"
numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
if numGtCare == 0:
recall = float(1)
precision = float(0) if len(detRects)>0 else float(1)
else:
recall = float(recallAccum) / numGtCare
precision = float(0) if (len(detRects) - len(detDontCareRectsNum))==0 else float(precisionAccum) / (len(detRects) - len(detDontCareRectsNum))
hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)
numGtCare = len(gtRects) - len(gtDontCareRectsNum)
numDetCare = len(detRects) - len(detDontCareRectsNum)
perSampleMetrics = {
'precision':precision,
'recall':recall,
'hmean':hmean,
'pairs':pairs,
'recallMat':[] if len(detRects)>100 else recallMat.tolist(),
'precisionMat':[] if len(detRects)>100 else precisionMat.tolist(),
'gtPolPoints':gtPolPoints,
'detPolPoints':detPolPoints,
'gtCare': numGtCare,
'detCare': numDetCare,
'gtDontCare':gtDontCareRectsNum,
'detDontCare':detDontCareRectsNum,
'recallAccum':recallAccum,
'precisionAccum':precisionAccum,
'evaluationLog': evaluationLog
}
return perSampleMetrics
def combine_results(self, results):
numGt = 0
numDet = 0
methodRecallSum = 0
methodPrecisionSum = 0
for result in results:
numGt += result['gtCare']
numDet += result['detCare']
methodRecallSum += result['recallAccum']
methodPrecisionSum += result['precisionAccum']
methodRecall = 0 if numGt==0 else methodRecallSum/numGt
methodPrecision = 0 if numDet==0 else methodPrecisionSum/numDet
methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)
methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean }
return methodMetrics
if __name__=='__main__':
evaluator = DetectionICDAR2013Evaluator()
gts = [[{
'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
'text': 1234,
'ignore': False,
}, {
'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
'text': 5678,
'ignore': True,
}]]
preds = [[{
'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
'text': 123,
'ignore': False,
}]]
results = []
for gt, pred in zip(gts, preds):
results.append(evaluator.evaluate_image(gt, pred))
metrics = evaluator.combine_results(results)
print(metrics)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/icdar2015_eval/detection/icdar2013.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import numpy as np
from shapely.geometry import Polygon
class DetectionIoUEvaluator(object):
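    # ICDAR 2015 style evaluator: a GT/detection pair counts as a match when the
    # IoU of their polygons exceeds iou_constraint; detections whose overlap with
    # a don't-care region exceeds area_precision_constraint are ignored.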
def __init__(self, iou_constraint=0.5, area_precision_constraint=0.5):
self.iou_constraint = iou_constraint
self.area_precision_constraint = area_precision_constraint
def evaluate_image(self, gt, pred):
def get_union(pD, pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD, pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD, pG):
return Polygon(pD).intersection(Polygon(pG)).area
def compute_ap(confList, matchList, numGtCare):
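            # Average precision over detections sorted by descending confidence.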
correct = 0
AP = 0
if len(confList) > 0:
confList = np.array(confList)
matchList = np.array(matchList)
sorted_ind = np.argsort(-confList)
confList = confList[sorted_ind]
matchList = matchList[sorted_ind]
for n in range(len(confList)):
match = matchList[n]
if match:
correct += 1
AP += float(correct)/(n + 1)
if numGtCare > 0:
AP /= numGtCare
return AP
perSampleMetrics = {}
matchedSum = 0
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
numGlobalCareGt = 0
numGlobalCareDet = 0
arrGlobalConfidences = []
arrGlobalMatches = []
recall = 0
precision = 0
hmean = 0
detMatched = 0
iouMat = np.empty([1, 1])
gtPols = []
detPols = []
gtPolPoints = []
detPolPoints = []
# Array of Ground Truth Polygons' keys marked as don't Care
gtDontCarePolsNum = []
# Array of Detected Polygons' matched with a don't Care GT
detDontCarePolsNum = []
pairs = []
detMatchedNums = []
arrSampleConfidences = []
arrSampleMatch = []
evaluationLog = ""
for n in range(len(gt)):
points = gt[n]['points']
# transcription = gt[n]['text']
dontCare = gt[n]['ignore']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
gtPol = points
gtPols.append(gtPol)
gtPolPoints.append(points)
if dontCare:
gtDontCarePolsNum.append(len(gtPols)-1)
evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(
gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")
for n in range(len(pred)):
points = pred[n]['points']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
detPol = points
detPols.append(detPol)
detPolPoints.append(points)
if len(gtDontCarePolsNum) > 0:
for dontCarePol in gtDontCarePolsNum:
dontCarePol = gtPols[dontCarePol]
intersected_area = get_intersection(dontCarePol, detPol)
pdDimensions = Polygon(detPol).area
precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
if (precision > self.area_precision_constraint):
detDontCarePolsNum.append(len(detPols)-1)
break
evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(
detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")
if len(gtPols) > 0 and len(detPols) > 0:
# Calculate IoU and precision matrixs
outputShape = [len(gtPols), len(detPols)]
iouMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtPols), np.int8)
detRectMat = np.zeros(len(detPols), np.int8)
for gtNum in range(len(gtPols)):
for detNum in range(len(detPols)):
pG = gtPols[gtNum]
pD = detPols[detNum]
iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)
for gtNum in range(len(gtPols)):
for detNum in range(len(detPols)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
if iouMat[gtNum, detNum] > self.iou_constraint:
gtRectMat[gtNum] = 1
detRectMat[detNum] = 1
detMatched += 1
pairs.append({'gt': gtNum, 'det': detNum})
detMatchedNums.append(detNum)
evaluationLog += "Match GT #" + \
str(gtNum) + " with Det #" + str(detNum) + "\n"
numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
numDetCare = (len(detPols) - len(detDontCarePolsNum))
if numGtCare == 0:
recall = float(1)
precision = float(0) if numDetCare > 0 else float(1)
else:
recall = float(detMatched) / numGtCare
precision = 0 if numDetCare == 0 else float(
detMatched) / numDetCare
hmean = 0 if (precision + recall) == 0 else 2.0 * \
precision * recall / (precision + recall)
matchedSum += detMatched
numGlobalCareGt += numGtCare
numGlobalCareDet += numDetCare
perSampleMetrics = {
'precision': precision,
'recall': recall,
'hmean': hmean,
'pairs': pairs,
'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
'gtPolPoints': gtPolPoints,
'detPolPoints': detPolPoints,
'gtCare': numGtCare,
'detCare': numDetCare,
'gtDontCare': gtDontCarePolsNum,
'detDontCare': detDontCarePolsNum,
'detMatched': detMatched,
'evaluationLog': evaluationLog
}
return perSampleMetrics
def combine_results(self, results):
numGlobalCareGt = 0
numGlobalCareDet = 0
matchedSum = 0
for result in results:
numGlobalCareGt += result['gtCare']
numGlobalCareDet += result['detCare']
matchedSum += result['detMatched']
methodRecall = 0 if numGlobalCareGt == 0 else float(
matchedSum)/numGlobalCareGt
methodPrecision = 0 if numGlobalCareDet == 0 else float(
matchedSum)/numGlobalCareDet
methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * \
methodRecall * methodPrecision / (methodRecall + methodPrecision)
methodMetrics = {'precision': methodPrecision,
'recall': methodRecall, 'hmean': methodHmean}
return methodMetrics
if __name__ == '__main__':
evaluator = DetectionIoUEvaluator()
gts = [[{
'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
'text': 1234,
'ignore': False,
}, {
'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
'text': 5678,
'ignore': False,
}]]
preds = [[{
'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
'text': 123,
'ignore': False,
}]]
results = []
for gt, pred in zip(gts, preds):
results.append(evaluator.evaluate_image(gt, pred))
metrics = evaluator.combine_results(results)
print(metrics)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/icdar2015_eval/detection/iou.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
from collections import namedtuple
import numpy as np
from shapely.geometry import Polygon
class DetectionDetEvalEvaluator(object):
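    # Full DetEval protocol: like the ICDAR 2013 evaluator above, but a match is
    # additionally validated with the single/multiple-overlap checks
    # (is_single_overlap, num_overlaps_gt/num_overlaps_det) before being scored.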
def __init__(
self,
area_recall_constraint=0.8, area_precision_constraint=0.4,
ev_param_ind_center_diff_thr=1,
mtype_oo_o=1.0, mtype_om_o=0.8, mtype_om_m=1.0
):
self.area_recall_constraint = area_recall_constraint
self.area_precision_constraint = area_precision_constraint
self.ev_param_ind_center_diff_thr = ev_param_ind_center_diff_thr
self.mtype_oo_o = mtype_oo_o
self.mtype_om_o = mtype_om_o
self.mtype_om_m = mtype_om_m
def evaluate_image(self, gt, pred):
def get_union(pD,pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD,pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD,pG):
return Polygon(pD).intersection(Polygon(pG)).area
def one_to_one_match(row, col):
cont = 0
for j in range(len(recallMat[0])):
if recallMat[row,j] >= self.area_recall_constraint and precisionMat[row,j] >= self.area_precision_constraint:
cont = cont +1
if (cont != 1):
return False
cont = 0
for i in range(len(recallMat)):
if recallMat[i,col] >= self.area_recall_constraint and precisionMat[i,col] >= self.area_precision_constraint:
cont = cont +1
if (cont != 1):
return False
if recallMat[row,col] >= self.area_recall_constraint and precisionMat[row,col] >= self.area_precision_constraint:
return True
return False
def num_overlaps_gt(gtNum):
cont = 0
for detNum in range(len(detRects)):
if detNum not in detDontCareRectsNum:
if recallMat[gtNum,detNum] > 0 :
cont = cont +1
return cont
def num_overlaps_det(detNum):
cont = 0
for gtNum in range(len(recallMat)):
if gtNum not in gtDontCareRectsNum:
if recallMat[gtNum,detNum] > 0 :
cont = cont +1
return cont
def is_single_overlap(row, col):
if num_overlaps_gt(row)==1 and num_overlaps_det(col)==1:
return True
else:
return False
def one_to_many_match(gtNum):
many_sum = 0
detRects = []
for detNum in range(len(recallMat[0])):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
if precisionMat[gtNum,detNum] >= self.area_precision_constraint:
many_sum += recallMat[gtNum,detNum]
detRects.append(detNum)
if round(many_sum,4) >= self.area_recall_constraint:
return True,detRects
else:
return False,[]
def many_to_one_match(detNum):
many_sum = 0
gtRects = []
for gtNum in range(len(recallMat)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
if recallMat[gtNum,detNum] >= self.area_recall_constraint:
many_sum += precisionMat[gtNum,detNum]
gtRects.append(gtNum)
if round(many_sum,4) >= self.area_precision_constraint:
return True,gtRects
else:
return False,[]
def center_distance(r1, r2):
return ((np.mean(r1, axis=0) - np.mean(r2, axis=0)) ** 2).sum() ** 0.5
def diag(r):
r = np.array(r)
return ((r[:, 0].max() - r[:, 0].min()) ** 2 + (r[:, 1].max() - r[:, 1].min()) ** 2) ** 0.5
perSampleMetrics = {}
recall = 0
precision = 0
hmean = 0
recallAccum = 0.
precisionAccum = 0.
gtRects = []
detRects = []
gtPolPoints = []
detPolPoints = []
gtDontCareRectsNum = []#Array of Ground Truth Rectangles' keys marked as don't Care
detDontCareRectsNum = []#Array of Detected Rectangles' matched with a don't Care GT
pairs = []
evaluationLog = ""
recallMat = np.empty([1,1])
precisionMat = np.empty([1,1])
for n in range(len(gt)):
points = gt[n]['points']
# transcription = gt[n]['text']
dontCare = gt[n]['ignore']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
gtRects.append(points)
gtPolPoints.append(points)
if dontCare:
gtDontCareRectsNum.append( len(gtRects)-1 )
evaluationLog += "GT rectangles: " + str(len(gtRects)) + (" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum)>0 else "\n")
for n in range(len(pred)):
points = pred[n]['points']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
detRect = points
detRects.append(detRect)
detPolPoints.append(points)
if len(gtDontCareRectsNum)>0 :
for dontCareRectNum in gtDontCareRectsNum:
dontCareRect = gtRects[dontCareRectNum]
intersected_area = get_intersection(dontCareRect,detRect)
rdDimensions = Polygon(detRect).area
if (rdDimensions==0) :
precision = 0
else:
precision= intersected_area / rdDimensions
if (precision > self.area_precision_constraint):
detDontCareRectsNum.append( len(detRects)-1 )
break
evaluationLog += "DET rectangles: " + str(len(detRects)) + (" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum)>0 else "\n")
if len(gtRects)==0:
recall = 1
precision = 0 if len(detRects)>0 else 1
if len(detRects)>0:
#Calculate recall and precision matrixs
outputShape=[len(gtRects),len(detRects)]
recallMat = np.empty(outputShape)
precisionMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtRects),np.int8)
detRectMat = np.zeros(len(detRects),np.int8)
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
rG = gtRects[gtNum]
rD = detRects[detNum]
intersected_area = get_intersection(rG,rD)
rgDimensions = Polygon(rG).area
rdDimensions = Polygon(rD).area
recallMat[gtNum,detNum] = 0 if rgDimensions==0 else intersected_area / rgDimensions
precisionMat[gtNum,detNum] = 0 if rdDimensions==0 else intersected_area / rdDimensions
# Find one-to-one matches
evaluationLog += "Find one-to-one matches\n"
for gtNum in range(len(gtRects)):
for detNum in range(len(detRects)):
if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum :
match = one_to_one_match(gtNum, detNum)
if match is True :
                            # in DetEval we have to run extra validations (single overlap, center distance) before marking the pair as one-to-one
                            if is_single_overlap(gtNum, detNum) is True :
                                rG = gtRects[gtNum]
                                rD = detRects[detNum]
                                normDist = center_distance(rG, rD)
                                normDist /= diag(rG) + diag(rD)
                                normDist *= 2.0
if normDist < self.ev_param_ind_center_diff_thr:
gtRectMat[gtNum] = 1
detRectMat[detNum] = 1
recallAccum += self.mtype_oo_o
precisionAccum += self.mtype_oo_o
pairs.append({'gt':gtNum,'det':detNum,'type':'OO'})
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
else:
evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " normDist: " + str(normDist) + " \n"
else:
evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " not single overlap\n"
# Find one-to-many matches
evaluationLog += "Find one-to-many matches\n"
for gtNum in range(len(gtRects)):
if gtNum not in gtDontCareRectsNum:
match,matchesDet = one_to_many_match(gtNum)
if match is True :
evaluationLog += "num_overlaps_gt=" + str(num_overlaps_gt(gtNum))
                        # in DetEval the one-to-many match is only accepted if the GT actually overlaps several detections
if num_overlaps_gt(gtNum)>=2 :
gtRectMat[gtNum] = 1
recallAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o)
precisionAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o*len(matchesDet))
pairs.append({'gt':gtNum,'det':matchesDet,'type': 'OO' if len(matchesDet)==1 else 'OM'})
for detNum in matchesDet :
detRectMat[detNum] = 1
evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"
else:
evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(matchesDet) + " not single overlap\n"
# Find many-to-one matches
evaluationLog += "Find many-to-one matches\n"
for detNum in range(len(detRects)):
if detNum not in detDontCareRectsNum:
match,matchesGt = many_to_one_match(detNum)
if match is True :
                        # in DetEval the many-to-one match is only accepted if the detection actually overlaps several GT boxes
if num_overlaps_det(detNum)>=2 :
detRectMat[detNum] = 1
recallAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m*len(matchesGt))
precisionAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m)
pairs.append({'gt':matchesGt,'det':detNum,'type': 'OO' if len(matchesGt)==1 else 'MO'})
for gtNum in matchesGt :
gtRectMat[gtNum] = 1
evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"
else:
evaluationLog += "Match Discarded GT #" + str(matchesGt) + " with Det #" + str(detNum) + " not single overlap\n"
numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
if numGtCare == 0:
recall = float(1)
precision = float(0) if len(detRects)>0 else float(1)
else:
recall = float(recallAccum) / numGtCare
precision = float(0) if (len(detRects) - len(detDontCareRectsNum))==0 else float(precisionAccum) / (len(detRects) - len(detDontCareRectsNum))
hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)
numGtCare = len(gtRects) - len(gtDontCareRectsNum)
numDetCare = len(detRects) - len(detDontCareRectsNum)
perSampleMetrics = {
'precision':precision,
'recall':recall,
'hmean':hmean,
'pairs':pairs,
'recallMat':[] if len(detRects)>100 else recallMat.tolist(),
'precisionMat':[] if len(detRects)>100 else precisionMat.tolist(),
'gtPolPoints':gtPolPoints,
'detPolPoints':detPolPoints,
'gtCare': numGtCare,
'detCare': numDetCare,
'gtDontCare':gtDontCareRectsNum,
'detDontCare':detDontCareRectsNum,
'recallAccum':recallAccum,
'precisionAccum':precisionAccum,
'evaluationLog': evaluationLog
}
return perSampleMetrics
def combine_results(self, results):
numGt = 0
numDet = 0
methodRecallSum = 0
methodPrecisionSum = 0
for result in results:
numGt += result['gtCare']
numDet += result['detCare']
methodRecallSum += result['recallAccum']
methodPrecisionSum += result['precisionAccum']
methodRecall = 0 if numGt==0 else methodRecallSum/numGt
methodPrecision = 0 if numDet==0 else methodPrecisionSum/numDet
methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)
methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean }
return methodMetrics
if __name__=='__main__':
evaluator = DetectionDetEvalEvaluator()
gts = [[{
'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
'text': 1234,
'ignore': False,
}, {
'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
'text': 5678,
'ignore': True,
}]]
preds = [[{
'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
'text': 123,
'ignore': False,
}]]
results = []
for gt, pred in zip(gts, preds):
results.append(evaluator.evaluate_image(gt, pred))
metrics = evaluator.combine_results(results)
print(metrics)
| EXA-1-master | exa/models/unilm-master/dit/text_detection/ditod/concern/icdar2015_eval/detection/deteval.py |
import os
from PIL import Image
import xml.etree.ElementTree as ET
import numpy as np
import json
from PIL import Image
from shutil import copyfile
def convert(ROOT, TRACK, SPLIT):
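    # Converts one cTDaR track/split into COCO format: page images are read for
    # their size, the matching XML ground truth is parsed for table "Coords",
    # and everything is written to {ROOT}/{TRACK}/{SPLIT}.json with a single
    # "table" category.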
coco_data = {
"images": [],
"annotations": [],
"categories": [{"id": 1, "name": "table"}, ],
}
DATA_DIR = f"{ROOT}/{TRACK}/{SPLIT}"
prefix = "cTDaR_t0" if TRACK == "trackA_archival" else "cTDaR_t1"
print(TRACK, SPLIT, prefix)
table_count = 0
for file in sorted(os.listdir(DATA_DIR)):
if file.startswith(prefix) and file.endswith(".jpg"):
img = Image.open(os.path.join(DATA_DIR, file))
coco_data["images"].append(
{
"file_name": file,
"height": img.height,
"width": img.width,
"id": int(file[7:-4]),
}
)
elif file.startswith(prefix) and file.endswith(".xml"):
# print(file)
tree = ET.parse(os.path.join(DATA_DIR, file))
root = tree.getroot()
assert len(root.findall("./table/Coords")) > 0
for table_id in range(len(root.findall("./table/Coords"))):
four_points = root.findall("./table/Coords")[table_id].attrib["points"]
four_points = list(map(lambda x: x.split(","), four_points.split()))
four_points = [[int(j) for j in i] for i in four_points]
segmentation = [j for i in four_points for j in i]
bbox = [
four_points[0][0],
four_points[0][1],
four_points[2][0] - four_points[0][0],
four_points[2][1] - four_points[0][1],
]
coco_data["annotations"].append(
{
"segmentation": [segmentation],
"area": bbox[2] * bbox[3],
"iscrowd": 0,
"image_id": int(file[7:-4]),
"bbox": bbox,
"category_id": 1,
"id": table_count,
}
)
table_count += 1
with open(f"{ROOT}/{TRACK}/{SPLIT}.json", "w") as f:
json.dump(coco_data, f)
def clean_img(DATA_DIR):
for file in sorted(os.listdir(DATA_DIR)):
if file.endswith(".JPG"):
os.rename(os.path.join(DATA_DIR, file), os.path.join(DATA_DIR, file.replace(".JPG", ".jpg")))
elif file.endswith(".TIFF"):
img = Image.open(os.path.join(DATA_DIR, file))
img.save(os.path.join(DATA_DIR, file.replace(".TIFF", ".jpg")))
os.remove(os.path.join(DATA_DIR, file))
elif file.endswith(".png"):
img = Image.open(os.path.join(DATA_DIR, file))
img.save(os.path.join(DATA_DIR, file.replace(".png", ".jpg")))
os.remove(os.path.join(DATA_DIR, file))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', required=True)
parser.add_argument('--target_dir', required=True)
args = parser.parse_args()
test_data_dir = os.path.join(args.root_dir, 'test', 'TRACKA')
test_gt_dir = os.path.join(args.root_dir, 'test_ground_truth', 'TRACKA')
training_data_dir = os.path.join(args.root_dir, 'training', 'TRACKA', 'ground_truth')
raw_datas = {"train": [training_data_dir], "test": [test_data_dir, test_gt_dir]}
TRACKS = ["trackA_modern", "trackA_archival"]
SPLITS = ["train", "test"]
for track in TRACKS:
prefix = "cTDaR_t0" if track == "trackA_archival" else "cTDaR_t1"
for split in SPLITS:
os.makedirs(os.path.join(args.target_dir, track, split))
for source_dir in raw_datas[split]:
for fn in os.listdir(source_dir):
if fn.startswith(prefix):
ffn = os.path.join(source_dir, fn)
copyfile(ffn, os.path.join(args.target_dir, track, split, fn))
clean_img(os.path.join(args.target_dir, track, split))
convert(args.target_dir, track, split)
| EXA-1-master | exa/models/unilm-master/dit/object_detection/convert_to_coco_format.py |
import argparse
import cv2
from ditod import add_vit_config
import torch
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
def main():
parser = argparse.ArgumentParser(description="Detectron2 inference script")
parser.add_argument(
"--image_path",
help="Path to input image",
type=str,
required=True,
)
parser.add_argument(
"--output_file_name",
help="Name of the output visualization file.",
type=str,
)
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# Step 1: instantiate config
cfg = get_cfg()
add_vit_config(cfg)
cfg.merge_from_file(args.config_file)
# Step 2: add model weights URL to config
cfg.merge_from_list(args.opts)
# Step 3: set device
device = "cuda" if torch.cuda.is_available() else "cpu"
cfg.MODEL.DEVICE = device
# Step 4: define model
predictor = DefaultPredictor(cfg)
# Step 5: run inference
img = cv2.imread(args.image_path)
md = MetadataCatalog.get(cfg.DATASETS.TEST[0])
if cfg.DATASETS.TEST[0]=='icdar2019_test':
md.set(thing_classes=["table"])
else:
md.set(thing_classes=["text","title","list","table","figure"])
output = predictor(img)["instances"]
v = Visualizer(img[:, :, ::-1],
md,
scale=1.0,
instance_mode=ColorMode.SEGMENTATION)
result = v.draw_instance_predictions(output.to("cpu"))
result_image = result.get_image()[:, :, ::-1]
# step 6: save
cv2.imwrite(args.output_file_name, result_image)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/dit/object_detection/inference.py |
#!/usr/bin/env python
# --------------------------------------------------------------------------------
# MPViT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# --------------------------------------------------------------------------------
"""
Detection Training Script for MPViT.
"""
import os
import itertools
import torch
from typing import Any, Dict, List, Set
from detectron2.data import build_detection_train_loader
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from detectron2.solver.build import maybe_add_gradient_clipping
from ditod import add_vit_config
from ditod import DetrDatasetMapper
from detectron2.data.datasets import register_coco_instances
import logging
from detectron2.utils.logger import setup_logger
from detectron2.utils import comm
from detectron2.engine.defaults import create_ddp_model
import weakref
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer
from ditod import MyDetectionCheckpointer, ICDAREvaluator
from ditod import MyTrainer
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
# add_coat_config(cfg)
add_vit_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
"""
register publaynet first
"""
register_coco_instances(
"publaynet_train",
{},
"./publaynet_data/train.json",
"./publaynet_data/train"
)
register_coco_instances(
"publaynet_val",
{},
"./publaynet_data/val.json",
"./publaynet_data/val"
)
register_coco_instances(
"icdar2019_train",
{},
"data/train.json",
"data/train"
)
register_coco_instances(
"icdar2019_test",
{},
"data/test.json",
"data/test"
)
cfg = setup(args)
if args.eval_only:
model = MyTrainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = MyTrainer.test(cfg, model)
return res
trainer = MyTrainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument("--debug", action="store_true", help="enable debug mode")
args = parser.parse_args()
print("Command Line Args:", args)
if args.debug:
import debugpy
print("Enabling attach starts.")
debugpy.listen(address=('0.0.0.0', 9310))
debugpy.wait_for_client()
print("Enabling attach ends.")
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| EXA-1-master | exa/models/unilm-master/dit/object_detection/train_net.py |
import argparse
import os
import cv2
import tqdm
def convert(fn):
# given a file name, convert it into binary and store at the same position
img = cv2.imread(fn)
gim = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gim = cv2.adaptiveThreshold(gim, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 45, 11)
g3im = cv2.cvtColor(gim, cv2.COLOR_GRAY2BGR)
cv2.imwrite(fn, g3im)
if __name__ == '__main__':
"""
    Currently only applicable to the trackA_* subsets (it binarizes every image file in place).
"""
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default="../datasets/icdar2019/at_trackA_archival")
args = parser.parse_args()
for fdname in os.listdir(args.root_dir):
if fdname.endswith(".json"):
continue
ffdname = os.path.join(args.root_dir, fdname)
for file in tqdm.tqdm(os.listdir(ffdname)):
if file.endswith(".xml"):
continue
ffile = os.path.join(ffdname, file)
convert(ffile)
| EXA-1-master | exa/models/unilm-master/dit/object_detection/adaptive_binarize.py |
"""
Mostly copy-paste from DINO and timm library:
https://github.com/facebookresearch/dino
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import warnings
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import trunc_normal_, drop_path, to_2tuple
from functools import partial
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
q, k, v = self.qkv(x).reshape(B, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.window_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches_w, self.num_patches_h = self.window_size
self.num_patches = self.window_size[0] * self.window_size[1]
self.img_size = img_size
self.patch_size = patch_size
self.proj = nn.Conv2d(in_chans, embed_dim,
kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x)
return x
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(
1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class ViT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
model_name='vit_base_patch16_224',
img_size=384,
patch_size=16,
in_chans=3,
embed_dim=1024,
depth=24,
num_heads=16,
num_classes=19,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.1,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
norm_cfg=None,
pos_embed_interp=False,
random_init=False,
align_corners=False,
use_checkpoint=False,
num_extra_tokens=1,
out_features=None,
**kwargs,
):
super(ViT, self).__init__()
self.model_name = model_name
self.img_size = img_size
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.depth = depth
self.num_heads = num_heads
self.num_classes = num_classes
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.drop_rate = drop_rate
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.hybrid_backbone = hybrid_backbone
self.norm_layer = norm_layer
self.norm_cfg = norm_cfg
self.pos_embed_interp = pos_embed_interp
self.random_init = random_init
self.align_corners = align_corners
self.use_checkpoint = use_checkpoint
self.num_extra_tokens = num_extra_tokens
self.out_features = out_features
self.out_indices = [int(name[5:]) for name in out_features]
# self.num_stages = self.depth
# self.out_indices = tuple(range(self.num_stages))
if self.hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
self.hybrid_backbone, img_size=self.img_size, in_chans=self.in_chans, embed_dim=self.embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim)
self.num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
if self.num_extra_tokens == 2:
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(
1, self.num_patches + self.num_extra_tokens, self.embed_dim))
self.pos_drop = nn.Dropout(p=self.drop_rate)
# self.num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate,
self.depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=self.embed_dim, num_heads=self.num_heads, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
qk_scale=self.qk_scale,
drop=self.drop_rate, attn_drop=self.attn_drop_rate, drop_path=dpr[i], norm_layer=self.norm_layer)
for i in range(self.depth)])
# NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here
# self.repr = nn.Linear(embed_dim, representation_size)
# self.repr_act = nn.Tanh()
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
if self.num_extra_tokens==2:
trunc_normal_(self.dist_token, std=.02)
self.apply(self._init_weights)
# self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
'''
def init_weights(self):
logger = get_root_logger()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}")
load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger)
'''
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def _conv_filter(self, state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
def to_2D(self, x):
n, hw, c = x.shape
h = w = int(math.sqrt(hw))
x = x.transpose(1, 2).reshape(n, c, h, w)
return x
def to_1D(self, x):
n, c, h, w = x.shape
x = x.reshape(n, c, -1).transpose(1, 2)
return x
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - self.num_extra_tokens
N = self.pos_embed.shape[1] - self.num_extra_tokens
if npatch == N and w == h:
return self.pos_embed
class_ORdist_pos_embed = self.pos_embed[:, 0:self.num_extra_tokens]
patch_pos_embed = self.pos_embed[:, self.num_extra_tokens:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size[0]
h0 = h // self.patch_embed.patch_size[1]
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_ORdist_pos_embed, patch_pos_embed), dim=1)
def prepare_tokens(self, x, mask=None):
B, nc, w, h = x.shape
# patch linear embedding
x = self.patch_embed(x)
# mask image modeling
if mask is not None:
x = self.mask_model(x, mask)
x = x.flatten(2).transpose(1, 2)
# add the [CLS] token to the embed patch tokens
all_tokens = [self.cls_token.expand(B, -1, -1)]
if self.num_extra_tokens == 2:
dist_tokens = self.dist_token.expand(B, -1, -1)
all_tokens.append(dist_tokens)
all_tokens.append(x)
x = torch.cat(all_tokens, dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward_features(self, x):
# print(f"==========shape of x is {x.shape}==========")
B, _, H, W = x.shape
Hp, Wp = H // self.patch_size, W // self.patch_size
x = self.prepare_tokens(x)
features = []
for i, blk in enumerate(self.blocks):
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if i in self.out_indices:
xp = x[:, self.num_extra_tokens:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
feat_out = {}
for name, value in zip(self.out_features, features):
feat_out[name] = value
return feat_out
def forward(self, x):
x = self.forward_features(x)
return x
def deit_base_patch16(pretrained=False, **kwargs):
model = ViT(
patch_size=16,
drop_rate=0.,
embed_dim=768,
depth=12,
num_heads=12,
num_classes=1000,
mlp_ratio=4.,
qkv_bias=True,
use_checkpoint=True,
num_extra_tokens=2,
**kwargs)
model.default_cfg = _cfg()
return model
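# NOTE: deit_base_patch16 uses num_extra_tokens=2 (class token plus the DeiT
# distillation token), while mae_base_patch16 below uses num_extra_tokens=1
# (class token only).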
def mae_base_patch16(pretrained=False, **kwargs):
model = ViT(
patch_size=16,
drop_rate=0.,
embed_dim=768,
depth=12,
num_heads=12,
num_classes=1000,
mlp_ratio=4.,
qkv_bias=True,
use_checkpoint=True,
num_extra_tokens=1,
**kwargs)
model.default_cfg = _cfg()
return model | EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/deit.py |
import copy
import itertools
import os
import os.path as osp
import shutil
from collections import OrderedDict
from xml.dom.minidom import Document
import detectron2.utils.comm as comm
import torch
from detectron2.evaluation import COCOEvaluator
from detectron2.utils.file_io import PathManager
from .table_evaluation.evaluate import calc_table_score
class ICDAREvaluator(COCOEvaluator):
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
self.evaluate_table(predictions)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def evaluate_table(self, predictions):
xml_dir = self.convert_to_xml(predictions)
results = calc_table_score(xml_dir)
self._results["wF1"] = results['wF1']
def convert_to_xml(self, predictions):
output_dir = osp.join(self._output_dir, "xml_results")
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir, exist_ok=True)
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
results_dict = {}
for result in coco_results:
if result["score"] < 0.7:
continue
image_id = result["image_id"]
if image_id not in results_dict:
results_dict[image_id] = []
results_dict[image_id].append(result)
for image_id, tables in results_dict.items():
file_name = f"cTDaR_t{image_id:05d}.jpg"
doc = Document()
root = doc.createElement('document')
root.setAttribute('filename', file_name)
doc.appendChild(root)
for table_id, table in enumerate(tables, start=1):
nodeManager = doc.createElement('table')
nodeManager.setAttribute('id', str(table_id))
bbox = list(map(int, table['bbox']))
bbox_str = '{},{} {},{} {},{} {},{}'.format(bbox[0], bbox[1],
bbox[0], bbox[1] + bbox[3],
bbox[0] + bbox[2], bbox[1] + bbox[3],
bbox[0] + bbox[2], bbox[1])
nodeCoords = doc.createElement('Coords')
nodeCoords.setAttribute('points', bbox_str)
nodeManager.appendChild(nodeCoords)
root.appendChild(nodeManager)
filename = '{}-result.xml'.format(file_name[:-4])
fp = open(os.path.join(output_dir, filename), 'w')
doc.writexml(fp, indent='', addindent='\t', newl='\n', encoding="utf-8")
fp.flush()
fp.close()
return output_dir
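# Illustrative sketch of one file written by convert_to_xml (coordinates are
# made up): every detection above the 0.7 score threshold becomes a <table>
# node whose <Coords> points walk the four corners of its bbox, e.g.
#
#   <document filename="cTDaR_t00001.jpg">
#       <table id="1">
#           <Coords points="10,20 10,120 210,120 210,20"/>
#       </table>
#   </document>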
if __name__ == '__main__':
pass
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/icdar_evaluation.py |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
Status/TODO:
* Models updated to be compatible with official impl. Args added to support backward compat for old PyTorch weights.
* Weights ported from official jax impl for 384x384 base and small models, 16x16 and 32x32 patches.
* Trained (supervised on ImageNet-1k) my custom 'small' patch model to 77.9, 'base' to 79.4 top-1 with this code.
* Hopefully find time and GPUs for SSL or unsupervised pretraining on OpenImages w/ ImageNet fine-tune in future.
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020 Ross Wightman
"""
import warnings
import math
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
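# timm's drop_path zeroes the whole residual branch for a random subset of the
# samples during training and rescales the surviving ones by 1 / (1 - drop_prob),
# so the expected activation magnitude is unchanged.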
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
# kept commented out to match the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# 3 extra bias entries: cls-to-token, token-to-cls and cls-to-cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.0)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None, training_window_size=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
if training_window_size == self.window_size:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
else:
training_window_size = tuple(training_window_size.tolist())
new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3
# new_num_relative_distance counts every possible relative offset plus the cls-cls, token-cls and cls-token entries
new_relative_position_bias_table = F.interpolate(
self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads,
2 * self.window_size[0] - 1,
2 * self.window_size[1] - 1),
size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic',
align_corners=False)
new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads,
new_num_relative_distance - 3).permute(
1, 0)
new_relative_position_bias_table = torch.cat(
[new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(training_window_size[0])
coords_w = torch.arange(training_window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += training_window_size[1] - 1
relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1
relative_position_index = \
torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2,
dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = new_num_relative_distance - 3
relative_position_index[0:, 0] = new_num_relative_distance - 2
relative_position_index[0, 0] = new_num_relative_distance - 1
relative_position_bias = \
new_relative_position_bias_table[relative_position_index.view(-1)].view(
training_window_size[0] * training_window_size[1] + 1,
training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None, training_window_size=None):
if self.gamma_1 is None:
x = x + self.drop_path(
self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, training_window_size=training_window_size))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias,
training_window_size=training_window_size))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches_w = self.patch_shape[0]
self.num_patches_h = self.patch_shape[1]
# the so-called patch_shape is the patch shape during pre-training
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, position_embedding=None, **kwargs):
# FIXME look at relaxing size constraints
# assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
if position_embedding is not None:
# interpolate the position embedding to the corresponding size
position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3,
1, 2)
position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic')
x = x + position_embedding
x = x.flatten(2).transpose(1, 2)
return x, (Hp, Wp)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=[224, 224], feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_heads = num_heads
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# 3 extra bias entries: cls-to-token, token-to-cls and cls-to-cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self, training_window_size):
if training_window_size == self.window_size:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
else:
training_window_size = tuple(training_window_size.tolist())
new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3
# new_num_relative_distance counts every possible relative offset plus the cls-cls, token-cls and cls-token entries
new_relative_position_bias_table = F.interpolate(
self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads,
2 * self.window_size[0] - 1,
2 * self.window_size[1] - 1),
size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic',
align_corners=False)
new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads,
new_num_relative_distance - 3).permute(
1, 0)
new_relative_position_bias_table = torch.cat(
[new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(training_window_size[0])
coords_w = torch.arange(training_window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += training_window_size[1] - 1
relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1
relative_position_index = \
torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2,
dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = new_num_relative_distance - 3
relative_position_index[0:, 0] = new_num_relative_distance - 2
relative_position_index[0, 0] = new_num_relative_distance - 1
relative_position_bias = \
new_relative_position_bias_table[relative_position_index.view(-1)].view(
training_window_size[0] * training_window_size[1] + 1,
training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias
class BEiT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
img_size=[224, 224],
patch_size=16,
in_chans=3,
num_classes=80,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=None,
init_values=None,
use_abs_pos_emb=False,
use_rel_pos_bias=False,
use_shared_rel_pos_bias=False,
use_checkpoint=True,
pretrained=None,
out_features=None,
):
super(BEiT, self).__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.use_checkpoint = use_checkpoint
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.out_features = out_features
self.out_indices = [int(name[5:]) for name in out_features]
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
self.use_shared_rel_pos_bias = use_shared_rel_pos_bias
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
# trunc_normal_(self.mask_token, std=.02)
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
# nn.SyncBatchNorm(embed_dim),
nn.BatchNorm2d(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
'''
def init_weights(self):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
logger = get_root_logger()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}")
load_checkpoint(self,
filename=self.init_cfg['checkpoint'],
strict=False,
logger=logger,
beit_spec_expand_rel_pos = self.use_rel_pos_bias,
)
'''
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward_features(self, x):
B, C, H, W = x.shape
x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None)
# Hp, Wp are the height and width of the patch grid
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
if self.pos_embed is not None:
cls_tokens = cls_tokens + self.pos_embed[:, :1, :]
x = torch.cat((cls_tokens, x), dim=1)
x = self.pos_drop(x)
features = []
training_window_size = torch.tensor([Hp, Wp])
rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None
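# With a shared relative position bias, the table is interpolated once for the
# current (Hp, Wp) window and the same bias is passed to every block below.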
for i, blk in enumerate(self.blocks):
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size)
else:
x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size)
if i in self.out_indices:
xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
feat_out = {}
for name, value in zip(self.out_features, features):
feat_out[name] = value
return feat_out
def forward(self, x):
x = self.forward_features(x)
return x
def beit_base_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=None,
**kwargs)
model.default_cfg = _cfg()
return model
def beit_large_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=None,
**kwargs)
model.default_cfg = _cfg()
return model
def dit_base_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=0.1,
**kwargs)
model.default_cfg = _cfg()
return model
def dit_large_patch16(pretrained=False, **kwargs):
model = BEiT(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_values=1e-5,
**kwargs)
model.default_cfg = _cfg()
return model
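# The dit_* builders differ from the beit_* ones only in init_values: DiT enables
# LayerScale (0.1 for the base model, 1e-5 for the large one), while the BEiT
# builders pass init_values=None and therefore skip the gamma scaling in Block.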
if __name__ == '__main__':
model = BEiT(use_checkpoint=True, use_shared_rel_pos_bias=True)
model = model.to("cuda:0")
input1 = torch.rand(2, 3, 512, 762).to("cuda:0")
input2 = torch.rand(2, 3, 800, 1200).to("cuda:0")
input3 = torch.rand(2, 3, 720, 1000).to("cuda:0")
output1 = model(input1)
output2 = model(input2)
output3 = model(input3)
print("all done")
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/beit.py |
from detectron2.config import CfgNode as CN
def add_vit_config(cfg):
"""
Add config for VIT.
"""
_C = cfg
_C.MODEL.VIT = CN()
# ViT model name.
_C.MODEL.VIT.NAME = ""
# Output features from the ViT backbone.
_C.MODEL.VIT.OUT_FEATURES = ["layer3", "layer5", "layer7", "layer11"]
_C.MODEL.VIT.IMG_SIZE = [224, 224]
_C.MODEL.VIT.POS_TYPE = "shared_rel"
_C.MODEL.VIT.DROP_PATH = 0.
_C.MODEL.VIT.MODEL_KWARGS = "{}"
_C.SOLVER.OPTIMIZER = "ADAMW"
_C.SOLVER.BACKBONE_MULTIPLIER = 1.0
_C.AUG = CN()
_C.AUG.DETR = False
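# A hypothetical YAML fragment overriding these keys could look like:
#
#   MODEL:
#     VIT:
#       NAME: "dit_base_patch16"
#       OUT_FEATURES: ["layer3", "layer5", "layer7", "layer11"]
#       POS_TYPE: "abs"
#       DROP_PATH: 0.1
#   SOLVER:
#     OPTIMIZER: "ADAMW"
#     BACKBONE_MULTIPLIER: 1.0
#   AUG:
#     DETR: True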
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/config.py |
from detectron2.checkpoint import DetectionCheckpointer
from typing import Any
import torch
import torch.nn as nn
from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, \
ObserverBase, FakeQuantizeBase
from torch import distributed as dist
from scipy import interpolate
import numpy as np
import torch.nn.functional as F
def append_prefix(k):
prefix = 'backbone.bottom_up.backbone.'
return prefix + k if not k.startswith(prefix) else k
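# e.g. append_prefix('pos_embed') -> 'backbone.bottom_up.backbone.pos_embed';
# keys that already carry the prefix are returned unchanged.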
def modify_ckpt_state(model, state_dict, logger=None):
# reshape absolute position embedding for Swin
if state_dict.get(append_prefix('absolute_pos_embed')) is not None:
absolute_pos_embed = state_dict[append_prefix('absolute_pos_embed')]
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.backbone.bottom_up.backbone.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H * W:
logger.warning("Error in loading absolute_pos_embed, pass")
else:
state_dict[append_prefix('absolute_pos_embed')] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
def get_dist_info():
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
rank, _ = get_dist_info()
all_keys = list(state_dict.keys())
for key in all_keys:
if "relative_position_index" in key:
state_dict.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = state_dict[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
if key not in model.state_dict():
continue
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.backbone.bottom_up.backbone.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
if rank == 0:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
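# Bisect for a common ratio q such that geometrically spaced sample points
# (1, 1+q, 1+q+q**2, ...) cover half of the target grid; mirrored around 0 they
# become the source coordinates for the cubic interpolation of the relative
# position bias table (the resizing scheme used by BEiT).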
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.13492:
# q = 1.13492
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
if rank == 0:
print("x = {}".format(x))
print("dx = {}".format(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
state_dict[key] = new_rel_pos_bias
if append_prefix('pos_embed') in state_dict:
pos_embed_checkpoint = state_dict[append_prefix('pos_embed')]
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.backbone.bottom_up.backbone.patch_embed.num_patches
num_extra_tokens = model.backbone.bottom_up.backbone.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
# new_size = int(num_patches ** 0.5)
new_size_w = model.backbone.bottom_up.backbone.patch_embed.num_patches_w
new_size_h = model.backbone.bottom_up.backbone.patch_embed.num_patches_h
# class_token and dist_token are kept unchanged
if orig_size != new_size_h or orig_size != new_size_w:
if rank == 0:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size_w, new_size_h))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size_w, new_size_h), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
state_dict[append_prefix('pos_embed')] = new_pos_embed
# interpolate position bias table if needed
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
if table_key not in model.state_dict():
continue
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f"Error in loading {table_key}, pass")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2), mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
if append_prefix('rel_pos_bias.relative_position_bias_table') in state_dict and \
model.backbone.bottom_up.backbone.use_rel_pos_bias and \
not model.backbone.bottom_up.backbone.use_shared_rel_pos_bias and \
append_prefix('blocks.0.attn.relative_position_bias_table') not in state_dict:
logger.info("[BEIT] Expand the shared relative position embedding to each transformer block. ")
num_layers = model.backbone.bottom_up.backbone.get_num_layers()
rel_pos_bias = state_dict[append_prefix("rel_pos_bias.relative_position_bias_table")]
for i in range(num_layers):
state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()
state_dict.pop(append_prefix("rel_pos_bias.relative_position_bias_table"))
return state_dict
class MyDetectionCheckpointer(DetectionCheckpointer):
def _load_model(self, checkpoint: Any) -> _IncompatibleKeys:
"""
Load weights from a checkpoint.
Args:
checkpoint (Any): checkpoint contains the weights.
Returns:
``NamedTuple`` with ``missing_keys``, ``unexpected_keys``,
and ``incorrect_shapes`` fields:
* **missing_keys** is a list of str containing the missing keys
* **unexpected_keys** is a list of str containing the unexpected keys
* **incorrect_shapes** is a list of (key, shape in checkpoint, shape in model)
This is just like the return value of
:func:`torch.nn.Module.load_state_dict`, but with extra support
for ``incorrect_shapes``.
"""
checkpoint_state_dict = checkpoint.pop("model")
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# workaround https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.state_dict()
incorrect_shapes = []
# rename the para in checkpoint_state_dict
# NOTE: there is a known bug here; re-loading is not supported
checkpoint_state_dict = {
append_prefix(k): checkpoint_state_dict[k]
for k in checkpoint_state_dict.keys()
}
checkpoint_state_dict = modify_ckpt_state(self.model, checkpoint_state_dict, logger=self.logger)
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
model_param = model_state_dict[k]
# Allow mismatch for uninitialized parameters
if TORCH_VERSION >= (1, 8) and isinstance(
model_param, nn.parameter.UninitializedParameter
):
continue
shape_model = tuple(model_param.shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
has_observer_base_classes = (
TORCH_VERSION >= (1, 8)
and hasattr(quantization, "ObserverBase")
and hasattr(quantization, "FakeQuantizeBase")
)
if has_observer_base_classes:
# Handle the special case of quantization per channel observers,
# where buffer shape mismatches are expected.
def _get_module_for_key(
model: torch.nn.Module, key: str
) -> torch.nn.Module:
# foo.bar.param_or_buffer_name -> [foo, bar]
key_parts = key.split(".")[:-1]
cur_module = model
for key_part in key_parts:
cur_module = getattr(cur_module, key_part)
return cur_module
cls_to_skip = (
ObserverBase,
FakeQuantizeBase,
)
target_module = _get_module_for_key(self.model, k)
if isinstance(target_module, cls_to_skip):
# Do not remove these modules' entries from the state_dict;
# they have special logic in _load_from_state_dict to handle
# the shape mismatches.
continue
incorrect_shapes.append((k, shape_checkpoint, shape_model))
checkpoint_state_dict.pop(k)
incompatible = self.model.load_state_dict(checkpoint_state_dict, strict=False)
return _IncompatibleKeys(
missing_keys=incompatible.missing_keys,
unexpected_keys=incompatible.unexpected_keys,
incorrect_shapes=incorrect_shapes,
)
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/mycheckpointer.py |
# --------------------------------------------------------------------------------
# VIT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# CoaT: https://github.com/mlpc-ucsd/CoaT
# --------------------------------------------------------------------------------
import torch
from detectron2.layers import (
ShapeSpec,
)
from detectron2.modeling import Backbone, BACKBONE_REGISTRY, FPN
from detectron2.modeling.backbone.fpn import LastLevelP6P7, LastLevelMaxPool
from .beit import beit_base_patch16, dit_base_patch16, dit_large_patch16, beit_large_patch16
from .deit import deit_base_patch16, mae_base_patch16
__all__ = [
"build_vit_fpn_backbone",
]
class VIT_Backbone(Backbone):
"""
Implement VIT backbone.
"""
def __init__(self, name, out_features, drop_path, img_size, pos_type, model_kwargs):
super().__init__()
self._out_features = out_features
if 'base' in name:
self._out_feature_strides = {"layer3": 4, "layer5": 8, "layer7": 16, "layer11": 32}
else:
self._out_feature_strides = {"layer7": 4, "layer11": 8, "layer15": 16, "layer23": 32}
if name == 'beit_base_patch16':
model_func = beit_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == 'dit_base_patch16':
model_func = dit_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == "deit_base_patch16":
model_func = deit_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == "mae_base_patch16":
model_func = mae_base_patch16
self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768}
elif name == "dit_large_patch16":
model_func = dit_large_patch16
self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024}
elif name == "beit_large_patch16":
model_func = beit_large_patch16
self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024}
else:
raise ValueError("Unsupported VIT name yet.")
if 'beit' in name or 'dit' in name:
if pos_type == "abs":
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
use_abs_pos_emb=True,
**model_kwargs)
elif pos_type == "shared_rel":
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
use_shared_rel_pos_bias=True,
**model_kwargs)
elif pos_type == "rel":
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
use_rel_pos_bias=True,
**model_kwargs)
else:
raise ValueError()
else:
self.backbone = model_func(img_size=img_size,
out_features=out_features,
drop_path_rate=drop_path,
**model_kwargs)
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert x.dim() == 4, f"VIT takes an input of shape (N, C, H, W). Got {x.shape} instead!"
return self.backbone.forward_features(x)
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def build_VIT_backbone(cfg):
"""
Create a VIT instance from config.
Args:
cfg: a detectron2 CfgNode
Returns:
A VIT backbone instance.
"""
# fmt: off
name = cfg.MODEL.VIT.NAME
out_features = cfg.MODEL.VIT.OUT_FEATURES
drop_path = cfg.MODEL.VIT.DROP_PATH
img_size = cfg.MODEL.VIT.IMG_SIZE
pos_type = cfg.MODEL.VIT.POS_TYPE
model_kwargs = eval(str(cfg.MODEL.VIT.MODEL_KWARGS).replace("`", ""))
return VIT_Backbone(name, out_features, drop_path, img_size, pos_type, model_kwargs)
@BACKBONE_REGISTRY.register()
def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Create a VIT w/ FPN backbone.
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_VIT_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
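# Minimal usage sketch (assuming a detectron2 config that has been extended with
# add_vit_config and filled from one of the project's yaml files; normally
# detectron2 calls this builder itself via cfg.MODEL.BACKBONE.NAME):
#
#   from detectron2.config import get_cfg
#   from ditod import add_vit_config
#
#   cfg = get_cfg()
#   add_vit_config(cfg)
#   cfg.merge_from_file("path/to/config.yaml")  # hypothetical path
#   backbone = build_vit_fpn_backbone(cfg, input_shape=None)  # input_shape is unused here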
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/backbone.py |
# --------------------------------------------------------------------------------
# MPViT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------------------------------
from .config import add_vit_config
from .backbone import build_vit_fpn_backbone
from .dataset_mapper import DetrDatasetMapper
from .mycheckpointer import MyDetectionCheckpointer
from .icdar_evaluation import ICDAREvaluator
from .mytrainer import MyTrainer
from .table_evaluation import calc_table_score | EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# from https://github.com/facebookresearch/detr/blob/main/d2/detr/dataset_mapper.py
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
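# e.g. during training this returns [RandomFlip(), ResizeShortestEdge(min_size, max_size, sample_style)];
# at test time only the resize transform is returned.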
class DetrDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by DETR.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
self.mask_on = cfg.MODEL.MASK_ON
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict | EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/dataset_mapper.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase
from .mycheckpointer import MyDetectionCheckpointer
from typing import Any, Dict, List, Set
import itertools
from detectron2.solver.build import maybe_add_gradient_clipping
from .dataset_mapper import DetrDatasetMapper
from .icdar_evaluation import ICDAREvaluator
from detectron2.evaluation import COCOEvaluator
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"MyTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `MyTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
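# Illustrative sketch: parsing command-line flags with the parser above. The
# flag values shown are placeholders, not real paths.
#
#   args = default_argument_parser().parse_args(
#       ["--config-file", "cfg.yaml", "--num-gpus", "2", "MODEL.WEIGHTS", "model.pth"]
#   )
#   # args.opts == ["MODEL.WEIGHTS", "model.pth"]; the caller typically merges
#   # these key/value pairs into the config.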
def _try_get_key(cfg, *keys, default=None):
"""
Try select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
none = object()
p = OmegaConf.select(cfg, k, default=none)
if p is not none:
return p
return default
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
PathManager.mkdirs(output_dir)
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
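# Illustrative sketch: the writers returned above are normally handed to a
# periodic-writer hook; the period of 20 matches the default used in
# `MyTrainer.build_hooks` further below.
#
#   writers = default_writers("./output", max_iter=90000)
#   writer_hook = hooks.PeriodicWriter(writers, period=20)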
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class MyTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = MyTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = MyTrainer.auto_scale_workers(cfg, comm.get_world_size())
self.cfg = cfg
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = MyDetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from that file. Resuming means loading all
available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint; ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is treated as an independent training run. The method will load model
weights from `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before the checkpointer, because it updates the model and the result needs to
# be saved by the checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for key, value in model.named_parameters(recurse=True):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "backbone" in key:
lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return FullModelGradientClippingOptimizer if enable else optim
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
)
elif optimizer_type == "ADAMW":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
params, cfg.SOLVER.BASE_LR
)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
if cfg.AUG.DETR:
mapper = DetrDatasetMapper(cfg, is_train=True)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
if 'icdar' not in dataset_name:
return COCOEvaluator(dataset_name, output_dir=output_folder)
else:
return ICDAREvaluator(dataset_name, output_dir=output_folder)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warning(
"No evaluator found. Use `MyTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
When the config is defined for certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
* training steps and warmup steps are scaled inversely proportionally.
* the learning rate is scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
It's up to the user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
MyTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
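# Illustrative sketch: the loop above makes `MyTrainer.model`, `.data_loader`
# and `.optimizer` read/write proxies for the wrapped `_trainer`. It is
# equivalent to writing, per attribute, a property like this hypothetical one:
#
#   @property
#   def model(self):
#       return self._trainer.model
#
#   @model.setter
#   def model(self, value):
#       self._trainer.model = value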
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/mytrainer.py |
from .evaluate import calc_table_score | EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/table_evaluation/__init__.py |
"""
Evaluation of -.tar.gz file.
Yu Fang - March 2019
"""
import os
import xml.dom.minidom
# from eval import eval
reg_gt_path = os.path.abspath("data/test")
reg_gt_path_archival = os.path.abspath("data/test")
reg_gt_path_modern = os.path.abspath("data/test")
str_gt_path_1 = os.path.abspath("data/test")
str_gt_path_2 = os.path.abspath("data/test")
str_gt_path_archival = os.path.abspath("data/test")
str_gt_path_modern = os.path.abspath("data/test")
# from functools import cmp_to_key
from os.path import join as osj
from .data_structure import *
class eval:
STR = "-str"
REG = "-reg"
DEFAULT_ENCODING = "UTF-8"
# reg_gt_path = "./annotations/trackA/"
# str_gt_path = "./annotations/trackB/"
# reg_gt_path = os.path.abspath("data/test")
# reg_gt_path_archival = os.path.abspath("data/test")
# reg_gt_path_modern = os.path.abspath("data/test")
# str_gt_path_1 = os.path.abspath("data/test")
# str_gt_path_2 = os.path.abspath("data/test")
# str_gt_path_archival = os.path.abspath("data/test")
# str_gt_path_modern = os.path.abspath("data/test")
# dummyDom = xml.dom.minidom.parse("./dummyXML.xml")
def __init__(self, track, res_path):
self.return_result = None
self.reg = True
self.str = False
self.resultFile = res_path
self.inPrefix = os.path.split(res_path)[-1].split(".")[0][:-7]
if track == "-trackA":
self.reg = True
self.GTFile = osj(reg_gt_path, self.inPrefix + ".xml")
# self.GTFile = osj(self.reg_gt_path, self.inPrefix)
elif track == "-trackA1": # archival documents
self.reg = True
self.GTFile = osj(reg_gt_path_archival, self.inPrefix + ".xml")
elif track == "-trackA2": # modern documents
self.reg = True
self.GTFile = osj(reg_gt_path_modern, self.inPrefix + ".xml")
elif track == "-trackB1":
self.str = True
self.GTFile = osj(str_gt_path_1, self.inPrefix + ".xml")
# self.GTFile = osj(self.str_gt_path_1, self.inPrefix)
elif track == "-trackB2":
self.str = True
self.GTFile = osj(str_gt_path_2, self.inPrefix + ".xml")
# print(self.GTFile)
# self.GTFile = osj(self.str_gt_path_2, self.inPrefix)
elif track == "-trackB2_a":
self.str = True
self.GTFile = osj(str_gt_path_archival, self.inPrefix + ".xml")
elif track == "-trackB2_m":
self.str = True
self.GTFile = osj(str_gt_path_modern, self.inPrefix + ".xml")
else:
print(track)
print("Not a valid track, please check your spelling.")
# self.resultFile = res_path
# self.inPrefix = os.path.split(res_path)[-1].split("-")[0]
# if self.str:
# # self.GTFile = osj(self.str_gt_path, self.inPrefix + "-str.xml")
# self.GTFile = osj(self.str_gt_path, self.inPrefix + ".xml")
# elif self.reg:
# # self.GTFile = osj(self.reg_gt_path, self.inPrefix + "-reg.xml")
# self.GTFile = osj(self.reg_gt_path, self.inPrefix + ".xml")
# else:
# print("Not a valid track, please check your spelling.")
self.gene_ret_lst()
@property
def result(self):
return self.return_result
def gene_ret_lst(self):
ret_lst = []
for iou in [0.6, 0.7, 0.8, 0.9]:
temp = self.compute_retVal(iou)
ret_lst.append(temp)
# ret_lst.append(self.compute_retVal(iou))
ret_lst.append(self.inPrefix + ".xml")
# ret_lst.append(self.inPrefix)
# print("Done processing {}\n".format(self.resultFile))
self.return_result = ret_lst
def compute_retVal(self, iou):
gt_dom = xml.dom.minidom.parse(self.GTFile)
# incorrect submission format handling
try:
result_dom = xml.dom.minidom.parse(self.resultFile)
except Exception as e:
# result_dom = xml.dom.minidom.parse(dummyDom)
gt_tables = eval.get_table_list(gt_dom)
retVal = ResultStructure(truePos=0, gtTotal=len(gt_tables), resTotal=0)
return retVal
# result_dom = xml.dom.minidom.parse(self.resultFile)
if self.reg:
ret = self.evaluate_result_reg(gt_dom, result_dom, iou)
return ret
if self.str:
ret = self.evaluate_result_str(gt_dom, result_dom, iou)
return ret
@staticmethod
def get_table_list(dom):
"""
return a list of Table objects corresponding to the table element of the DOM.
"""
return [Table(_nd) for _nd in dom.documentElement.getElementsByTagName("table")]
@staticmethod
def evaluate_result_reg(gt_dom, result_dom, iou_value):
# parse the tables in input elements
gt_tables = eval.get_table_list(gt_dom)
result_tables = eval.get_table_list(result_dom)
# duplicate result table list
remaining_tables = result_tables.copy()
# map the tables in gt and result file
table_matches = [] # @param: table_matches - list of mapping of tables in gt and res file, in order (gt, res)
for gtt in gt_tables:
for rest in remaining_tables:
if gtt.compute_table_iou(rest) >= iou_value:
remaining_tables.remove(rest)
table_matches.append((gtt, rest))
break
assert len(table_matches) <= len(gt_tables)
assert len(table_matches) <= len(result_tables)
retVal = ResultStructure(truePos=len(table_matches), gtTotal=len(gt_tables), resTotal=len(result_tables))
return retVal
@staticmethod
def evaluate_result_str(gt_dom, result_dom, iou_value, table_iou_value=0.8):
# parse the tables in input elements
gt_tables = eval.get_table_list(gt_dom)
result_tables = eval.get_table_list(result_dom)
# duplicate result table list
remaining_tables = result_tables.copy()
gt_remaining = gt_tables.copy()
# map the tables in gt and result file
table_matches = [] # @param: table_matches - list of mapping of tables in gt and res file, in order (gt, res)
for gtt in gt_remaining:
for rest in remaining_tables:
# note: for structural analysis, use 0.8 for table mapping
if gtt.compute_table_iou(rest) >= table_iou_value:
table_matches.append((gtt, rest))
remaining_tables.remove(rest) # unsafe... should be ok with the break below
gt_remaining.remove(gtt)
break
total_gt_relation, total_res_relation, total_correct_relation = 0, 0, 0
for gt_table, ress_table in table_matches:
# set up the cell mapping for matching tables
cell_mapping = gt_table.find_cell_mapping(ress_table, iou_value)
# set up the adj relations, convert the one for result table to a dictionary for faster searching
gt_AR = gt_table.find_adj_relations()
total_gt_relation += len(gt_AR)
res_AR = ress_table.find_adj_relations()
total_res_relation += len(res_AR)
if False: # for DEBUG
Table.printCellMapping(cell_mapping)
Table.printAdjacencyRelationList(gt_AR, "GT")
Table.printAdjacencyRelationList(res_AR, "run")
# Now map GT adjacency relations to result
lMappedAR = []
for ar in gt_AR:
try:
resFromCell = cell_mapping[ar.fromText]
resToCell = cell_mapping[ar.toText]
# make a mapped adjacency relation
lMappedAR.append(AdjRelation(resFromCell, resToCell, ar.direction))
except:
# no mapping is possible
pass
# compare two list of adjacency relation
correct_dect = 0
for ar1 in res_AR:
for ar2 in lMappedAR:
if ar1.isEqual(ar2):
correct_dect += 1
break
total_correct_relation += correct_dect
# handle gt_relations in unmatched gt table
for gtt_remain in gt_remaining:
total_gt_relation += len(gtt_remain.find_adj_relations())
# handle gt_relation in unmatched res table
for res_remain in remaining_tables:
total_res_relation += len(res_remain.find_adj_relations())
retVal = ResultStructure(truePos=total_correct_relation, gtTotal=total_gt_relation, resTotal=total_res_relation)
return retVal
# calculate the gt adj_relations of the missing file
# @param: file_lst - list of missing ground truth file
# @param: cur_gt_num - current total of ground truth objects (tables / cells)
def process_missing_files(track, gt_file_lst, cur_gt_num):
if track in ["-trackA", "-trackA1", "-trackA2"]:
gt_file_lst_full = [osj(reg_gt_path, filename) for filename in gt_file_lst]
for file in gt_file_lst_full:
if os.path.split(file)[-1].split(".")[-1] == "xml":
gt_dom = xml.dom.minidom.parse(file)
gt_root = gt_dom.documentElement
# tables = []
table_elements = gt_root.getElementsByTagName("table")
for res_table in table_elements:
# t = Table(res_table)
# tables.append(t)
cur_gt_num += 1
return cur_gt_num
elif track == "-trackB1":
gt_file_lst_full = [osj(str_gt_path_1, filename) for filename in gt_file_lst]
for file in gt_file_lst_full:
if os.path.split(file)[-1].split(".")[-1] == "xml":
gt_dom = xml.dom.minidom.parse(file)
gt_root = gt_dom.documentElement
tables = []
table_elements = gt_root.getElementsByTagName("table")
for res_table in table_elements:
t = Table(res_table)
tables.append(t)
for table in tables:
cur_gt_num += len(table.find_adj_relations())
return cur_gt_num
elif track == "-trackB2":
gt_file_lst_full = [osj(str_gt_path_2, filename) for filename in gt_file_lst]
for file in gt_file_lst_full:
if os.path.split(file)[-1].split(".")[-1] == "xml":
gt_dom = xml.dom.minidom.parse(file)
gt_root = gt_dom.documentElement
tables = []
table_elements = gt_root.getElementsByTagName("table")
for res_table in table_elements:
t = Table(res_table)
tables.append(t)
for table in tables:
cur_gt_num += len(table.find_adj_relations())
return cur_gt_num
def calc(F1):
sum_a = 0.6 * F1[0] + 0.7 * F1[1] + 0.8 * F1[2] + 0.9 * F1[3]
sum_b = 0.6 + 0.7 + 0.8 + 0.9
return sum_a / sum_b
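# Worked example (assumed scores): with per-IoU F1 values
# F1 = [0.90, 0.85, 0.80, 0.70] at thresholds 0.6/0.7/0.8/0.9, the weighted
# average is (0.6*0.90 + 0.7*0.85 + 0.8*0.80 + 0.9*0.70) / 3.0 ≈ 0.802,
# i.e. higher IoU thresholds contribute proportionally more to the final score.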
def calc_table_score(result_path):
# measure = eval(*sys.argv[1:])
gt_file_lst = os.listdir(reg_gt_path_archival)
track = "-trackA1"
untar_path = result_path
res_lst = []
# os.walk yields (dirpath, dirnames, filenames); iterate over the filenames
for root, dirs, files in os.walk(untar_path):
for name in files:
if name.split(".")[-1] == "xml":
cur_filepath = osj(os.path.abspath(root), name)
res_lst.append(eval(track, cur_filepath))
# printing for debug
# print("Processing... {}".format(name))
# print("DONE WITH FILE PROCESSING\n")
# note: each entry in res_lst stores results at IoU thresholds [0.6, 0.7, 0.8, 0.9] followed by the gt filename
# gt number should be the same for all files
gt_num = 0
correct_six, res_six = 0, 0
correct_seven, res_seven = 0, 0
correct_eight, res_eight = 0, 0
correct_nine, res_nine = 0, 0
for each_file in res_lst:
# print(each_file)
try:
gt_file_lst.remove(each_file.result[-1])
if each_file.result[-1].replace('.xml', '.jpg') in gt_file_lst:
gt_file_lst.remove(each_file.result[-1].replace('.xml', '.jpg'))
correct_six += each_file.result[0].truePos
gt_num += each_file.result[0].gtTotal
res_six += each_file.result[0].resTotal
# print("{} {} {}".format(each_file.result[0].truePos, each_file.result[0].gtTotal, each_file.result[0].resTotal))
correct_seven += each_file.result[1].truePos
res_seven += each_file.result[1].resTotal
correct_eight += each_file.result[2].truePos
res_eight += each_file.result[2].resTotal
correct_nine += each_file.result[3].truePos
res_nine += each_file.result[3].resTotal
except:
print("Error occur in processing result list.")
print(each_file.result[-1])
break
# print(each_file.result[-1])
# print(each_file)
# for file in gt_file_lst:
# if file.split(".") != "xml":
# gt_file_lst.remove(file)
# # print(gt_file_lst)
for i in range(len(gt_file_lst) - 1, -1, -1):
if gt_file_lst[i].split(".")[-1] != "xml":
del gt_file_lst[i]
if len(gt_file_lst) > 0:
print("\nWarning: missing result annotations for file: {}\n".format(gt_file_lst))
gt_total = process_missing_files(track, gt_file_lst, gt_num)
else:
gt_total = gt_num
try:
# print("Evaluation of {}".format(track.replace("-", "")))
# iou @ 0.6
p_six = correct_six / res_six
r_six = correct_six / gt_total
f1_six = 2 * p_six * r_six / (p_six + r_six)
print("IOU @ 0.6 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_six, r_six, f1_six))
print("correct: {}, gt: {}, res: {}\n".format(correct_six, gt_total, res_six))
# iou @ 0.7
p_seven = correct_seven / res_seven
r_seven = correct_seven / gt_total
f1_seven = 2 * p_seven * r_seven / (p_seven + r_seven)
print("IOU @ 0.7 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_seven, r_seven, f1_seven))
print("correct: {}, gt: {}, res: {}\n".format(correct_seven, gt_total, res_seven))
# iou @ 0.8
p_eight = correct_eight / res_eight
r_eight = correct_eight / gt_total
f1_eight = 2 * p_eight * r_eight / (p_eight + r_eight)
print("IOU @ 0.8 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_eight, r_eight, f1_eight))
print("correct: {}, gt: {}, res: {}\n".format(correct_eight, gt_total, res_eight))
# iou @ 0.9
p_nine = correct_nine / res_nine
r_nine = correct_nine / gt_total
f1_nine = 2 * p_nine * r_nine / (p_nine + r_nine)
print("IOU @ 0.9 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_nine, r_nine, f1_nine))
print("correct: {}, gt: {}, res: {}".format(correct_nine, gt_total, res_nine))
F1 = [f1_six, f1_seven, f1_eight, f1_nine]
wF1 = calc(F1)
print("Average weight F1: {}".format(wF1))
return {
'p_six':p_six * 100,
"r_six":r_six * 100,
"f1_six":f1_six * 100,
"p_seven":p_seven * 100,
"r_seven":r_seven * 100,
"f1_seven":f1_seven * 100,
"p_eight":p_eight * 100,
"r_eight":r_eight * 100,
"f1_eight":f1_eight * 100,
"p_nine":p_nine * 100,
"r_nine":r_nine * 100,
"f1_nine":f1_nine * 100,
"wF1":wF1 * 100
}
except ZeroDivisionError:
print(
"Error: zero devision error found, (possible that no adjacency relations are found), please check the file input.")
return {"wF1": 0}
if __name__=="__main__":
pass
| EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/table_evaluation/evaluate.py |
"""
Data structures used by the evaluation process.
Yu Fang - March 2019
"""
from collections.abc import Iterable  # 'Iterable' lives in collections.abc (required on Python 3.10+)
import numpy as np
from shapely.geometry import Polygon
# helper functions
def flatten(lis):
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
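# Illustrative sketch: `flatten` is used below to turn the nested "x,y" token
# lists parsed from a PAGE-style points string into one flat list of strings.
#
#   pts = [p.split(",") for p in "0,0 10,0 10,5 0,5".split()]
#   # list(flatten(pts)) == ['0', '0', '10', '0', '10', '5', '0', '5']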
# derived from https://blog.csdn.net/u012433049/article/details/82909484
def compute_poly_iou(list1, list2):
a1 = np.array(list1, dtype=int).reshape(-1, 2)
poly1 = Polygon(a1)
poly1_clean = poly1.buffer(0)
a2 = np.array(list2, dtype=int).reshape(-1, 2)
poly2 = Polygon(a2)
poly2_clean = poly2.buffer(0)
try:
# iou = poly1.intersection(poly2).area / poly1.union(poly2).area
iou = poly1_clean.intersection(poly2_clean).area / poly1_clean.union(poly2_clean).area
except ZeroDivisionError:
iou = 0
return iou
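# Quick sanity check (assumed coordinates): two axis-aligned 2x2 squares whose
# overlap is a 1x1 region have IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143.
#
#   sq_a = [0, 0, 2, 0, 2, 2, 0, 2]
#   sq_b = [1, 1, 3, 1, 3, 3, 1, 3]
#   # compute_poly_iou(sq_a, sq_b) ≈ 0.1428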
class Cell(object):
# @:param start_row : start row index of the Cell
# @:param start_col : start column index of the Cell
# @:param end-row : end row index of the Cell
# @:param end-col : end column index of the Cell
# @:param cell_box: bounding-box of the Cell (coordinates are saved as a string)
# @:param content_box: bounding-box of the text content within Cell (unused variable)
# @:param cell_id: unique id of the Cell
def __init__(self, table_id, start_row, start_col, cell_box, end_row, end_col, content_box=""):
self._start_row = int(start_row)
self._start_col = int(start_col)
self._cell_box = cell_box
self._content_box = content_box
self._table_id = table_id # the table_id this cell belongs to
# self._cell_name = cell_id # specify the cell using passed-in cell_id
self._cell_id = id(self)
# self._region = region
# check for end-row and end-col special case
if end_row == -1:
self._end_row = self.start_row
else:
self._end_row = int(end_row)
if end_col == -1:
self._end_col = self._start_col
else:
self._end_col = int(end_col)
@property
def start_row(self):
return self._start_row
@property
def start_col(self):
return self._start_col
@property
def end_row(self):
return self._end_row
@property
def end_col(self):
return self._end_col
@property
def cell_box(self):
return self._cell_box
@property
def content_box(self):
return self._content_box
@property
def cell_id(self):
return self._cell_id
@property
def table_id(self):
return self._table_id
def __str__(self):
return "CELL row=[%d, %d] col=[%d, %d] (coords=%s)" %(self.start_row, self.end_row
, self.start_col, self.end_col
, self.cell_box)
# return the IoU value of two cell blocks
def compute_cell_iou(self, another_cell):
cell_box_1_temp = []
for el in self.cell_box.split():
cell_box_1_temp.append((el.split(",")))
cell_box_1 = list(flatten(cell_box_1_temp))
cell_box_1 = [int(x) for x in cell_box_1]
cell_box_2_temp = []
for el in another_cell.cell_box.split():
cell_box_2_temp.append((el.split(",")))
cell_box_2 = list(flatten(cell_box_2_temp))
cell_box_2 = [int(x) for x in cell_box_2]
return compute_poly_iou(cell_box_1, cell_box_2)
# check if the two cell object denotes same cell area in table
def check_same(self, another_cell):
return self._start_row == another_cell.start_row and self._end_row == another_cell.end_row and \
self._start_col == another_cell.start_col and self._end_col == another_cell.end_col
# Note: currently save the relation with two cell object involved,
# can be replaced by cell_id in follow-up memory clean up
class AdjRelation:
DIR_HORIZ = 1
DIR_VERT = 2
def __init__(self, fromText, toText, direction):
# @param: fromText, toText are Cell objects (may be changed to cell-ID for further development)
self._fromText = fromText
self._toText = toText
self._direction = direction
@property
def fromText(self):
return self._fromText
@property
def toText(self):
return self._toText
@property
def direction(self):
return self._direction
def __str__(self):
if self.direction == self.DIR_VERT:
dir = "vertical"
else:
dir = "horizontal"
return 'ADJ_RELATION: ' + str(self._fromText) + ' ' + str(self._toText) + ' ' + dir
def isEqual(self, otherRelation):
return self.fromText.cell_id == otherRelation.fromText.cell_id and \
self.toText.cell_id == otherRelation.toText.cell_id and self.direction == otherRelation.direction
class Table:
def __init__(self, tableNode):
self._root = tableNode
self._id = id(self)
self._table_coords = ""
self._maxRow = 0 # PS: indexing from 0
self._maxCol = 0
self._cells = [] # save a table as list of <Cell>s
self.adj_relations = [] # save the adj_relations for the table
self.parsed = False
self.found = False # check if the find_adj_relations() has been called once
self.parse_table()
def __str__(self):
return "TABLE object - {} row x {} col".format(self._maxRow+1, self._maxCol+1)
@property
def id(self):
return self._id
@property
def table_coords(self):
return self._table_coords
@property
def table_cells(self):
return self._cells
# parse input xml to cell lists
def parse_table(self):
# get the table bbox
self._table_coords = str(self._root.getElementsByTagName("Coords")[0].getAttribute("points"))
# get info for each cell
cells = self._root.getElementsByTagName("cell")
max_row = max_col = 0
for cell in cells:
sr = cell.getAttribute("start-row")
sc = cell.getAttribute("start-col")
cell_id = cell.getAttribute("id")
b_points = str(cell.getElementsByTagName("Coords")[0].getAttribute("points"))
# try:
# try:
# text = cell.getElementsByTagName("content")[0].firstChild.nodeValue
# except AttributeError:
# text = ""
# except IndexError:
# text = "initialized cell as no content"
er = cell.getAttribute("end-row") if cell.hasAttribute("end-row") else -1
ec = cell.getAttribute("end-col") if cell.hasAttribute("end-col") else -1
new_cell = Cell(table_id=str(self.id), start_row=sr, start_col=sc, cell_box=b_points,
end_row=er, end_col=ec)
max_row = max(max_row, int(sr), int(er))
max_col = max(max_col, int(sc), int(ec))
self._cells.append(new_cell)
self._maxCol = max_col
self._maxRow = max_row
self.parsed = True
# generate a table-like structure for finding adj_relations
def convert_2d(self):
table = [[0 for x in range(self._maxCol+1)] for y in range(self._maxRow+1)] # init blank cell with int 0
for cell in self._cells:
cur_row = cell.start_row
while cur_row <= cell.end_row:
cur_col = cell.start_col
while cur_col <= cell.end_col:
temp = table[cur_row][cur_col]
if temp == 0:
table[cur_row][cur_col] = cell
elif type(temp) == list:
temp.append(cell)
table[cur_row][cur_col] = temp
else:
table[cur_row][cur_col] = [temp, cell]
cur_col += 1
cur_row += 1
return table
def find_adj_relations(self):
if self.found:
return self.adj_relations
else:
# if len(self._cells) == 0:
if self.parsed == False:
# fix: cases where there's no cell in table?
print("table is not parsed for further steps.")
self.parse_table()
self.find_adj_relations()
else:
retVal = []
tab = self.convert_2d()
# find horizontal relations
for r in range(self._maxRow+1):
for c_from in range(self._maxCol):
temp_pos = tab[r][c_from]
if temp_pos == 0:
continue
elif type(temp_pos) == list:
for cell in temp_pos:
c_to = c_from + 1
if tab[r][c_to] != 0:
# find relation between two adjacent cells
if type(tab[r][c_to]) == list:
for cell_to in tab[r][c_to]:
if cell != cell_to and (not cell.check_same(cell_to)):
adj_relation = AdjRelation(cell, cell_to, AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
else:
if cell != tab[r][c_to]:
adj_relation = AdjRelation(cell, tab[r][c_to], AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
else:
# find the next non-blank cell, if exists
for temp in range(c_from + 1, self._maxCol + 1):
if tab[r][temp] != 0:
if type(tab[r][temp]) == list:
for cell_to in tab[r][temp]:
adj_relation = AdjRelation(cell, cell_to,
AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
else:
adj_relation = AdjRelation(cell, tab[r][temp],
AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
break
else:
c_to = c_from + 1
if tab[r][c_to] != 0:
# find relation between two adjacent cells
if type(tab[r][c_to]) == list:
for cell_to in tab[r][c_to]:
if temp_pos != cell_to:
adj_relation = AdjRelation(temp_pos, cell_to, AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
else:
if temp_pos != tab[r][c_to]:
adj_relation = AdjRelation(temp_pos, tab[r][c_to], AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
else:
# find the next non-blank cell, if exists
for temp in range(c_from + 1, self._maxCol + 1):
if tab[r][temp] != 0:
if type(tab[r][temp]) == list:
for cell_to in tab[r][temp]:
adj_relation = AdjRelation(temp_pos, cell_to,
AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
else:
adj_relation = AdjRelation(temp_pos, tab[r][temp], AdjRelation.DIR_HORIZ)
retVal.append(adj_relation)
break
# find vertical relations
for c in range(self._maxCol+1):
for r_from in range(self._maxRow):
temp_pos = tab[r_from][c]
if temp_pos == 0:
continue
elif type(temp_pos) == list:
for cell in temp_pos:
r_to = r_from + 1
if tab[r_to][c] != 0:
# find relation between two adjacent cells
if type(tab[r_to][c]) == list:
for cell_to in tab[r_to][c]:
if cell != cell_to and (not cell.check_same(cell_to)):
adj_relation = AdjRelation(cell, cell_to, AdjRelation.DIR_VERT)
retVal.append(adj_relation)
else:
if cell != tab[r_to][c]:
adj_relation = AdjRelation(cell, tab[r_to][c], AdjRelation.DIR_VERT)
retVal.append(adj_relation)
else:
# find the next non-blank cell, if exists
for temp in range(r_from + 1, self._maxRow + 1):
if tab[temp][c] != 0:
if type(tab[temp][c]) == list:
for cell_to in tab[temp][c]:
adj_relation = AdjRelation(cell, cell_to,
AdjRelation.DIR_VERT)
retVal.append(adj_relation)
else:
adj_relation = AdjRelation(cell, tab[temp][c],
AdjRelation.DIR_VERT)
retVal.append(adj_relation)
break
else:
r_to = r_from + 1
if tab[r_to][c] != 0:
# find relation between two adjacent cells
if type(tab[r_to][c]) == list:
for cell_to in tab[r_to][c]:
if temp_pos != cell_to:
adj_relation = AdjRelation(temp_pos, cell_to, AdjRelation.DIR_VERT)
retVal.append(adj_relation)
else:
if temp_pos != tab[r_to][c]:
adj_relation = AdjRelation(temp_pos, tab[r_to][c], AdjRelation.DIR_VERT)
retVal.append(adj_relation)
else:
# find the next non-blank cell, if exists
for temp in range(r_from + 1, self._maxRow + 1):
if tab[temp][c] != 0:
if type(tab[temp][c]) == list:
for cell_to in tab[temp][c]:
adj_relation = AdjRelation(temp_pos, cell_to, AdjRelation.DIR_VERT)
retVal.append(adj_relation)
else:
adj_relation = AdjRelation(temp_pos, tab[temp][c], AdjRelation.DIR_VERT)
retVal.append(adj_relation)
break
# eliminate duplicates
repeat = True
while repeat:
repeat = False
duplicates = []
for ar1 in retVal:
for ar2 in retVal:
if ar1 != ar2:
if ar1.direction == ar2.direction and ar1.fromText == ar2.fromText and\
ar1.toText == ar2.toText:
duplicates.append(ar2)
break
else:
continue
break
if len(duplicates) > 0:
repeat = True
retVal.remove(duplicates[0])
self.found = True
self.adj_relations = retVal
return self.adj_relations
# compute the IOU of table, pass-in var is another Table object
def compute_table_iou(self, another_table):
table_box_1_temp = []
for el in self.table_coords.split():
table_box_1_temp.append((el.split(",")))
table_box_1 = list(flatten(table_box_1_temp))
table_box_1 = [int(x) for x in table_box_1]
table_box_2_temp = []
for el in another_table.table_coords.split():
table_box_2_temp.append((el.split(",")))
table_box_2 = list(flatten(table_box_2_temp))
table_box_2 = [int(x) for x in table_box_2]
return compute_poly_iou(table_box_1, table_box_2)
# find the cell mapping of tables as dictionary, pass-in var is another table and the desired IOU value
def find_cell_mapping(self, target_table, iou_value):
mapped_cell = [] # store the matches as tuples - (gt, result) mind the order of table when passing in
for cell_1 in self.table_cells:
for cell_2 in target_table.table_cells:
if cell_1.compute_cell_iou(cell_2) >= iou_value:
mapped_cell.append((cell_1, cell_2))
break
ret = dict(mapped_cell)
# print(ret)
return ret
# to print a table cell mapping
@classmethod
def printCellMapping(cls, dMappedCell):
print("-"*25)
for cell1, cell2 in dMappedCell.items():
print(" ", cell1, " --> ", cell2)
# to print a table set of adjacency relations
@classmethod
def printAdjacencyRelationList(cls, lAdjRel, title=""):
print("--- %s "%title + "-"*25)
for adj in lAdjRel:
print(adj)
class ResultStructure:
def __init__(self, truePos, gtTotal, resTotal):
self._truePos = truePos
self._gtTotal = gtTotal
self._resTotal = resTotal
@property
def truePos(self):
return self._truePos
@property
def gtTotal(self):
return self._gtTotal
@property
def resTotal(self):
return self._resTotal
def __str__(self):
return "true: {}, gt: {}, res: {}".format(self._truePos, self._gtTotal, self._resTotal) | EXA-1-master | exa/models/unilm-master/dit/object_detection/ditod/table_evaluation/data_structure.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
def train_class_batch(model, samples, target, criterion):
outputs = model(samples)
loss = criterion(outputs, target)
return loss, outputs
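# Illustrative sketch (assumed tensors): a single supervised step outside the
# full training loop, with a hypothetical `model`, image batch, and integer labels.
#
#   criterion = torch.nn.CrossEntropyLoss()
#   loss, logits = train_class_batch(model, images, labels, criterion)
#   loss.backward()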
def get_loss_scale_for_deepspeed(model):
optimizer = model.optimizer
return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,
start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
num_training_steps_per_epoch=None, update_freq=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if loss_scaler is None:
model.zero_grad()
model.micro_steps = 0
else:
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
# Update LR & WD only on the first micro-step of each gradient-accumulation cycle
if (lr_schedule_values is not None or wd_schedule_values is not None) and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if loss_scaler is None:
samples = samples.half()
loss, output = train_class_batch(
model, samples, targets, criterion)
else:
with torch.cuda.amp.autocast():
loss, output = train_class_batch(
model, samples, targets, criterion)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
if loss_scaler is None:
loss /= update_freq
model.backward(loss)
model.step()
if (data_iter_step + 1) % update_freq == 0:
# model.zero_grad()
# Deepspeed will call step() & model.zero_grad() automatic
if model_ema is not None:
model_ema.update(model)
grad_norm = None
loss_scale_value = get_loss_scale_for_deepspeed(model)
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
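# Illustrative sketch: running the evaluation loop on a prepared DataLoader.
# `val_loader` and `model` are assumed to exist; both live on the same device.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   stats = evaluate(val_loader, model.to(device), device)
#   print("top-1: {:.2f}, top-5: {:.2f}".format(stats["acc1"], stats["acc5"]))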
| EXA-1-master | exa/models/unilm-master/beit/engine_for_finetuning.py |
"""
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2020 Ross Wightman
Modified by Hangbo Bao, for generating the masked position for visual image transformer
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
# Copyright Zhun Zhong & Liang Zheng
#
# Hacked together by / Copyright 2020 Ross Wightman
#
# Modified by Hangbo Bao, for generating the masked position for visual image transformer
# --------------------------------------------------------'
import random
import math
import numpy as np
class MaskingGenerator:
def __init__(
self, input_size, num_masking_patches, min_num_patches=4, max_num_patches=None,
min_aspect=0.3, max_aspect=None):
if not isinstance(input_size, tuple):
input_size = (input_size, ) * 2
self.height, self.width = input_size
self.num_patches = self.height * self.width
self.num_masking_patches = num_masking_patches
self.min_num_patches = min_num_patches
self.max_num_patches = num_masking_patches if max_num_patches is None else max_num_patches
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
def __repr__(self):
repr_str = "Generator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
self.height, self.width, self.min_num_patches, self.max_num_patches,
self.num_masking_patches, self.log_aspect_ratio[0], self.log_aspect_ratio[1])
return repr_str
def get_shape(self):
return self.height, self.width
def _mask(self, mask, max_mask_patches):
delta = 0
for attempt in range(10):
target_area = random.uniform(self.min_num_patches, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < self.width and h < self.height:
top = random.randint(0, self.height - h)
left = random.randint(0, self.width - w)
num_masked = mask[top: top + h, left: left + w].sum()
# Overlap
if 0 < h * w - num_masked <= max_mask_patches:
for i in range(top, top + h):
for j in range(left, left + w):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
def __call__(self):
mask = np.zeros(shape=self.get_shape(), dtype=int)  # np.int was removed in NumPy 1.24
mask_count = 0
while mask_count < self.num_masking_patches:
max_mask_patches = self.num_masking_patches - mask_count
max_mask_patches = min(max_mask_patches, self.max_num_patches)
delta = self._mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return mask
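# Illustrative sketch: a 14x14 mask for 224px images with 16px patches, asking
# for roughly 75 masked patches as in BEiT-style masked image modeling. The
# exact count can differ slightly because blocks may overlap or the generator
# may stop early.
#
#   generator = MaskingGenerator(input_size=14, num_masking_patches=75)
#   mask = generator()        # numpy array of shape (14, 14) with 0/1 entries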
| EXA-1-master | exa/models/unilm-master/beit/masking_generator.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on OpenAI DALL-E and lucidrains' DALLE-pytorch code bases
# https://github.com/openai/DALL-E
# https://github.com/lucidrains/DALLE-pytorch
# --------------------------------------------------------'
from math import sqrt
import os
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
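# Illustrative sketch: with thres=0.5 the helper keeps the top half of each row
# of a (batch, vocab) logit matrix and sets the rest to -inf, so a later
# softmax assigns them zero probability.
#
#   logits = torch.randn(2, 8)
#   filtered = top_k(logits, thres=0.5)   # 4 finite values per row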
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
class BasicVAE(nn.Module):
def get_codebook_indices(self, images):
raise NotImplementedError()
def decode(self, img_seq):
raise NotImplementedError()
def get_codebook_probs(self, img_seq):
raise NotImplementedError()
def get_image_tokens_size(self):
pass
def get_image_size(self):
pass
class ResBlock(nn.Module):
def __init__(self, chan_in, hidden_size, chan_out):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan_in, hidden_size, 3, padding=1),
nn.ReLU(),
nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
nn.ReLU(),
nn.Conv2d(hidden_size, chan_out, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(BasicVAE):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.
):
super().__init__()
# assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
enc_layers = []
dec_layers = []
enc_in = channels
dec_in = codebook_dim
for layer_id in range(num_layers):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
enc_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
enc_in = hidden_dim
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
dec_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
dec_in = hidden_dim
enc_layers.append(nn.Conv2d(hidden_dim, num_tokens, 1))
dec_layers.append(nn.Conv2d(hidden_dim, channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
def get_image_size(self):
return self.image_size
def get_image_tokens_size(self):
return self.image_size // 8
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self.forward(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1)
return codebook_indices
@torch.no_grad()
@eval_decorator
def get_codebook_probs(self, images):
logits = self.forward(images, return_logits = True)
return nn.Softmax(dim=1)(logits)
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
qy = F.softmax(logits, dim = -1)
log_qy = torch.log(qy + 1e-10)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
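# Illustrative, hedged usage sketch (helper not part of the original file): a round trip
# through the customized DiscreteVAE defined above. With the default num_layers=3 the
# encoder downsamples by 2**3 = 8, so a 256px image yields a 32x32 grid of token ids.
def _example_discrete_vae_roundtrip():
    vae = DiscreteVAE(image_size=256, num_tokens=512, codebook_dim=512, num_layers=3)
    images = torch.randn(2, 3, 256, 256)
    indices = vae.get_codebook_indices(images)  # (2, 32, 32) argmax token ids
    tokens = indices.flatten(1)                 # (2, 1024) token sequence
    recon = vae.decode(tokens)                  # (2, 3, 256, 256) reconstruction
    return recon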
from dall_e import load_model
class Dalle_VAE(BasicVAE):
def __init__(self, image_size):
super().__init__()
self.encoder = None
self.decoder = None
self.image_size = image_size
def load_model(self, model_dir, device):
self.encoder = load_model(os.path.join(model_dir, "encoder.pkl"), device)
self.decoder = load_model(os.path.join(model_dir, "decoder.pkl"), device)
def decode(self, img_seq):
bsz = img_seq.size()[0]
img_seq = img_seq.view(bsz, self.image_size // 8, self.image_size // 8)
z = F.one_hot(img_seq, num_classes=self.encoder.vocab_size).permute(0, 3, 1, 2).float()
return self.decoder(z).float()
def get_codebook_indices(self, images):
z_logits = self.encoder(images)
        return torch.argmax(z_logits, dim=1)
def get_codebook_probs(self, images):
z_logits = self.encoder(images)
return nn.Softmax(dim=1)(z_logits)
def forward(self, img_seq_prob, no_process=False):
if no_process:
return self.decoder(img_seq_prob.float()).float()
else:
bsz, seq_len, num_class = img_seq_prob.size()
z = img_seq_prob.view(bsz, self.image_size // 8, self.image_size // 8, self.encoder.vocab_size)
return self.decoder(z.permute(0, 3, 1, 2).float()).float()
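# Illustrative, hedged usage sketch (helper not part of the original file): how the
# pre-training engine obtains visual-token labels from the DALL-E tokenizer.
# "weights/dall_e" is a placeholder directory that must contain the released
# encoder.pkl / decoder.pkl; `images` is expected to be a map_pixels-normalized tensor
# of shape (B, 3, image_size, image_size).
def _example_dalle_vae_tokenize(images, model_dir="weights/dall_e", device="cpu"):
    d_vae = Dalle_VAE(image_size=112)
    d_vae.load_model(model_dir, device)
    input_ids = d_vae.get_codebook_indices(images).flatten(1)  # (B, (112 // 8) ** 2)
    return input_ids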
| EXA-1-master | exa/models/unilm-master/beit/modeling_discrete_vae.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
import torch
import torchvision.transforms.functional as F
from PIL import Image
import warnings
import math
import random
import numpy as np
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
class ToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype)
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
Image.HAMMING: 'PIL.Image.HAMMING',
Image.BOX: 'PIL.Image.BOX',
}
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
class RandomResizedCropAndInterpolationWithTwoPic:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear', second_interpolation='lanczos'):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if second_size is not None:
if isinstance(second_size, tuple):
self.second_size = second_size
else:
self.second_size = (second_size, second_size)
else:
self.second_size = None
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.second_interpolation = _pil_interp(second_interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
if self.second_size is None:
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
else:
return F.resized_crop(img, i, j, h, w, self.size, interpolation), \
F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0}'.format(interpolate_str)
if self.second_size is not None:
format_string += ', second_size={0}'.format(self.second_size)
format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])
format_string += ')'
return format_string
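# Illustrative, hedged usage sketch (helper not part of the original file): with
# second_size set, a single random crop is resized twice, giving one view for the
# backbone and a smaller one for the discrete VAE, the same way
# DataAugmentationForBEiT uses this transform in datasets.py.
def _example_two_resolution_crop(pil_img):
    crop = RandomResizedCropAndInterpolationWithTwoPic(
        size=224, second_size=112,
        interpolation='bicubic', second_interpolation='lanczos')
    for_patches, for_visual_tokens = crop(pil_img)  # PIL images: 224x224 and 112x112
    return for_patches, for_visual_tokens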
| EXA-1-master | exa/models/unilm-master/beit/transforms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import sys
from typing import Iterable
import torch
import torch.nn as nn
import utils
def train_one_epoch(model: torch.nn.Module, d_vae: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
log_writer=None, lr_scheduler=None, start_steps=None,
lr_schedule_values=None, wd_schedule_values=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# assign learning rate & weight decay for each step
it = start_steps + step # global training iteration
if lr_schedule_values is not None or wd_schedule_values is not None:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples, images, bool_masked_pos = batch
images = images.to(device, non_blocking=True)
samples = samples.to(device, non_blocking=True)
bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)
with torch.no_grad():
input_ids = d_vae.get_codebook_indices(images).flatten(1)
bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
labels = input_ids[bool_masked_pos]
with torch.cuda.amp.autocast():
outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
loss = nn.CrossEntropyLoss()(input=outputs, target=labels)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
mlm_acc = (outputs.max(-1)[1] == labels).float().mean().item()
metric_logger.update(mlm_acc=mlm_acc)
if log_writer is not None:
log_writer.update(mlm_acc=mlm_acc, head="loss")
metric_logger.update(loss=loss_value)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
if lr_scheduler is not None:
lr_scheduler.step_update(start_steps + step)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
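# Illustrative, hedged shape sketch (helper not part of the original file): the loss in
# train_one_epoch lines up because the model emits logits only for masked positions and
# the labels are the d-VAE token ids gathered at the same positions.
def _example_masked_objective_shapes():
    vocab_size, num_masked = 8192, 150  # e.g. 2 images x 75 masked patches
    outputs = torch.randn(num_masked, vocab_size)
    labels = torch.randint(0, vocab_size, (num_masked,))
    loss = nn.CrossEntropyLoss()(input=outputs, target=labels)
    mlm_acc = (outputs.max(-1)[1] == labels).float().mean().item()
    return loss, mlm_acc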
| EXA-1-master | exa/models/unilm-master/beit/engine_for_pretraining.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# --------------------------------------------------------'
import math
import torch
import torch.nn as nn
from functools import partial
from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
def trunc_normal_(tensor, mean=0., std=1.):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)
__all__ = [
'beit_base_patch16_224_8k_vocab',
'beit_large_patch16_224_8k_vocab',
]
class VisionTransformerForMaskedImageModeling(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
attn_head_dim=attn_head_dim,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
self.init_std = init_std
self.lm_head = nn.Linear(embed_dim, vocab_size)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=self.init_std)
trunc_normal_(self.cls_token, std=self.init_std)
trunc_normal_(self.mask_token, std=self.init_std)
trunc_normal_(self.lm_head.weight, std=self.init_std)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_num_layers(self):
return len(self.blocks)
def forward_features(self, x, bool_masked_pos):
x = self.patch_embed(x, bool_masked_pos=bool_masked_pos)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_token
w = bool_masked_pos.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x = blk(x, rel_pos_bias=rel_pos_bias)
return self.norm(x)
def forward(self, x, bool_masked_pos, return_all_tokens=False):
x = self.forward_features(x, bool_masked_pos=bool_masked_pos)
x = x[:, 1:]
if return_all_tokens:
return self.lm_head(x)
else:
# return the masked tokens
return self.lm_head(x[bool_masked_pos])
@register_model
def beit_base_patch16_224_8k_vocab(pretrained=False, **kwargs):
model = VisionTransformerForMaskedImageModeling(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(
kwargs["init_ckpt"], map_location="cpu"
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def beit_large_patch16_224_8k_vocab(pretrained=False, **kwargs):
model = VisionTransformerForMaskedImageModeling(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(
kwargs["init_ckpt"], map_location="cpu"
)
model.load_state_dict(checkpoint["model"])
return model
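# Illustrative, hedged usage sketch (helper not part of the original file): instantiate
# the base masked-image-modeling model directly and run a forward pass with a random
# mask. The kwargs mirror the pre-training script defaults (per-block relative position
# bias on, absolute position embedding off, layer scale 0.1, drop path 0.1); the output
# covers only the masked patches.
def _example_pretrain_forward():
    model = beit_base_patch16_224_8k_vocab(
        pretrained=False, use_rel_pos_bias=True, use_abs_pos_emb=False,
        init_values=0.1, drop_path_rate=0.1)
    images = torch.randn(2, 3, 224, 224)
    bool_masked_pos = torch.zeros(2, 196, dtype=torch.bool)
    bool_masked_pos[:, :75] = True  # mask 75 of the 196 patches per image
    logits = model(images, bool_masked_pos)  # (2 * 75, 8192) masked-token logits
    return logits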
| EXA-1-master | exa/models/unilm-master/beit/modeling_pretrain.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import os
import torch
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from transforms import RandomResizedCropAndInterpolationWithTwoPic
from timm.data import create_transform
from dall_e.utils import map_pixels
from masking_generator import MaskingGenerator
from dataset_folder import ImageFolder
class DataAugmentationForBEiT(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
self.common_transform = transforms.Compose([
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(p=0.5),
RandomResizedCropAndInterpolationWithTwoPic(
size=args.input_size, second_size=args.second_input_size,
interpolation=args.train_interpolation, second_interpolation=args.second_interpolation,
),
])
self.patch_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
])
if args.discrete_vae_type == "dall-e":
self.visual_token_transform = transforms.Compose([
transforms.ToTensor(),
map_pixels,
])
elif args.discrete_vae_type == "customized":
self.visual_token_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=IMAGENET_INCEPTION_MEAN,
std=IMAGENET_INCEPTION_STD,
),
])
else:
raise NotImplementedError()
self.masked_position_generator = MaskingGenerator(
args.window_size, num_masking_patches=args.num_mask_patches,
max_num_patches=args.max_mask_patches_per_block,
min_num_patches=args.min_mask_patches_per_block,
)
def __call__(self, image):
for_patches, for_visual_tokens = self.common_transform(image)
return \
self.patch_transform(for_patches), self.visual_token_transform(for_visual_tokens), \
self.masked_position_generator()
def __repr__(self):
repr = "(DataAugmentationForBEiT,\n"
repr += " common_transform = %s,\n" % str(self.common_transform)
repr += " patch_transform = %s,\n" % str(self.patch_transform)
repr += " visual_tokens_transform = %s,\n" % str(self.visual_token_transform)
repr += " Masked position generator = %s,\n" % str(self.masked_position_generator)
repr += ")"
return repr
def build_beit_pretraining_dataset(args):
transform = DataAugmentationForBEiT(args)
print("Data Aug = %s" % str(transform))
return ImageFolder(args.data_path, transform=transform)
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == "image_folder":
root = args.data_path if is_train else args.eval_data_path
dataset = ImageFolder(root, transform=transform)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
assert nb_classes == args.nb_classes
print("Number of the class = %d" % args.nb_classes)
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
if args.crop_pct is None:
if args.input_size < 384:
args.crop_pct = 224 / 256
else:
args.crop_pct = 1.0
size = int(args.input_size / args.crop_pct)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
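# Illustrative, hedged usage sketch (helper not part of the original file): the minimal
# args namespace DataAugmentationForBEiT needs. The values mirror the pre-training
# script defaults (224px backbone view, 112px d-VAE view, 14x14 patch grid, 75 masked
# patches) and are illustrative rather than prescribed.
def _example_pretraining_augmentation():
    from argparse import Namespace
    args = Namespace(
        imagenet_default_mean_and_std=False,
        input_size=224, second_input_size=112,
        train_interpolation='bicubic', second_interpolation='lanczos',
        discrete_vae_type='dall-e',
        window_size=(14, 14), num_mask_patches=75,
        max_mask_patches_per_block=None, min_mask_patches_per_block=16)
    transform = DataAugmentationForBEiT(args)
    return transform  # transform(pil_image) -> (patches, visual_tokens, mask)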
| EXA-1-master | exa/models/unilm-master/beit/datasets.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner
from datasets import build_dataset
from engine_for_finetuning import train_one_epoch, evaluate
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
from scipy import interpolate
import modeling_finetune
def get_args():
parser = argparse.ArgumentParser('BEiT fine-tuning and evaluation script for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=5, type=int)
# Model parameters
parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.set_defaults(abs_pos_emb=False)
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--layer_decay', type=float, default=0.9)
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling')
parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes', default=0, type=int,
help='number of the classification types')
parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'],
                        type=str, help='dataset to use')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed', action='store_true', default=False)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
        except Exception:
print("Please 'pip install deepspeed==0.4.0'")
exit(0)
else:
ds_init = None
return parser.parse_args(), ds_init
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
use_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.patch_size = patch_size
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
if model.use_rel_pos_bias and "rel_pos_bias.relative_position_bias_table" in checkpoint_model:
print("Expand the shared relative position embedding to each transformer block. ")
num_layers = model.get_num_layers()
rel_pos_bias = checkpoint_model["rel_pos_bias.relative_position_bias_table"]
for i in range(num_layers):
checkpoint_model["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()
checkpoint_model.pop("rel_pos_bias.relative_position_bias_table")
all_keys = list(checkpoint_model.keys())
for key in all_keys:
if "relative_position_index" in key:
checkpoint_model.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = checkpoint_model[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
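                    # Resample this bias table onto the larger grid: pick a ratio q so a geometric
                    # progression of src_size//2 steps spans dst_size//2, place the existing bias
                    # values at those geometrically spaced coordinates, then read off a uniform
                    # target grid with cubic interpolation, one attention head at a time.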
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.090307:
# q = 1.090307
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
print("Original positions = %s" % str(x))
print("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
checkpoint_model[key] = new_rel_pos_bias
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" % num_training_steps_per_epoch)
num_layers = model_without_ddp.get_num_layers()
if args.layer_decay < 1.0:
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
skip_weight_decay_list = model.no_weight_decay()
if args.disable_weight_decay_on_rel_pos_bias:
for i in range(num_layers):
skip_weight_decay_list.add("blocks.%d.attn.relative_position_bias_table" % i)
if args.enable_deepspeed:
loss_scaler = None
optimizer_params = get_parameter_groups(
model, args.weight_decay, skip_weight_decay_list,
assigner.get_layer_id if assigner is not None else None,
assigner.get_scale if assigner is not None else None)
model, optimizer, _, _ = ds_init(
args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed,
)
print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps())
assert model.gradient_accumulation_steps() == args.update_freq
else:
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=skip_weight_decay_list,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler()
print("Use step level LR scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
exit(0)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer,
device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer, start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
# **{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts, ds_init = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts, ds_init)
| EXA-1-master | exa/models/unilm-master/beit/run_class_finetuning.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Modified on torchvision code bases
# https://github.com/pytorch/vision
# --------------------------------------------------------'
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os
import os.path
import random
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def is_image_file(filename: str) -> bool:
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = path, class_index
instances.append(item)
return instances
class DatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
is_valid_file (callable, optional): A function that takes path of a file
and check if the file is a valid file (used to check of corrupt files)
both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> None:
super(DatasetFolder, self).__init__(root, transform=transform,
target_transform=target_transform)
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
while True:
try:
path, target = self.samples[index]
sample = self.loader(path)
break
except Exception as e:
print(e)
index = random.randint(0, len(self.samples) - 1)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
is_valid_file (callable, optional): A function that takes path of an Image file
and check if the file is a valid file (used to check of corrupt files)
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
loader: Callable[[str], Any] = default_loader,
is_valid_file: Optional[Callable[[str], bool]] = None,
):
super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
transform=transform,
target_transform=target_transform,
is_valid_file=is_valid_file)
self.imgs = self.samples
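# Illustrative, hedged usage sketch (helper not part of the original file):
# "path/to/images" is a placeholder for a directory laid out as root/<class_name>/<image>;
# samples that fail to load are retried at a random index by DatasetFolder.__getitem__ above.
def _example_image_folder():
    dataset = ImageFolder("path/to/images")
    sample, target = dataset[0]  # PIL RGB image and integer class index
    return sample, target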
| EXA-1-master | exa/models/unilm-master/beit/dataset_folder.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.models import create_model
from optim_factory import create_optimizer
from datasets import build_beit_pretraining_dataset
from engine_for_pretraining import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_pretrain
def get_args():
parser = argparse.ArgumentParser('BEiT pre-training script', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--save_ckpt_freq', default=20, type=int)
parser.add_argument("--discrete_vae_weight_path", type=str)
parser.add_argument("--discrete_vae_type", type=str, default="dall-e")
# Model parameters
parser.add_argument('--model', default='beit_base_patch16_224_8k_vocab', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.set_defaults(abs_pos_emb=False)
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--num_mask_patches', default=75, type=int,
                        help='number of visual tokens/patches to be masked')
parser.add_argument('--max_mask_patches_per_block', type=int, default=None)
parser.add_argument('--min_mask_patches_per_block', type=int, default=16)
parser.add_argument('--input_size', default=224, type=int,
help='images input size for backbone')
parser.add_argument('--second_input_size', default=112, type=int,
help='images input size for discrete vae')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=[0.9, 0.999], type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: 0.9, 0.999)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD.
        (Set it to the same value as args.weight_decay to keep weight decay unchanged)""")
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='steps to warmup LR (overrides warmup_epochs when > 0)')
# Augmentation parameters
parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic; default: "bicubic")')
parser.add_argument('--second_interpolation', type=str, default='lanczos',
                        help='Interpolation for discrete vae (random, bilinear, bicubic; default: "lanczos")')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser.parse_args()
def get_model(args):
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
drop_path_rate=args.drop_path,
drop_block_rate=None,
use_shared_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
)
return model
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.patch_size = patch_size
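    # e.g. with the defaults (input_size=224, patch_size=16) this gives a 14 x 14
    # grid of patches, of which num_mask_patches (75 by default) are masked per image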
# get dataset
dataset_train = build_beit_pretraining_dataset(args)
# prepare discrete vae
d_vae = utils.create_d_vae(
weight_path=args.discrete_vae_weight_path, d_vae_type=args.discrete_vae_type,
device=device, image_size=args.second_input_size)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_rank = global_rank
num_training_steps_per_epoch = len(dataset_train) // args.batch_size // num_tasks
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * utils.get_world_size()
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Number of training steps = %d" % num_training_steps_per_epoch)
print("Number of training examples per epoch = %d" % (total_batch_size * num_training_steps_per_epoch))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp)
loss_scaler = NativeScaler()
print("Use step level LR & WD scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch)
train_stats = train_one_epoch(
model, d_vae, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
)
if args.output_dir:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch, 'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts)
| EXA-1-master | exa/models/unilm-master/beit/run_beit_pretraining.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from modeling_discrete_vae import Dalle_VAE, DiscreteVAE
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
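# Illustrative sketch (added; not in the original utils.py): how SmoothedValue
# behaves over a short stream of measurements.
def _example_smoothed_value():
    meter = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
    for v in [1.0, 2.0, 3.0, 10.0]:
        meter.update(v)
    # median/avg look at the last 3 values only; global_avg uses all 4
    return str(meter), meter.global_avg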
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
    return schedule
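# Illustrative sketch (added): the per-iteration schedule produced by
# cosine_scheduler for a small example run; all hyper-parameters below are
# placeholder numbers, not the values used in the paper.
def _example_cosine_scheduler():
    sched = cosine_scheduler(
        base_value=5e-4, final_value=1e-5,
        epochs=10, niter_per_ep=100, warmup_epochs=1)
    assert len(sched) == 10 * 100
    # linear warmup over the first epoch, cosine decay to final_value afterwards
    return sched[0], sched[99], sched[-1]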
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
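# Illustrative sketch (added): top-1 accuracy on a toy batch of two samples,
# only the first of which is classified correctly.
def _example_accuracy():
    logits = torch.tensor([[0.1, 0.9, 0.0], [0.8, 0.1, 0.1]])
    target = torch.tensor([1, 2])
    top1, = accuracy(logits, target, topk=(1,))
    return top1.item()  # 50.0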
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
if model_ema is not None:
client_state['model_ema'] = get_state_dict(model_ema)
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
"""
Re-start from checkpoint
"""
if not os.path.isfile(ckp_path):
return
print("Found checkpoint at {}".format(ckp_path))
# open checkpoint file
checkpoint = torch.load(ckp_path, map_location="cpu")
# key is what to look for in the checkpoint file
# value is the object to load
# example: {'state_dict': model}
for key, value in kwargs.items():
if key in checkpoint and value is not None:
try:
msg = value.load_state_dict(checkpoint[key], strict=False)
print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg))
except TypeError:
try:
msg = value.load_state_dict(checkpoint[key])
print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
except ValueError:
print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path))
else:
print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
    # reload variables that are important for the run
if run_variables is not None:
for var_name in run_variables:
if var_name in checkpoint:
run_variables[var_name] = checkpoint[var_name]
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
# torch.amp
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if hasattr(args, 'model_ema') and args.model_ema:
_load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
        # deepspeed, which only supports '--auto_resume'
if args.auto_resume:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
print("Auto resume checkpoint: %d" % latest_ckpt)
_, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
args.start_epoch = client_states['epoch'] + 1
if model_ema is not None:
if args.model_ema:
_load_checkpoint_for_ema(model_ema, client_states['model_ema'])
def create_d_vae(weight_path, d_vae_type, image_size, device):
if d_vae_type == "dall-e":
return get_dalle_vae(weight_path, image_size, device)
elif d_vae_type == "customized":
return get_d_vae(weight_path, image_size, device)
else:
raise NotImplementedError()
def get_dalle_vae(weight_path, image_size, device):
vae = Dalle_VAE(image_size)
vae.load_model(model_dir=weight_path, device=device)
return vae
def get_d_vae(weight_path, image_size, device):
NUM_TOKENS = 8192
NUM_LAYERS = 3
EMB_DIM = 512
HID_DIM = 256
state_dict = torch.load(os.path.join(weight_path, "pytorch_model.bin"), map_location="cpu")["weights"]
model = DiscreteVAE(
image_size=image_size,
num_layers=NUM_LAYERS,
num_tokens=NUM_TOKENS,
codebook_dim=EMB_DIM,
hidden_dim=HID_DIM,
).to(device)
model.load_state_dict(state_dict)
return model
def create_ds_config(args):
args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
with open(args.deepspeed_config, mode="w") as writer:
ds_config = {
"train_batch_size": args.batch_size * args.update_freq * get_world_size(),
"train_micro_batch_size_per_gpu": args.batch_size,
"steps_per_print": 1000,
"optimizer": {
"type": "Adam",
"adam_w_mode": True,
"params": {
"lr": args.lr,
"weight_decay": args.weight_decay,
"bias_correction": True,
"betas": [
0.9,
0.999
],
"eps": 1e-8
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 7,
"loss_scale_window": 128
}
}
writer.write(json.dumps(ds_config, indent=2))
| EXA-1-master | exa/models/unilm-master/beit/utils.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on DINO code bases
# https://github.com/facebookresearch/dino/blob/main/eval_linear.py
# --------------------------------------------------------'
import os
import argparse
import json
from pathlib import Path
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
import utils
import modeling_finetune
from timm.models import create_model
def load_model(model, checkpoint_file, model_key, model_prefix):
if checkpoint_file.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
checkpoint_file, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(checkpoint_file, map_location='cpu')
checkpoint_model = None
for model_key in model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
utils.load_state_dict(model, checkpoint_model, prefix=model_prefix)
def eval_linear(args):
utils.init_distributed_mode(args)
# print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
mean = (0.485, 0.456, 0.406) if args.imagenet_default_mean_and_std else (0.5, 0.5, 0.5)
std = (0.229, 0.224, 0.225) if args.imagenet_default_mean_and_std else (0.5, 0.5, 0.5)
# ============ preparing data ... ============
train_transform = pth_transforms.Compose([
pth_transforms.RandomResizedCrop(224),
pth_transforms.RandomHorizontalFlip(),
pth_transforms.ToTensor(),
pth_transforms.Normalize(mean, std),
])
val_transform = pth_transforms.Compose([
pth_transforms.Resize(256, interpolation=3),
pth_transforms.CenterCrop(224),
pth_transforms.ToTensor(),
pth_transforms.Normalize(mean, std),
])
print("train_transform = %s" % str(train_transform))
print("val_transform = %s" % str(val_transform))
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform)
dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform)
global_rank = utils.get_rank()
world_size = utils.get_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset_train, num_replicas=world_size, rank=global_rank, shuffle=True)
train_loader = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
)
val_loader = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
# ============ building network ... ============
model = create_model(
args.model, pretrained=False, num_classes=0, drop_rate=0, drop_path_rate=0,
attn_drop_rate=0, drop_block_rate=None, use_mean_pooling=False,
use_shared_rel_pos_bias=args.rel_pos_bias, use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
)
model.cuda()
model.eval()
print(f"Model {args.model} built.")
# load weights to evaluate
load_model(model=model, checkpoint_file=args.pretrained_weights, model_key=args.checkpoint_key, model_prefix="")
linear_classifier = LinearClassifier(
dim=model.embed_dim * (1 + int(args.avgpool_patchtokens)),
num_labels=args.num_labels, num_layers=model.get_num_layers())
linear_classifier = linear_classifier.cuda()
if world_size > 1:
linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu])
print("Model = %s" % str(linear_classifier))
# set optimizer
learning_rate = args.lr or args.base_lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256
    # use the absolute lr if provided, otherwise linearly scale base_lr with the total batch size
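    # e.g. with base_lr = 0.001, 8 GPUs and 128 images per GPU (placeholder numbers),
    # the scaled learning rate is 0.001 * (128 * 8) / 256 = 0.004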
if args.optimizer.lower() == "sgd":
optimizer = torch.optim.SGD(
linear_classifier.parameters(), learning_rate, momentum=0.9,
weight_decay=0, # we do not apply weight decay
)
else:
optimizer = torch.optim.AdamW(
linear_classifier.parameters(), learning_rate, weight_decay=1e-4,
)
print(f"Optimizer = %s" % str(optimizer))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0)
# Optionally resume from a checkpoint
to_restore = {"epoch": 0, "best_acc": 0.}
utils.restart_from_checkpoint(
os.path.join(args.output_dir, "checkpoint.pth.tar"),
run_variables=to_restore,
state_dict=linear_classifier,
optimizer=optimizer,
scheduler=scheduler,
)
start_epoch = to_restore["epoch"]
best_acc = to_restore["best_acc"]
for epoch in range(start_epoch, args.epochs):
train_loader.sampler.set_epoch(epoch)
train_stats = train(
model, linear_classifier, optimizer, train_loader, epoch, args.avgpool_patchtokens, args.amp_forward)
scheduler.step()
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch}
if epoch % args.val_freq == 0 or epoch == args.epochs - 1:
test_stats = validate_network(
val_loader, model, linear_classifier, args.avgpool_patchtokens, args.amp_forward)
for classifier_key in test_stats:
classifier = test_stats[classifier_key]
print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {classifier['acc1']:.1f}%")
best_acc = max(best_acc, classifier["acc1"])
print(f'Max accuracy so far: {best_acc:.2f}%')
log_stats = {**{k: v for k, v in log_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()}}
if utils.is_main_process():
with (Path(args.output_dir) / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
save_dict = {
"epoch": epoch + 1,
"state_dict": linear_classifier.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"best_acc": best_acc,
}
torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar"))
print("Training of the supervised linear classifier on frozen features completed.\n"
"Top-1 test accuracy: {acc:.1f}".format(acc=best_acc))
def train(model, linear_classifier, optimizer, loader, epoch, avgpool, amp_forward):
linear_classifier.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
assert avgpool
for (inp, target) in metric_logger.log_every(loader, 20, header):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
with torch.no_grad():
if amp_forward:
with torch.cuda.amp.autocast():
intermediate_output = model.get_intermediate_layers(inp)
else:
intermediate_output = model.get_intermediate_layers(inp)
output = []
for each_layer in intermediate_output:
cls_rep = each_layer[:, 0]
mean_rep = torch.mean(each_layer[:, 1:], dim=1)
output.append(torch.cat((cls_rep, mean_rep), dim=-1).float())
output = linear_classifier(output)
# compute cross entropy loss
loss = 0
for each_output in output:
loss += nn.CrossEntropyLoss()(each_output, target)
# compute the gradients
optimizer.zero_grad()
loss.backward()
# step
optimizer.step()
# log
torch.cuda.synchronize()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def validate_network(val_loader, model, linear_classifier, avgpool, amp_forward):
linear_classifier.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
assert avgpool
module = linear_classifier.module if hasattr(linear_classifier, 'module') else linear_classifier
for inp, target in metric_logger.log_every(val_loader, 20, header):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
with torch.no_grad():
if amp_forward:
with torch.cuda.amp.autocast():
intermediate_output = model.get_intermediate_layers(inp)
else:
intermediate_output = model.get_intermediate_layers(inp)
output = []
for each_layer in intermediate_output:
cls_rep = each_layer[:, 0]
mean_rep = torch.mean(each_layer[:, 1:], dim=1)
output.append(torch.cat((cls_rep, mean_rep), dim=-1).float())
all_output = linear_classifier(output)
for i, output in enumerate(all_output):
loss = nn.CrossEntropyLoss()(output, target)
if module.num_labels >= 5:
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
else:
acc1, = utils.accuracy(output, target, topk=(1,))
batch_size = inp.shape[0]
post_str = '_layer%d' % i
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1' + post_str].update(acc1.item(), n=batch_size)
if module.num_labels >= 5:
metric_logger.meters['acc5' + post_str].update(acc5.item(), n=batch_size)
eval_results = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
updated_results = {}
for key in eval_results:
if '_' in key:
this_key, classifier_idx = key.split('_')
if classifier_idx not in updated_results:
updated_results[classifier_idx] = {}
updated_results[classifier_idx][this_key] = eval_results[key]
print("Eval result = %s" % json.dumps(updated_results, indent=2))
return updated_results
class LinearClassifier(nn.Module):
"""Linear layer to train on top of frozen features"""
def __init__(self, num_layers, dim, num_labels=1000):
super(LinearClassifier, self).__init__()
self.num_labels = num_labels
self.linear = nn.ModuleList()
self.num_classifier = num_layers
for i in range(self.num_classifier):
linear = nn.Linear(dim, num_labels)
linear.weight.data.normal_(mean=0.0, std=0.01)
linear.bias.data.zero_()
self.linear.append(linear)
def forward(self, x_list):
results = []
for i, linear in enumerate(self.linear):
results.append(linear(x_list[i]))
return results
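# Illustrative sketch (added): one linear head per transformer layer. The
# dimensions are placeholders for a ViT-Base where the avg-pooled patch tokens
# are concatenated to the [CLS] token (hence 2 * 768 input features).
def _example_linear_classifier():
    num_layers, dim = 12, 2 * 768
    clf = LinearClassifier(num_layers=num_layers, dim=dim, num_labels=1000)
    x_list = [torch.randn(4, dim) for _ in range(num_layers)]
    logits_per_layer = clf(x_list)
    return [t.shape for t in logits_per_layer]  # 12 tensors of shape (4, 1000)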
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
if __name__ == '__main__':
parser = argparse.ArgumentParser('Evaluation with linear classification on ImageNet')
parser.add_argument('--avgpool_patchtokens', default=True, type=bool_flag,
help="""Whether ot not to concatenate the global average pooled features to the [CLS] token.
We typically set this to True for BEiT pretrained models. """)
parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.set_defaults(abs_pos_emb=False)
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
parser.add_argument('--optimizer', default="adamw", type=str, help='optimizer type')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument("--checkpoint_key", default="model|module|teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)')
parser.add_argument("--base_lr", default=0.001, type=float, help="""Learning rate at the beginning of
training (highest LR used during training). The learning rate is linearly scaled
with the batch size, and specified here for a reference batch size of 256.
We recommend tweaking the LR depending on the checkpoint evaluated.""")
parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument('--val_freq', default=1, type=int, help="Epoch frequency for validation.")
parser.add_argument('--output_dir', default=".", help='Path to save logs and checkpoints')
parser.add_argument('--num_labels', default=1000, type=int, help='Number of labels for linear classifier')
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--imagenet_default_mean_and_std', default=False, type=bool_flag,
help="""Set True to use the imagenet default mean and std, Set False will use the mean and std in Inception.
We recommand keep it same to the pre-training stage. """)
    parser.add_argument('--amp_forward', default=True, type=bool_flag, help='Use amp for the forward pass of the pre-trained model, which can speed up evaluation.')
args = parser.parse_args()
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
eval_linear(args)
| EXA-1-master | exa/models/unilm-master/beit/run_linear_eval.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
        # the dropout after fc1 is commented out to follow the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
            # cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
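# Illustrative sketch (added): a 224 x 224 image split into 16 x 16 patches
# yields a sequence of 14 * 14 = 196 patch embeddings.
def _example_patch_embed():
    embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    tokens = embed(torch.randn(2, 3, 224, 224))
    return tokens.shape  # torch.Size([2, 196, 768])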
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
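# Illustrative sketch (added): for a 14 x 14 patch grid (224 / 16) the shared
# relative position bias covers the 196 patch tokens plus the [CLS] token.
def _example_relative_position_bias():
    bias_module = RelativePositionBias(window_size=(14, 14), num_heads=12)
    bias = bias_module()
    return bias.shape  # torch.Size([12, 197, 197]) -> (num_heads, tokens, tokens)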
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
use_mean_pooling=True, init_scale=0.001):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.mask_token, std=.02)
if isinstance(self.head, nn.Linear):
trunc_normal_(self.head.weight, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
if isinstance(self.head, nn.Linear):
self.head.weight.data.mul_(init_scale)
self.head.bias.data.mul_(init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x = blk(x, rel_pos_bias=rel_pos_bias)
x = self.norm(x)
if self.fc_norm is not None:
t = x[:, 1:, :]
return self.fc_norm(t.mean(1))
else:
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def get_intermediate_layers(self, x):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
features = []
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x = blk(x, rel_pos_bias)
features.append(x)
return features
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
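# Illustrative sketch (added): instantiating one of the registered variants
# through timm's registry, mirroring how the training scripts call create_model;
# the keyword values below are examples, not prescribed settings.
def _example_create_beit():
    from timm.models import create_model
    model = create_model(
        "beit_base_patch16_224",
        pretrained=False,
        use_shared_rel_pos_bias=True,
        use_abs_pos_emb=False,
        init_values=0.1,
    )
    return model.get_num_layers()  # 12 transformer blocks for the base model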
| EXA-1-master | exa/models/unilm-master/beit/modeling_finetune.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_vit(var_name, num_max_layer):
if var_name in ("cls_token", "mask_token", "pos_embed"):
return 0
elif var_name.startswith("patch_embed"):
return 0
elif var_name.startswith("rel_pos_bias"):
return num_max_layer - 1
elif var_name.startswith("blocks"):
layer_id = int(var_name.split('.')[1])
return layer_id + 1
else:
return num_max_layer - 1
class LayerDecayValueAssigner(object):
def __init__(self, values):
self.values = values
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
return get_num_layer_for_vit(var_name, len(self.values))
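# Illustrative sketch (added): the fine-tuning entry point typically feeds the
# assigner a geometric sequence of LR scales; layer_decay = 0.9 and 12 layers
# below are placeholder numbers.
def _example_layer_decay_assigner():
    num_layers, layer_decay = 12, 0.9
    values = [layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)]
    assigner = LayerDecayValueAssigner(values)
    # the patch embedding (layer 0) gets the smallest scale, the head gets 1.0
    return (assigner.get_scale(assigner.get_layer_id("patch_embed.proj.weight")),
            assigner.get_scale(assigner.get_layer_id("head.weight")))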
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        raise ValueError(f'Invalid optimizer: {opt_lower}')
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
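if __name__ == "__main__":
    # A minimal sketch of how the helpers above fit together (hedged: the toy
    # model, the argparse.Namespace fields and the decay values are illustrative).
    import argparse
    import torch.nn as nn

    toy_args = argparse.Namespace(opt='adamw', lr=1e-3, weight_decay=0.05,
                                  momentum=0.9, opt_eps=1e-8, opt_betas=None)
    toy_model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
    # One lr scale per layer id returned by get_num_layer_for_vit (3 ids here).
    assigner = LayerDecayValueAssigner([0.9 ** (2 - i) for i in range(3)])
    optimizer = create_optimizer(toy_args, toy_model,
                                 get_num_layer=assigner.get_layer_id,
                                 get_layer_scale=assigner.get_scale)
    print(type(optimizer).__name__)  # AdamW, with per-group lr_scale entries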
| EXA-1-master | exa/models/unilm-master/beit/optim_factory.py |
import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from functools import partial
from dall_e.utils import Conv2d
@attr.s(eq=False, repr=False)
class DecoderBlock(nn.Module):
n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
    n_out: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 4 == 0)
n_layers: int = attr.ib(validator=lambda i, a, x: x >= 1)
device: torch.device = attr.ib(default=None)
requires_grad: bool = attr.ib(default=False)
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_hid = self.n_out // 4
self.post_gain = 1 / (self.n_layers ** 2)
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
self.id_path = make_conv(self.n_in, self.n_out, 1) if self.n_in != self.n_out else nn.Identity()
self.res_path = nn.Sequential(OrderedDict([
('relu_1', nn.ReLU()),
('conv_1', make_conv(self.n_in, self.n_hid, 1)),
('relu_2', nn.ReLU()),
('conv_2', make_conv(self.n_hid, self.n_hid, 3)),
('relu_3', nn.ReLU()),
('conv_3', make_conv(self.n_hid, self.n_hid, 3)),
('relu_4', nn.ReLU()),
('conv_4', make_conv(self.n_hid, self.n_out, 3)),]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.id_path(x) + self.post_gain * self.res_path(x)
@attr.s(eq=False, repr=False)
class Decoder(nn.Module):
group_count: int = 4
n_init: int = attr.ib(default=128, validator=lambda i, a, x: x >= 8)
n_hid: int = attr.ib(default=256, validator=lambda i, a, x: x >= 64)
n_blk_per_group: int = attr.ib(default=2, validator=lambda i, a, x: x >= 1)
output_channels: int = attr.ib(default=3, validator=lambda i, a, x: x >= 1)
vocab_size: int = attr.ib(default=8192, validator=lambda i, a, x: x >= 512)
device: torch.device = attr.ib(default=torch.device('cpu'))
requires_grad: bool = attr.ib(default=False)
use_mixed_precision: bool = attr.ib(default=True)
def __attrs_post_init__(self) -> None:
super().__init__()
blk_range = range(self.n_blk_per_group)
n_layers = self.group_count * self.n_blk_per_group
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
make_blk = partial(DecoderBlock, n_layers=n_layers, device=self.device,
requires_grad=self.requires_grad)
self.blocks = nn.Sequential(OrderedDict([
('input', make_conv(self.vocab_size, self.n_init, 1, use_float16=False)),
('group_1', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(self.n_init if i == 0 else 8 * self.n_hid, 8 * self.n_hid)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
]))),
('group_2', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(8 * self.n_hid if i == 0 else 4 * self.n_hid, 4 * self.n_hid)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
]))),
('group_3', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(4 * self.n_hid if i == 0 else 2 * self.n_hid, 2 * self.n_hid)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
]))),
('group_4', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(2 * self.n_hid if i == 0 else 1 * self.n_hid, 1 * self.n_hid)) for i in blk_range],
]))),
('output', nn.Sequential(OrderedDict([
('relu', nn.ReLU()),
('conv', make_conv(1 * self.n_hid, 2 * self.output_channels, 1)),
]))),
]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError(f'input shape {x.shape} is not 4d')
if x.shape[1] != self.vocab_size:
raise ValueError(f'input has {x.shape[1]} channels but model built for {self.vocab_size}')
if x.dtype != torch.float32:
raise ValueError('input must have dtype torch.float32')
return self.blocks(x)
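if __name__ == "__main__":
    # Smoke-test sketch: push a one-hot token map through an untrained decoder.
    # The sizes are illustrative; a randomly initialised model produces noise.
    dec = Decoder(n_hid=64)
    tokens = torch.randint(0, dec.vocab_size, (1, 4, 4))
    x = F.one_hot(tokens, num_classes=dec.vocab_size).permute(0, 3, 1, 2).float()
    out = dec(x)
    print(out.shape)  # (1, 6, 32, 32): 2 * output_channels, three 2x upsamples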
| EXA-1-master | exa/models/unilm-master/beit/dall_e/decoder.py |
import io, requests
import torch
import torch.nn as nn
from dall_e.encoder import Encoder
from dall_e.decoder import Decoder
from dall_e.utils import map_pixels, unmap_pixels
def load_model(path: str, device: torch.device = None) -> nn.Module:
if path.startswith('http://') or path.startswith('https://'):
resp = requests.get(path)
resp.raise_for_status()
with io.BytesIO(resp.content) as buf:
return torch.load(buf, map_location=device)
else:
with open(path, 'rb') as f:
return torch.load(f, map_location=device)
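if __name__ == "__main__":
    # A rough sketch of the usual dVAE round trip (hedged: the weight paths are
    # hypothetical placeholders for the released encoder/decoder checkpoints,
    # the input size is illustrative, and it assumes load_model returns the
    # Encoder/Decoder modules defined in this package).
    import torch.nn.functional as F

    enc = load_model("/path/to/encoder.pkl")  # hypothetical path
    dec = load_model("/path/to/decoder.pkl")  # hypothetical path
    x = map_pixels(torch.rand(1, 3, 112, 112))
    z_logits = enc(x)                              # (1, vocab_size, 14, 14)
    tokens = torch.argmax(z_logits, dim=1)         # discrete visual tokens
    z = F.one_hot(tokens, num_classes=enc.vocab_size).permute(0, 3, 1, 2).float()
    x_rec = unmap_pixels(torch.sigmoid(dec(z)[:, :3]))
    print(tokens.shape, x_rec.shape)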
| EXA-1-master | exa/models/unilm-master/beit/dall_e/__init__.py |
import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from functools import partial
from dall_e.utils import Conv2d
@attr.s(eq=False, repr=False)
class EncoderBlock(nn.Module):
n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
    n_out: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 4 == 0)
n_layers: int = attr.ib(validator=lambda i, a, x: x >= 1)
device: torch.device = attr.ib(default=None)
requires_grad: bool = attr.ib(default=False)
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_hid = self.n_out // 4
self.post_gain = 1 / (self.n_layers ** 2)
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
self.id_path = make_conv(self.n_in, self.n_out, 1) if self.n_in != self.n_out else nn.Identity()
self.res_path = nn.Sequential(OrderedDict([
('relu_1', nn.ReLU()),
('conv_1', make_conv(self.n_in, self.n_hid, 3)),
('relu_2', nn.ReLU()),
('conv_2', make_conv(self.n_hid, self.n_hid, 3)),
('relu_3', nn.ReLU()),
('conv_3', make_conv(self.n_hid, self.n_hid, 3)),
('relu_4', nn.ReLU()),
('conv_4', make_conv(self.n_hid, self.n_out, 1)),]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.id_path(x) + self.post_gain * self.res_path(x)
@attr.s(eq=False, repr=False)
class Encoder(nn.Module):
group_count: int = 4
n_hid: int = attr.ib(default=256, validator=lambda i, a, x: x >= 64)
n_blk_per_group: int = attr.ib(default=2, validator=lambda i, a, x: x >= 1)
input_channels: int = attr.ib(default=3, validator=lambda i, a, x: x >= 1)
vocab_size: int = attr.ib(default=8192, validator=lambda i, a, x: x >= 512)
device: torch.device = attr.ib(default=torch.device('cpu'))
requires_grad: bool = attr.ib(default=False)
use_mixed_precision: bool = attr.ib(default=True)
def __attrs_post_init__(self) -> None:
super().__init__()
blk_range = range(self.n_blk_per_group)
n_layers = self.group_count * self.n_blk_per_group
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
make_blk = partial(EncoderBlock, n_layers=n_layers, device=self.device,
requires_grad=self.requires_grad)
self.blocks = nn.Sequential(OrderedDict([
('input', make_conv(self.input_channels, 1 * self.n_hid, 7)),
('group_1', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(1 * self.n_hid, 1 * self.n_hid)) for i in blk_range],
('pool', nn.MaxPool2d(kernel_size=2)),
]))),
('group_2', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(1 * self.n_hid if i == 0 else 2 * self.n_hid, 2 * self.n_hid)) for i in blk_range],
('pool', nn.MaxPool2d(kernel_size=2)),
]))),
('group_3', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(2 * self.n_hid if i == 0 else 4 * self.n_hid, 4 * self.n_hid)) for i in blk_range],
('pool', nn.MaxPool2d(kernel_size=2)),
]))),
('group_4', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(4 * self.n_hid if i == 0 else 8 * self.n_hid, 8 * self.n_hid)) for i in blk_range],
]))),
('output', nn.Sequential(OrderedDict([
('relu', nn.ReLU()),
('conv', make_conv(8 * self.n_hid, self.vocab_size, 1, use_float16=False)),
]))),
]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError(f'input shape {x.shape} is not 4d')
if x.shape[1] != self.input_channels:
raise ValueError(f'input has {x.shape[1]} channels but model built for {self.input_channels}')
if x.dtype != torch.float32:
raise ValueError('input must have dtype torch.float32')
return self.blocks(x)
| EXA-1-master | exa/models/unilm-master/beit/dall_e/encoder.py |
import attr
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
logit_laplace_eps: float = 0.1
@attr.s(eq=False)
class Conv2d(nn.Module):
n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
n_out: int = attr.ib(validator=lambda i, a, x: x >= 1)
kw: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 2 == 1)
use_float16: bool = attr.ib(default=True)
device: torch.device = attr.ib(default=torch.device('cpu'))
requires_grad: bool = attr.ib(default=False)
def __attrs_post_init__(self) -> None:
super().__init__()
w = torch.empty((self.n_out, self.n_in, self.kw, self.kw), dtype=torch.float32,
device=self.device, requires_grad=self.requires_grad)
w.normal_(std=1 / math.sqrt(self.n_in * self.kw ** 2))
b = torch.zeros((self.n_out,), dtype=torch.float32, device=self.device,
requires_grad=self.requires_grad)
self.w, self.b = nn.Parameter(w), nn.Parameter(b)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.use_float16 and 'cuda' in self.w.device.type:
if x.dtype != torch.float16:
x = x.half()
w, b = self.w.half(), self.b.half()
else:
if x.dtype != torch.float32:
x = x.float()
w, b = self.w, self.b
return F.conv2d(x, w, b, padding=(self.kw - 1) // 2)
def map_pixels(x: torch.Tensor) -> torch.Tensor:
if x.dtype != torch.float:
raise ValueError('expected input to have type float')
return (1 - 2 * logit_laplace_eps) * x + logit_laplace_eps
def unmap_pixels(x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError('expected input to be 4d')
if x.dtype != torch.float:
raise ValueError('expected input to have type float')
return torch.clamp((x - logit_laplace_eps) / (1 - 2 * logit_laplace_eps), 0, 1)
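if __name__ == "__main__":
    # Sanity sketch: unmap_pixels inverts map_pixels (up to clamping); pixels in
    # [0, 1] are squeezed into [eps, 1 - eps] for the logit-Laplace parameterisation.
    x = torch.rand(2, 3, 8, 8)
    y = map_pixels(x)
    assert torch.allclose(unmap_pixels(y), x, atol=1e-6)
    print(float(y.min()), float(y.max()))  # roughly within [0.1, 0.9]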
| EXA-1-master | exa/models/unilm-master/beit/dall_e/utils.py |
import argparse
import os
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.utils import DictAction
from mmseg.apis import multi_gpu_test, single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
from backbone import beit
def parse_args():
parser = argparse.ArgumentParser(
description='mmseg test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--aug-test', action='store_true', help='Use Flip and Multi scale aug')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., "mIoU"'
' for generic datasets, and "cityscapes" for Cityscapes')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu_collect is not specified')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the arguments "--out", "--eval", "--format-only", '
         '"--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.aug_test:
# hard code index
cfg.data.test.pipeline[1].img_ratios = [
0.5, 0.75, 1.0, 1.25, 1.5, 1.75
]
cfg.data.test.pipeline[1].flip = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
efficient_test = False
if args.eval_options is not None:
efficient_test = args.eval_options.get('efficient_test', False)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
efficient_test)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect, efficient_test)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, **kwargs)
if __name__ == '__main__':
main()
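# Typical invocations (hedged: config and checkpoint paths are illustrative,
# not taken from this repository):
#   python tools/test.py <config>.py <checkpoint>.pth --eval mIoU
#   # multi-scale + flip evaluation
#   python tools/test.py <config>.py <checkpoint>.pth --aug-test --eval mIoU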
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/tools/test.py |
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import mmcv_custom
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed
from mmcv_custom import train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from backbone import beit
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmseg version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_segmentor(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
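# Typical distributed launch (hedged: paths and GPU count are illustrative; the
# --launcher flag matches the choices defined in parse_args above):
#   python -m torch.distributed.launch --nproc_per_node=8 tools/train.py \
#       <config>.py --launcher pytorch --work-dir work_dirs/beit_upernet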
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/tools/train.py |
import json
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.runner import get_dist_info
def get_num_layer_for_vit(var_name, num_max_layer):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.patch_embed"):
return 0
elif var_name.startswith("backbone.blocks"):
layer_id = int(var_name.split('.')[2])
return layer_id + 1
else:
return num_max_layer - 1
@OPTIMIZER_BUILDERS.register_module()
class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor):
def add_params(self, params, module, prefix='', is_dcn_module=None):
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module
is_dcn_module (int|float|None): If the current module is a
submodule of DCN, `is_dcn_module` will be passed to
control conv_offset layer's learning rate. Defaults to None.
"""
parameter_groups = {}
print(self.paramwise_cfg)
num_layers = self.paramwise_cfg.get('num_layers') + 2
layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate')
print("Build LayerDecayOptimizerConstructor %f - %d" % (layer_decay_rate, num_layers))
weight_decay = self.base_wd
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
layer_id = get_num_layer_for_vit(name, num_layers)
group_name = "layer_%d_%s" % (layer_id, group_name)
if group_name not in parameter_groups:
scale = layer_decay_rate ** (num_layers - layer_id - 1)
parameter_groups[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"param_names": [],
"lr_scale": scale,
"group_name": group_name,
"lr": scale * self.base_lr,
}
parameter_groups[group_name]["params"].append(param)
parameter_groups[group_name]["param_names"].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
"param_names": parameter_groups[key]["param_names"],
"lr_scale": parameter_groups[key]["lr_scale"],
"lr": parameter_groups[key]["lr"],
"weight_decay": parameter_groups[key]["weight_decay"],
}
print("Param groups = %s" % json.dumps(to_display, indent=2))
# state_dict = module.state_dict()
# for group_name in parameter_groups:
# group = parameter_groups[group_name]
# for name in group["param_names"]:
# group["params"].append(state_dict[name])
params.extend(parameter_groups.values())
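# Example mmseg optimizer config that selects this constructor (hedged: the
# numbers are illustrative; only `num_layers` and `layer_decay_rate` are read
# from paramwise_cfg above):
#
#   optimizer = dict(
#       type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
#       constructor='LayerDecayOptimizerConstructor',
#       paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))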
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py |
import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
try:
import apex
except ImportError:
    print('apex is not installed')
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_segmentor(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Launch segmentor training."""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
drop_last=True) for ds in dataset
]
# build optimizer
optimizer = build_optimizer(model, cfg.optimizer)
# use apex fp16 optimizer
if cfg.optimizer_config.get("type", None) and cfg.optimizer_config["type"] == "DistOptimizerHook":
if cfg.optimizer_config.get("use_fp16", False):
model, optimizer = apex.amp.initialize(
model.cuda(), optimizer, opt_level="O1")
for m in model.modules():
if hasattr(m, "fp16_enabled"):
m.fp16_enabled = True
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
if cfg.get('runner') is None:
cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# register hooks
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = 'IterBasedRunner' not in cfg.runner['type']
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/train_api.py |
import mmcv
import numpy as np
from mmseg.datasets.builder import PIPELINES
@PIPELINES.register_module()
class SETR_Resize(object):
"""Resize images & seg.
This transform resizes the input image to some scale. If the input dict
contains the key "scale", then the scale in the input dict is used,
otherwise the specified scale in the init method is used.
``img_scale`` can either be a tuple (single-scale) or a list of tuple
(multi-scale). There are 3 multiscale modes:
- ``ratio_range is not None``: randomly sample a ratio from the ratio range
and multiply it with the image scale.
- ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
      scale from a range.
- ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
scale from multiple scales.
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
crop_size=None,
setr_multi_scale=False):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
# assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given a scale and a range of image ratio
assert len(self.img_scale) == 1
else:
# mode 2: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
self.crop_size = crop_size
self.setr_multi_scale = setr_multi_scale
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
where ``img_scale`` is the selected image scale and
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where
``img_scale`` is sampled scale and None is just a placeholder
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where
``scale`` is sampled ratio multiplied with ``img_scale`` and
None is just a placeholder to be consistent with
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
            dict: Two new keys ``scale`` and ``scale_idx`` are added into
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
if self.keep_ratio:
if self.setr_multi_scale:
if min(results['scale']) < self.crop_size[0]:
new_short = self.crop_size[0]
else:
new_short = min(results['scale'])
h, w = results['img'].shape[:2]
if h > w:
new_h, new_w = new_short * h / w, new_short
else:
new_h, new_w = new_short, new_short * w / h
results['scale'] = (new_h, new_w)
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
# the w_scale and h_scale has minor difference
# a real fix should be done in the mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results['img'].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key], results['scale'], interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(img_scale={self.img_scale}, '
f'multiscale_mode={self.multiscale_mode}, '
f'ratio_range={self.ratio_range}, '
f'keep_ratio={self.keep_ratio})')
return repr_str
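# Example test-pipeline entry using this transform (hedged: sizes are
# illustrative; setr_multi_scale rescales so the short side of the sampled
# scale is at least crop_size[0]):
#
#   dict(type='SETR_Resize', keep_ratio=True,
#        crop_size=(512, 512), setr_multi_scale=True)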
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/resize_transform.py |
# Copyright (c) Open-MMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo
from torch.nn import functional as F
import mmcv
from mmcv.fileio import FileClient
from mmcv.fileio import load as load_file
from mmcv.parallel import is_module_wrapper
from mmcv.utils import mkdir_or_exist
from mmcv.runner import get_dist_info
from scipy import interpolate
import numpy as np
import math
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
def _get_mmcv_home():
mmcv_home = os.path.expanduser(
os.getenv(
ENV_MMCV_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# use _load_from_state_dict to enable checkpoint version control
def load(module, prefix=''):
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
all_missing_keys, unexpected_keys,
err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None # break load->load reference cycle
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
if unexpected_keys:
err_msg.append('unexpected key in source '
f'state_dict: {", ".join(unexpected_keys)}\n')
if missing_keys:
err_msg.append(
f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def load_url_dist(url, model_dir=None, map_location="cpu"):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir, map_location=map_location)
return checkpoint
def load_pavimodel_dist(model_path, map_location=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
try:
from pavi import modelcloud
except ImportError as e:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.') from e
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(
downloaded_file, map_location=map_location)
return checkpoint
def load_fileclient_dist(filename, backend, map_location):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if rank == 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
def _load_checkpoint(filename, map_location=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict | OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_urls = get_torchvision_models()
model_name = filename[11:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('torchvision://'):
model_urls = get_torchvision_models()
model_name = filename[14:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_urls = get_external_models()
model_name = filename[13:]
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
f'of open-mmlab://{deprecated_urls[model_name]}')
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
# check if is url
if model_url.startswith(('http://', 'https://')):
checkpoint = load_url_dist(model_url)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
elif filename.startswith('mmcls://'):
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_url_dist(model_urls[model_name])
checkpoint = _process_mmcls_checkpoint(checkpoint)
elif filename.startswith(('http://', 'https://')):
checkpoint = load_url_dist(filename)
elif filename.startswith('pavi://'):
model_path = filename[7:]
checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
elif filename.startswith('s3://'):
checkpoint = load_fileclient_dist(
filename, backend='ceph', map_location=map_location)
else:
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
def load_checkpoint(model,
filename,
map_location='cpu',
strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location)
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
elif 'module' in checkpoint:
state_dict = checkpoint['module']
else:
state_dict = checkpoint
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for MoBY, load model of online branch
if sorted(list(state_dict.keys()))[0].startswith('encoder'):
state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
# reshape absolute position embedding for Swin
if state_dict.get('absolute_pos_embed') is not None:
absolute_pos_embed = state_dict['absolute_pos_embed']
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H*W:
logger.warning("Error in loading absolute_pos_embed, pass")
else:
state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
rank, _ = get_dist_info()
if "rel_pos_bias.relative_position_bias_table" in state_dict:
if rank == 0:
print("Expand the shared relative position embedding to each layers. ")
num_layers = model.get_num_layers()
rel_pos_bias = state_dict["rel_pos_bias.relative_position_bias_table"]
for i in range(num_layers):
state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()
state_dict.pop("rel_pos_bias.relative_position_bias_table")
all_keys = list(state_dict.keys())
for key in all_keys:
if "relative_position_index" in key:
state_dict.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = state_dict[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
if rank == 0:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.13492:
# q = 1.13492
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
if rank == 0:
print("x = {}".format(x))
print("dx = {}".format(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
state_dict[key] = new_rel_pos_bias
if 'pos_embed' in state_dict:
pos_embed_checkpoint = state_dict['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
if rank == 0:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
state_dict['pos_embed'] = new_pos_embed
# interpolate position bias table if needed
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f"Error in loading {table_key}, pass")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2), mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
"""Saves module state to `destination` dictionary.
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
Args:
module (nn.Module): The module to generate state_dict.
destination (dict): A dict where state will be stored.
prefix (str): The prefix for parameters and buffers used in this
module.
"""
for name, param in module._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in module._buffers.items():
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
if buf is not None:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
This method is modified from :meth:`torch.nn.Module.state_dict` to
recursively check parallel module in case that the model has a complicated
structure, e.g., nn.Module(nn.Module(DDP)).
Args:
module (nn.Module): The module to generate state_dict.
destination (OrderedDict): Returned dict for the state of the
module.
prefix (str): Prefix of the key.
keep_vars (bool): Whether to keep the variable property of the
parameters. Default: False.
Returns:
dict: A dictionary containing a whole state of the module.
"""
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
# below is the same as torch.nn.Module.state_dict()
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = local_metadata = dict(
version=module._version)
_save_to_state_dict(module, destination, prefix, keep_vars)
for name, child in module._modules.items():
if child is not None:
get_state_dict(
child, destination, prefix + name + '.', keep_vars=keep_vars)
for hook in module._state_dict_hooks.values():
hook_result = hook(module, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError as e:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.') from e
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
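if __name__ == "__main__":
    # Round-trip sketch with a toy module (hedged: the path is illustrative).
    # The BEiT-specific interpolation branches above only fire for ViT-style
    # keys, so they are skipped for this plain Linear layer.
    import torch.nn as nn
    ckpt_path = '/tmp/toy_ckpt.pth'  # illustrative location
    save_checkpoint(nn.Linear(4, 2), ckpt_path, meta=dict(note='toy'))
    load_checkpoint(nn.Linear(4, 2), ckpt_path, map_location='cpu', strict=True)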
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/checkpoint.py |
# -*- coding: utf-8 -*-
from .checkpoint import load_checkpoint
from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor
from .resize_transform import SETR_Resize
from .apex_runner.optimizer import DistOptimizerHook
from .train_api import train_segmentor
__all__ = ['load_checkpoint', 'LayerDecayOptimizerConstructor', 'SETR_Resize', 'DistOptimizerHook', 'train_segmentor']
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/__init__.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.runner import RUNNERS, IterBasedRunner
from .checkpoint import save_checkpoint
try:
import apex
except ImportError:
print('apex is not installed')
@RUNNERS.register_module()
class IterBasedRunnerAmp(IterBasedRunner):
"""Iteration-based Runner with AMP support.
This runner train models iteration by iteration.
"""
def save_checkpoint(self,
out_dir,
filename_tmpl='iter_{}.pth',
meta=None,
save_optimizer=True,
create_symlink=False):
"""Save checkpoint to file.
Args:
out_dir (str): Directory to save checkpoint files.
filename_tmpl (str, optional): Checkpoint file template.
Defaults to 'iter_{}.pth'.
meta (dict, optional): Metadata to be saved in checkpoint.
Defaults to None.
            save_optimizer (bool, optional): Whether to save the optimizer.
                Defaults to True.
            create_symlink (bool, optional): Whether to create a symlink to the
                latest checkpoint file. Defaults to False.
"""
if meta is None:
meta = dict(iter=self.iter + 1, epoch=self.epoch + 1)
elif isinstance(meta, dict):
meta.update(iter=self.iter + 1, epoch=self.epoch + 1)
else:
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
filename = filename_tmpl.format(self.iter + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# in some environments, `os.symlink` is not supported, you may need to
# set `create_symlink` to False
# if create_symlink:
# dst_file = osp.join(out_dir, 'latest.pth')
# if platform.system() != 'Windows':
# mmcv.symlink(filename, dst_file)
# else:
# shutil.copy(filepath, dst_file)
def resume(self,
checkpoint,
resume_optimizer=True,
map_location='default'):
if map_location == 'default':
if torch.cuda.is_available():
device_id = torch.cuda.current_device()
checkpoint = self.load_checkpoint(
checkpoint,
map_location=lambda storage, loc: storage.cuda(device_id))
else:
checkpoint = self.load_checkpoint(checkpoint)
else:
checkpoint = self.load_checkpoint(
checkpoint, map_location=map_location)
self._epoch = checkpoint['meta']['epoch']
self._iter = checkpoint['meta']['iter']
self._inner_iter = checkpoint['meta']['iter']
if 'optimizer' in checkpoint and resume_optimizer:
if isinstance(self.optimizer, Optimizer):
self.optimizer.load_state_dict(checkpoint['optimizer'])
elif isinstance(self.optimizer, dict):
for k in self.optimizer.keys():
self.optimizer[k].load_state_dict(
checkpoint['optimizer'][k])
else:
raise TypeError(
'Optimizer should be dict or torch.optim.Optimizer '
f'but got {type(self.optimizer)}')
if 'amp' in checkpoint:
apex.amp.load_state_dict(checkpoint['amp'])
self.logger.info('load amp state dict')
self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
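# --- a hedged usage note (not part of the original file) ----------------------
# Because of the @RUNNERS.register_module() decorator above, configs select this
# runner purely by name; the BEiT segmentation configs in this repo do it with
#   runner = dict(type='IterBasedRunnerAmp')
# Resuming then goes through resume() above, which also restores the apex
# loss-scaling state stored under the 'amp' key written by save_checkpoint.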
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/apex_runner/apex_iter_based_runner.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
try:
import apex
except ImportError:
print('apex is not installed')
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
    The checkpoint will have 4 fields: ``meta``, ``state_dict``,
    ``optimizer`` and ``amp``. By default ``meta`` will contain version
    and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
        except ImportError as e:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.') from e
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/apex_runner/checkpoint.py |
# Copyright (c) Open-MMLab. All rights reserved.
from .checkpoint import save_checkpoint
from .apex_iter_based_runner import IterBasedRunnerAmp
__all__ = [
'save_checkpoint', 'IterBasedRunnerAmp',
]
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/apex_runner/__init__.py |
from mmcv.runner import OptimizerHook, HOOKS
try:
import apex
except ImportError:
print('apex is not installed')
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
"""Optimizer hook for distributed training."""
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.update_interval = update_interval
self.use_fp16 = use_fp16
def before_run(self, runner):
runner.optimizer.zero_grad()
def after_train_iter(self, runner):
runner.outputs['loss'] /= self.update_interval
if self.use_fp16:
with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
scaled_loss.backward()
else:
runner.outputs['loss'].backward()
if self.every_n_iters(runner, self.update_interval):
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
runner.optimizer.zero_grad()
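# --- a hedged usage note (not part of the original file) ----------------------
# The hook is registered in HOOKS above, so a config enables it by name; the
# BEiT configs in this repo use it for apex fp16 training and optional gradient
# accumulation, e.g. (update_interval=2 here is illustrative):
#   optimizer_config = dict(
#       type='DistOptimizerHook', update_interval=2, grad_clip=None,
#       coalesce=True, bucket_size_mb=-1, use_fp16=True)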
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/mmcv_custom/apex_runner/optimizer.py |
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/_base_/default_runtime.py |
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
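# A hedged note (not part of the original config): the mean/std in img_norm_cfg
# are the standard ImageNet statistics expressed on the 0-255 pixel scale,
# i.e. (0.485, 0.456, 0.406) * 255 = (123.675, 116.28, 103.53) and
# (0.229, 0.224, 0.225) * 255 = (58.395, 57.12, 57.375).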
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/_base_/datasets/ade20k_640x640.py |
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/_base_/datasets/ade20k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='XCiT',
patch_size=16,
embed_dim=384,
depth=12,
num_heads=8,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=True,
use_rel_pos_bias=False,
),
decode_head=dict(
type='UPerHead',
in_channels=[384, 384, 384, 384],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=384,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
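# A hedged note (not part of the original config): the backbone settings here
# (type='XCiT', 384-dim features) and num_classes=19 are base-file placeholders;
# the configs under configs/beit/upernet override them with type='BEiT',
# 768/1024-dim features and num_classes=150 for ADE20K.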
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/_base_/models/upernet_beit.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=16000)
evaluation = dict(interval=16000, metric='mIoU')
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/_base_/schedules/schedule_160k.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=320000)
checkpoint_config = dict(by_epoch=False, interval=32000)
evaluation = dict(interval=32000, metric='mIoU')
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/_base_/schedules/schedule_320k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
# recommended config for BEiT models that are self-supervised pre-trained and then intermediate fine-tuned on ImageNet
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
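# A hedged bookkeeping note (not part of the original config): with the default
# 8-GPU setup noted above, samples_per_gpu=2 and update_interval=1 give an
# effective batch of 8 * 2 * 1 = 16 images per optimizer step, so the
# 160k-iteration schedule corresponds to roughly 2.56M training images seen.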
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_512_slide_160k_ade20k_pt2ft.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
# recommended config for BEiT models that are self-supervised pre-trained and then intermediate fine-tuned on ImageNet
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_512_slide_160k_ade20k_pt2ft.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
# recommended config for BEiT models that are self-supervised pre-trained and then intermediate fine-tuned on ImageNet
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_640_slide_160k_ade20k_pt2ft.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
# recommended config for BEiT models that are self-supervised pre-trained on ImageNet
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=7e-4, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.65))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_512_slide_160k_ade20k_pt.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
# recommended config for BEiT models that are self-supervised pre-trained and then intermediate fine-tuned on ImageNet
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_320k.py'
]
# We set samples_per_gpu to 1 and optimizer_config.update_interval to 2, so the total number of optimizer updates stays at 160k.
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=3000,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 1 image per GPU (2-step gradient accumulation keeps the effective batch at 16)
data=dict(samples_per_gpu=1)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
# We set samples_per_gpu to 1 and optimizer_config.update_interval to 2, so the total number of optimizer updates stays at 160k.
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=2,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
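# A hedged bookkeeping note (not part of the original config): samples_per_gpu=1
# combined with update_interval=2 on 8 GPUs keeps the effective batch at
# 8 * 1 * 2 = 16 images per optimizer step, and the 320k-iteration schedule
# divided by the accumulation factor of 2 yields the same 160k parameter updates
# as the 512-resolution configs.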
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_640_slide_160k_ade20k_pt2ft.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_512_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_512_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline),
samples_per_gpu=2,
)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_640_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_320k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=3000,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 1 image per GPU (2-step gradient accumulation keeps the effective batch at 16)
data=dict(samples_per_gpu=1)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=2,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_640_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
import math
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from mmcv_custom import load_checkpoint
from mmseg.utils import get_root_logger
from mmseg.models.builder import BACKBONES
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
        # x = self.drop(x)
        # kept commented out to match the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
        # NOTE: the scale factor was wrong in my original version; it can be set
        # manually to stay compatible with previous weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.0)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
# assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2)
return x, (Hp, Wp)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
@BACKBONES.register_module()
class BEiT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_checkpoint=False,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
out_indices=[3, 5, 7, 11]):
super().__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.out_indices = out_indices
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.use_checkpoint = use_checkpoint
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.mask_token, std=.02)
self.out_indices = out_indices
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward_features(self, x):
B, C, H, W = x.shape
x, (Hp, Wp) = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
features = []
for i, blk in enumerate(self.blocks):
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, rel_pos_bias)
else:
x = blk(x, rel_pos_bias)
if i in self.out_indices:
xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
return tuple(features)
def forward(self, x):
x = self.forward_features(x)
return x
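# --- a minimal shape-check sketch (not part of the original file) -------------
# It builds a BEiT-base backbone like the 512x512 ADE20K configs and verifies
# that the four FPN outputs come out at strides 4/8/16/32. The FPN necks use
# nn.SyncBatchNorm, which needs an initialised process group, so this
# single-process check swaps them for plain BatchNorm2d first; the helper and
# the __main__ block are illustrative additions, not part of the training code.
def _replace_sync_bn(module):
    for name, child in module.named_children():
        if isinstance(child, nn.SyncBatchNorm):
            bn = nn.BatchNorm2d(child.num_features)
            bn.load_state_dict(child.state_dict())
            setattr(module, name, bn)
        else:
            _replace_sync_bn(child)
    return module
if __name__ == '__main__':
    backbone = _replace_sync_bn(
        BEiT(img_size=512, patch_size=16, embed_dim=768, depth=12, num_heads=12,
             mlp_ratio=4, qkv_bias=True, use_abs_pos_emb=False,
             use_rel_pos_bias=True, init_values=0.1, out_indices=[3, 5, 7, 11]))
    backbone.eval()
    with torch.no_grad():
        feats = backbone(torch.randn(1, 3, 512, 512))
    # expected spatial sizes for a 512x512 input: 128, 64, 32 and 16
    print([tuple(f.shape) for f in feats])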
| EXA-1-master | exa/models/unilm-master/beit/semantic_segmentation/backbone/beit.py |
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
8. Update the documentation commit in .circleci/deploy.sh for the accurate documentation to be displayed
9. Update README.md to redirect to correct documentation.
"""
import shutil
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
extras = {}
extras["mecab"] = ["mecab-python3"]
extras["sklearn"] = ["scikit-learn==0.22.1"]
extras["tf"] = ["tensorflow"]
extras["tf-cpu"] = ["tensorflow-cpu"]
extras["torch"] = ["torch"]
extras["serving"] = ["pydantic", "uvicorn", "fastapi", "starlette"]
extras["all"] = extras["serving"] + ["tensorflow", "torch"]
extras["testing"] = ["pytest", "pytest-xdist"]
extras["quality"] = ["black", "isort", "flake8"]
extras["docs"] = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme"]
extras["dev"] = extras["testing"] + extras["quality"] + ["mecab-python3", "scikit-learn", "tensorflow", "torch"]
setup(
name="transformers",
version="2.5.1",
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="[email protected]",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description="",
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
install_requires=[
"numpy",
"tokenizers == 0.5.2",
# accessing files from S3 directly
"boto3",
# filesystem locks e.g. to prevent parallel downloads
"filelock",
# for downloading models over HTTPS
"requests",
# progress bars in model download and training scripts
"tqdm >= 4.27",
# for OpenAI GPT
"regex != 2019.12.17",
# for XLNet
"sentencepiece == 0.1.91",
# for XLM
"sacremoses",
# for ndcg
"scikit-learn == 0.22",
# for tensorboard
"tensorboardX",
# for ner
"seqeval == 0.0.12",
# for torch
"torch",
# for preprocessing
"networkx == 1.11",
],
extras_require=extras,
scripts=["transformers-cli"],
python_requires=">=3.5.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| EXA-1-master | exa/models/unilm-master/xtune/setup.py |
# coding=utf-8
# Copyright 2020 Google and DeepMind.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
TOKENIZERS = {
'bert': BertTokenizer,
'xlm': XLMTokenizer,
'xlmr': XLMRobertaTokenizer,
}
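# panx_tokenize_preprocess: re-tokenize the PAN-X (WikiAnn) NER tsv files with the chosen
# pre-trained tokenizer and insert sentence breaks so that no example exceeds max_len
# subword tokens; also writes an .idx file mapping every kept line back to its original
# sentence index.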
def panx_tokenize_preprocess(args):
def _preprocess_one_file(infile, outfile, idxfile, tokenizer, max_len):
if not os.path.exists(infile):
print(f'{infile} does not exist')
return 0
special_tokens_count = 3 if isinstance(tokenizer, XLMRobertaTokenizer) else 2
max_seq_len = max_len - special_tokens_count
subword_len_counter = idx = 0
with open(infile, "rt") as fin, open(outfile, "w") as fout, open(idxfile, "w") as fidx:
for line in fin:
line = line.strip()
if not line:
fout.write('\n')
fidx.write('\n')
idx += 1
subword_len_counter = 0
continue
items = line.split()
token = items[0].strip()
if len(items) == 2:
label = items[1].strip()
else:
label = 'O'
current_subwords_len = len(tokenizer.tokenize(token))
if (current_subwords_len == 0 or current_subwords_len > max_seq_len) and len(token) != 0:
token = tokenizer.unk_token
current_subwords_len = 1
if (subword_len_counter + current_subwords_len) > max_seq_len:
fout.write(f"\n{token}\t{label}\n")
fidx.write(f"\n{idx}\n")
subword_len_counter = current_subwords_len
else:
fout.write(f"{token}\t{label}\n")
fidx.write(f"{idx}\n")
subword_len_counter += current_subwords_len
return 1
model_type = args.model_type
tokenizer = TOKENIZERS[model_type].from_pretrained(args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
for lang in args.languages.split(','):
out_dir = os.path.join(args.output_dir, lang)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if lang == 'en':
files = ['dev', 'test', 'train']
else:
files = ['dev', 'test']
for file in files:
infile = os.path.join(args.data_dir, f'{file}-{lang}.tsv')
outfile = os.path.join(out_dir, "{}.{}".format(file, args.model_name_or_path))
idxfile = os.path.join(out_dir, "{}.{}.idx".format(file, args.model_name_or_path))
if os.path.exists(outfile) and os.path.exists(idxfile):
print(f'{outfile} and {idxfile} exist')
else:
code = _preprocess_one_file(infile, outfile, idxfile, tokenizer, args.max_len)
if code > 0:
print(f'finish preprocessing {outfile}')
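# panx_preprocess: convert the raw WikiAnn "{lang}-{split}" files (tokens prefixed with
# "lang:") into tab-separated "{split}-{lang}.tsv" files, one "token<TAB>label" pair per
# line with blank lines between sentences.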
def panx_preprocess(args):
def _process_one_file(infile, outfile):
lines = open(infile, 'r').readlines()
if lines[-1].strip() == '':
lines = lines[:-1]
with open(outfile, 'w') as fout:
for l in lines:
items = l.strip().split('\t')
if len(items) == 2:
label = items[1].strip()
idx = items[0].find(':')
if idx != -1:
token = items[0][idx+1:].strip()
# if 'test' in infile:
# fout.write(f'{token}\n')
# else:
# fout.write(f'{token}\t{label}\n')
fout.write(f'{token}\t{label}\n')
else:
fout.write('\n')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
langs = 'ar he vi id jv ms tl eu ml ta te af nl en de el bn hi mr ur fa fr it pt es bg ru ja ka ko th sw yo my zh kk tr et fi hu'.split(' ')
for lg in langs:
for split in ['train', 'test', 'dev']:
infile = os.path.join(args.data_dir, f'{lg}-{split}')
outfile = os.path.join(args.output_dir, f'{split}-{lg}.tsv')
_process_one_file(infile, outfile)
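# udpos_tokenize_preprocess: same re-tokenization and length-splitting step as
# panx_tokenize_preprocess, but for the Universal Dependencies POS tagging files
# (missing labels default to "X").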
def udpos_tokenize_preprocess(args):
def _preprocess_one_file(infile, outfile, idxfile, tokenizer, max_len):
if not os.path.exists(infile):
print(f'{infile} does not exist')
return
subword_len_counter = idx = 0
special_tokens_count = 3 if isinstance(tokenizer, XLMRobertaTokenizer) else 2
max_seq_len = max_len - special_tokens_count
with open(infile, "rt") as fin, open(outfile, "w") as fout, open(idxfile, "w") as fidx:
for line in fin:
line = line.strip()
if len(line) == 0 or line == '':
fout.write('\n')
fidx.write('\n')
idx += 1
subword_len_counter = 0
continue
items = line.split()
if len(items) == 2:
label = items[1].strip()
else:
label = "X"
token = items[0].strip()
current_subwords_len = len(tokenizer.tokenize(token))
if (current_subwords_len == 0 or current_subwords_len > max_seq_len) and len(token) != 0:
token = tokenizer.unk_token
current_subwords_len = 1
if (subword_len_counter + current_subwords_len) > max_seq_len:
fout.write(f"\n{token}\t{label}\n")
fidx.write(f"\n{idx}\n")
subword_len_counter = current_subwords_len
else:
fout.write(f"{token}\t{label}\n")
fidx.write(f"{idx}\n")
subword_len_counter += current_subwords_len
model_type = args.model_type
tokenizer = TOKENIZERS[model_type].from_pretrained(args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
for lang in args.languages.split(','):
out_dir = os.path.join(args.output_dir, lang)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if lang == 'en':
files = ['dev', 'test', 'train']
else:
files = ['dev', 'test']
for file in files:
infile = os.path.join(args.data_dir, "{}-{}.tsv".format(file, lang))
outfile = os.path.join(out_dir, "{}.{}".format(file, args.model_name_or_path))
idxfile = os.path.join(out_dir, "{}.{}.idx".format(file, args.model_name_or_path))
if os.path.exists(outfile) and os.path.exists(idxfile):
print(f'{outfile} and {idxfile} exist')
else:
_preprocess_one_file(infile, outfile, idxfile, tokenizer, args.max_len)
print(f'finish preprocessing {outfile}')
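# udpos_preprocess: walk the per-language UD directories, read the *.conll files, drop
# sentences that consist (almost) entirely of "_" placeholders, strip zero-width
# characters, and write "{split}-{lang}.tsv" files with one "token<TAB>POS-tag" pair per line.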
def udpos_preprocess(args):
def _read_one_file(file):
data = []
sent, tag, lines = [], [], []
for line in open(file, 'r'):
items = line.strip().split('\t')
if len(items) != 10:
empty = all(w == '_' for w in sent)
num_empty = sum([int(w == '_') for w in sent])
if num_empty == 0 or num_empty < len(sent) - 1:
data.append((sent, tag, lines))
sent, tag, lines = [], [], []
else:
sent.append(items[1].strip())
tag.append(items[3].strip())
lines.append(line.strip())
assert len(sent) == int(items[0]), 'line={}, sent={}, tag={}'.format(line, sent, tag)
return data
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def remove_empty_space(data):
new_data = {}
for split in data:
new_data[split] = []
for sent, tag, lines in data[split]:
new_sent = [''.join(w.replace('\u200c', '').split(' ')) for w in sent]
lines = [line.replace('\u200c', '') for line in lines]
assert len(" ".join(new_sent).split(' ')) == len(tag)
new_data[split].append((new_sent, tag, lines))
return new_data
def check_file(file):
for i, l in enumerate(open(file)):
items = l.strip().split('\t')
assert len(items[0].split(' ')) == len(items[1].split(' ')), 'idx={}, line={}'.format(i, l)
def _write_files(data, output_dir, lang, suffix):
for split in data:
if len(data[split]) > 0:
prefix = os.path.join(output_dir, f'{split}-{lang}')
if suffix == 'mt':
with open(prefix + '.mt.tsv', 'w') as fout:
for idx, (sent, tag, _) in enumerate(data[split]):
newline = '\n' if idx != len(data[split]) - 1 else ''
# if split == 'test':
# fout.write('{}{}'.format(' '.join(sent, newline)))
# else:
# fout.write('{}\t{}{}'.format(' '.join(sent), ' '.join(tag), newline))
fout.write('{}\t{}{}'.format(' '.join(sent), ' '.join(tag), newline))
check_file(prefix + '.mt.tsv')
print(' - finish checking ' + prefix + '.mt.tsv')
elif suffix == 'tsv':
with open(prefix + '.tsv', 'w') as fout:
for sidx, (sent, tag, _) in enumerate(data[split]):
for widx, (w, t) in enumerate(zip(sent, tag)):
newline = '' if (sidx == len(data[split]) - 1) and (widx == len(sent) - 1) else '\n'
# if split == 'test':
# fout.write('{}{}'.format(w, newline))
# else:
# fout.write('{}\t{}{}'.format(w, t, newline))
fout.write('{}\t{}{}'.format(w, t, newline))
fout.write('\n')
elif suffix == 'conll':
with open(prefix + '.conll', 'w') as fout:
for _, _, lines in data[split]:
for l in lines:
fout.write(l.strip() + '\n')
fout.write('\n')
print(f'finish writing file to {prefix}.{suffix}')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
languages = 'af ar bg de el en es et eu fa fi fr he hi hu id it ja kk ko mr nl pt ru ta te th tl tr ur vi yo zh'.split(' ')
for root, dirs, files in os.walk(args.data_dir):
lg = root.strip().split('/')[-1]
if root == args.data_dir or lg not in languages:
continue
data = {k: [] for k in ['train', 'dev', 'test']}
for f in sorted(files):
if f.endswith('conll'):
file = os.path.join(root, f)
examples = _read_one_file(file)
if 'train' in f:
data['train'].extend(examples)
elif 'dev' in f:
data['dev'].extend(examples)
elif 'test' in f:
data['test'].extend(examples)
else:
print('split not found: ', file)
print(' - finish reading {}, {}'.format(file, [(k, len(v)) for k,v in data.items()]))
data = remove_empty_space(data)
for sub in ['tsv']:
_write_files(data, args.output_dir, lg, sub)
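# pawsx_preprocess: convert the PAWS-X files (train for English only, dev/test for all
# seven languages) into "{split}-{lang}.tsv" files with sentence1, sentence2 and label columns.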
def pawsx_preprocess(args):
def _preprocess_one_file(infile, outfile, remove_label=False):
data = []
for i, line in enumerate(open(infile, 'r')):
if i == 0:
continue
items = line.strip().split('\t')
sent1 = ' '.join(items[1].strip().split(' '))
sent2 = ' '.join(items[2].strip().split(' '))
label = items[3]
data.append([sent1, sent2, label])
with open(outfile, 'w') as fout:
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar='')
for sent1, sent2, label in data:
# if remove_label:
# writer.writerow([sent1, sent2])
# else:
# writer.writerow([sent1, sent2, label])
writer.writerow([sent1, sent2, label])
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
split2file = {'train': 'train', 'test': 'test_2k', 'dev': 'dev_2k'}
for lang in ['en', 'de', 'es', 'fr', 'ja', 'ko', 'zh']:
for split in ['train', 'test', 'dev']:
if split == 'train' and lang != 'en':
continue
file = split2file[split]
infile = os.path.join(args.data_dir, lang, "{}.tsv".format(file))
outfile = os.path.join(args.output_dir, "{}-{}.tsv".format(split, lang))
_preprocess_one_file(infile, outfile, remove_label=(split == 'test'))
print(f'finish preprocessing {outfile}')
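# xnli_preprocess: build "train-en.tsv" from the translated MultiNLI training data and
# split the combined XNLI dev/test files into one "{split}-{lang}.tsv" file per language,
# normalizing the "contradictory" label to "contradiction".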
def xnli_preprocess(args):
def _preprocess_file(infile, output_dir, split):
all_langs = defaultdict(list)
for i, line in enumerate(open(infile, 'r')):
if i == 0:
continue
items = line.strip().split('\t')
lang = items[0].strip()
label = "contradiction" if items[1].strip() == "contradictory" else items[1].strip()
sent1 = ' '.join(items[6].strip().split(' '))
sent2 = ' '.join(items[7].strip().split(' '))
all_langs[lang].append((sent1, sent2, label))
print(f'# langs={len(all_langs)}')
for lang, pairs in all_langs.items():
outfile = os.path.join(output_dir, '{}-{}.tsv'.format(split, lang))
with open(outfile, 'w') as fout:
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar='')
for (sent1, sent2, label) in pairs:
# if split == 'test':
# writer.writerow([sent1, sent2])
# else:
# writer.writerow([sent1, sent2, label])
writer.writerow([sent1, sent2, label])
print(f'finished preprocessing {outfile}')
def _preprocess_train_file(infile, outfile):
with open(outfile, 'w') as fout:
writer = csv.writer(fout, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar='')
for i, line in enumerate(open(infile, 'r')):
if i == 0:
continue
items = line.strip().split('\t')
sent1 = ' '.join(items[0].strip().split(' '))
sent2 = ' '.join(items[1].strip().split(' '))
label = "contradiction" if items[2].strip() == "contradictory" else items[2].strip()
writer.writerow([sent1, sent2, label])
print(f'finished preprocessing {outfile}')
infile = os.path.join(args.data_dir, 'XNLI-MT-1.0/multinli/multinli.train.en.tsv')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
outfile = os.path.join(args.output_dir, 'train-en.tsv')
_preprocess_train_file(infile, outfile)
for split in ['test', 'dev']:
infile = os.path.join(args.data_dir, 'XNLI-1.0/xnli.{}.tsv'.format(split))
print(f'reading file {infile}')
_preprocess_file(infile, args.output_dir, split)
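# tatoeba_preprocess: copy the Tatoeba source-side files to "{lang}-en.{lang}" and write
# the English target sentences in alphabetically sorted order to "{lang}-en.en", mapping
# the 3-letter language codes to 2-letter ISO codes.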
def tatoeba_preprocess(args):
lang3_dict = {
'afr':'af', 'ara':'ar', 'bul':'bg', 'ben':'bn',
'deu':'de', 'ell':'el', 'spa':'es', 'est':'et',
'eus':'eu', 'pes':'fa', 'fin':'fi', 'fra':'fr',
'heb':'he', 'hin':'hi', 'hun':'hu', 'ind':'id',
'ita':'it', 'jpn':'ja', 'jav':'jv', 'kat':'ka',
'kaz':'kk', 'kor':'ko', 'mal':'ml', 'mar':'mr',
'nld':'nl', 'por':'pt', 'rus':'ru', 'swh':'sw',
'tam':'ta', 'tel':'te', 'tha':'th', 'tgl':'tl',
'tur':'tr', 'urd':'ur', 'vie':'vi', 'cmn':'zh',
'eng':'en',
}
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for sl3, sl2 in lang3_dict.items():
if sl3 != 'eng':
src_file = f'{args.data_dir}/tatoeba.{sl3}-eng.{sl3}'
tgt_file = f'{args.data_dir}/tatoeba.{sl3}-eng.eng'
src_out = f'{args.output_dir}/{sl2}-en.{sl2}'
tgt_out = f'{args.output_dir}/{sl2}-en.en'
shutil.copy(src_file, src_out)
tgts = [l.strip() for l in open(tgt_file)]
idx = range(len(tgts))
data = zip(tgts, idx)
with open(tgt_out, 'w') as ftgt:
for t, i in sorted(data, key=lambda x: x[0]):
ftgt.write(f'{t}\n')
def xquad_preprocess(args):
# Remove the test annotations to prevent accidental cheating
# remove_qa_test_annotations(args.data_dir)
pass
def mlqa_preprocess(args):
# Remove the test annotations to prevent accidental cheating
# remove_qa_test_annotations(args.data_dir)
pass
def tydiqa_preprocess(args):
LANG2ISO = {'arabic': 'ar', 'bengali': 'bn', 'english': 'en', 'finnish': 'fi',
'indonesian': 'id', 'korean': 'ko', 'russian': 'ru',
'swahili': 'sw', 'telugu': 'te'}
assert os.path.exists(args.data_dir)
train_file = os.path.join(args.data_dir, 'tydiqa-goldp-v1.1-train.json')
os.makedirs(args.output_dir, exist_ok=True)
# Split the training file into language-specific files
lang2data = defaultdict(list)
with open(train_file, 'r') as f_in:
data = json.load(f_in)
version = data['version']
for doc in data['data']:
for par in doc['paragraphs']:
context = par['context']
for qa in par['qas']:
question = qa['question']
question_id = qa['id']
example_lang = question_id.split('-')[0]
q_id = question_id.split('-')[-1]
for answer in qa['answers']:
a_start, a_text = answer['answer_start'], answer['text']
a_end = a_start + len(a_text)
assert context[a_start:a_end] == a_text
lang2data[example_lang].append({'paragraphs': [{
'context': context,
'qas': [{'answers': qa['answers'],
'question': question,
'id': q_id}]}]})
for lang, data in lang2data.items():
out_file = os.path.join(
args.output_dir, 'tydiqa.%s.train.json' % LANG2ISO[lang])
with open(out_file, 'w') as f:
json.dump({'data': data, 'version': version}, f)
# Rename the dev files
dev_dir = os.path.join(args.data_dir, 'tydiqa-goldp-v1.1-dev')
assert os.path.exists(dev_dir)
for lang, iso in LANG2ISO.items():
src_file = os.path.join(dev_dir, 'tydiqa-goldp-dev-%s.json' % lang)
dst_file = os.path.join(dev_dir, 'tydiqa.%s.dev.json' % iso)
os.rename(src_file, dst_file)
# Remove the test annotations to prevent accidental cheating
# remove_qa_test_annotations(dev_dir)
def remove_qa_test_annotations(test_dir):
assert os.path.exists(test_dir)
for file_name in os.listdir(test_dir):
new_data = []
test_file = os.path.join(test_dir, file_name)
with open(test_file, 'r') as f:
data = json.load(f)
version = data['version']
for doc in data['data']:
for par in doc['paragraphs']:
context = par['context']
for qa in par['qas']:
question = qa['question']
question_id = qa['id']
for answer in qa['answers']:
a_start, a_text = answer['answer_start'], answer['text']
a_end = a_start + len(a_text)
assert context[a_start:a_end] == a_text
new_data.append({'paragraphs': [{
'context': context,
'qas': [{'answers': [{'answer_start': 0, 'text': ''}],
'question': question,
'id': question_id}]}]})
with open(test_file, 'w') as f:
json.dump({'data': new_data, 'version': version}, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output data dir where any processed files will be written to.")
parser.add_argument("--task", default="panx", type=str, required=True,
help="The task name")
parser.add_argument("--model_name_or_path", default="bert-base-multilingual-cased", type=str,
help="The pre-trained model")
parser.add_argument("--model_type", default="bert", type=str,
help="model type")
parser.add_argument("--max_len", default=512, type=int,
help="the maximum length of sentences")
parser.add_argument("--do_lower_case", action='store_true',
help="whether to do lower case")
parser.add_argument("--cache_dir", default=None, type=str,
help="cache directory")
parser.add_argument("--languages", default="en", type=str,
help="process language")
parser.add_argument("--remove_last_token", action='store_true',
help="whether to remove the last token")
parser.add_argument("--remove_test_label", action='store_true',
help="whether to remove test set label")
args = parser.parse_args()
if args.task == 'panx_tokenize':
panx_tokenize_preprocess(args)
if args.task == 'panx':
panx_preprocess(args)
if args.task == 'udpos_tokenize':
udpos_tokenize_preprocess(args)
if args.task == 'udpos':
udpos_preprocess(args)
if args.task == 'pawsx':
pawsx_preprocess(args)
if args.task == 'xnli':
xnli_preprocess(args)
if args.task == 'tatoeba':
tatoeba_preprocess(args)
if args.task == 'xquad':
xquad_preprocess(args)
if args.task == 'mlqa':
mlqa_preprocess(args)
if args.task == 'tydiqa':
tydiqa_preprocess(args)
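# Example invocations (paths are illustrative; the flags are the ones defined above):
#   python utils_preprocess.py --task panx --data_dir ./download/panx --output_dir ./data/panx
#   python utils_preprocess.py --task panx_tokenize --model_type xlmr \
#       --model_name_or_path xlm-roberta-large --data_dir ./data/panx --output_dir ./data/panx_tokenized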
| EXA-1-master | exa/models/unilm-master/xtune/utils_preprocess.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
import argparse
import glob
import logging
import os
import random
import timeit
import itertools
import json
import copy
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForQuestionAnswering,
AlbertTokenizer,
BertConfig,
BertForQuestionAnswering,
BertTokenizer,
XLMRobertaConfig,
XLMRobertaForQuestionAnsweringStable,
XLMRobertaTokenizer,
CamembertConfig,
CamembertForQuestionAnswering,
CamembertTokenizer,
DistilBertConfig,
DistilBertForQuestionAnswering,
DistilBertTokenizer,
RobertaConfig,
RobertaForQuestionAnswering,
RobertaTokenizer,
XLMConfig,
XLMForQuestionAnswering,
XLMTokenizer,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
)
from transformers.data.metrics.evaluate_mlqa import evaluate_with_path as mlqa_evaluate_with_path
from transformers.data.metrics.evaluate_squad import evaluate_with_path as squad_evaluate_with_path
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor, MLQAProcessor, \
TyDiQAProcessor, XQuADProcessor
from transformers.tokenization_bert import whitespace_tokenize
from transformers.data.processors.squad import _improve_answer_span, _new_check_is_max_context, SquadFeatures
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, CamembertConfig, RobertaConfig, XLNetConfig, XLMConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForQuestionAnswering, BertTokenizer),
"camembert": (CamembertConfig, CamembertForQuestionAnswering, CamembertTokenizer),
"roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),
"xlnet": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),
"xlm": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),
"distilbert": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),
"albert": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),
"xlmr": (XLMRobertaConfig, XLMRobertaForQuestionAnsweringStable, XLMRobertaTokenizer),
}
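# NoisedDataGenerator: builds, for every training epoch, a paired dataset of original and
# "noised" QA examples (code-switching via bilingual dictionaries, BPE switching, subword
# sampling, or machine-translated augmentation; Gaussian noise is flagged here and applied
# inside the model). The paired tensors feed the R1/R2 consistency losses in addition to
# the standard span-prediction loss.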
class NoisedDataGenerator(object):
def __init__(self,
task_name="mlqa",
r1_lambda=5.0,
enable_r1_loss=False,
original_loss=True,
noised_loss=False,
keep_boundary_unchanged=False,
r1_on_boundary_only=False,
noised_max_seq_length=512,
max_seq_length=512,
doc_stride=128,
max_query_length=64,
overall_ratio=1.0,
enable_bpe_switch=False,
bpe_switch_ratio=0.5,
tokenizer_dir=None,
do_lower_case=False,
tokenizer_languages=None,
enable_bpe_sampling=False,
bpe_sampling_ratio=0.5,
tokenizer=None,
sampling_alpha=0.3,
sampling_nbest_size=-1,
enable_random_noise=False,
noise_detach_embeds=False,
noise_eps=1e-5,
noise_type='uniform',
enable_code_switch=False,
code_switch_ratio=0.5,
dict_dir=None,
dict_languages=None,
translation_path=None,
disable_translate_labels=False,
translate_languages=None,
enable_data_augmentation=False,
augment_ratio=0.0,
augment_method=None,
r2_lambda=1.0,
use_hard_labels=False):
if enable_code_switch:
assert dict_dir is not None
assert dict_languages is not None
assert tokenizer is not None
if enable_random_noise:
assert noise_type in ['uniform', 'normal']
self.task_name = task_name.lower()
self.n_tokens = 0
self.n_cs_tokens = 0
self.r1_lambda = r1_lambda
self.original_loss = original_loss
self.noised_loss = noised_loss
self.enable_r1_loss = enable_r1_loss
self.keep_boundary_unchanged = keep_boundary_unchanged
self.r1_on_boundary_only = r1_on_boundary_only
self.max_seq_length = max_seq_length
self.noised_max_seq_length = noised_max_seq_length
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self.overall_ratio = overall_ratio
self.enable_bpe_switch = enable_bpe_switch
self.bpe_switch_ratio = bpe_switch_ratio / self.overall_ratio
assert not self.enable_bpe_switch or self.bpe_switch_ratio <= 1.0
self.tokenizer_dir = tokenizer_dir
self.tokenizer_languages = tokenizer_languages
self.enable_bpe_sampling = enable_bpe_sampling
self.bpe_sampling_ratio = bpe_sampling_ratio / self.overall_ratio
assert not self.enable_bpe_sampling or self.bpe_sampling_ratio <= 1.0
self.tokenizer = tokenizer
self.sampling_alpha = sampling_alpha
self.sampling_nbest_size = sampling_nbest_size
self.enable_random_noise = enable_random_noise
self.noise_detach_embeds = noise_detach_embeds
self.noise_eps = noise_eps
self.noise_type = noise_type
self.enable_code_switch = enable_code_switch
self.code_switch_ratio = code_switch_ratio / self.overall_ratio
assert not self.enable_code_switch or self.code_switch_ratio <= 1.0
self.dict_dir = dict_dir
self.dict_languages = dict_languages
self.lang2dict = {}
for lang in copy.deepcopy(dict_languages):
dict_path = os.path.join(self.dict_dir, "en-{}.txt".format(lang))
if not os.path.exists(dict_path):
logger.info("dictionary en-{} doesn't exist.".format(lang))
self.dict_languages.remove(lang)
continue
logger.info("reading dictionary from {}".format(dict_path))
assert os.path.exists(dict_path)
with open(dict_path, "r", encoding="utf-8") as reader:
raw = reader.readlines()
self.lang2dict[lang] = {}
for line in raw:
line = line.strip()
try:
src, tgt = line.split("\t")
except:
src, tgt = line.split(" ")
if src not in self.lang2dict[lang]:
self.lang2dict[lang][src] = [tgt]
else:
self.lang2dict[lang][src].append(tgt)
self.lang2tokenizer = {}
for lang in tokenizer_languages:
self.lang2tokenizer[lang] = XLMRobertaTokenizer.from_pretrained(
os.path.join(tokenizer_dir, "{}".format(lang)), do_lower_case=do_lower_case)
self.translation_path = translation_path
self.disable_translate_labels = disable_translate_labels
self.translate_languages = translate_languages
self.enable_data_augmentation = enable_data_augmentation
self.augment_ratio = augment_ratio
self.augment_method = augment_method
self.r2_lambda = r2_lambda
self.use_hard_labels = use_hard_labels
self.id2ex = None
if self.enable_data_augmentation and self.augment_method == "mt":
# drop_languages = ["en", "zh-CN", "zh", "ja", "ko", "th", "my", "ml", "ta"]
drop_languages = ["en"]
for lang in drop_languages:
if lang in self.translate_languages:
self.translate_languages.remove(lang)
self.id2ex = {}
for lang in self.translate_languages:
if self.task_name == "tydiqa":
file_name = "tydiqa.translate.train.en-{}.json".format(lang)
else:
file_name = "squad.translate.train.en-{}.json".format(lang)
logger.info("Reading translation from {}".format(os.path.join(self.translation_path, file_name)))
processor = MLQAProcessor()
examples = processor.get_train_examples(self.translation_path,
file_name)
for ex in examples:
if ex.qas_id not in self.id2ex:
self.id2ex[ex.qas_id] = []
if self.disable_translate_labels:
ex.is_impossible = True
self.id2ex[ex.qas_id].append(ex)
def augment_examples(self, examples):
n_augment = math.ceil(len(examples) * self.augment_ratio)
augment_examples = []
while n_augment > 0:
examples = copy.deepcopy(examples)
augment_examples += examples[:n_augment]
n_augment -= len(examples[:n_augment])
random.shuffle(examples)
return augment_examples
def get_translate_data(self, examples):
translate_examples = []
n_unfound = 0
qas_ids = list(self.id2ex.keys())
for ex_idx, example in enumerate(examples):
qas_id = example.qas_id
if self.task_name == "tydiqa" or qas_id not in self.id2ex:
rand_qas_id = qas_ids[random.randint(0, len(qas_ids) - 1)]
# logger.info(
# "qas_id {} is not found in translate data, using {} as replacement.".format(qas_id, rand_qas_id))
n_unfound += 1
qas_id = rand_qas_id
idx = random.randint(0, len(self.id2ex[qas_id]) - 1)
tgt_ex = self.id2ex[qas_id][idx]
translate_examples.append(tgt_ex)
logger.info("{} qas_ids unfound.".format(n_unfound))
return translate_examples
def get_noised_dataset(self, examples):
# work on a deep copy so that augmentation does not modify the caller's examples
examples = copy.deepcopy(examples)
is_augmented = [0] * len(examples)
if self.enable_data_augmentation:
augment_examples = self.augment_examples(examples)
if self.augment_method == "mt":
assert not self.enable_code_switch
augment_examples = self.get_translate_data(augment_examples)
is_augmented += [1] * len(augment_examples)
examples += augment_examples
if self.enable_code_switch:
self.n_tokens = 0
self.n_cs_tokens = 0
dataset = self.convert_examples_to_dataset(examples, is_augmented)
if self.enable_code_switch:
logger.info("{:.2f}% tokens have been code-switched.".format(self.n_cs_tokens / self.n_tokens * 100))
return dataset
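# tokenize_token: tokenize a single whitespace-separated token, optionally replacing it
# with a dictionary translation (code switch), tokenizing it with another language's
# tokenizer (BPE switch), or subword-sampling it; returns the sub-tokens plus a flag
# indicating whether the token was actually switched.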
def tokenize_token(self, token, switch_text=False, can_be_switched=True,
enable_code_switch=False,
enable_bpe_switch=False,
enable_bpe_sampling=False, ):
switch_token = (random.random() <= self.overall_ratio) and can_be_switched
is_switched = False
self.n_tokens += 1
if enable_code_switch and switch_text and switch_token and random.random() <= self.code_switch_ratio:
lang = self.dict_languages[random.randint(0, len(self.dict_languages) - 1)]
if token.lower() in self.lang2dict[lang]:
self.n_cs_tokens += 1
token = self.lang2dict[lang][token.lower()][
random.randint(0, len(self.lang2dict[lang][token.lower()]) - 1)]
is_switched = True
if enable_bpe_switch and switch_text and switch_token and random.random() <= self.bpe_switch_ratio:
lang = self.tokenizer_languages[random.randint(0, len(self.tokenizer_languages) - 1)]
tokenizer = self.lang2tokenizer[lang]
is_switched = True
else:
tokenizer = self.tokenizer
if enable_bpe_sampling and switch_text and switch_token and random.random() <= self.bpe_sampling_ratio:
sub_tokens = tokenizer.tokenize(token, nbest_size=self.sampling_nbest_size,
alpha=self.sampling_alpha)
is_switched = True
else:
sub_tokens = tokenizer.tokenize(token)
return sub_tokens, switch_token and is_switched
def tokenize_sentence(self, sentence, switch_text=False):
all_sub_tokens = []
tokens = sentence.split(" ")
for token in tokens:
sub_tokens, switch_token = self.tokenize_token(token, switch_text)
all_sub_tokens += sub_tokens
return all_sub_tokens
def convert_examples_to_dataset(self, examples, is_augmented=None, is_training=True):
all_original_input_ids = []
all_original_attention_mask = []
all_original_token_type_ids = []
all_original_r1_mask = []
all_original_start_positions = []
all_original_end_positions = []
all_noised_input_ids = []
all_noised_attention_mask = []
all_noised_token_type_ids = []
all_noised_r1_mask = []
all_noised_start_positions = []
all_noised_end_positions = []
all_is_augmented = []
for (ex_index, example) in enumerate(examples):
if is_training and not example.is_impossible:
# Get start and end position
start_position = example.start_position
end_position = example.end_position
# If the answer cannot be found in the text, then skip this example.
actual_text = " ".join(example.doc_tokens[start_position: (end_position + 1)])
cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
# exit(0)
else:
start_position, end_position = None, None
if ex_index % 1000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len(examples)))
# if ex_index == 1000:
# break
# switch all examples
switch_text = True
noised_orig_to_tok_index = []
noised_all_doc_tokens = []
noised_tok_to_orig_index = []
original_orig_to_tok_index = []
original_all_doc_tokens = []
original_tok_to_orig_index = []
is_token_switched = [False] * len(example.doc_tokens)
for (i, token) in enumerate(example.doc_tokens):
original_orig_to_tok_index.append(len(original_all_doc_tokens))
can_be_switched = False if self.keep_boundary_unchanged and (
i == start_position or i == end_position) else True
if self.enable_data_augmentation and is_augmented[ex_index]:
if self.augment_method == "cs":
if start_position <= i <= end_position:
can_be_switched = False
original_sub_tokens, switch_token = self.tokenize_token(token, switch_text,
can_be_switched=can_be_switched,
enable_code_switch=True)
elif self.augment_method == "ss":
original_sub_tokens, switch_token = self.tokenize_token(token, switch_text,
can_be_switched=can_be_switched,
enable_bpe_sampling=True)
elif self.augment_method == "mt" or self.augment_method == "gn":
original_sub_tokens, switch_token = self.tokenize_token(token, switch_text=False)
else:
assert False
else:
original_sub_tokens, switch_token = self.tokenize_token(token, switch_text=False)
# original_sub_tokens = self.tokenizer.tokenize(token)
is_token_switched[i] = is_token_switched[i] or switch_token
for sub_token in original_sub_tokens:
original_tok_to_orig_index.append(i)
original_all_doc_tokens.append(sub_token)
keep_answer_unchanged = False
if is_training and not example.is_impossible:
original_tok_start_position = original_orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
original_tok_end_position = original_orig_to_tok_index[example.end_position + 1] - 1
else:
original_tok_end_position = len(original_all_doc_tokens) - 1
(new_original_tok_start_position, new_original_tok_end_position) = _improve_answer_span(
original_all_doc_tokens, original_tok_start_position, original_tok_end_position, self.tokenizer,
example.answer_text
)
keep_answer_unchanged = (original_tok_start_position != new_original_tok_start_position) or (
original_tok_end_position != new_original_tok_end_position)
for (i, token) in enumerate(example.doc_tokens):
noised_orig_to_tok_index.append(len(noised_all_doc_tokens))
can_be_switched = False if self.keep_boundary_unchanged and (
i == start_position or i == end_position) else True
if keep_answer_unchanged and i >= start_position and i <= end_position:
can_be_switched = False
noised_sub_tokens, switch_token = self.tokenize_token(token, switch_text,
can_be_switched=can_be_switched,
enable_code_switch=self.enable_code_switch,
enable_bpe_switch=self.enable_bpe_switch,
enable_bpe_sampling=self.enable_bpe_sampling)
is_token_switched[i] = is_token_switched[i] or switch_token
for sub_token in noised_sub_tokens:
noised_tok_to_orig_index.append(i)
noised_all_doc_tokens.append(sub_token)
if is_training and not example.is_impossible:
noised_tok_start_position = noised_orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
noised_tok_end_position = noised_orig_to_tok_index[example.end_position + 1] - 1
else:
noised_tok_end_position = len(noised_all_doc_tokens) - 1
(noised_tok_start_position, noised_tok_end_position) = _improve_answer_span(
noised_all_doc_tokens, noised_tok_start_position, noised_tok_end_position, self.tokenizer,
example.answer_text
)
original_truncated_query = self.tokenizer.encode(example.question_text, add_special_tokens=False,
truncation=True, max_length=self.max_query_length)
noised_question_sub_tokens = self.tokenize_sentence(example.question_text, switch_text)
noised_truncated_query = self.tokenizer.encode(noised_question_sub_tokens, add_special_tokens=False,
truncation=True, max_length=self.max_query_length)
sequence_added_tokens = (
self.tokenizer.max_len - self.tokenizer.max_len_single_sentence + 1
if "roberta" in str(type(self.tokenizer)) or "camembert" in str(type(self.tokenizer))
else self.tokenizer.max_len - self.tokenizer.max_len_single_sentence
)
sequence_pair_added_tokens = self.tokenizer.max_len - self.tokenizer.max_len_sentences_pair
spans = []
span_doc_tokens = original_all_doc_tokens
while len(spans) * self.doc_stride < len(original_all_doc_tokens):
original_encoded_dict = self.tokenizer.encode_plus( # TODO(thom) update this logic
original_truncated_query if self.tokenizer.padding_side == "right" else span_doc_tokens,
span_doc_tokens if self.tokenizer.padding_side == "right" else original_truncated_query,
max_length=self.max_seq_length,
return_overflowing_tokens=True,
pad_to_max_length=True,
stride=self.max_seq_length - self.doc_stride - len(
original_truncated_query) - sequence_pair_added_tokens,
truncation_strategy="only_second" if self.tokenizer.padding_side == "right" else "only_first",
)
paragraph_len = min(
len(original_all_doc_tokens) - len(spans) * self.doc_stride,
self.max_seq_length - len(original_truncated_query) - sequence_pair_added_tokens,
)
if self.tokenizer.pad_token_id in original_encoded_dict["input_ids"]:
if self.tokenizer.padding_side == "right":
non_padded_ids = original_encoded_dict["input_ids"][
: original_encoded_dict["input_ids"].index(self.tokenizer.pad_token_id)]
else:
last_padding_id_position = (
len(original_encoded_dict["input_ids"]) - 1 - original_encoded_dict["input_ids"][
::-1].index(
self.tokenizer.pad_token_id)
)
non_padded_ids = original_encoded_dict["input_ids"][last_padding_id_position + 1:]
else:
non_padded_ids = original_encoded_dict["input_ids"]
tokens = self.tokenizer.convert_ids_to_tokens(non_padded_ids)
original_encoded_dict["tokens"] = tokens
original_encoded_dict["start"] = len(spans) * self.doc_stride
original_encoded_dict["length"] = paragraph_len
noised_tokens = []
noised_r1_mask = []
original_r1_mask = []
token_to_orig_map = {}
span_start = None
break_flag = False
for i in range(paragraph_len):
index = len(
original_truncated_query) + sequence_added_tokens + i if self.tokenizer.padding_side == "right" else i
token_to_orig_map[index] = original_tok_to_orig_index[len(spans) * self.doc_stride + i]
original_index = len(spans) * self.doc_stride + i
cur_orig_index = original_tok_to_orig_index[original_index]
pre_orig_index = original_tok_to_orig_index[original_index - 1] if i > 0 else -1
if not is_token_switched[cur_orig_index]:
noised_index = original_index - original_orig_to_tok_index[cur_orig_index] + \
noised_orig_to_tok_index[cur_orig_index]
assert original_all_doc_tokens[original_index] == noised_all_doc_tokens[noised_index]
if span_start is None:
span_start = noised_index
if len(noised_tokens) + len(
noised_truncated_query) + sequence_pair_added_tokens == self.noised_max_seq_length:
break
noised_tokens.append(noised_all_doc_tokens[noised_index])
noised_r1_mask.append(1)
elif is_token_switched[cur_orig_index] and cur_orig_index != pre_orig_index:
noised_index = noised_orig_to_tok_index[cur_orig_index]
while noised_index < len(noised_tok_to_orig_index):
if noised_tok_to_orig_index[noised_index] != cur_orig_index:
break
if span_start is None:
span_start = noised_index
if len(noised_tokens) + len(
noised_truncated_query) + sequence_pair_added_tokens == self.noised_max_seq_length:
break_flag = True
break
noised_tokens.append(noised_all_doc_tokens[noised_index])
noised_r1_mask.append(0)
noised_index += 1
if break_flag:
break
original_r1_mask.append(1 if not is_token_switched[cur_orig_index] else 0)
assert len(noised_tokens) + len(
noised_truncated_query) + sequence_pair_added_tokens <= self.noised_max_seq_length
if self.tokenizer.padding_side == "right":
noised_r1_mask = [0] * (len(noised_truncated_query) + 3) + noised_r1_mask + [0]
original_r1_mask = [0] * (len(original_truncated_query) + 3) + original_r1_mask + [0]
else:
assert False
noised_r1_mask += (self.noised_max_seq_length - len(noised_r1_mask)) * [0]
original_r1_mask += (self.max_seq_length - len(original_r1_mask)) * [0]
noised_encoded_dict = self.tokenizer.encode_plus( # TODO(thom) update this logic
noised_truncated_query if self.tokenizer.padding_side == "right" else noised_tokens,
noised_tokens if self.tokenizer.padding_side == "right" else original_truncated_query,
max_length=self.noised_max_seq_length,
pad_to_max_length=True,
truncation_strategy="only_second" if self.tokenizer.padding_side == "right" else "only_first",
)
if self.tokenizer.pad_token_id in noised_encoded_dict["input_ids"]:
if self.tokenizer.padding_side == "right":
non_padded_ids = noised_encoded_dict["input_ids"][
: noised_encoded_dict["input_ids"].index(self.tokenizer.pad_token_id)]
else:
last_padding_id_position = (
len(noised_encoded_dict["input_ids"]) - 1 - noised_encoded_dict["input_ids"][
::-1].index(
self.tokenizer.pad_token_id)
)
non_padded_ids = noised_encoded_dict["input_ids"][last_padding_id_position + 1:]
else:
non_padded_ids = noised_encoded_dict["input_ids"]
tokens = self.tokenizer.convert_ids_to_tokens(non_padded_ids)
noised_encoded_dict["tokens"] = tokens
noised_encoded_dict["r1_mask"] = noised_r1_mask
assert span_start is not None
noised_encoded_dict["start"] = span_start
noised_encoded_dict["length"] = len(noised_tokens)
original_encoded_dict["r1_mask"] = original_r1_mask
spans.append((original_encoded_dict, noised_encoded_dict))
if "overflowing_tokens" not in original_encoded_dict:
break
span_doc_tokens = original_encoded_dict["overflowing_tokens"]
for (original_span, noised_span) in spans:
# Identify the position of the CLS token
original_cls_index = original_span["input_ids"].index(self.tokenizer.cls_token_id)
noised_cls_index = noised_span["input_ids"].index(self.tokenizer.cls_token_id)
# p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens that can be in an answer)
# The original TF implementation also keeps the classification token (set to 0) (not sure why).
original_p_mask = np.array(original_span["token_type_ids"])
noised_p_mask = np.array(noised_span["token_type_ids"])
original_p_mask = np.minimum(original_p_mask, 1)
noised_p_mask = np.minimum(noised_p_mask, 1)
if self.tokenizer.padding_side == "right":
# Limit positive values to one
original_p_mask = 1 - original_p_mask
noised_p_mask = 1 - noised_p_mask
original_p_mask[np.where(np.array(original_span["input_ids"]) == self.tokenizer.sep_token_id)[0]] = 1
noised_p_mask[np.where(np.array(noised_span["input_ids"]) == self.tokenizer.sep_token_id)[0]] = 1
# Set the CLS index to '0'
original_p_mask[original_cls_index] = 0
noised_p_mask[noised_cls_index] = 0
# TODO cls_index in xlm-r is 0
assert original_cls_index == 0
assert noised_cls_index == 0
original_span["r1_mask"][original_cls_index] = 1
noised_span["r1_mask"][noised_cls_index] = 1
span_is_impossible = example.is_impossible
original_start_position = 0
original_end_position = 0
noised_start_position = 0
noised_end_position = 0
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
noised_doc_start = noised_span["start"]
noised_doc_end = noised_span["start"] + noised_span["length"] - 1
noised_out_of_span = False
original_doc_start = original_span["start"]
original_doc_end = original_span["start"] + original_span["length"] - 1
original_out_of_span = False
if not (
noised_tok_start_position >= noised_doc_start and noised_tok_end_position <= noised_doc_end):
noised_out_of_span = True
if not (
new_original_tok_start_position >= original_doc_start and new_original_tok_end_position <= original_doc_end):
original_out_of_span = True
if noised_out_of_span:
noised_start_position = noised_cls_index
noised_end_position = noised_cls_index
span_is_impossible = True
else:
if self.tokenizer.padding_side == "left":
doc_offset = 0
else:
doc_offset = len(noised_truncated_query) + sequence_added_tokens
noised_start_position = noised_tok_start_position - noised_doc_start + doc_offset
noised_end_position = noised_tok_end_position - noised_doc_start + doc_offset
if original_out_of_span:
original_start_position = original_cls_index
original_end_position = original_cls_index
span_is_impossible = True
else:
if self.tokenizer.padding_side == "left":
doc_offset = 0
else:
doc_offset = len(original_truncated_query) + sequence_added_tokens
original_start_position = new_original_tok_start_position - original_doc_start + doc_offset
original_end_position = new_original_tok_end_position - original_doc_start + doc_offset
all_original_input_ids += [original_span["input_ids"]]
all_original_attention_mask += [original_span["attention_mask"]]
all_original_token_type_ids += [original_span["token_type_ids"]]
all_original_r1_mask += [original_span["r1_mask"]]
all_original_start_positions += [original_start_position]
all_original_end_positions += [original_end_position]
all_noised_input_ids += [noised_span["input_ids"]]
all_noised_attention_mask += [noised_span["attention_mask"]]
all_noised_token_type_ids += [noised_span["token_type_ids"]]
all_noised_r1_mask += [noised_span["r1_mask"]]
all_noised_start_positions += [noised_start_position]
all_noised_end_positions += [noised_end_position]
all_is_augmented += [is_augmented[ex_index]]
# Convert to Tensors and build dataset
all_original_input_ids = torch.tensor([input_ids for input_ids in all_original_input_ids], dtype=torch.long)
all_original_attention_mask = torch.tensor([attention_mask for attention_mask in all_original_attention_mask],
dtype=torch.long)
all_original_token_type_ids = torch.tensor([token_type_ids for token_type_ids in all_original_token_type_ids],
dtype=torch.long)
all_original_r1_mask = torch.tensor([original_r1_mask for original_r1_mask in all_original_r1_mask],
dtype=torch.long)
all_original_start_positions = torch.tensor([start_position for start_position in all_original_start_positions],
dtype=torch.long)
all_original_end_positions = torch.tensor([end_position for end_position in all_original_end_positions],
dtype=torch.long)
all_noised_input_ids = torch.tensor([input_ids for input_ids in all_noised_input_ids], dtype=torch.long)
all_noised_attention_mask = torch.tensor([attention_mask for attention_mask in all_noised_attention_mask],
dtype=torch.long)
all_noised_token_type_ids = torch.tensor([token_type_ids for token_type_ids in all_noised_token_type_ids],
dtype=torch.long)
all_noised_r1_mask = torch.tensor([noised_r1_mask for noised_r1_mask in all_noised_r1_mask],
dtype=torch.long)
all_noised_start_positions = torch.tensor([start_position for start_position in all_noised_start_positions],
dtype=torch.long)
all_noised_end_positions = torch.tensor([end_position for end_position in all_noised_end_positions],
dtype=torch.long)
all_is_augmented = torch.tensor([is_augmented for is_augmented in all_is_augmented])
dataset = TensorDataset(all_original_input_ids, all_original_attention_mask, all_original_token_type_ids,
all_original_start_positions, all_original_end_positions, all_original_attention_mask,
all_original_attention_mask, all_original_attention_mask,
all_noised_input_ids, all_noised_attention_mask, all_noised_token_type_ids,
all_noised_r1_mask, all_original_r1_mask, all_noised_start_positions,
all_noised_end_positions, all_is_augmented)
return dataset
def get_train_steps(self, examples, args):
if args.max_steps > 0:
t_total = args.max_steps
else:
assert False
return t_total
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
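# train: standard fine-tuning loop (AdamW with linear warmup/decay, optional fp16 and
# distributed training). When a NoisedDataGenerator is supplied, a fresh noised dataset is
# rebuilt at the start of every epoch and the original/noised/R1/R2 loss components
# returned by the model are logged separately to TensorBoard. An optional first_stage_model
# supplies teacher start/end logits for the second-stage training.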
def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, noised_data_generator=None):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_log_dir = os.getenv("PHILLY_JOB_DIRECTORY", None)
tb_writer = SummaryWriter(log_dir=tb_log_dir)
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# args.warmup_steps == -1 means use a warmup ratio of 0.1
if args.warmup_steps == -1:
args.warmup_steps = int(t_total * 0.1)
logger.info("Warmup steps: %d" % args.warmup_steps)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 1
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
try:
# set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss, best_avg_f1 = 0.0, 0.0, 0.0
tr_original_loss, logging_original_loss = 0.0, 0.0
tr_noised_loss, logging_noised_loss = 0.0, 0.0
tr_r1_loss, logging_r1_loss = 0.0, 0.0
tr_r2_loss, logging_r2_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
# Added here for reproducibility
set_seed(args)
def logging(eval=False):
results = None
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank in [-1, 0] and args.evaluate_during_training and eval:
results = evaluate(args, model, tokenizer)
for key, value in results.items():
logger.info("eval_{}: {}".format(key, value))
# for key, value in results.items():
# tb_writer.add_scalar("eval_{}".format(key), value, global_step)
log_writer.write("{0}\t{1}".format(global_step, json.dumps(results)) + '\n')
log_writer.flush()
logger.info(
"global_step: {}, lr: {:.6f}, loss: {:.6f}, original_loss: {:.6f}, noised_loss: {:.6f}, r1_loss: {:.6f}, r2_loss: {:.6f}".format(
global_step, scheduler.get_lr()[0], (tr_loss - logging_loss) / args.logging_steps,
(tr_original_loss - logging_original_loss) / args.logging_steps,
(tr_noised_loss - logging_noised_loss) / args.logging_steps,
(tr_r1_loss - logging_r1_loss) / args.logging_steps,
(tr_r2_loss - logging_r2_loss) / args.logging_steps))
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("original_loss", (tr_original_loss - logging_original_loss) / args.logging_steps,
global_step)
tb_writer.add_scalar("noised_loss", (tr_noised_loss - logging_noised_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r1_loss", (tr_r1_loss - logging_r1_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r2_loss", (tr_r2_loss - logging_r2_loss) / args.logging_steps, global_step)
if results is not None:
return results["dev_avg"]["f1"]
else:
return None
for _ in train_iterator:
use_noised_ids = False
if noised_data_generator is not None:
assert noised_data_generator.enable_r1_loss or noised_data_generator.noised_loss or noised_data_generator.enable_data_augmentation
noised_train_dataset = noised_data_generator.get_noised_dataset(train_examples)
train_sampler = RandomSampler(noised_train_dataset) if args.local_rank == -1 else DistributedSampler(
noised_train_dataset)
train_dataloader = DataLoader(noised_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
# epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
if first_stage_model is not None:
first_stage_model.eval()
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"start_positions": batch[3],
"end_positions": batch[4],
}
if first_stage_model is not None:
with torch.no_grad():
inputs["first_stage_model_start_logits"], inputs["first_stage_model_end_logits"] = first_stage_model(**inputs)[1:3]
if noised_data_generator is not None:
inputs.update({"noised_input_ids": batch[8], "noised_attention_mask": batch[9],
"noised_token_type_ids": batch[10], "noised_r1_mask": batch[11],
"original_r1_mask": batch[12], "noised_start_positions": batch[13],
"noised_end_positions": batch[14], "is_augmented": batch[15]})
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if use_noised_ids:
del inputs["noised_token_type_ids"]
if args.model_type in ["xlnet", "xlm"]:
assert False
inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
if args.version_2_with_negative:
inputs.update({"is_impossible": batch[7]})
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
outputs = model(**inputs)
# model outputs are always tuple in transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
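# The condition below (`if True or ...`) always evaluates to true, so the per-component losses
# returned by the model (original, noised, R1, R2) are accumulated on every step and can be
# logged separately from the combined training loss.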
if True or noised_data_generator is not None:
original_loss, noised_loss, r1_loss, r2_loss = outputs[1:5]
if args.n_gpu > 1:
original_loss = original_loss.mean()
noised_loss = noised_loss.mean()
r1_loss = r1_loss.mean()
r2_loss = r2_loss.mean()
if args.gradient_accumulation_steps > 1:
original_loss = original_loss / args.gradient_accumulation_steps
noised_loss = noised_loss / args.gradient_accumulation_steps
r1_loss = r1_loss / args.gradient_accumulation_steps
r2_loss = r2_loss / args.gradient_accumulation_steps
tr_original_loss += original_loss.item()
tr_noised_loss += noised_loss.item()
tr_r1_loss += r1_loss.item()
tr_r2_loss += r2_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
cur_result = logging(eval=args.evaluate_steps > 0 and global_step % args.evaluate_steps == 0)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.local_rank in [-1, 0] and args.logging_each_epoch:
avg_f1 = logging(eval=True)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
if avg_f1 > best_avg_f1:
best_avg_f1 = avg_f1
output_dir = os.path.join(args.output_dir, "checkpoint-best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / global_step
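# evaluate() below runs prediction for every (split, language) pair of the selected task (the dev
# split for every requested language, plus the test split for MLQA) and collects per-language
# results together with dev_avg / test_avg entries.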
def evaluate(args, model, tokenizer, prefix=""):
languages = args.language.split(',')
all_languages_results = {}
if args.task_name.lower() == "mlqa" or args.task_name.lower() == "mlqa_dev":
processor = MLQAProcessor()
elif args.task_name.lower() == "xquad":
processor = XQuADProcessor()
elif args.task_name.lower() == "tydiqa":
processor = TyDiQAProcessor()
elif args.task_name.lower() == "squad":
processor = SquadV1Processor()
else:
assert False
split_lang_list = []
# split_lang_list.append(("run_dev", "en"))
for lang in languages:
split_lang_list.append(("dev", lang))
if args.task_name.lower() == "mlqa":
for lang in languages:
split_lang_list.append(("test", lang))
for split, lang in split_lang_list:
# for split, lang in itertools.product(["dev", "test"], languages):
print("evaluating on {0} {1}".format(split, lang))
dataset, examples, features = load_and_cache_examples(args, tokenizer, language=lang, split=split,
output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
example_indices = batch[3]
# XLNet and XLM use more arguments for their predictions
if args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": batch[4], "p_mask": batch[5]})
# for lang_id-sensitive xlm models
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
outputs = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i]) for output in outputs]
# Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
# models only use two.
if len(output) >= 5:
start_logits = output[0]
start_top_index = output[1]
end_logits = output[2]
end_top_index = output[3]
cls_logits = output[4]
result = SquadResult(
unique_id,
start_logits,
end_logits,
start_top_index=start_top_index,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
else:
start_logits, end_logits = output
result = SquadResult(unique_id, start_logits, end_logits)
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "{}.prediction".format(lang))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}_{}_{}.json".format(prefix, split, lang))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir,
"null_odds_{}_{}_{}.json".format(prefix, split, lang))
else:
output_null_log_odds_file = None
# XLNet and XLM use a more complex post-processing procedure
if args.model_type in ["xlnet", "xlm"]:
start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top
end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top
predictions = compute_predictions_log_probs(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
start_n_top,
end_n_top,
args.version_2_with_negative,
tokenizer,
args.verbose_logging,
)
else:
predictions = compute_predictions_logits(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
args.do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
args.verbose_logging,
args.version_2_with_negative,
args.null_score_diff_threshold,
tokenizer,
map_to_origin=not (args.model_type == "xlmr" and (lang == 'zh' or lang == "ko")),
# map_to_origin=False,
)
# Compute the F1 and exact scores.
if args.task_name.lower() == "mlqa" or args.task_name.lower() == "mlqa_dev":
results = mlqa_evaluate_with_path(processor.get_dataset_path(args.data_dir, split, lang),
output_prediction_file, lang)
else:
results = squad_evaluate_with_path(processor.get_dataset_path(args.data_dir, split, lang),
output_prediction_file)
# results = squad_evaluate(examples, predictions)
# results = evaluate_with_path(processor.get_dataset_path(args.data_dir, split, lang), output_prediction_file,
# lang)
all_languages_results["{0}_{1}".format(split, lang)] = results
for split in ["dev", "test"]:
all_languages_results["{0}_avg".format(split)] = average_dic(
[value for key, value in all_languages_results.items() if split in key])
return all_languages_results
def average_dic(dic_list):
if len(dic_list) == 0:
return {}
dic_sum = {}
for dic in dic_list:
if len(dic_sum) == 0:
for key, value in dic.items():
dic_sum[key] = value
else:
assert set(dic_sum.keys()) == set(dic.keys()), "sum_keys:{0}, dic_keys:{1}".format(set(dic_sum.keys()),
set(dic.keys()))
for key, value in dic.items():
dic_sum[key] += value
for key in dic_sum:
dic_sum[key] /= len(dic_list)
return dic_sum
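# Illustrative example (not part of the original code):
# average_dic([{"f1": 80.0, "exact_match": 60.0}, {"f1": 90.0, "exact_match": 70.0}])
# -> {"f1": 85.0, "exact_match": 65.0}
# All dictionaries must share exactly the same keys, which the assertion above enforces.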
def load_and_cache_examples(args, tokenizer, language, split="train", output_examples=False):
if args.local_rank not in [-1, 0] and split == "train":
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
torch.distributed.barrier()
# Load data features from cache or dataset file
input_dir = args.data_dir if args.data_dir else "."
model_name = "xlmr-base-final"
cached_features_file = os.path.join(
input_dir,
"cached_{}_{}_{}_{}".format(
split,
language,
model_name,
str(args.max_seq_length),
),
)
# Init features and dataset from cache if it exists
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features_and_dataset = torch.load(cached_features_file)
features, dataset, examples = (
features_and_dataset["features"],
features_and_dataset["dataset"],
features_and_dataset["examples"],
)
else:
logger.info("Creating features from dataset file at %s", input_dir)
if not args.data_dir and (
(split != "train" and not args.predict_file) or (split == "train" and not args.train_file)):
raise ValueError("data dir can't be empty")
try:
import tensorflow_datasets as tfds
except ImportError:
raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
if args.version_2_with_negative:
logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
else:
# processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if args.task_name.lower() == "mlqa" or args.task_name.lower() == "mlqa_dev":
processor = MLQAProcessor()
elif args.task_name.lower() == "xquad":
processor = XQuADProcessor()
elif args.task_name.lower() == "tydiqa":
processor = TyDiQAProcessor()
elif args.task_name.lower() == "squad":
processor = SquadV1Processor()
else:
assert False
if split == "run_dev":
examples = processor.get_dev_examples(args.data_dir)
elif split == "dev":
if args.task_name.lower() == "squad":
examples = processor.get_dev_examples(args.data_dir)
else:
examples = processor.get_dev_examples_by_language(args.data_dir, language=language)
elif split == "test":
examples = processor.get_test_examples_by_language(args.data_dir, language=language)
else:
examples = processor.get_train_examples(args.data_dir)
features, dataset = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=split == "train",
return_dataset="pt",
threads=args.threads,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file)
if args.local_rank == 0 and split == "train":
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
torch.distributed.barrier()
if output_examples:
return dataset, examples, features
return dataset
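# Cached feature files are keyed by split, language, a hard-coded model tag and the max sequence
# length, e.g. (illustratively) "cached_dev_en_xlmr-base-final_384"; delete them or pass
# --overwrite_cache after changing tokenization settings.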
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--reload",
default="",
type=str,
help="path to infoxlm checkpoint",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
parser.add_argument(
"--task_name",
default="mlqa",
type=str,
help="task_name",
)
# stable fine-tuning parameters
parser.add_argument("--overall_ratio", default=1.0, type=float, help="overall ratio")
parser.add_argument("--enable_r1_loss", action="store_true", help="Whether to enable r1 loss.")
parser.add_argument("--r1_lambda", default=5.0, type=float, help="lambda of r1 loss")
parser.add_argument("--original_loss", action="store_true",
help="Whether to use cross entropy loss on the former example.")
parser.add_argument("--noised_loss", action="store_true",
help="Whether to use cross entropy loss on the latter example.")
parser.add_argument("--noised_max_seq_length", default=512, type=int, help="noised max sequence length")
parser.add_argument("--keep_boundary_unchanged", action="store_true",
help="Whether to keep the boundary of answer unchanged.")
parser.add_argument("--r1_on_boundary_only", action="store_true",
help="Whether to enable r1 loss on boundary only.")
parser.add_argument("--enable_bpe_switch", action="store_true", help="Whether to enable bpe-switch.")
parser.add_argument("--bpe_switch_ratio", default=0.5, type=float, help="bpe_switch_ratio")
parser.add_argument("--tokenizer_dir", default=None, type=str, help="tokenizer dir")
parser.add_argument("--tokenizer_languages", default=None, type=str, help="tokenizer languages")
parser.add_argument("--enable_bpe_sampling", action="store_true", help="Whether to enable bpe sampling.")
parser.add_argument("--bpe_sampling_ratio", default=0.5, type=float, help="bpe_sampling_ratio")
parser.add_argument("--sampling_alpha", default=5.0, type=float, help="alpha of sentencepiece sampling")
parser.add_argument("--sampling_nbest_size", default=-1, type=int, help="nbest_size of sentencepiece sampling")
parser.add_argument("--enable_random_noise", action="store_true", help="Whether to enable random noise.")
parser.add_argument("--noise_detach_embeds", action="store_true", help="Whether to detach noised embeddings.")
parser.add_argument("--noise_eps", default=1e-5, type=float, help="noise eps")
parser.add_argument('--noise_type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument("--enable_code_switch", action="store_true", help="Whether to enable code switch.")
parser.add_argument("--code_switch_ratio", default=0.5, type=float, help="code_switch_ratio")
parser.add_argument("--dict_dir", default=None, type=str, help="dict dir")
parser.add_argument("--dict_languages", default=None, type=str, help="dict languages")
parser.add_argument("--enable_translate_data", action="store_true",
help="Whether to enable translate data.")
parser.add_argument("--translation_path", default=None, type=str, help="path to translation")
parser.add_argument("--disable_translate_labels", action="store_true", help="Whether to disable translate labels.")
parser.add_argument("--translate_languages", default=None, type=str, help="translate languages")
parser.add_argument("--translate_augment_ratio", default=0.0, type=float, help="translate augment ratio")
parser.add_argument("--enable_data_augmentation", action="store_true", help="Whether to enable data augmentation.")
parser.add_argument("--augment_ratio", default=1.0, type=float, help="augmentation ratio.")
parser.add_argument("--augment_method", default=None, type=str, required=False, help="augment_method")
parser.add_argument("--first_stage_model_path", default=None, type=str, required=False,
help="stable model path")
parser.add_argument("--r2_lambda", default=1.0, type=float, required=False,
help="r2_lambda")
parser.add_argument("--use_hard_labels", action="store_true", help="Whether to use hard labels.")
# Other parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
help="The input data dir. Should contain the .json files for the task."
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--train_file",
default=None,
type=str,
help="The input training file. If a data dir is specified, will look for the file there"
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--predict_file",
default=None,
type=str,
help="The input evaluation file. If a data dir is specified, will look for the file there"
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.",
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
parser.add_argument(
"--verbose_logging",
action="store_true",
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.",
)
parser.add_argument(
"--lang_id",
default=0,
type=int,
help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
)
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--evaluate_steps", type=int, default=0, help="Log every X updates steps.")
parser.add_argument("--logging_each_epoch", action="store_true", help="Whether to log after each epoch.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features")
# cross-lingual part
parser.add_argument(
"--language",
default=None,
type=str,
required=True,
help="Evaluation language. Also train language if `train_language` is set to None.",
)
parser.add_argument(
"--train_language", default=None, type=str, help="Train language if is different of the evaluation language."
)
args = parser.parse_args()
if args.doc_stride >= args.max_seq_length - args.max_query_length:
logger.warning(
"WARNING - You've set a doc stride which may be superior to the document length in some "
"examples. This could result in errors when building features from the examples. Please reduce the doc "
"stride or increase the maximum length to ensure the features are correctly built."
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.enable_r1_loss or args.noised_loss or args.enable_translate_data or args.enable_data_augmentation:
noised_data_generator = NoisedDataGenerator(
task_name=args.task_name,
r1_lambda=args.r1_lambda,
enable_r1_loss=args.enable_r1_loss,
original_loss=args.original_loss,
noised_loss=args.noised_loss,
keep_boundary_unchanged=args.keep_boundary_unchanged,
r1_on_boundary_only=args.r1_on_boundary_only,
noised_max_seq_length=args.noised_max_seq_length,
max_seq_length=args.max_seq_length,
max_query_length=args.max_query_length,
doc_stride=args.doc_stride,
overall_ratio=args.overall_ratio,
enable_bpe_switch=args.enable_bpe_switch,
bpe_switch_ratio=args.bpe_switch_ratio,
tokenizer_dir=args.tokenizer_dir,
do_lower_case=args.do_lower_case,
tokenizer_languages=args.tokenizer_languages.split(',') if args.tokenizer_languages is not None else [],
enable_bpe_sampling=args.enable_bpe_sampling,
bpe_sampling_ratio=args.bpe_sampling_ratio,
tokenizer=tokenizer,
sampling_alpha=args.sampling_alpha,
sampling_nbest_size=args.sampling_nbest_size,
enable_random_noise=args.enable_random_noise,
noise_detach_embeds=args.noise_detach_embeds,
noise_eps=args.noise_eps,
noise_type=args.noise_type,
enable_code_switch=args.enable_code_switch,
code_switch_ratio=args.code_switch_ratio,
dict_dir=args.dict_dir,
dict_languages=args.dict_languages.split(',') if args.dict_languages is not None else [],
translation_path=args.translation_path,
disable_translate_labels=args.disable_translate_labels,
translate_languages=args.translate_languages.split(
',') if args.translate_languages is not None else args.language.split(','),
enable_data_augmentation=args.enable_data_augmentation,
augment_ratio=args.augment_ratio,
augment_method=args.augment_method,
r2_lambda=args.r2_lambda,
use_hard_labels=args.use_hard_labels,
)
else:
noised_data_generator = None
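# The noised-data generator is only constructed when at least one of the stability options
# (--enable_r1_loss, --noised_loss, --enable_translate_data, --enable_data_augmentation) is set;
# otherwise plain fine-tuning is performed.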
if args.first_stage_model_path is not None:
first_stage_model = model_class.from_pretrained(args.first_stage_model_path,
config=config)
else:
first_stage_model = None
state_dict = None
if args.reload != "":
from tools.dump_hf_state_dict import convert_pt_to_hf
state_dict = convert_pt_to_hf(os.path.join(args.model_name_or_path, 'pytorch_model.bin'), args.reload, logger)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
noised_data_generator=noised_data_generator,
cache_dir=args.cache_dir if args.cache_dir else None,
state_dict=state_dict,
)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model.to(args.device)
if first_stage_model is not None:
first_stage_model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, "einsum")
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if args.do_train:
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
train_dataset, train_examples, _ = load_and_cache_examples(args, tokenizer, language=args.train_language,
split="train", output_examples=True)
global_step, tr_loss = train(args, train_examples, train_dataset, model, first_stage_model, tokenizer,
noised_data_generator=noised_data_generator)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir) # , force_download=True)
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(args.device)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
if args.do_train:
logger.info("Loading checkpoints saved during training for evaluation")
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
else:
logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path)
checkpoints = [args.model_name_or_path]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "test"
model = model_class.from_pretrained(checkpoint) # , force_download=True)
model.to(args.device)
# Evaluate
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
result = evaluate(args, model, tokenizer, prefix=global_step)
# result = squad(args, model, tokenizer, prefix=global_step)
log_writer.write("{0}\t{1}".format(global_step, json.dumps(result)) + '\n')
result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
results.update(result)
logger.info("Results: {}".format(results))
logger.info("Task MLQA Finished!")
return results
if __name__ == "__main__":
main()
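# Example invocation (illustrative only; the paths, languages and hyper-parameters below are
# placeholders, not values prescribed by this script):
#   python run_qa.py \
#     --model_type xlmr --model_name_or_path xlm-roberta-base \
#     --task_name mlqa --language en,es,de --train_language en \
#     --data_dir /path/to/mlqa --output_dir /path/to/output \
#     --do_train --do_eval --per_gpu_train_batch_size 8 \
#     --learning_rate 3e-5 --num_train_epochs 3 --max_seq_length 384 --doc_stride 128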
| EXA-1-master | exa/models/unilm-master/xtune/src/run_qa.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning models for NER and POS tagging."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import copy
import json
import random
import math
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from utils_tag import InputExample
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
RobertaConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForTokenClassificationPoolingStable,
)
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(tuple(conf.pretrained_config_archive_map.keys())
for conf in (RobertaConfig, XLMRobertaConfig)),
()
)
MODEL_CLASSES = {
"xlmr": (XLMRobertaConfig, XLMRobertaForTokenClassificationPoolingStable, XLMRobertaTokenizer),
}
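# Only the "xlmr" model type is registered here; it maps to XLMRobertaConfig, the pooling-based
# token-classification head with stability regularization
# (XLMRobertaForTokenClassificationPoolingStable), and XLMRobertaTokenizer.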
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_root(x, parent):
if x == parent[x]: return x
parent[x] = get_root(parent[x], parent)
return parent[x]
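# get_root implements union-find with path compression; it is used when reading translation
# alignments to cluster mutually aligned source/target word indices into shared pooling groups.
# A small illustrative trace (not part of the original code):
#   parent = [0, 1, 2]; parent[0] = 1        # union indices 0 and 1
#   get_root(0, parent) -> 1, get_root(2, parent) -> 2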
class NoisedDataGenerator(object):
def __init__(self,
label_list,
pad_token_label_id,
r1_lambda=5.0,
r1_on_unswitched_tokens=False,
enable_r1_loss=False,
disable_backward_kl=False,
use_sentence_label_probs=False,
use_token_label_probs=False,
original_loss=True,
noised_loss=False,
max_seq_length=512,
noised_max_seq_length=512,
overall_ratio=1.0,
enable_bpe_switch=False,
bpe_switch_ratio=0.5,
tokenizer_dir=None,
do_lower_case=False,
tokenizer_languages=None,
enable_bpe_sampling=False,
bpe_sampling_ratio=0.5,
tokenizer=None,
sampling_alpha=0.3,
sampling_nbest_size=-1,
enable_random_noise=False,
detach_embeds=False,
noise_eps=1e-5,
noise_type='uniform',
enable_code_switch=False,
code_switch_ratio=0.5,
dict_dir=None,
dict_languages=None,
use_average_representations=False,
translation_path=None,
translate_languages=None,
use_align_label_probs=False,
enable_data_augmentation=False,
augment_ratio=0.0,
augment_method=None,
r2_lambda=1.0,
use_hard_labels=False):
if enable_code_switch:
assert dict_dir is not None
assert dict_languages is not None
assert tokenizer is not None
if enable_random_noise:
assert noise_type in ['uniform', 'normal']
if enable_r1_loss:
assert use_token_label_probs or use_sentence_label_probs or (
use_align_label_probs and enable_translate_data)
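# Caution: `enable_translate_data` is not a parameter of this __init__; the name is only evaluated
# when use_align_label_probs is set and neither use_token_label_probs nor use_sentence_label_probs
# is, in which case it must exist in an enclosing scope or a NameError is raised.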
self.use_average_representations = use_average_representations
self.n_tokens = 0
self.n_cs_tokens = 0
self.r1_lambda = r1_lambda
self.r1_on_unswitched_tokens = r1_on_unswitched_tokens
self.original_loss = original_loss
self.noised_loss = noised_loss
self.enable_r1_loss = enable_r1_loss
self.disable_backward_kl = disable_backward_kl
self.use_align_label_probs = use_align_label_probs
self.use_sentence_label_probs = use_sentence_label_probs
self.use_token_label_probs = use_token_label_probs
self.max_seq_length = max_seq_length
self.noised_max_seq_length = noised_max_seq_length
self.overall_ratio = overall_ratio
self.enable_bpe_switch = enable_bpe_switch
self.bpe_switch_ratio = bpe_switch_ratio / self.overall_ratio
assert not self.enable_bpe_switch or self.bpe_switch_ratio <= 1.0
self.tokenizer_dir = tokenizer_dir
self.tokenizer_languages = tokenizer_languages
self.enable_bpe_sampling = enable_bpe_sampling
self.bpe_sampling_ratio = bpe_sampling_ratio / self.overall_ratio
assert not self.enable_bpe_sampling or self.bpe_sampling_ratio <= 1.0
self.tokenizer = tokenizer
self.sampling_alpha = sampling_alpha
self.sampling_nbest_size = sampling_nbest_size
self.enable_random_noise = enable_random_noise
self.detach_embeds = detach_embeds
self.noise_eps = noise_eps
self.noise_type = noise_type
self.enable_code_switch = enable_code_switch
self.code_switch_ratio = code_switch_ratio / self.overall_ratio
assert not self.enable_code_switch or self.code_switch_ratio <= 1.0
self.dict_dir = dict_dir
self.dict_languages = []
self.lang2dict = {}
for lang in dict_languages:
# dict_path = os.path.join(self.dict_dir, "{}2.txt".format(lang))
dict_path = os.path.join(self.dict_dir, "en-{}.txt".format(lang))
if not os.path.exists(dict_path):
logger.info("dictionary en-{} doesn't exist.".format(lang))
continue
self.dict_languages.append(lang)
logger.info("reading dictionary from {}".format(dict_path))
with open(dict_path, "r", encoding="utf-8") as reader:
raw = reader.readlines()
self.lang2dict[lang] = {}
for line in raw:
line = line.strip()
try:
src, tgt = line.split("\t")
except:
src, tgt = line.split(" ")
if src not in self.lang2dict[lang]:
self.lang2dict[lang][src] = [tgt]
else:
self.lang2dict[lang][src].append(tgt)
self.lang2tokenizer = {}
for lang in tokenizer_languages:
self.lang2tokenizer[lang] = XLMRobertaTokenizer.from_pretrained(
os.path.join(tokenizer_dir, "{}".format(lang)), do_lower_case=do_lower_case)
self.translation_path = translation_path
self.translate_languages = translate_languages
self.augment_method = augment_method
self.enable_data_augmentation = enable_data_augmentation
if self.enable_data_augmentation and self.augment_method == "mt":
drop_languages = ["en", "zh-CN", "zh", "ja", "ko", "th", "my", "ml", "ta"]
for lang in drop_languages:
if lang in self.translate_languages:
self.translate_languages.remove(lang)
# self.translate_languages = ["de"]
self.src2tgt = {}
logger.info("Reading translation from {}".format(self.translation_path))
with open(self.translation_path, encoding="utf-8") as f:
line_cnt = 0
for line in f:
# if line_cnt == 100:
# exit(0)
line_cnt += 1
if line_cnt % 10000 == 0:
print("Reading lines {}".format(line_cnt))
items = line.split("\t")
if len(items) == 3:
src_sent, tgt_lang, tgt_sent = line.split("\t")
alignment = None
else:
src_sent, tgt_lang, tgt_sent, alignment_str = line.split("\t")
alignment = []
for x in alignment_str.split(" "):
alignment.append((int(x.split("/")[0]), int(x.split("/")[1])))
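# Each alignment entry has the form "src_index/tgt_index"; the pairs are later merged with
# union-find so that mutually aligned words share one pooling id on both sides.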
if tgt_lang in drop_languages:
continue
if self.translate_languages is not None and tgt_lang not in self.translate_languages:
continue
if src_sent not in self.src2tgt:
self.src2tgt[src_sent] = []
if alignment is not None:
n_src = len(src_sent.split(" "))
n_tgt = len(tgt_sent.split(" "))
parent = list(range(0, n_src + n_tgt))
for x in alignment:
x_src = x[0]
x_tgt = x[1] + n_src
if get_root(x_src, parent) != get_root(x_tgt, parent):
parent[x_src] = get_root(x_tgt, parent)
cnt = [0] * (n_src + n_tgt)
for i in range(n_src + n_tgt):
cnt[get_root(i, parent)] += 1
align_pooling_id = [0] * (n_src + n_tgt)
root2id = {}
for i in range(n_src + n_tgt):
if cnt[get_root(i, parent)] == 1:
continue
if not get_root(i, parent) in root2id:
root2id[get_root(i, parent)] = len(root2id) + 1
align_pooling_id[i] = root2id[get_root(i, parent)]
# print(align_pooling_id[:n_src], align_pooling_id[n_src:])
self.src2tgt[src_sent].append(
(tgt_lang, tgt_sent, (align_pooling_id[:n_src], align_pooling_id[n_src:])))
else:
self.src2tgt[src_sent].append(
(tgt_lang, tgt_sent, None))
# print(align_pooling_id[:n_src], align_pooling_id[n_src:])
self.enable_data_augmentation = enable_data_augmentation
self.augment_ratio = augment_ratio
self.r2_lambda = r2_lambda
self.use_hard_labels = use_hard_labels
self.label_list = label_list
self.cls_token_at_end = False
self.cls_token = self.tokenizer.cls_token
self.cls_token_segment_id = 0
self.sep_token = self.tokenizer.sep_token
self.sep_token_extra = True
self.pad_on_left = False
self.pad_token = self.tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
self.pad_token_segment_id = 0
self.pad_token_label_id = pad_token_label_id
self.sequence_a_segment_id = 0
self.mask_padding_with_zero = True
def augment_examples(self, examples):
n_augment = math.ceil(len(examples) * self.augment_ratio)
augment_examples = []
while n_augment > 0:
examples = copy.deepcopy(examples)
augment_examples += examples[:n_augment]
n_augment -= len(examples[:n_augment])
random.shuffle(examples)
return augment_examples
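# Illustrative behaviour (descriptive comment): with 100 training examples and augment_ratio=0.5,
# ceil(100 * 0.5) = 50 additional copies are returned; with augment_ratio=2.0 the example list is
# traversed twice (reshuffled between passes), yielding 200 copies in total.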
def get_translate_data(self, examples):
translate_examples = []
n_unfound = 0
for ex_idx, example in enumerate(examples):
src_sent = " ".join(example.words)
if src_sent not in self.src2tgt:
logger.info("sentence || {} || is not found in translate data".format(src_sent))
tgt_sent = src_sent
tgt_lang = "en"
align_pooling_id = (
list(range(1, len(src_sent.split(" ")) + 1)), list(range(1, len(src_sent.split(" ")) + 1)))
n_unfound += 1
else:
# assert src_sent in self.src2tgt
idx = random.randint(0, len(self.src2tgt[src_sent]) - 1)
tgt_lang, tgt_sent, align_pooling_id = self.src2tgt[src_sent][idx]
words = tgt_sent.split(" ")
# print(len(words))
labels = ['<unk_label>'] * len(words)
translate_examples.append(InputExample(ex_idx, words, labels, langs=tgt_lang))
logger.info("{} sentences unfound.".format(n_unfound))
return translate_examples
def get_noised_dataset(self, examples):
# maybe do not save augmented examples
examples = copy.deepcopy(examples)
is_augmented = [0] * len(examples)
if self.enable_data_augmentation:
augment_examples = self.augment_examples(examples)
if self.augment_method == "mt":
assert not self.enable_code_switch
augment_examples = self.get_translate_data(augment_examples)
is_augmented += [1] * len(augment_examples)
examples += augment_examples
if self.enable_code_switch:
self.n_tokens = 0
self.n_cs_tokens = 0
dataset = self.convert_examples_to_dataset(examples, is_augmented)
if self.enable_code_switch:
logger.info("{:.2f}% tokens have been code-switched.".format(self.n_cs_tokens / self.n_tokens * 100))
return dataset, None
def tokenize_token(self, token, switch_text=False, enable_code_switch=False, enable_bpe_switch=False,
enable_bpe_sampling=False):
switch_token = random.random() <= self.overall_ratio
is_switched = False
self.n_tokens += 1
if enable_code_switch and switch_text and switch_token and random.random() <= self.code_switch_ratio:
lang = self.dict_languages[random.randint(0, len(self.dict_languages) - 1)]
if token.lower() in self.lang2dict[lang]:
self.n_cs_tokens += 1
token = self.lang2dict[lang][token.lower()][
random.randint(0, len(self.lang2dict[lang][token.lower()]) - 1)]
is_switched = True
if enable_bpe_switch and switch_text and switch_token and random.random() <= self.bpe_switch_ratio:
lang = self.tokenizer_languages[random.randint(0, len(self.tokenizer_languages) - 1)]
tokenizer = self.lang2tokenizer[lang]
is_switched = True
else:
tokenizer = self.tokenizer
if enable_bpe_sampling and switch_text and switch_token and random.random() <= self.bpe_sampling_ratio:
sub_tokens = tokenizer.tokenize(token, nbest_size=self.sampling_nbest_size,
alpha=self.sampling_alpha)
is_switched = True
else:
sub_tokens = tokenizer.tokenize(token)
return sub_tokens, is_switched
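# tokenize_token perturbs a word only when random.random() <= overall_ratio; the individual methods
# are then applied with their own ratios: code-switch substitutes a bilingual-dictionary translation,
# bpe-switch tokenizes the word with another language's tokenizer, and bpe-sampling draws a
# stochastic sentencepiece segmentation (controlled by sampling_nbest_size / sampling_alpha).
# The caller passes switch_text=False to obtain the unperturbed view of the same word.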
def convert_examples_to_dataset(self, examples, is_augmented):
all_original_input_ids = []
all_original_input_mask = []
all_original_segment_ids = []
all_original_label_ids = []
all_original_pooling_ids = []
all_original_r1_mask = []
all_noised_input_ids = []
all_noised_input_mask = []
all_noised_segment_ids = []
all_noised_label_ids = []
all_noised_pooling_ids = []
all_noised_r1_mask = []
all_is_augmented = []
label_map = {label: i for i, label in enumerate(self.label_list)}
for (ex_index, example) in enumerate(examples):
if ex_index % 1000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len(examples)))
noised_tokens = []
original_tokens = []
noised_label_ids = []
original_label_ids = []
noised_pooling_ids = []
original_pooling_ids = []
noised_r1_mask = []
original_r1_mask = []
switch_text = True
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if self.sep_token_extra else 2
for word, label in zip(example.words, example.labels):
noised_word_tokens, noised_is_switched = self.tokenize_token(word, switch_text=switch_text,
enable_code_switch=self.enable_code_switch,
enable_bpe_switch=self.enable_bpe_switch,
enable_bpe_sampling=self.enable_bpe_sampling)
if self.enable_data_augmentation and is_augmented[ex_index]:
if self.augment_method == "cs":
original_word_tokens, original_is_switched = self.tokenize_token(word, switch_text=switch_text,
enable_code_switch=True)
elif self.augment_method == "ss":
original_word_tokens, original_is_switched = self.tokenize_token(word, switch_text=switch_text,
enable_bpe_sampling=True)
elif self.augment_method == "mt" or self.augment_method == "gn":
original_word_tokens, original_is_switched = self.tokenize_token(word, switch_text=False)
else:
assert False
else:
original_word_tokens, original_is_switched = self.tokenize_token(word, switch_text=False)
is_switched = noised_is_switched or original_is_switched
if len(word) != 0 and len(original_word_tokens) == 0:
original_word_tokens = [self.tokenizer.unk_token]
if len(word) != 0 and len(noised_word_tokens) == 0:
noised_word_tokens = [self.tokenizer.unk_token]
if len(noised_word_tokens) == 0 or len(original_word_tokens) == 0:
continue
noised_tokens.extend(noised_word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
noised_label_ids.extend([label_map.get(label, self.pad_token_label_id)] + [self.pad_token_label_id] * (
len(noised_word_tokens) - 1))
noised_pooling_ids.extend([len(noised_pooling_ids) + 1] * len(noised_word_tokens))
original_tokens.extend(original_word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
original_label_ids.extend(
[label_map.get(label, self.pad_token_label_id)] + [self.pad_token_label_id] * (
len(original_word_tokens) - 1))
original_pooling_ids.extend([len(original_pooling_ids) + 1] * len(original_word_tokens))
if is_switched and self.r1_on_unswitched_tokens:
noised_r1_mask.extend([0] + [0] * (len(noised_word_tokens) - 1))
else:
noised_r1_mask.extend([1] + [0] * (len(noised_word_tokens) - 1))
if is_switched and self.r1_on_unswitched_tokens:
original_r1_mask.extend([0] + [0] * (len(original_word_tokens) - 1))
else:
original_r1_mask.extend([1] + [0] * (len(original_word_tokens) - 1))
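# r1_mask is 1 only on the first sub-token of each word (and forced to 0 on switched words when
# r1_on_unswitched_tokens is set), so the R1 consistency loss is computed word-by-word on positions
# that exist in both the original and the noised sequence.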
break_flag = False
if len(noised_tokens) >= self.noised_max_seq_length - special_tokens_count:
logger.info('truncate noised token {} {} {}'.format(len(noised_tokens), self.noised_max_seq_length,
special_tokens_count))
noised_tokens = noised_tokens[:(self.noised_max_seq_length - special_tokens_count)]
noised_label_ids = noised_label_ids[:(self.noised_max_seq_length - special_tokens_count)]
noised_pooling_ids = noised_pooling_ids[:(self.noised_max_seq_length - special_tokens_count)]
noised_r1_mask = noised_r1_mask[:(self.noised_max_seq_length - special_tokens_count)]
break_flag = True
if len(original_tokens) >= self.max_seq_length - special_tokens_count:
logger.info('truncate original token {} {} {}'.format(len(original_tokens), self.max_seq_length,
special_tokens_count))
original_tokens = original_tokens[:(self.max_seq_length - special_tokens_count)]
original_label_ids = original_label_ids[:(self.max_seq_length - special_tokens_count)]
original_pooling_ids = original_pooling_ids[:(self.max_seq_length - special_tokens_count)]
original_r1_mask = original_r1_mask[:(self.max_seq_length - special_tokens_count)]
break_flag = True
if break_flag:
break
assert len(noised_tokens) <= self.noised_max_seq_length - special_tokens_count
original_tokens += [self.sep_token]
original_label_ids += [self.pad_token_label_id]
original_pooling_ids += [0]
original_r1_mask += [0]
noised_tokens += [self.sep_token]
noised_label_ids += [self.pad_token_label_id]
noised_pooling_ids += [0]
noised_r1_mask += [0]
if self.sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
noised_tokens += [self.sep_token]
noised_label_ids += [self.pad_token_label_id]
noised_pooling_ids += [0]
noised_r1_mask += [0]
original_tokens += [self.sep_token]
original_label_ids += [self.pad_token_label_id]
original_pooling_ids += [0]
original_r1_mask += [0]
noised_segment_ids = [self.sequence_a_segment_id] * len(noised_tokens)
original_segment_ids = [self.sequence_a_segment_id] * len(original_tokens)
if self.cls_token_at_end:
noised_tokens += [self.cls_token]
noised_label_ids += [self.pad_token_label_id]
noised_segment_ids += [self.cls_token_segment_id]
noised_pooling_ids += [0]
noised_r1_mask += [0]
original_tokens += [self.cls_token]
original_label_ids += [self.pad_token_label_id]
original_segment_ids += [self.cls_token_segment_id]
original_pooling_ids += [0]
original_r1_mask += [0]
else:
noised_tokens = [self.cls_token] + noised_tokens
noised_label_ids = [self.pad_token_label_id] + noised_label_ids
noised_segment_ids = [self.cls_token_segment_id] + noised_segment_ids
noised_pooling_ids = [0] + noised_pooling_ids
noised_r1_mask = [0] + noised_r1_mask
original_tokens = [self.cls_token] + original_tokens
original_label_ids = [self.pad_token_label_id] + original_label_ids
original_segment_ids = [self.cls_token_segment_id] + original_segment_ids
original_pooling_ids = [0] + original_pooling_ids
original_r1_mask = [0] + original_r1_mask
noised_input_ids = self.tokenizer.convert_tokens_to_ids(noised_tokens)
original_input_ids = self.tokenizer.convert_tokens_to_ids(original_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
noised_input_mask = [1 if self.mask_padding_with_zero else 0] * len(noised_input_ids)
original_input_mask = [1 if self.mask_padding_with_zero else 0] * len(original_input_ids)
# Zero-pad up to the sequence length.
noised_padding_length = self.noised_max_seq_length - len(noised_input_ids)
original_padding_length = self.max_seq_length - len(original_input_ids)
if self.pad_on_left:
noised_input_ids = [self.pad_token] * noised_padding_length + noised_input_ids
noised_input_mask = [
0 if self.mask_padding_with_zero else 1] * noised_padding_length + noised_input_mask
noised_segment_ids = [self.pad_token_segment_id] * noised_padding_length + noised_segment_ids
noised_label_ids = [self.pad_token_label_id] * noised_padding_length + noised_label_ids
noised_pooling_ids = ([0] * noised_padding_length) + noised_pooling_ids
noised_r1_mask = [0] * noised_padding_length + noised_r1_mask
original_input_ids = [self.pad_token] * original_padding_length + original_input_ids
original_input_mask = [
0 if self.mask_padding_with_zero else 1] * original_padding_length + original_input_mask
original_segment_ids = [self.pad_token_segment_id] * original_padding_length + original_segment_ids
original_label_ids = [self.pad_token_label_id] * original_padding_length + original_label_ids
original_pooling_ids = ([0] * original_padding_length) + original_pooling_ids
original_r1_mask = [0] * original_padding_length + original_r1_mask
else:
noised_input_ids += [self.pad_token] * noised_padding_length
noised_input_mask += [0 if self.mask_padding_with_zero else 1] * noised_padding_length
noised_segment_ids += [self.pad_token_segment_id] * noised_padding_length
noised_label_ids += [self.pad_token_label_id] * noised_padding_length
noised_pooling_ids += ([0] * noised_padding_length)
noised_r1_mask += [0] * noised_padding_length
original_input_ids += [self.pad_token] * original_padding_length
original_input_mask += [0 if self.mask_padding_with_zero else 1] * original_padding_length
original_segment_ids += [self.pad_token_segment_id] * original_padding_length
original_label_ids += [self.pad_token_label_id] * original_padding_length
original_pooling_ids += ([0] * original_padding_length)
original_r1_mask += [0] * original_padding_length
assert sum(noised_r1_mask) == sum(original_r1_mask)
assert len(noised_input_ids) == self.noised_max_seq_length
assert len(noised_input_mask) == self.noised_max_seq_length
assert len(noised_segment_ids) == self.noised_max_seq_length
assert len(noised_label_ids) == self.noised_max_seq_length
assert len(noised_pooling_ids) == self.noised_max_seq_length
assert len(original_input_ids) == self.max_seq_length
assert len(original_input_mask) == self.max_seq_length
assert len(original_segment_ids) == self.max_seq_length
assert len(original_label_ids) == self.max_seq_length
assert len(original_pooling_ids) == self.max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("original_tokens: %s", " ".join([str(x) for x in original_tokens]))
logger.info("original_input_ids: %s", " ".join([str(x) for x in original_input_ids]))
logger.info("original_input_mask: %s", " ".join([str(x) for x in original_input_mask]))
logger.info("original_segment_ids: %s", " ".join([str(x) for x in original_segment_ids]))
logger.info("original_label_ids: %s", " ".join([str(x) for x in original_label_ids]))
logger.info("original_pooling_ids: %s", " ".join([str(x) for x in original_pooling_ids]))
logger.info("original_r1_mask: %s", " ".join([str(x) for x in original_r1_mask]))
logger.info("noised_tokens: %s", " ".join([str(x) for x in noised_tokens]))
logger.info("noised_input_ids: %s", " ".join([str(x) for x in noised_input_ids]))
logger.info("noised_input_mask: %s", " ".join([str(x) for x in noised_input_mask]))
logger.info("noised_segment_ids: %s", " ".join([str(x) for x in noised_segment_ids]))
logger.info("noised_label_ids: %s", " ".join([str(x) for x in noised_label_ids]))
logger.info("noised_pooling_ids: %s", " ".join([str(x) for x in noised_pooling_ids]))
logger.info("noised_r1_mask: %s", " ".join([str(x) for x in noised_r1_mask]))
all_noised_input_ids += [noised_input_ids]
all_noised_input_mask += [noised_input_mask]
all_noised_segment_ids += [noised_segment_ids]
all_noised_label_ids += [noised_label_ids]
all_noised_pooling_ids += [noised_pooling_ids]
all_noised_r1_mask += [noised_r1_mask]
all_original_input_ids += [original_input_ids]
all_original_input_mask += [original_input_mask]
all_original_segment_ids += [original_segment_ids]
all_original_label_ids += [original_label_ids]
all_original_pooling_ids += [original_pooling_ids]
all_original_r1_mask += [original_r1_mask]
all_is_augmented += [is_augmented[ex_index]]
# Convert to Tensors and build dataset
all_noised_input_ids = torch.tensor(all_noised_input_ids, dtype=torch.long)
all_noised_input_mask = torch.tensor(all_noised_input_mask, dtype=torch.long)
all_noised_segment_ids = torch.tensor(all_noised_segment_ids, dtype=torch.long)
all_noised_label_ids = torch.tensor(all_noised_label_ids, dtype=torch.long)
all_noised_pooling_ids = torch.tensor(all_noised_pooling_ids, dtype=torch.long)
all_noised_r1_mask = torch.tensor(all_noised_r1_mask, dtype=torch.long)
all_original_input_ids = torch.tensor(all_original_input_ids, dtype=torch.long)
all_original_input_mask = torch.tensor(all_original_input_mask, dtype=torch.long)
all_original_segment_ids = torch.tensor(all_original_segment_ids, dtype=torch.long)
all_original_label_ids = torch.tensor(all_original_label_ids, dtype=torch.long)
all_original_pooling_ids = torch.tensor(all_original_pooling_ids, dtype=torch.long)
all_original_r1_mask = torch.tensor(all_original_r1_mask, dtype=torch.long)
all_is_augmented = torch.tensor(all_is_augmented)
# print(all_noised_r1_mask.sum(), all_original_r1_mask.sum())
assert all_noised_r1_mask.sum() == all_original_r1_mask.sum()
dataset = TensorDataset(all_original_input_ids, all_original_input_mask, all_original_segment_ids,
all_original_label_ids, all_original_pooling_ids,
all_noised_input_ids, all_noised_input_mask, all_noised_segment_ids,
all_noised_label_ids, all_noised_pooling_ids, all_noised_r1_mask,
all_original_r1_mask, all_is_augmented)
return dataset
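# The tensor order in the returned TensorDataset (original ids / mask / segments / labels / pooling,
# then the noised counterparts, the two r1 masks and the is_augmented flag) is the order in which
# downstream training code is expected to unpack each batch.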
def load_translate_data(self, examples):
n_unfound = 0
translate_examples = []
all_align_pooling_ids = []
for ex_idx, example in enumerate(examples):
src_sent = " ".join(example.words)
if src_sent not in self.src2tgt:
logger.info("sentence || {} || is not found in translate data".format(src_sent))
tgt_sent = src_sent
tgt_lang = "en"
align_pooling_id = (
list(range(1, len(src_sent.split(" ")) + 1)), list(range(1, len(src_sent.split(" ")) + 1)))
n_unfound += 1
else:
# assert src_sent in self.src2tgt
idx = random.randint(0, len(self.src2tgt[src_sent]) - 1)
tgt_lang, tgt_sent, align_pooling_id = self.src2tgt[src_sent][idx]
words = tgt_sent.split(" ")
# print(len(words))
labels = ['O'] * len(words)
translate_examples.append(InputExample(ex_idx, words, labels, langs=tgt_lang))
# print(align_pooling_id)
all_align_pooling_ids.append(align_pooling_id)
print("{} sentences unfound.".format(n_unfound))
features = convert_examples_to_features(translate_examples, self.label_list, self.max_seq_length,
self.tokenizer,
cls_token_at_end=self.cls_token_at_end,
cls_token=self.cls_token,
cls_token_segment_id=self.cls_token_segment_id,
sep_token=self.sep_token,
sep_token_extra=self.sep_token_extra,
pad_on_left=self.pad_on_left,
pad_token=self.pad_token,
pad_token_segment_id=self.pad_token_segment_id,
pad_token_label_id=self.pad_token_label_id,
lang=None)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_pooling_ids = torch.tensor([f.pooling_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
# not used under this setting
all_noised_r1_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_original_r1_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids,
all_noised_r1_mask, all_original_r1_mask, all_label_ids)
return dataset, all_align_pooling_ids
def load_translate_data_by_batch(self, examples, train_batch_size):
translate_languages = self.translate_languages
language_cnt = [0] * len(translate_languages)
pass
def get_train_steps(self, examples, args):
n_augment_examples = math.ceil(len(examples) * (1 + self.augment_ratio))
augment_steps = math.ceil(n_augment_examples / args.train_batch_size) // args.gradient_accumulation_steps
if args.max_steps > 0:
t_total = args.max_steps
assert False
else:
t_total = augment_steps * args.num_train_epochs
return t_total
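# Illustrative arithmetic with hypothetical values: 10,000 examples, augment_ratio=1.0,
# train_batch_size=32, gradient_accumulation_steps=1 and num_train_epochs=5 give
# ceil(10000 * 2.0) = 20000 augmented examples -> ceil(20000 / 32) = 625 steps per epoch
# -> t_total = 625 * 5 = 3125 optimization steps.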
def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, labels, pad_token_label_id,
noised_data_generator=None):
"""Train the model."""
if args.local_rank in [-1, 0]:
tb_log_dir = os.getenv("PHILLY_JOB_DIRECTORY", None)
tb_writer = SummaryWriter(log_dir=tb_log_dir)
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if noised_data_generator is not None and noised_data_generator.enable_data_augmentation:
t_total = noised_data_generator.get_train_steps(train_examples, args)
else:
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# args.warmup_steps == -1 means 0.1 warmup ratio
if args.warmup_steps == -1:
args.warmup_steps = int(t_total * 0.1)
logger.info("Warmup steps: %d" % args.warmup_steps)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
best_score = 0.0
best_checkpoint = None
patience = 0
global_step = 0
tr_loss, logging_loss, best_avg = 0.0, 0.0, 0.0
tr_original_loss, logging_original_loss = 0.0, 0.0
tr_noised_loss, logging_noised_loss = 0.0, 0.0
tr_r1_loss, logging_r1_loss = 0.0, 0.0
tr_r2_loss, logging_r2_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args)  # Add here for reproducibility (even between python 2 and 3)
def logging(eval=False):
results = None
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank in [-1, 0] and args.evaluate_during_training and eval:
results = evaluate(args, model, tokenizer, labels, pad_token_label_id)
for key, value in results.items():
logger.info("eval_{}: {}".format(key, value))
# for key, value in results.items():
# tb_writer.add_scalar("eval_{}".format(key), value, global_step)
log_writer.write("{0}\t{1}".format(global_step, json.dumps(results)) + '\n')
log_writer.flush()
logger.info(
"global_step: {}, lr: {:.6f}, loss: {:.6f}, original_loss: {:.6f}, noised_loss: {:.6f}, r1_loss: {:.6f}, r2_loss: {:.6f}".format(
global_step, scheduler.get_lr()[0], (tr_loss - logging_loss) / args.logging_steps,
(tr_original_loss - logging_original_loss) / args.logging_steps,
(tr_noised_loss - logging_noised_loss) / args.logging_steps,
(tr_r1_loss - logging_r1_loss) / args.logging_steps,
(tr_r2_loss - logging_r2_loss) / args.logging_steps))
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("original_loss", (tr_original_loss - logging_original_loss) / args.logging_steps,
global_step)
tb_writer.add_scalar("noised_loss", (tr_noised_loss - logging_noised_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r1_loss", (tr_r1_loss - logging_r1_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r2_loss", (tr_r2_loss - logging_r2_loss) / args.logging_steps, global_step)
return results
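# Note: the quantities logged above are windowed averages, e.g. with logging_steps=50
# (hypothetical value), (tr_loss - logging_loss) / 50 is the mean loss over the last 50
# optimizer updates; logging_loss is reset to tr_loss after each logging call below.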
for _ in train_iterator:
if noised_data_generator is not None:
assert noised_data_generator.enable_r1_loss or noised_data_generator.noised_loss or noised_data_generator.enable_data_augmentation
noised_train_dataset, all_align_pooling_ids = noised_data_generator.get_noised_dataset(train_examples)
train_sampler = RandomSampler(noised_train_dataset) if args.local_rank == -1 else DistributedSampler(
noised_train_dataset)
train_dataloader = DataLoader(noised_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
# if not args.max_steps > 0:
# assert t_total == len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
for step, batch in enumerate(epoch_iterator):
model.train()
if first_stage_model is not None:
first_stage_model.eval()
batch = tuple(t.to(args.device) for t in batch if t is not None)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"pooling_ids": batch[4]}
if args.model_type != "distilbert":
# XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
# if args.model_type == "xlm":
# inputs["langs"] = batch[5]
if first_stage_model is not None:
with torch.no_grad():
inputs["first_stage_model_logits"] = first_stage_model(**inputs)[1]
# if noised_data_generator is not None and noised_data_generator.enable_r1_loss and \
# noised_data_generator.enable_translate_data and noised_data_generator.use_align_label_probs:
# inputs.update({"src_pooling_ids": batch[-2],
# "tgt_pooling_ids": batch[-1]})
# batch = batch[:-2]
if noised_data_generator is not None:
inputs.update({"noised_input_ids": batch[5],
"noised_attention_mask": batch[6],
"noised_token_type_ids": None,
"noised_labels": batch[8],
"noised_pooling_ids": batch[9],
"noised_r1_mask": batch[10],
"original_r1_mask": batch[11],
"is_augmented": batch[12]})
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if noised_data_generator is not None:
original_loss, noised_loss, r1_loss, r2_loss = outputs[1:5]
if args.n_gpu > 1:
original_loss = original_loss.mean()
noised_loss = noised_loss.mean()
r1_loss = r1_loss.mean()
r2_loss = r2_loss.mean()
if args.gradient_accumulation_steps > 1:
original_loss = original_loss / args.gradient_accumulation_steps
noised_loss = noised_loss / args.gradient_accumulation_steps
r1_loss = r1_loss / args.gradient_accumulation_steps
r2_loss = r2_loss / args.gradient_accumulation_steps
tr_original_loss += original_loss.item()
tr_noised_loss += noised_loss.item()
tr_r1_loss += r1_loss.item()
tr_r2_loss += r2_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()  # Update learning rate schedule (after optimizer.step(), per PyTorch >= 1.1)
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
do_eval = args.evaluate_steps > 0 and global_step % args.evaluate_steps == 0
cur_result = logging(do_eval)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
if do_eval:
print(cur_result)
if cur_result["dev_avg"]["f1"] > best_score:
logger.info(
"result['f1']={} > best_score={}".format(cur_result["dev_avg"]["f1"], best_score))
best_score = cur_result["dev_avg"]["f1"]
# Save the best model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-best")
best_checkpoint = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving the best model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
logger.info("Reset patience to 0")
patience = 0
else:
patience += 1
logger.info("Hit patience={}".format(patience))
if args.eval_patience > 0 and patience > args.eval_patience:
logger.info("early stop! patience={}".format(patience))
epoch_iterator.close()
train_iterator.close()
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / global_step
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / global_step
def predict(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="", lang="en", print_result=True):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"pooling_ids": batch[4]}
if args.model_type != "distilbert":
# XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[5]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if nb_eval_steps == 0:
results = {k: 0 for k in ["loss", "precision", "recall", "f1"]}
preds_list = []
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
if print_result:
logger.info("***** Evaluation result %s in %s *****" % (prefix, lang))
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def evaluate(args, model, tokenizer, labels, pad_token_label_id, prefix=""):
# eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
eval_datasets = []
eval_langs = args.predict_langs.split(',')
splits = ["dev", "test"] if args.do_train else ["test"]
for split in splits:
for lang in eval_langs:
eval_datasets.append((split, lang))
all_languages_results = {}
# leave interface for multi-task evaluation
# eval_task = eval_task_names[0]
eval_output_dir = eval_outputs_dirs[0]
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
for split, lang in eval_datasets:
task_name = "{0}-{1}".format(split, lang)
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=split, lang=lang)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"pooling_ids": batch[4]}
if args.model_type != "distilbert":
# XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[5]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if nb_eval_steps == 0:
results = {k: 0 for k in ["precision", "recall", "f1"]}
continue
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
all_languages_results["{0}_{1}".format(split, lang)] = results
for split in splits:
all_languages_results["{0}_avg".format(split)] = average_dic(
[value for key, value in all_languages_results.items() if split in key])
return all_languages_results
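# The returned dict holds one entry per "{split}_{lang}" pair plus a "{split}_avg"
# macro-average per split; train() early-stops on results["dev_avg"]["f1"].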
def average_dic(dic_list):
if len(dic_list) == 0:
return {}
dic_sum = {}
for dic in dic_list:
if len(dic_sum) == 0:
for key, value in dic.items():
dic_sum[key] = value
else:
assert set(dic_sum.keys()) == set(dic.keys()), "sum_keys:{0}, dic_keys:{1}".format(set(dic_sum.keys()),
set(dic.keys()))
for key, value in dic.items():
dic_sum[key] += value
for key in dic_sum:
dic_sum[key] /= len(dic_list)
return dic_sum
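# Illustrative usage of average_dic with hypothetical values:
#   average_dic([{"f1": 0.80, "precision": 0.82}, {"f1": 0.70, "precision": 0.74}])
#   -> {"f1": 0.75, "precision": 0.78}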
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, few_shot=-1,
return_examples=False):
# Make sure only the first process in distributed training processes
# the dataset, and the others will use the cache
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier()
# Load data features from cache or dataset file
model_name = "xlm-roberta-base"
if args.word_dropout_rate > 0:
assert mode != "train"
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}_wdr{}".format(mode, lang,
model_name,
str(args.max_seq_length),
str(
args.word_dropout_rate)))
else:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}".format(mode, lang,
model_name,
str(args.max_seq_length)))
cached_features_file += "_pooling"
if args.languages_without_spaces is not None and lang in args.languages_without_spaces.split(','):
cached_features_file += "_lws"
data_file = os.path.join(args.data_dir, lang, "{}.{}".format(mode, model_name))
logger.info("Creating features from dataset file at {} in language {}".format(data_file, lang))
examples = read_examples_from_file(data_file, lang)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("all languages = {}".format(lang))
features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta", "xlmr"]),
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[
0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
lang=lang,
languages_without_spaces=args.languages_without_spaces.split(
',') if args.languages_without_spaces is not None else None,
word_dropout_rate=args.word_dropout_rate,
)
if args.local_rank in [-1, 0]:
logger.info(
"Saving features into cached file {}, len(features)={}".format(cached_features_file, len(features)))
torch.save(features, cached_features_file)
# Make sure only the first process in distributed training processes
# the dataset, and the others will use the cache
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier()
if few_shot > 0 and mode == 'train':
logger.info("Original no. of examples = {}".format(len(features)))
features = features[: few_shot]
logger.info('Using few-shot learning on {} examples'.format(len(features)))
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_pooling_ids = torch.tensor([f.pooling_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
if args.model_type == 'xlm' and features[0].langs is not None:
all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)
logger.info('all_langs[0] = {}'.format(all_langs[0]))
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids,
all_langs)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids)
if return_examples:
return dataset, examples
else:
return dataset
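# ConcatDataset below concatenates per-language TensorDatasets field-by-field
# (input_ids, input_mask, segment_ids, label_ids, pooling_ids), e.g. one dataset
# per entry of --train_langs.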
def ConcatDataset(dataset_list):
all_input_ids = torch.cat([dataset.tensors[0] for dataset in dataset_list], dim=0)
all_input_mask = torch.cat([dataset.tensors[1] for dataset in dataset_list], dim=0)
all_segment_ids = torch.cat([dataset.tensors[2] for dataset in dataset_list], dim=0)
all_label_ids = torch.cat([dataset.tensors[3] for dataset in dataset_list], dim=0)
all_pooling_ids = torch.cat([dataset.tensors[4] for dataset in dataset_list], dim=0)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the training files for the NER/POS task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
# stable fine-tuning paramters
parser.add_argument("--overall_ratio", default=1.0, type=float, help="overall ratio")
parser.add_argument("--enable_r1_loss", action="store_true", help="Whether to enable r1 loss.")
parser.add_argument("--disable_backward_kl", action="store_true", help="Whether to disable backward kl loss.")
parser.add_argument("--r1_lambda", default=5.0, type=float, help="lambda of r1 loss")
parser.add_argument("--r1_on_unswitched_tokens", action="store_true",
help="Whether to enable r1 loss only on unswitched tokens.")
parser.add_argument("--original_loss", action="store_true",
help="Whether to use cross entropy loss on the former example.")
parser.add_argument("--noised_loss", action="store_true",
help="Whether to use cross entropy loss on the latter example.")
parser.add_argument("--noised_max_seq_length", default=256, type=int, help="noised max sequence length")
parser.add_argument("--enable_bpe_switch", action="store_true", help="Whether to enable bpe-switch.")
parser.add_argument("--bpe_switch_ratio", default=0.5, type=float, help="bpe_switch_ratio")
parser.add_argument("--tokenizer_dir", default=None, type=str, help="tokenizer dir")
parser.add_argument("--tokenizer_languages", default=None, type=str, help="tokenizer languages")
parser.add_argument("--enable_bpe_sampling", action="store_true", help="Whether to enable bpe sampling.")
parser.add_argument("--bpe_sampling_ratio", default=0.5, type=float, help="bpe_sampling_ratio")
parser.add_argument("--sampling_alpha", default=5.0, type=float, help="alpha of sentencepiece sampling")
parser.add_argument("--sampling_nbest_size", default=-1, type=int, help="nbest_size of sentencepiece sampling")
parser.add_argument("--enable_random_noise", action="store_true", help="Whether to enable random noise.")
parser.add_argument("--detach_embeds", action="store_true", help="Whether to detach noised embeddings.")
parser.add_argument("--noise_eps", default=1e-5, type=float, help="noise eps")
parser.add_argument('--noise_type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument("--enable_code_switch", action="store_true", help="Whether to enable code switch.")
parser.add_argument("--code_switch_ratio", default=0.5, type=float, help="code_switch_ratio")
parser.add_argument("--dict_dir", default=None, type=str, help="dict dir")
parser.add_argument("--dict_languages", default=None, type=str, help="dict languages")
parser.add_argument("--hidden_dropout_prob", default=None, type=float, help="hidden_dropout_prob")
parser.add_argument("--attention_probs_dropout_prob", default=None, type=float, help="attention_probs_dropout_prob")
parser.add_argument("--use_pooling_strategy", action="store_true", help="Whether to use pooling strategy.")
parser.add_argument("--use_sentence_label_probs", action="store_true",
help="Whether to use r1 loss on sentence-level label probs.")
parser.add_argument("--use_token_label_probs", action="store_true",
help="Whether to use r1 loss on token-level label probs.")
parser.add_argument("--use_average_representations", action="store_true",
help="Whether to use average representation.")
parser.add_argument("--translation_path", default=None, type=str, help="path to translation")
parser.add_argument("--translate_languages", default=None, type=str, help="translate languages")
parser.add_argument("--languages_without_spaces", default=None, type=str, help="languages without spaces")
parser.add_argument("--use_align_label_probs", action="store_true",
help="Whether to use r1 loss on align label probs.")
parser.add_argument("--enable_data_augmentation", action="store_true", help="Whether to enable data augmentation.")
parser.add_argument("--augment_ratio", default=1.0, type=float, help="augmentation ratio.")
parser.add_argument("--augment_method", default=None, type=str, required=False,
help="augment method")
parser.add_argument("--first_stage_model_path", default=None, type=str, required=False,
help="stable model path")
parser.add_argument("--r2_lambda", default=1.0, type=float, required=False,
help="r2_lambda")
parser.add_argument("--use_hard_labels", action="store_true", help="Whether to use hard labels.")
parser.add_argument("--word_dropout_rate", default=0.0, type=float, required=False, help="test word dropout rate")
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, NER/POS labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action="store_true",
help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true",
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true",
help="Whether to run predictions on the test set.")
parser.add_argument("--do_predict_dev", action="store_true",
help="Whether to run predictions on the dev set.")
parser.add_argument("--init_checkpoint", default=None, type=str,
help="initial checkpoint for train/predict")
parser.add_argument("--evaluate_during_training", action="store_true",
help="Whether to run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action="store_true",
help="Set this flag if you are using an uncased model.")
parser.add_argument("--few_shot", default=-1, type=int,
help="num of few-shot exampes")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--evaluate_steps", type=int, default=0, help="Log every X updates steps.")
parser.add_argument("--save_only_best_checkpoint", action="store_true",
help="Save only the best checkpoint during training")
parser.add_argument("--eval_all_checkpoints", action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action="store_true",
help="Avoid using CUDA when available")
parser.add_argument("--overwrite_output_dir", action="store_true",
help="Overwrite the content of the output directory")
parser.add_argument("--overwrite_cache", action="store_true",
help="Overwrite the cached training and evaluation sets")
parser.add_argument("--seed", type=int, default=42,
help="random seed for initialization")
parser.add_argument("--fp16", action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument("--fp16_opt_level", type=str, default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
parser.add_argument("--predict_langs", type=str, default="en", help="prediction languages")
parser.add_argument("--train_langs", default="en", type=str,
help="The languages in the training sets.")
parser.add_argument("--log_file", type=str, default=None, help="log file")
parser.add_argument("--eval_patience", type=int, default=-1,
help="wait N times of decreasing dev score before early stop during training")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:
# Initializes the distributed backend which synchronizes nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
# logging.basicConfig(handlers=[logging.FileHandler(args.log_file), logging.StreamHandler()],
# format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
# datefmt='%m/%d/%Y %H:%M:%S',
# level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logging.info("Input args: %r" % args)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare NER/POS task
labels = get_labels(args.labels)
logger.info(labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id
# so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
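# (this is -100, the default ignore_index of torch.nn.CrossEntropyLoss)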
# Load pretrained model and tokenizer
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.hidden_dropout_prob is not None:
config.hidden_dropout_prob = args.hidden_dropout_prob
if args.attention_probs_dropout_prob is not None:
config.attention_probs_dropout_prob = args.attention_probs_dropout_prob
if args.noised_loss or args.enable_r1_loss or args.enable_data_augmentation:
noised_data_generator = NoisedDataGenerator(
label_list=labels,
pad_token_label_id=pad_token_label_id,
r1_lambda=args.r1_lambda,
r1_on_unswitched_tokens=args.r1_on_unswitched_tokens,
enable_r1_loss=args.enable_r1_loss,
disable_backward_kl=args.disable_backward_kl,
use_sentence_label_probs=args.use_sentence_label_probs,
use_token_label_probs=args.use_token_label_probs,
original_loss=args.original_loss,
noised_loss=args.noised_loss,
max_seq_length=args.max_seq_length,
noised_max_seq_length=args.noised_max_seq_length,
overall_ratio=args.overall_ratio,
enable_bpe_switch=args.enable_bpe_switch,
bpe_switch_ratio=args.bpe_switch_ratio,
tokenizer_dir=args.tokenizer_dir,
do_lower_case=args.do_lower_case,
tokenizer_languages=args.tokenizer_languages.split(',') if args.tokenizer_languages is not None else [],
enable_bpe_sampling=args.enable_bpe_sampling,
bpe_sampling_ratio=args.bpe_sampling_ratio,
tokenizer=tokenizer,
sampling_alpha=args.sampling_alpha,
sampling_nbest_size=args.sampling_nbest_size,
enable_random_noise=args.enable_random_noise,
detach_embeds=args.detach_embeds,
noise_eps=args.noise_eps,
noise_type=args.noise_type,
enable_code_switch=args.enable_code_switch,
code_switch_ratio=args.code_switch_ratio,
dict_dir=args.dict_dir,
dict_languages=args.dict_languages.split(',') if args.dict_languages is not None else [],
use_average_representations=args.use_average_representations,
translation_path=args.translation_path,
translate_languages=args.translate_languages.split(
',') if args.translate_languages is not None else args.predict_langs.split(','),
use_align_label_probs=args.use_align_label_probs,
enable_data_augmentation=args.enable_data_augmentation,
augment_ratio=args.augment_ratio,
augment_method=args.augment_method,
r2_lambda=args.r2_lambda,
use_hard_labels=args.use_hard_labels,
)
else:
noised_data_generator = None
if args.first_stage_model_path is not None:
first_stage_model = model_class.from_pretrained(args.first_stage_model_path,
config=config,
use_pooling_strategy=args.use_pooling_strategy, )
else:
first_stage_model = None
if args.init_checkpoint:
logger.info("loading from init_checkpoint={}".format(args.init_checkpoint))
model = model_class.from_pretrained(args.init_checkpoint,
config=config,
noised_data_generator=noised_data_generator,
use_pooling_strategy=args.use_pooling_strategy,
cache_dir=args.init_checkpoint)
else:
logger.info("loading from cached model = {}".format(args.model_name_or_path))
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
noised_data_generator=noised_data_generator,
use_pooling_strategy=args.use_pooling_strategy,
cache_dir=args.cache_dir if args.cache_dir else None)
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
if first_stage_model is not None:
first_stage_model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
train_langs = args.train_langs.split(',')
dataset_list = []
train_examples = []
for lang in train_langs:
lg_train_dataset, lg_train_examples = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id,
mode="train", lang=lang,
few_shot=args.few_shot, return_examples=True)
dataset_list.append(lg_train_dataset)
train_examples += lg_train_examples
train_dataset = ConcatDataset(dataset_list)
global_step, tr_loss = train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, labels,
pad_token_label_id, noised_data_generator=noised_data_generator)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use default names for the model,
# you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
# Save model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", args.output_dir)
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Initialization for evaluation
results = {}
if args.init_checkpoint:
best_checkpoint = args.init_checkpoint
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):
best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')
else:
best_checkpoint = args.output_dir
best_f1 = 0
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
# tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case)
checkpoints = [best_checkpoint]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint, use_pooling_strategy=args.use_pooling_strategy)
model.to(args.device)
results = evaluate(args, model, tokenizer, labels, pad_token_label_id)
for key, value in results.items():
logger.info("eval_{}: {}".format(key, value))
log_writer = open(os.path.join(args.output_dir, "evaluate_wdr{}_logs.txt".format(args.word_dropout_rate)),
'w')
log_writer.write("{0}\t{1}".format("evaluate", json.dumps(results)) + '\n')
exit(0)
result, _ = predict(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step,
lang=args.train_langs)
if result["f1"] > best_f1:
best_checkpoint = checkpoint
best_f1 = result["f1"]
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
writer.write("best checkpoint = {}, best f1 = {}\n".format(best_checkpoint, best_f1))
# Prediction
if args.do_predict and args.local_rank in [-1, 0]:
logger.info("Loading the best checkpoint from {}\n".format(best_checkpoint))
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(best_checkpoint, use_pooling_strategy=args.use_pooling_strategy)
model.to(args.device)
output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
with open(output_test_results_file, "a") as result_writer:
for lang in args.predict_langs.split(','):
if not os.path.exists(os.path.join(args.data_dir, lang, 'test.{}'.format(args.model_name_or_path))):
logger.info("Language {} does not exist".format(lang))
continue
result, predictions = predict(args, model, tokenizer, labels, pad_token_label_id, mode="test",
lang=lang)
# Save results
result_writer.write("=====================\nlanguage={}\n".format(lang))
for key in sorted(result.keys()):
result_writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(args.output_dir, "test_{}_predictions.txt".format(lang))
infile = os.path.join(args.data_dir, lang, "test.{}".format(args.model_name_or_path))
idxfile = infile + '.idx'
save_predictions(args, predictions, output_test_predictions_file, infile, idxfile)
# Predict dev set
if args.do_predict_dev and args.local_rank in [-1, 0]:
logger.info("Loading the best checkpoint from {}\n".format(best_checkpoint))
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(best_checkpoint, use_pooling_strategy=args.use_pooling_strategy)
model.to(args.device)
output_test_results_file = os.path.join(args.output_dir, "dev_results.txt")
with open(output_test_results_file, "w") as result_writer:
for lang in args.predict_langs.split(','):
if not os.path.exists(os.path.join(args.data_dir, lang, 'dev.{}'.format(args.model_name_or_path))):
logger.info("Language {} does not exist".format(lang))
continue
result, predictions = predict(args, model, tokenizer, labels, pad_token_label_id, mode="dev",
lang=lang)
# Save results
result_writer.write("=====================\nlanguage={}\n".format(lang))
for key in sorted(result.keys()):
result_writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(args.output_dir, "dev_{}_predictions.txt".format(lang))
infile = os.path.join(args.data_dir, lang, "dev.{}".format(args.model_name_or_path))
idxfile = infile + '.idx'
save_predictions(args, predictions, output_test_predictions_file, infile, idxfile)
def save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):
# Save predictions
with open(text_file, "r") as text_reader, open(idx_file, "r") as idx_reader:
text = text_reader.readlines()
index = idx_reader.readlines()
assert len(text) == len(index)
# Sanity check on the predictions
with open(output_file, "w") as writer:
example_id = 0
prev_id = int(index[0])
for line, idx in zip(text, index):
if line == "" or line == "\n":
example_id += 1
else:
cur_id = int(idx)
output_line = '\n' if cur_id != prev_id else ''
if output_word_prediction:
output_line += line.split()[0] + '\t'
output_line += predictions[example_id].pop(0) + '\n'
writer.write(output_line)
prev_id = cur_id
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/xtune/src/run_tag.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for NER/POS tagging tasks."""
from __future__ import absolute_import, division, print_function
import logging
import os
import random
from io import open
from transformers import XLMTokenizer
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, langs=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.langs = langs
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids, pooling_ids, langs=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.pooling_ids = pooling_ids
self.langs = langs
def read_examples_from_file(file_path, lang, lang2id=None):
if not os.path.exists(file_path):
logger.info("[Warming] file {} not exists".format(file_path))
return []
guid_index = 1
examples = []
subword_len_counter = 0
if lang2id:
lang_id = lang2id.get(lang, lang2id['en'])
else:
lang_id = 0
logger.info("lang_id={}, lang={}, lang2id={}".format(lang_id, lang, lang2id))
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
langs = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid="{}-{}".format(lang, guid_index),
words=words,
labels=labels,
langs=langs))
guid_index += 1
words = []
labels = []
langs = []
subword_len_counter = 0
else:
print("guid_index", guid_index, words, langs, labels, subword_len_counter)
else:
splits = line.split("\t")
word = splits[0]
words.append(splits[0])
langs.append(lang_id)
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid="%s-%d".format(lang, guid_index),
words=words,
labels=labels,
langs=langs))
return examples
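# Input format expected by read_examples_from_file (illustrative CoNLL-style snippet):
#   EU<TAB>B-ORG
#   rejects<TAB>O
#   <blank line separates sentences>
# For mode="test" the label column may be missing; "O" is used as a placeholder label.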
def convert_examples_to_features(examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
lang='en',
languages_without_spaces=None,
word_dropout_rate=0):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 1000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
pooling_ids = []
for i, (word, label) in enumerate(zip(example.words, example.labels)):
if isinstance(tokenizer, XLMTokenizer):
word_tokens = tokenizer.tokenize(word, lang=lang)
else:
if languages_without_spaces is not None and lang in languages_without_spaces and i > 0:
word_tokens = tokenizer.tokenize(word, remove_space=True)
else:
word_tokens = tokenizer.tokenize(word)
if len(word) != 0 and len(word_tokens) == 0:
word_tokens = [tokenizer.unk_token]
if len(word_tokens) == 0:
continue
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
pooling_ids.extend([len(pooling_ids) + 1] * len(word_tokens))
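# pooling_ids groups subword tokens back into words: all subwords of a word share the
# same nonzero id (derived from the running token count), while special and padding
# tokens receive 0 and are excluded from word-level pooling. Illustrative alignment
# (hypothetical subword split) for words ["New", "York"] tokenized as ["_New", "_Yo", "rk"]:
#   label_ids   -> [B-LOC, I-LOC, pad_token_label_id]
#   pooling_ids -> [1, 2, 2]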
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
print('truncate token', len(tokens), max_seq_length, special_tokens_count)
tokens = tokens[:(max_seq_length - special_tokens_count)]
label_ids = label_ids[:(max_seq_length - special_tokens_count)]
pooling_ids = pooling_ids[:(max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
if word_dropout_rate > 0:
for i in range(len(tokens)):
if random.random() < word_dropout_rate:
tokens[i] = tokenizer.unk_token
tokens += [sep_token]
label_ids += [pad_token_label_id]
pooling_ids += [0]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
pooling_ids += [0]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
pooling_ids += [0]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
pooling_ids = [0] + pooling_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
pooling_ids = ([0] * padding_length) + pooling_ids
else:
input_ids += ([pad_token] * padding_length)
input_mask += ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids += ([pad_token_segment_id] * padding_length)
label_ids += ([pad_token_label_id] * padding_length)
pooling_ids += ([0] * padding_length)
if example.langs and len(example.langs) > 0:
langs = [example.langs[0]] * max_seq_length
else:
print('example.langs', example.langs, example.words, len(example.langs))
print('ex_index', ex_index, len(examples))
langs = None
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(pooling_ids) == max_seq_length
assert langs is None or len(langs) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
logger.info("pooling_ids: %s", " ".join([str(x) for x in pooling_ids]))
logger.info("langs: {}".format(langs))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
pooling_ids=pooling_ids,
langs=langs))
return features
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
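# Illustrative labels file for get_labels (one label per line), e.g. for NER:
#   B-PER
#   I-PER
#   B-ORG
#   I-ORG
#   B-LOC
#   I-LOC
#   O
# "O" is prepended automatically if it is missing from the file.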
| EXA-1-master | exa/models/unilm-master/xtune/src/utils_tag.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on classification (Bert, DistilBERT, XLM, XLM-R). Adapted from `examples/run_glue.py`"""
import argparse
import glob
import logging
import os
import random
import json
import copy
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, ConcatDataset, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassificationStable,
XLMRobertaTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import xtreme_convert_examples_to_features as convert_examples_to_features
from transformers import xtreme_compute_metrics as compute_metrics
from transformers import xtreme_output_modes as output_modes
from transformers import xtreme_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(tuple(conf.pretrained_config_archive_map.keys()) for conf in
(BertConfig, DistilBertConfig, XLMConfig, XLMRobertaConfig)), ()
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
"xlmr": (XLMRobertaConfig, XLMRobertaForSequenceClassificationStable, XLMRobertaTokenizer)
}
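# NoisedDataGenerator builds "noised" views of the training examples for xTune-style
# consistency training: code-switching via bilingual dictionaries, tokenizer (BPE) switching,
# subword sampling, word dropout, and translated text, optionally combined with data
# augmentation. The resulting dataset pairs each original example with its noised
# counterpart so the model can apply the R1/R2 consistency losses during fine-tuning.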
class NoisedDataGenerator(object):
def __init__(self,
task_name="xnli",
enable_r1_loss=False,
r1_lambda=5.0,
original_loss=True,
noised_loss=False,
max_length=512,
overall_ratio=1.0,
enable_bpe_switch=False,
bpe_switch_ratio=0.5,
tokenizer_dir=None,
do_lower_case=False,
tokenizer_languages=None,
enable_bpe_sampling=False,
tokenizer=None,
bpe_sampling_ratio=0.5,
sampling_alpha=0.3,
sampling_nbest_size=-1,
enable_random_noise=False,
noise_detach_embeds=False,
noise_eps=1e-5,
noise_type='uniform',
enable_code_switch=False,
code_switch_ratio=0.5,
dict_dir=None,
dict_languages=None,
enable_word_dropout=False,
word_dropout_rate=0.1,
enable_translate_data=False,
translation_path=None,
train_language=None,
data_dir=None,
translate_different_pair=False,
translate_en_data=False,
enable_data_augmentation=False,
augment_method=None,
augment_ratio=0.0,
r2_lambda=1.0,
use_hard_labels=False):
if enable_code_switch:
assert dict_dir is not None
assert dict_languages is not None
assert tokenizer is not None
if enable_random_noise:
assert noise_type in ['uniform', 'normal']
self.task_name = task_name
self.n_tokens = 0
self.n_cs_tokens = 0
self.enable_r1_loss = enable_r1_loss
self.r1_lambda = r1_lambda
self.original_loss = original_loss
self.noised_loss = noised_loss
self.max_length = max_length
self.overall_ratio = overall_ratio
self.enable_bpe_switch = enable_bpe_switch
self.bpe_switch_ratio = bpe_switch_ratio / self.overall_ratio
assert self.bpe_switch_ratio <= 1.0
self.tokenizer_dir = tokenizer_dir
self.tokenizer_languages = tokenizer_languages
self.enable_bpe_sampling = enable_bpe_sampling
self.bpe_sampling_ratio = bpe_sampling_ratio / self.overall_ratio
assert self.bpe_sampling_ratio <= 1.0
self.tokenizer = tokenizer
self.sampling_alpha = sampling_alpha
self.sampling_nbest_size = sampling_nbest_size
self.enable_random_noise = enable_random_noise
self.noise_detach_embeds = noise_detach_embeds
self.noise_eps = noise_eps
self.noise_type = noise_type
self.enable_word_dropout = enable_word_dropout
self.word_dropout_rate = word_dropout_rate
self.enable_translate_data = enable_translate_data
self.train_languages = train_language.split(',')
self.data_dir = data_dir
self.translate_different_pair = translate_different_pair
self.translate_en_data = translate_en_data
if "en" in self.train_languages:
self.train_languages.remove("en")
self.translate_train_dicts = []
self.tgt2src_dict = {}
self.tgt2src_cnt = {}
self.translation_path = translation_path
self.enable_code_switch = enable_code_switch
self.code_switch_ratio = code_switch_ratio / self.overall_ratio
assert self.code_switch_ratio <= 1.0
self.dict_dir = dict_dir
self.dict_languages = dict_languages
self.lang2dict = {}
for lang in copy.deepcopy(dict_languages):
dict_path = os.path.join(self.dict_dir, "en-{}.txt".format(lang))
if not os.path.exists(dict_path):
logger.info("dictionary en-{} doesn't exist.".format(lang))
self.dict_languages.remove(lang)
continue
logger.info("reading dictionary from {}".format(dict_path))
assert os.path.exists(dict_path)
with open(dict_path, "r", encoding="utf-8") as reader:
raw = reader.readlines()
self.lang2dict[lang] = {}
for line in raw:
line = line.strip()
try:
src, tgt = line.split("\t")
except:
src, tgt = line.split(" ")
if src not in self.lang2dict[lang]:
self.lang2dict[lang][src] = [tgt]
else:
self.lang2dict[lang][src].append(tgt)
self.lang2tokenizer = {}
for lang in tokenizer_languages:
self.lang2tokenizer[lang] = XLMRobertaTokenizer.from_pretrained(
os.path.join(tokenizer_dir, "{}".format(lang)), do_lower_case=do_lower_case)
self.enable_data_augmentation = enable_data_augmentation
self.augment_method = augment_method
self.augment_ratio = augment_ratio
self.r2_lambda = r2_lambda
self.use_hard_labels = use_hard_labels
def augment_examples(self, examples):
n_augment = math.ceil(len(examples) * self.augment_ratio)
augment_examples = []
while n_augment > 0:
examples = copy.deepcopy(examples)
augment_examples += examples[:n_augment]
n_augment -= len(examples[:n_augment])
random.shuffle(examples)
return augment_examples
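    # Build the dataset for one epoch: optionally append augmented copies of the examples,
    # encode the original (and, if R1 loss is enabled, a noised) version of each example,
    # and report the fraction of code-switched tokens.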
def get_noised_dataset(self, examples):
# maybe do not save augmented examples
examples = copy.deepcopy(examples)
if (self.enable_data_augmentation and self.augment_method == "mt") or self.enable_translate_data:
self.load_translate_data()
is_augmented = [0] * len(examples)
if self.enable_data_augmentation:
augment_examples = self.augment_examples(examples)
is_augmented += [1] * len(augment_examples)
examples += augment_examples
if self.enable_code_switch:
self.n_tokens = 0
self.n_cs_tokens = 0
dataset = self.convert_examples_to_dataset(examples, is_augmented)
if self.enable_code_switch:
logger.info("{:.2f}% tokens have been code-switched.".format(self.n_cs_tokens / self.n_tokens * 100))
return dataset
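    # Token-level noising: for each whitespace-separated token, optionally replace it with a
    # dictionary translation (code-switch), tokenize it with another language's tokenizer
    # (BPE-switch), sample an alternative subword segmentation, or replace subwords with the
    # unknown token (word dropout). Returns subword ids without special tokens.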
def encode_sentence(self, text, switch_text=False, enable_code_switch=False, enable_bpe_switch=False,
enable_bpe_sampling=False, enable_word_dropout=False, ):
if text is None:
return None
ids = []
tokens = text.split(" ")
for token in tokens:
switch_token = random.random() <= self.overall_ratio
self.n_tokens += 1
if enable_code_switch and switch_text and switch_token and random.random() <= self.code_switch_ratio:
lang = self.dict_languages[random.randint(0, len(self.dict_languages) - 1)]
if token.lower() in self.lang2dict[lang]:
self.n_cs_tokens += 1
token = self.lang2dict[lang][token.lower()][
random.randint(0, len(self.lang2dict[lang][token.lower()]) - 1)]
if enable_bpe_switch and switch_text and switch_token and random.random() <= self.bpe_switch_ratio:
lang = self.tokenizer_languages[random.randint(0, len(self.tokenizer_languages) - 1)]
tokenizer = self.lang2tokenizer[lang]
else:
tokenizer = self.tokenizer
if enable_bpe_sampling and switch_text and switch_token and random.random() <= self.bpe_sampling_ratio:
token_ids = tokenizer.encode_plus(token, add_special_tokens=True,
nbest_size=self.sampling_nbest_size,
alpha=self.sampling_alpha)["input_ids"]
else:
token_ids = tokenizer.encode_plus(token, add_special_tokens=True)["input_ids"]
if enable_word_dropout:
for token_id in token_ids[1:-1]:
if random.random() <= self.word_dropout_rate:
ids += [tokenizer.unk_token_id]
else:
ids += [token_id]
else:
ids += token_ids[1:-1]
return ids
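    # Encode a (text_a, text_b) pair with the noising options above, truncate the
    # concatenation to max_length, then add the model's special tokens and token type ids.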
def encode_plus(self, text_a, text_b, switch_text=False, enable_code_switch=False, enable_bpe_switch=False,
enable_bpe_sampling=False, enable_word_dropout=False, ):
# switch all sentences
ids = self.encode_sentence(text_a, switch_text, enable_code_switch, enable_bpe_switch, enable_bpe_sampling,
enable_word_dropout)
pair_ids = self.encode_sentence(text_b, switch_text, enable_code_switch, enable_bpe_switch, enable_bpe_sampling,
enable_word_dropout)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
encoded_inputs = {}
# Handle max sequence length
total_len = len_ids + len_pair_ids + (self.tokenizer.num_added_tokens(pair=pair))
if self.max_length and total_len > self.max_length:
ids, pair_ids, overflowing_tokens = self.tokenizer.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - self.max_length,
truncation_strategy="longest_first",
stride=0,
)
# Handle special_tokens
sequence = self.tokenizer.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(ids, pair_ids)
encoded_inputs["input_ids"] = sequence
encoded_inputs["token_type_ids"] = token_type_ids
return encoded_inputs
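    # Convert examples into padded tensors. Augmented copies are encoded according to the
    # chosen augment_method ("mt", "gn", "cs" or "ss"); when R1 loss is enabled, a noised
    # copy of every example is encoded and returned alongside the original one.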
def convert_examples_to_dataset(
self,
examples,
is_augmented=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True
):
processor = processors[self.task_name](language="en", train_language="en")
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, self.task_name))
label_map = {label: i for i, label in enumerate(label_list)}
output_mode = output_modes[self.task_name]
logger.info("Using output mode %s for task %s" % (output_mode, self.task_name))
all_original_input_ids = []
all_original_attention_mask = []
all_original_token_type_ids = []
all_labels = []
all_noised_input_ids = []
all_noised_attention_mask = []
all_noised_token_type_ids = []
all_r1_mask = []
all_is_augmented = []
for (ex_index, example) in enumerate(examples):
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
# if ex_index == 10000: break
if is_augmented[ex_index]:
if self.augment_method == "mt":
example.text_a, example.text_b = self.get_translation_pair(example.text_a, example.text_b)
original_inputs = self.encode_plus(example.text_a, example.text_b, switch_text=False)
all_r1_mask.append(1)
elif self.augment_method == "gn":
original_inputs = self.encode_plus(example.text_a, example.text_b, switch_text=False)
all_r1_mask.append(1)
elif self.augment_method == "cs":
original_inputs = self.encode_plus(example.text_a, example.text_b, switch_text=True,
enable_code_switch=True)
all_r1_mask.append(1)
elif self.augment_method == "ss":
original_inputs = self.encode_plus(example.text_a, example.text_b, switch_text=True,
enable_bpe_sampling=True)
all_r1_mask.append(1)
else:
assert False
else:
original_inputs = self.encode_plus(example.text_a, example.text_b, switch_text=False)
all_r1_mask.append(1)
all_is_augmented.append(is_augmented[ex_index])
original_input_ids, original_token_type_ids = original_inputs["input_ids"], original_inputs[
"token_type_ids"]
original_attention_mask = [1 if mask_padding_with_zero else 0] * len(original_input_ids)
original_padding_length = self.max_length - len(original_input_ids)
if pad_on_left:
original_input_ids = ([pad_token] * original_padding_length) + original_input_ids
original_attention_mask = ([0 if mask_padding_with_zero else 1] * original_padding_length) + \
original_attention_mask
original_token_type_ids = ([pad_token_segment_id] * original_padding_length) + original_token_type_ids
else:
original_input_ids = original_input_ids + ([pad_token] * original_padding_length)
original_attention_mask = original_attention_mask + (
[0 if mask_padding_with_zero else 1] * original_padding_length)
original_token_type_ids = original_token_type_ids + ([pad_token_segment_id] * original_padding_length)
assert len(original_input_ids) == self.max_length, "Error with input length {} vs {}".format(
len(original_input_ids), self.max_length)
assert len(original_attention_mask) == self.max_length, "Error with input length {} vs {}".format(
len(original_attention_mask), self.max_length)
assert len(original_token_type_ids) == self.max_length, "Error with input length {} vs {}".format(
len(original_token_type_ids), self.max_length)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("original text a: %s" % (example.text_a))
logger.info("original text b: %s" % (example.text_b))
logger.info("original_input_ids: %s" % " ".join([str(x) for x in original_input_ids]))
logger.info("original_attention_mask: %s" % " ".join([str(x) for x in original_attention_mask]))
logger.info("original_token_type_ids: %s" % " ".join([str(x) for x in original_token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
all_original_input_ids.append(original_input_ids)
all_original_attention_mask.append(original_attention_mask)
all_original_token_type_ids.append(original_token_type_ids)
all_labels.append(label)
if not self.enable_r1_loss:
continue
if self.enable_translate_data:
noised_text_a, noised_text_b = self.get_translation_pair(example.text_a, example.text_b)
else:
noised_text_a, noised_text_b = example.text_a, example.text_b
noised_inputs = self.encode_plus(noised_text_a, noised_text_b, switch_text=True,
enable_code_switch=self.enable_code_switch,
enable_bpe_switch=self.enable_bpe_switch,
enable_bpe_sampling=self.enable_bpe_sampling,
enable_word_dropout=self.enable_word_dropout)
noised_input_ids, noised_token_type_ids = noised_inputs["input_ids"], noised_inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
noised_attention_mask = [1 if mask_padding_with_zero else 0] * len(noised_input_ids)
# Zero-pad up to the sequence length.
noised_padding_length = self.max_length - len(noised_input_ids)
if pad_on_left:
noised_input_ids = ([pad_token] * noised_padding_length) + noised_input_ids
noised_attention_mask = ([0 if mask_padding_with_zero else 1] * noised_padding_length) + \
noised_attention_mask
noised_token_type_ids = ([pad_token_segment_id] * noised_padding_length) + noised_token_type_ids
else:
noised_input_ids = noised_input_ids + ([pad_token] * noised_padding_length)
noised_attention_mask = noised_attention_mask + (
[0 if mask_padding_with_zero else 1] * noised_padding_length)
noised_token_type_ids = noised_token_type_ids + ([pad_token_segment_id] * noised_padding_length)
assert len(noised_input_ids) == self.max_length, "Error with input length {} vs {}".format(
len(noised_input_ids), self.max_length)
assert len(noised_attention_mask) == self.max_length, "Error with input length {} vs {}".format(
len(noised_attention_mask), self.max_length)
assert len(noised_token_type_ids) == self.max_length, "Error with input length {} vs {}".format(
len(noised_token_type_ids), self.max_length)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("noised text a: %s" % (noised_text_a))
logger.info("noised text b: %s" % (noised_text_b))
logger.info("noised_input_ids: %s" % " ".join([str(x) for x in noised_input_ids]))
logger.info("noised_attention_mask: %s" % " ".join([str(x) for x in noised_attention_mask]))
logger.info("noised_token_type_ids: %s" % " ".join([str(x) for x in noised_token_type_ids]))
all_noised_input_ids.append(noised_input_ids)
all_noised_attention_mask.append(noised_attention_mask)
all_noised_token_type_ids.append(noised_token_type_ids)
all_original_input_ids = torch.tensor([input_ids for input_ids in all_original_input_ids], dtype=torch.long)
all_original_attention_mask = torch.tensor([attention_mask for attention_mask in all_original_attention_mask],
dtype=torch.long)
all_original_token_type_ids = torch.tensor([token_type_ids for token_type_ids in all_original_token_type_ids],
dtype=torch.long)
all_labels = torch.tensor([label for label in all_labels], dtype=torch.long)
is_augmented = torch.tensor([is_augmented for is_augmented in all_is_augmented], dtype=torch.long)
if self.enable_r1_loss:
all_noised_input_ids = torch.tensor([input_ids for input_ids in all_noised_input_ids], dtype=torch.long)
all_noised_attention_mask = torch.tensor([attention_mask for attention_mask in all_noised_attention_mask],
dtype=torch.long)
all_noised_token_type_ids = torch.tensor([token_type_ids for token_type_ids in all_noised_token_type_ids],
dtype=torch.long)
all_r1_mask = torch.tensor([r1_mask for r1_mask in all_r1_mask],
dtype=torch.long)
dataset = TensorDataset(all_original_input_ids, all_original_attention_mask, all_original_token_type_ids,
all_labels, is_augmented, all_noised_input_ids, all_noised_attention_mask,
all_noised_token_type_ids, all_r1_mask)
else:
dataset = TensorDataset(all_original_input_ids, all_original_attention_mask, all_original_token_type_ids,
all_labels, is_augmented)
return dataset
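    # Map a sentence pair to its translation: target-language pairs are mapped back to English
    # (or another target language), English pairs are mapped to a randomly chosen target
    # language, optionally using different languages for text_a and text_b.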
def get_translation_pair(self, text_a, text_b):
if text_a.strip() in self.tgt2src_dict and text_b.strip() in self.tgt2src_dict:
# tgt to {en, tgt}
en_text_a = self.tgt2src_dict[text_a.strip()]
en_text_b = self.tgt2src_dict[text_b.strip()]
lang_id_a = random.randint(0, len(self.train_languages) - 1)
if self.translate_different_pair:
lang_id_b = random.randint(0, len(self.train_languages) - 1)
else:
lang_id_b = lang_id_a
if text_a == self.translate_train_dicts[lang_id_a][en_text_a.strip()]:
text_a = en_text_a
else:
text_a = self.translate_train_dicts[lang_id_a][en_text_a.strip()]
if text_b == self.translate_train_dicts[lang_id_b][en_text_b.strip()]:
text_b = en_text_b
else:
text_b = self.translate_train_dicts[lang_id_b][en_text_b.strip()]
else:
# en to tgt
lang_id_a = random.randint(0, len(self.train_languages) - 1)
if self.translate_different_pair:
lang_id_b = random.randint(0, len(self.train_languages) - 1)
else:
lang_id_b = lang_id_a
assert text_a.strip() in self.translate_train_dicts[lang_id_a]
assert text_b.strip() in self.translate_train_dicts[lang_id_b]
text_a = self.translate_train_dicts[lang_id_a][text_a.strip()]
text_b = self.translate_train_dicts[lang_id_b][text_b.strip()]
return text_a, text_b
def load_translate_data(self):
self.translate_train_dicts = []
self.tgt2src_dict = {}
self.tgt2src_cnt = {}
for i, language in enumerate(self.train_languages):
logger.info("reading training data from lang {}".format(language))
processor = processors[self.task_name](language=language, train_language=language)
src2tgt_dict = processor.get_translate_train_dict(self.translation_path, self.tgt2src_dict, self.tgt2src_cnt)
self.translate_train_dicts.append(src2tgt_dict)
def get_train_steps(self, dataloader_size, args):
n_augment_batch = math.ceil(dataloader_size * (1 + self.augment_ratio))
augment_steps = n_augment_batch // args.gradient_accumulation_steps
if args.max_steps > 0:
t_total = args.max_steps
assert False
else:
t_total = augment_steps * args.num_train_epochs
return t_total
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
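# Note: this helper shadows torch.utils.data.ConcatDataset imported above. It merges a list
# of TensorDatasets by concatenating their first four tensors (input_ids, attention_mask,
# token_type_ids, labels) into a single TensorDataset.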
def ConcatDataset(dataset_list):
all_input_ids = torch.cat([dataset.tensors[0] for dataset in dataset_list], dim=0)
all_attention_mask = torch.cat([dataset.tensors[1] for dataset in dataset_list], dim=0)
all_token_type_ids = torch.cat([dataset.tensors[2] for dataset in dataset_list], dim=0)
all_labels = torch.cat([dataset.tensors[3] for dataset in dataset_list], dim=0)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, noised_data_generator=None):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(os.path.join(args.output_dir, "tb-log"))
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if noised_data_generator is not None and noised_data_generator.enable_data_augmentation:
t_total = noised_data_generator.get_train_steps(len(train_dataloader), args)
else:
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Logging steps = %d", args.logging_steps)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path) and False:
        # set global_step to the global_step of the last saved checkpoint from the model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss, best_avg = 0.0, 0.0, 0.0
tr_original_loss, logging_original_loss = 0.0, 0.0
tr_noised_loss, logging_noised_loss = 0.0, 0.0
tr_r1_loss, logging_r1_loss = 0.0, 0.0
tr_r2_loss, logging_r2_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    set_seed(args)  # Added here for reproducibility
def logging(eval=False):
results = None
if args.evaluate_during_training and eval:
results = evaluate(args, model, tokenizer, single_gpu=True)
for task, result in results.items():
for key, value in result.items():
tb_writer.add_scalar("eval_{}_{}".format(task, key), value, global_step)
logger.info("eval_%s_%s: %s" % (task, key, value))
log_writer.write("{0}\t{1}\n".format(global_step, json.dumps(results)))
log_writer.flush()
logger.info(
"global_step: {}, lr: {:.6f}, loss: {:.6f}, original_loss: {:.6f}, noised_loss: {:.6f}, r1_loss: {:.6f}, r2_loss: {:.6f}".format(
global_step, scheduler.get_lr()[0], (tr_loss - logging_loss) / args.logging_steps,
(tr_original_loss - logging_original_loss) / args.logging_steps,
(tr_noised_loss - logging_noised_loss) / args.logging_steps,
(tr_r1_loss - logging_r1_loss) / args.logging_steps,
(tr_r2_loss - logging_r2_loss) / args.logging_steps))
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("original_loss", (tr_original_loss - logging_original_loss) / args.logging_steps,
global_step)
tb_writer.add_scalar("noised_loss", (tr_noised_loss - logging_noised_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r1_loss", (tr_r1_loss - logging_r1_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r2_loss", (tr_r2_loss - logging_r2_loss) / args.logging_steps, global_step)
return results
def save_checkpoint_best(result):
task_metric = "acc"
if args.task_name == "rel":
task_metric = "ndcg"
if result is not None and best_avg < result["valid_avg"][task_metric]:
output_dir = os.path.join(args.output_dir, "checkpoint-best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
return result["valid_avg"][task_metric]
else:
return best_avg
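    # When a noised_data_generator is supplied, the noised dataset is rebuilt at the start of
    # every epoch, so a fresh set of code-switched / sampled / translated views is drawn each time.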
for _ in train_iterator:
if noised_data_generator is not None:
assert noised_data_generator.enable_r1_loss or noised_data_generator.noised_loss or noised_data_generator.enable_data_augmentation
noised_train_dataset = noised_data_generator.get_noised_dataset(train_examples)
train_sampler = RandomSampler(noised_train_dataset) if args.local_rank == -1 else DistributedSampler(
noised_train_dataset)
train_dataloader = DataLoader(noised_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
# if not args.max_steps > 0:
# assert t_total == len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
if first_stage_model is not None:
first_stage_model.eval()
batch = tuple(t.to(args.device) for t in batch)
if len(batch) == 4:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
elif len(batch) == 5:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
inputs["is_augmented"] = batch[4]
else:
assert len(batch) == 9
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3],
"is_augmented": batch[4],
"noised_input_ids": batch[5],
"noised_attention_mask": batch[6],
"r1_mask": batch[8]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
inputs["noised_token_type_ids"] = (
batch[7] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
if first_stage_model is not None:
first_stage_model_inputs = {"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"token_type_ids": inputs["token_type_ids"],
"labels": inputs["labels"]}
with torch.no_grad():
inputs["first_stage_model_logits"] = first_stage_model(**first_stage_model_inputs)[1]
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if noised_data_generator is not None:
original_loss, noised_loss, r1_loss, r2_loss = outputs[1:5]
if args.n_gpu > 1:
original_loss = original_loss.mean()
noised_loss = noised_loss.mean()
r1_loss = r1_loss.mean()
r2_loss = r2_loss.mean()
if args.gradient_accumulation_steps > 1:
original_loss = original_loss / args.gradient_accumulation_steps
noised_loss = noised_loss / args.gradient_accumulation_steps
r1_loss = r1_loss / args.gradient_accumulation_steps
r2_loss = r2_loss / args.gradient_accumulation_steps
tr_original_loss += original_loss.item()
tr_noised_loss += noised_loss.item()
tr_r1_loss += r1_loss.item()
tr_r2_loss += r2_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
do_eval = args.evaluate_steps > 0 and global_step % args.evaluate_steps == 0
cur_result = logging(eval=do_eval)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
best_avg = save_checkpoint_best(cur_result)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.local_rank in [-1, 0] and args.logging_each_epoch:
cur_result = logging(eval=True)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
best_avg = save_checkpoint_best(cur_result)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / (global_step + 1)
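# Run inference on the test split of every language in args.language and write one
# "<lang>.prediction" file per language containing the predicted label strings.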
def predict(args, model, tokenizer, label_list, prefix="", single_gpu=False, verbose=True):
if single_gpu:
args = copy.deepcopy(args)
args.local_rank = -1
args.n_gpu = 1
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
eval_datasets = []
eval_langs = args.language.split(',')
for split in ["test"]:
for lang in eval_langs:
eval_datasets.append((split, lang))
results = {}
# leave interface for multi-task evaluation
eval_task = eval_task_names[0]
eval_output_dir = eval_outputs_dirs[0]
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
for split, lang in eval_datasets:
task_name = "{0}-{1}".format(split, lang)
eval_dataset, guids = load_and_cache_examples(args, eval_task, tokenizer, lang, split=split)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
guids = np.array(guids)
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
outputs = model(**inputs)
logits = outputs[0]
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
else:
raise ValueError("No other `output_mode` for XGLUE.")
results[lang] = preds
for lang in results.keys():
output_eval_file = os.path.join(eval_output_dir, prefix, "{}.prediction".format(lang))
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
print("results:", results)
for item in results[lang]:
writer.write(str(label_list[item]) + "\n")
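# Evaluate on the validation and test splits when training (test split only otherwise) for
# every language in args.language, and additionally report "valid_avg"/"test_avg" averages
# across languages when training.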
def evaluate(args, model, tokenizer, prefix="", single_gpu=False, verbose=True):
if single_gpu:
args = copy.deepcopy(args)
args.local_rank = -1
args.n_gpu = 1
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
eval_datasets = []
eval_langs = args.language.split(',')
splits = ["valid", "test"] if args.do_train else ["test"]
for split in splits:
for lang in eval_langs:
eval_datasets.append((split, lang))
results = {}
# leave interface for multi-task evaluation
eval_task = eval_task_names[0]
eval_output_dir = eval_outputs_dirs[0]
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
for split, lang in eval_datasets:
task_name = "{0}-{1}".format(split, lang)
eval_dataset, guids = load_and_cache_examples(args, eval_task, tokenizer, lang, split=split)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
guids = np.array(guids)
for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
else:
raise ValueError("No other `output_mode` for XGLUE.")
# print("pred:" + split + str([i for i in preds[:500]]), flush=True)
# print("label:" + split + str([i for i in out_label_ids[:500]]), flush=True)
result = compute_metrics(eval_task, preds, out_label_ids, guids)
results[task_name] = result
if args.do_train:
results["valid_avg"] = average_dic([value for key, value in results.items() if key.startswith("valid")])
results["test_avg"] = average_dic([value for key, value in results.items() if key.startswith("test")])
return results
def average_dic(dic_list):
if len(dic_list) == 0:
return {}
dic_sum = {}
for dic in dic_list:
if len(dic_sum) == 0:
for key, value in dic.items():
dic_sum[key] = value
else:
assert set(dic_sum.keys()) == set(dic.keys()), "sum_keys:{0}, dic_keys:{1}".format(set(dic_sum.keys()),
set(dic.keys()))
for key, value in dic.items():
dic_sum[key] += value
for key in dic_sum:
dic_sum[key] /= len(dic_list)
return dic_sum
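# Load examples for one language/split, converting them to features and caching the features
# on disk (keyed by split, cache name, max sequence length, task and language).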
def load_and_cache_examples(args, task, tokenizer, language, split="train", return_examples=False):
assert split in ["train", "valid", "test"]
    if args.local_rank not in [-1, 0] and split == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task](language=language, train_language=language)
output_mode = output_modes[task]
# Load data features from cache or dataset file
# data_cache_name = list(filter(None, args.model_name_or_path.split("/"))).pop()
data_cache_name = "xlmr-base-final"
if args.data_cache_name is not None:
data_cache_name = args.data_cache_name
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}_{}".format(
split,
data_cache_name,
str(args.max_seq_length),
str(task),
str(language),
),
)
if split == "test":
examples = processor.get_test_examples(args.data_dir)
elif split == "valid":
examples = processor.get_valid_examples(args.data_dir)
else: # train
examples = processor.get_train_examples(args.data_dir)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=False,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and split == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_guids = [f.guid for f in features]
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
# if output_mode == "classification" and (not split == "test") :
# all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
# else:
# all_labels = torch.tensor([0 for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
if return_examples:
return dataset, all_guids, examples
else:
return dataset, all_guids
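# Illustrative invocation (paths, languages and hyperparameter values below are placeholders,
# not part of the repository):
#   python src/run_cls.py --model_type xlmr --model_name_or_path /path/to/xlm-roberta-base \
#       --task_name xnli --language en,de,fr --train_language en \
#       --data_dir /path/to/xnli --output_dir /path/to/output \
#       --do_train --do_eval --per_gpu_train_batch_size 32 --learning_rate 7e-6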
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--reload",
default="",
type=str,
help="path to infoxlm checkpoint",
)
parser.add_argument(
"--data_cache_name",
default=None,
type=str,
help="The name of cached data",
)
parser.add_argument(
"--language",
default=None,
type=str,
required=True,
help="Evaluation language. Also train language if `train_language` is set to None.",
)
parser.add_argument(
"--train_language", default=None, type=str, help="Train language if is different of the evaluation language."
)
parser.add_argument(
"--sample_ratio", default=0.0, type=float, help="The training sample ratio of each language"
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# stable fine-tuning paramters
parser.add_argument("--overall_ratio", default=1.0, type=float, help="overall ratio")
parser.add_argument("--enable_r1_loss", action="store_true", help="Whether to enable r1 loss.")
parser.add_argument("--r1_lambda", default=5.0, type=float, help="lambda of r1 loss")
parser.add_argument("--original_loss", action="store_true",
help="Whether to use cross entropy loss on the former example.")
parser.add_argument("--noised_loss", action="store_true",
help="Whether to use cross entropy loss on the latter example.")
parser.add_argument("--enable_bpe_switch", action="store_true", help="Whether to enable bpe-switch.")
parser.add_argument("--bpe_switch_ratio", default=0.5, type=float, help="bpe_switch_ratio")
parser.add_argument("--tokenizer_dir", default=None, type=str, help="tokenizer dir")
parser.add_argument("--tokenizer_languages", default=None, type=str, help="tokenizer languages")
parser.add_argument("--enable_bpe_sampling", action="store_true", help="Whether to enable bpe sampling.")
parser.add_argument("--bpe_sampling_ratio", default=0.5, type=float, help="bpe_sampling_ratio")
parser.add_argument("--sampling_alpha", default=5.0, type=float, help="alpha of sentencepiece sampling")
parser.add_argument("--sampling_nbest_size", default=-1, type=int, help="nbest_size of sentencepiece sampling")
parser.add_argument("--enable_random_noise", action="store_true", help="Whether to enable random noise.")
parser.add_argument("--noise_detach_embeds", action="store_true", help="Whether to detach noised embeddings.")
parser.add_argument("--noise_eps", default=1e-5, type=float, help="noise eps")
parser.add_argument('--noise_type', type=str, default='uniform',
choices=['normal', 'uniform'],
                        help='type of noise for RXF methods')
parser.add_argument("--enable_code_switch", action="store_true", help="Whether to enable code switch.")
parser.add_argument("--code_switch_ratio", default=0.5, type=float, help="code_switch_ratio")
parser.add_argument("--dict_dir", default=None, type=str, help="dict dir")
parser.add_argument("--dict_languages", default=None, type=str, help="dict languages")
parser.add_argument("--enable_word_dropout", action="store_true", help="Whether to enable word dropout.")
parser.add_argument("--word_dropout_rate", default=0.1, type=float, help="word dropout rate.")
parser.add_argument("--enable_translate_data", action="store_true", help="Whether to enable translate data.")
parser.add_argument("--translation_path", default=None, type=str, help="translation path")
parser.add_argument("--translate_languages", default=None, type=str, help="translate languages")
parser.add_argument("--translate_different_pair", action="store_true", help="Whether to translate different pair.")
parser.add_argument("--translate_en_data", action="store_true", help="Whether to translate en data.")
parser.add_argument("--enable_data_augmentation", action="store_true", help="Whether to enable data augmentation.")
parser.add_argument("--augment_method", default=None, type=str, help="augment method")
parser.add_argument("--augment_ratio", default=1.0, type=float, help="augmentation ratio.")
parser.add_argument("--first_stage_model_path", default=None, type=str, required=False,
help="stable model path")
parser.add_argument("--r2_lambda", default=1.0, type=float, required=False,
help="r2_lambda")
parser.add_argument("--use_hard_labels", action="store_true", help="Whether to use hard labels.")
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--gpu_id", default="", type=str, help="GPU id"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run prediction on the test set.")
parser.add_argument("--init_checkpoint", default=None, type=str,
help="initial checkpoint for train/predict")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--evaluate_steps", type=int, default=5000, help="Log every X updates steps.")
parser.add_argument("--logging_each_epoch", action="store_true", help="Whether to log after each epoch.")
parser.add_argument("--logging_steps_in_sample", type=int, default=-1, help="log every X samples.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
parser.add_argument("--train_cut_ratio", type=float, default=1.0, help="Cut training data to the ratio")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# preprocess args
if args.train_language is None or args.train_language == "all":
args.train_language = args.language
assert not (
        args.logging_steps != -1 and args.logging_steps_in_sample != -1), "these two parameters cannot both be set"
if args.logging_steps == -1 and args.logging_steps_in_sample != -1:
total_batch_size = args.n_gpu * args.per_gpu_train_batch_size * args.gradient_accumulation_steps
args.logging_steps = args.logging_steps_in_sample // total_batch_size
# Set seed
set_seed(args)
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name](language=args.language, train_language=args.train_language)
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.enable_r1_loss or args.noised_loss or args.enable_data_augmentation:
noised_data_generator = NoisedDataGenerator(
task_name=args.task_name,
enable_r1_loss=args.enable_r1_loss,
r1_lambda=args.r1_lambda,
original_loss=args.original_loss,
noised_loss=args.noised_loss,
max_length=args.max_seq_length,
overall_ratio=args.overall_ratio,
enable_bpe_switch=args.enable_bpe_switch,
bpe_switch_ratio=args.bpe_switch_ratio,
tokenizer_dir=args.tokenizer_dir,
do_lower_case=args.do_lower_case,
tokenizer_languages=args.tokenizer_languages.split(',') if args.tokenizer_languages is not None else [],
enable_bpe_sampling=args.enable_bpe_sampling,
bpe_sampling_ratio=args.bpe_sampling_ratio,
tokenizer=tokenizer,
sampling_alpha=args.sampling_alpha,
sampling_nbest_size=args.sampling_nbest_size,
enable_random_noise=args.enable_random_noise,
noise_detach_embeds=args.noise_detach_embeds,
noise_eps=args.noise_eps,
noise_type=args.noise_type,
enable_code_switch=args.enable_code_switch,
code_switch_ratio=args.code_switch_ratio,
dict_dir=args.dict_dir,
dict_languages=args.dict_languages.split(',') if args.dict_languages is not None else [],
enable_word_dropout=args.enable_word_dropout,
word_dropout_rate=args.word_dropout_rate,
enable_translate_data=args.enable_translate_data,
translation_path=args.translation_path,
train_language=args.language if args.translate_languages is None else args.translate_languages,
data_dir=args.data_dir,
translate_different_pair=args.translate_different_pair,
translate_en_data=args.translate_en_data,
enable_data_augmentation=args.enable_data_augmentation,
augment_method=args.augment_method,
augment_ratio=args.augment_ratio,
r2_lambda=args.r2_lambda,
use_hard_labels=args.use_hard_labels,
)
else:
noised_data_generator = None
if args.first_stage_model_path is not None:
first_stage_model = model_class.from_pretrained(args.first_stage_model_path,
config=config)
else:
first_stage_model = None
state_dict = None
if args.reload != "":
from tools.dump_hf_state_dict import convert_pt_to_hf
state_dict = convert_pt_to_hf(os.path.join(args.model_name_or_path, 'pytorch_model.bin'), args.reload, logger)
# state_dict = torch.load(args.reload)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
noised_data_generator=noised_data_generator,
cache_dir=args.cache_dir if args.cache_dir else None,
state_dict=state_dict,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
if first_stage_model is not None:
first_stage_model.to(args.device)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
train_langs = args.train_language.split(',')
dataset_list = []
train_examples = []
for lang in train_langs:
lg_train_dataset, guids, lg_examples = load_and_cache_examples(args, args.task_name, tokenizer, lang,
split="train", return_examples=True)
dataset_list.append(lg_train_dataset)
train_examples += lg_examples
train_dataset = ConcatDataset(dataset_list)
global_step, tr_loss = train(args, train_examples, train_dataset, model, first_stage_model, tokenizer,
noised_data_generator)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.init_checkpoint:
best_checkpoint = args.init_checkpoint
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):
best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')
else:
best_checkpoint = args.output_dir
best_f1 = 0
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoint = best_checkpoint
tokenizer = tokenizer_class.from_pretrained(checkpoint, do_lower_case=args.do_lower_case)
logger.info("Evaluate the following checkpoints: %s", checkpoint)
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer)
for key, value in result.items():
logger.info("eval_{}: {}".format(key, value))
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
log_writer.write("{0}\t{1}".format("evaluate", json.dumps(result)) + '\n')
if args.do_predict and args.local_rank in [-1, 0]:
# tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoint = best_checkpoint
tokenizer = tokenizer_class.from_pretrained(checkpoint, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
predict(args, model, tokenizer, label_list)
logger.info("Task {0} finished!".format(args.task_name))
return results
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/xtune/src/run_cls.py |
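# Sanity check for translation data with word alignments: reads lines of
# "src_sent \t tgt_lang \t tgt_sent \t alignment", skips a fixed set of languages, and
# asserts that every alignment link is unique on at least one side, i.e. that there are
# no many-to-many alignments.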
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--translation_path",
default=None,
type=str,
required=True,
help="",
)
drop_languages = ["en", "zh-CN", "zh", "ja", "ko", "th", "my", "ml", "ta"]
translate_languages = None
args = parser.parse_args()
src2tgt = {}
print("Reading translation from {}".format(args.translation_path))
with open(args.translation_path, encoding="utf-8") as f:
cnt = 0
for line in f:
cnt += 1
if cnt % 10000 == 0:
print("Reading lines {}".format(cnt))
items = line.split("\t")
            if len(items) == 3:
src_sent, tgt_lang, tgt_sent = line.split("\t")
alignment = None
else:
src_sent, tgt_lang, tgt_sent, alignment_str = line.split("\t")
alignment = []
for x in alignment_str.split(" "):
alignment.append((int(x.split("/")[0]), int(x.split("/")[1])))
if tgt_lang in drop_languages:
continue
if translate_languages is not None and tgt_lang not in translate_languages:
continue
            if alignment is None:
                continue
            cnt_src = {}
cnt_tgt = {}
for x in alignment:
if x[0] not in cnt_src:
cnt_src[x[0]] = 0
cnt_src[x[0]] += 1
if x[1] not in cnt_tgt:
cnt_tgt[x[1]] = 0
cnt_tgt[x[1]] += 1
if not (cnt_src[x[0]] <= 1 or cnt_tgt[x[1]] <= 1):
print(cnt_src, cnt_tgt)
print(alignment)
print(src_sent, tgt_sent)
assert cnt_src[x[0]] <= 1 or cnt_tgt[x[1]] <= 1
| EXA-1-master | exa/models/unilm-master/xtune/src/tools/check_many2many_alignment.py |