| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| stringlengths 5 to 22M | stringlengths 12 to 177 | dict | int64 0 to 1.37k |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import cv2
import torch
import numpy as np
import math
import random
from PIL import Image
from data.pix2pix_dataset import Pix2pixDataset
from data.base_dataset import get_params, get_transform
class DeepFashionHDDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
parser.set_defaults(no_pairing_check=True)
parser.set_defaults(load_size=550)
parser.set_defaults(crop_size=512)
parser.set_defaults(label_nc=20)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
root = opt.dataroot
if opt.phase == 'train':
fd = open(os.path.join('./data/train.txt'))
lines = fd.readlines()
fd.close()
elif opt.phase == 'test':
fd = open(os.path.join('./data/val.txt'))
lines = fd.readlines()
fd.close()
image_paths = []
label_paths = []
for i in range(len(lines)):
name = lines[i].strip()
image_paths.append(name)
label_path = name.replace('img', 'pose').replace('.jpg', '_{}.txt')
label_paths.append(os.path.join(label_path))
return label_paths, image_paths
def get_ref_video_like(self, opt):
pair_path = './data/deepfashion_self_pair.txt'
with open(pair_path) as fd:
self_pair = fd.readlines()
self_pair = [it.strip() for it in self_pair]
self_pair_dict = {}
for it in self_pair:
items = it.split(',')
self_pair_dict[items[0]] = items[1:]
ref_path = './data/deepfashion_ref_test.txt' if opt.phase == 'test' else './data/deepfashion_ref.txt'
with open(ref_path) as fd:
ref = fd.readlines()
ref = [it.strip() for it in ref]
ref_dict = {}
for i in range(len(ref)):
items = ref[i].strip().split(',')
key = items[0]
if key in self_pair_dict.keys():
val = [it for it in self_pair_dict[items[0]]]
else:
val = [items[-1]]
ref_dict[key.replace('\\',"/")] = [v.replace('\\',"/") for v in val]
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_ref_vgg(self, opt):
extra = ''
if opt.phase == 'test':
extra = '_test'
with open('./data/deepfashion_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = [it for it in items[1:]]
else:
val = [items[-1]]
ref_dict[key.replace('\\',"/")] = [v.replace('\\',"/") for v in val]
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_ref(self, opt):
if opt.video_like:
return self.get_ref_video_like(opt)
else:
return self.get_ref_vgg(opt)
def get_label_tensor(self, path):
candidate = np.loadtxt(path.format('candidate'))
subset = np.loadtxt(path.format('subset'))
stickwidth = 20
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
[1, 16], [16, 18], [3, 17], [6, 18]]
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
canvas = np.zeros((1024, 1024, 3), dtype=np.uint8)
cycle_radius = 20
for i in range(18):
index = int(subset[i])
if index == -1:
continue
x, y = candidate[index][0:2]
cv2.circle(canvas, (int(x), int(y)), cycle_radius, colors[i], thickness=-1)
joints = []
for i in range(17):
index = subset[np.array(limbSeq[i]) - 1]
cur_canvas = canvas.copy()
if -1 in index:
joints.append(np.zeros_like(cur_canvas[:, :, 0]))
continue
Y = candidate[index.astype(int), 0]
X = candidate[index.astype(int), 1]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
joint = np.zeros_like(cur_canvas[:, :, 0])
cv2.fillConvexPoly(joint, polygon, 255)
joint = cv2.addWeighted(joint, 0.4, joint, 0.6, 0)
joints.append(joint)
pose = Image.fromarray(cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)).resize((self.opt.load_size, self.opt.load_size), resample=Image.NEAREST)
params = get_params(self.opt, pose.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
transform_img = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False)
tensors_dist = 0
e = 1
for i in range(len(joints)):
im_dist = cv2.distanceTransform(255-joints[i], cv2.DIST_L1, 3)
im_dist = np.clip((im_dist/3), 0, 255).astype(np.uint8)
tensor_dist = transform_img(Image.fromarray(im_dist))
tensors_dist = tensor_dist if e == 1 else torch.cat([tensors_dist, tensor_dist])
e += 1
tensor_pose = transform_label(pose)
label_tensor = torch.cat((tensor_pose, tensors_dist), dim=0)
return label_tensor, params
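# Note: label_tensor stacks the 3-channel RGB pose rendering with the 17 limb
# distance maps, i.e. 20 channels in total, matching the label_nc=20 default above.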
def imgpath_to_labelpath(self, path):
label_path = path.replace('/img/', '/pose/').replace('.jpg', '_{}.txt')
return label_path
def labelpath_to_imgpath(self, path):
img_path = path.replace('/pose/', '/img/').replace('_{}.txt', '.jpg')
return img_path
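# Example mapping (illustrative, hypothetical path):
#   imgpath_to_labelpath('DeepFashion/img/example.jpg') -> 'DeepFashion/pose/example_{}.txt'
# The '{}' placeholder is filled inside get_label_tensor via path.format('candidate')
# and path.format('subset') to load the two keypoint files for one image.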
|
CoCosNet-v2/data/deepfashionHD_dataset.py/0
|
{
"file_path": "CoCosNet-v2/data/deepfashionHD_dataset.py",
"repo_id": "CoCosNet-v2",
"token_count": 3401
}
| 233 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as spectral_norm
def get_nonspade_norm_layer(opt, norm_type='instance'):
def get_out_channel(layer):
if hasattr(layer, 'out_channels'):
return getattr(layer, 'out_channels')
return layer.weight.size(0)
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith('spectral'):
layer = spectral_norm(layer)
subnorm_type = norm_type[len('spectral'):]
else:
subnorm_type = norm_type
if subnorm_type == 'none' or len(subnorm_type) == 0:
return layer
if getattr(layer, 'bias', None) is not None:
delattr(layer, 'bias')
layer.register_parameter('bias', None)
if subnorm_type == 'batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'sync_batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'instance':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
else:
raise ValueError('normalization layer %s is not recognized' % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
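# Example usage (illustrative): wrap a conv layer with spectral norm + instance norm.
# add_norm = get_nonspade_norm_layer(opt, 'spectralinstance')
# block = add_norm(nn.Conv2d(3, 64, kernel_size=3, padding=1))
# -> nn.Sequential(spectral_norm(Conv2d(3, 64, ...)), InstanceNorm2d(64, affine=False))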
def PositionalNorm2d(x, epsilon=1e-8):
# x: B*C*W*H normalize in C dim
mean = x.mean(dim=1, keepdim=True)
std = x.var(dim=1, keepdim=True).add(epsilon).sqrt()
output = (x - mean) / std
return output
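# For a 4-D input of shape (B, C, H, W), PositionalNorm2d (PONO) computes the mean and
# std over the channel dimension only, i.e. per spatial position, so both statistics
# have shape (B, 1, H, W).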
class SPADE(nn.Module):
def __init__(self, config_text, norm_nc, label_nc, PONO=False):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
self.pad_type = 'nozero'
if PONO:
self.param_free_norm = PositionalNorm2d
elif param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=True)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=True)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE' % param_free_norm_type)
nhidden = 128
pw = ks // 2
if self.pad_type != 'zero':
self.mlp_shared = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=0),
nn.ReLU()
)
self.pad = nn.ReflectionPad2d(pw)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
else:
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, segmap):
normalized = self.param_free_norm(x)
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
actv = self.mlp_shared(segmap)
if self.pad_type != 'zero':
gamma = self.mlp_gamma(self.pad(actv))
beta = self.mlp_beta(self.pad(actv))
else:
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
out = normalized * (1 + gamma) + beta
return out
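# Example instantiation (illustrative):
#   norm = SPADE('spadeinstance3x3', norm_nc=64, label_nc=20)
# parses param_free_norm_type='instance' and ks=3 from the config string; in forward(),
# segmap is resized to x's spatial size and the predicted gamma/beta modulate the
# parameter-free normalized activations.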
|
CoCosNet-v2/models/networks/normalization.py/0
|
{
"file_path": "CoCosNet-v2/models/networks/normalization.py",
"repo_id": "CoCosNet-v2",
"token_count": 1931
}
| 234 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset
class ADE20KDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=150)
parser.set_defaults(contain_dontcare_label=True)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
root = opt.dataroot
phase = 'val' if opt.phase == 'test' else 'train'
subfolder = 'validation' if opt.phase == 'test' else 'training'
cache = False if opt.phase == 'test' else True
all_images = sorted(make_dataset(root + '/' + subfolder, recursive=True, read_cache=cache, write_cache=False))
image_paths = []
label_paths = []
for p in all_images:
if '_%s_' % phase not in p:
continue
if p.endswith('.jpg'):
image_paths.append(p)
elif p.endswith('.png'):
label_paths.append(p)
return label_paths, image_paths
def get_ref(self, opt):
extra = '_test' if opt.phase == 'test' else ''
with open('./data/ade20k_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('training', 'validation')
return ref_dict, train_test_folder
|
CoCosNet/data/ade20k_dataset.py/0
|
{
"file_path": "CoCosNet/data/ade20k_dataset.py",
"repo_id": "CoCosNet",
"token_count": 1032
}
| 235 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
from models.networks.base_network import BaseNetwork
from models.networks.loss import *
from models.networks.discriminator import *
from models.networks.generator import *
#from models.networks.encoder import *
from models.networks.ContextualLoss import *
from models.networks.correspondence import *
#from models.networks.progressive_sub_net import *
import util.util as util
def find_network_using_name(target_network_name, filename, add=True):
target_class_name = target_network_name + filename if add else target_network_name
module_name = 'models.networks.' + filename
network = util.find_class_in_module(target_class_name, module_name)
assert issubclass(network, BaseNetwork), \
"Class %s should be a subclass of BaseNetwork" % network
return network
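# Example (illustrative): find_network_using_name('spade', 'generator') resolves the
# class named 'spadegenerator' in models.networks.generator via util.find_class_in_module,
# while find_network_using_name('DomainClassifier', 'generator', add=False) looks up
# the class 'DomainClassifier' in the same module.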
def modify_commandline_options(parser, is_train):
opt, _ = parser.parse_known_args()
netG_cls = find_network_using_name(opt.netG, 'generator')
parser = netG_cls.modify_commandline_options(parser, is_train)
if is_train:
netD_cls = find_network_using_name(opt.netD, 'discriminator')
parser = netD_cls.modify_commandline_options(parser, is_train)
# netE_cls = find_network_using_name('conv', 'encoder')
# parser = netE_cls.modify_commandline_options(parser, is_train)
return parser
def create_network(cls, opt, stage1=False):
if stage1:
net = cls(opt, stage1=True)
else:
net = cls(opt)
net.print_network()
if len(opt.gpu_ids) > 0:
assert(torch.cuda.is_available())
net.cuda()
net.init_weights(opt.init_type, opt.init_variance)
return net
def define_G(opt):
netG_cls = find_network_using_name(opt.netG, 'generator')
return create_network(netG_cls, opt)
def define_G_stage1(opt):
netG_cls = find_network_using_name(opt.netG, 'generator')
return create_network(netG_cls, opt, stage1=True)
def define_D(opt):
netD_cls = find_network_using_name(opt.netD, 'discriminator')
return create_network(netD_cls, opt)
def define_D_stage1(opt):
netD_cls = find_network_using_name(opt.netD, 'discriminator')
return create_network(netD_cls, opt, stage1=True)
def define_DomainClassifier(opt):
netDomainclassifier = find_network_using_name('DomainClassifier', 'generator', add=False)
return create_network(netDomainclassifier, opt)
def define_Corr(opt):
netCoor_cls = find_network_using_name('novgg', 'correspondence')
return create_network(netCoor_cls, opt)
|
CoCosNet/models/networks/__init__.py/0
|
{
"file_path": "CoCosNet/models/networks/__init__.py",
"repo_id": "CoCosNet",
"token_count": 1010
}
| 236 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet)."""
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, get_linear_schedule_with_warmup, AdamW,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer)
from utils import (compute_metrics, convert_examples_to_features,
output_modes, processors)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, optimizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, t_total)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = args.start_step
tr_loss, logging_loss = 0.0, 0.0
best_acc = 0.0
model.zero_grad()
train_iterator = trange(args.start_epoch, int(args.num_train_epochs), desc="Epoch",
disable=args.local_rank not in [-1, 0])
set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
model.train()
for idx, _ in enumerate(train_iterator):
tr_loss = 0.0
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM doesn't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)
loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, checkpoint=str(global_step))
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
logger.info('loss %s', str(tr_loss - logging_loss))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.max_steps > 0 and global_step > args.max_steps:
# epoch_iterator.close()
break
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
results = evaluate(args, model, tokenizer, checkpoint=str(args.start_epoch + idx))
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(last_output_dir)
logger.info("Saving model checkpoint to %s", last_output_dir)
idx_file = os.path.join(last_output_dir, 'idx_file.txt')
with open(idx_file, 'w', encoding='utf-8') as idxf:
idxf.write(str(args.start_epoch + idx) + '\n')
torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
step_file = os.path.join(last_output_dir, 'step_file.txt')
with open(step_file, 'w', encoding='utf-8') as stepf:
stepf.write(str(global_step) + '\n')
if (results['acc'] > best_acc):
best_acc = results['acc']
output_dir = os.path.join(args.output_dir, 'checkpoint-best')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_{}.bin'.format(idx)))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
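# Example (illustrative): accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0]))
# returns 2, i.e. the number of rows whose argmax matches the label.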
def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode='dev'):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
if (mode == 'dev'):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, ttype='dev')
elif (mode == 'test'):
eval_dataset, instances = load_and_cache_examples(args, eval_task, tokenizer, ttype='test')
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM doesn't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
# eval_accuracy = accuracy(preds,out_label_ids)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds_label = np.argmax(preds, axis=1)
result = compute_metrics(eval_task, preds_label, out_label_ids)
results.update(result)
if (mode == 'dev'):
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "a+") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write('evaluate %s\n' % checkpoint)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
elif (mode == 'test'):
output_test_file = args.test_result_dir
output_dir = os.path.dirname(output_test_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_test_file, "w") as writer:
logger.info("***** Output test results *****")
all_logits = preds.tolist()
for i, logit in tqdm(enumerate(all_logits), desc='Testing'):
instance_rep = '<CODESPLIT>'.join(
[item.encode('ascii', 'ignore').decode('ascii') for item in instances[i]])
writer.write(instance_rep + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\n')
for key in sorted(result.keys()):
print("%s = %s" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, ttype='train'):
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
if ttype == 'train':
file_name = args.train_file.split('.')[0]
elif ttype == 'dev':
file_name = args.dev_file.split('.')[0]
elif ttype == 'test':
file_name = args.test_file.split('.')[0]
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}_{}'.format(
ttype,
file_name,
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
# if os.path.exists(cached_features_file):
try:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
if ttype == 'test':
examples, instances = processor.get_test_examples(args.data_dir, args.test_file)
except:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if ttype == 'train':
examples = processor.get_train_examples(args.data_dir, args.train_file)
elif ttype == 'dev':
examples = processor.get_dev_examples(args.data_dir, args.dev_file)
elif ttype == 'test':
examples, instances = processor.get_test_examples(args.data_dir, args.test_file)
features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode,
cls_token_at_end=bool(args.model_type in ['xlnet']),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
cls_token_segment_id=2 if args.model_type in ['xlnet'] else 1,
pad_on_left=bool(args.model_type in ['xlnet']),
# pad on the left for xlnet
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if (ttype == 'test'):
return dataset, instances
else:
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name")
parser.add_argument("--task_name", default='codesearch', type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action='store_true',
help="Whether to run predict on the test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
parser.add_argument("--train_file", default="train_top10_concat.tsv", type=str,
help="train file")
parser.add_argument("--dev_file", default="shared_task_dev_top10_concat.tsv", type=str,
help="dev file")
parser.add_argument("--test_file", default="shared_task_dev_top10_concat.tsv", type=str,
help="test file")
parser.add_argument("--pred_model_dir", default=None, type=str,
help='model for prediction')
parser.add_argument("--test_result_dir", default='test_results.tsv', type=str,
help='path to store test result')
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels, finetuning_task=args.task_name)
if args.tokenizer_name:
tokenizer_name = args.tokenizer_name
elif args.model_name_or_path:
tokenizer_name = 'roberta-base'
tokenizer = tokenizer_class.from_pretrained(tokenizer_name, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path),
config=config)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
# Distributed and parallel training
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, ttype='train')
global_step, tr_loss = train(args, train_dataset, model, tokenizer, optimizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
print(checkpoint)
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, checkpoint=checkpoint, prefix=global_step)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if args.do_predict:
print('testing')
model = model_class.from_pretrained(args.pred_model_dir)
model.to(args.device)
evaluate(args, model, tokenizer, checkpoint=None, prefix='', mode='test')
return results
if __name__ == "__main__":
main()
|
CodeBERT/CodeBERT/codesearch/run_classifier.py/0
|
{
"file_path": "CodeBERT/CodeBERT/codesearch/run_classifier.py",
"repo_id": "CodeBERT",
"token_count": 13700
}
| 237 |
import os
import argparse
from evaluator.smooth_bleu import bleu_fromstr
import nltk
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True)
args = parser.parse_args()
ref = os.path.join(args.path, 'golds.txt')
hyp = os.path.join(args.path, 'preds.txt')
with open(ref, 'r') as f:
refs = f.readlines()
with open(hyp, 'r') as f:
hyps = f.readlines()
# refs = [ref.strip().lower() for ref in refs]
# hyps = [hyp.strip().lower() for hyp in hyps]
# bleu = bleu_fromstr(hyps, refs)
# print(bleu)
pred_nls, golds = hyps, refs
for i in range(len(pred_nls)):
chars = "(_)`."
for c in chars:
pred_nls[i] = pred_nls[i].replace(c, " " + c + " ")
pred_nls[i] = " ".join(pred_nls[i].split())
golds[i] = golds[i].replace(c, " " + c + " ")
golds[i] = " ".join(golds[i].split())
bleu = bleu_fromstr(pred_nls, golds, rmstop=False)
print(bleu)
# stopwords = open("stopwords.txt").readlines()
# stopwords = [stopword.strip() for stopword in stopwords]
# refs = [" ".join([word for word in ref.lower().split() if word not in stopwords]) for ref in refs]
# hyps = [" ".join([word for word in hyp.lower().split() if word not in stopwords]) for hyp in hyps]
# bleu = bleu_fromstr(hyps, refs)
# print(bleu)
if __name__ == '__main__':
main()
# s = "Can we use `mset.mirrorInfo()` directly?"
# chars = "(_)`."
# for c in chars:
# s = s.replace(c, " " + c + " ")
# print(nltk.wordpunct_tokenize(s))
|
CodeBERT/CodeReviewer/code/bleu.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/bleu.py",
"repo_id": "CodeBERT",
"token_count": 740
}
| 238 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
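# Example (illustrative): _get_ngrams(["the", "cat", "the"], max_order=2) returns
# Counter({("the",): 2, ("cat",): 1, ("the", "cat"): 1, ("cat", "the"): 1}).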
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
6-tuple with the BLEU score, the list of n-gram precisions, the brevity
penalty, the length ratio, the translation length and the reference length.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
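# Example (illustrative): with smooth=True a translation identical to its single
# reference, e.g. compute_bleu([[["the", "cat", "sat"]]], [["the", "cat", "sat"]], smooth=True),
# scores bleu = 1.0; without smoothing the same 3-token pair scores 0.0 because no
# 4-gram matches are possible and min(precisions) == 0.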
def _bleu(ref_file, trans_file, subword_option=None):
max_order = 4
smooth = True
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with open(reference_filename) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference_list.append(reference.strip().split())
per_segment_references.append(reference_list)
translations = []
with open(trans_file) as fh:
for line in fh:
translations.append(line.strip().split())
bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
return round(100 * bleu_score,2)
|
CodeBERT/CodeReviewer/code/evaluator/bleu.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/evaluator/bleu.py",
"repo_id": "CodeBERT",
"token_count": 1767
}
| 239 |
# batch size 6 for 16 GB GPU
mnt_dir="/home/codereview"
# You may change the following block for multiple gpu training
MASTER_HOST=localhost && echo MASTER_HOST: ${MASTER_HOST}
MASTER_PORT=23333 && echo MASTER_PORT: ${MASTER_PORT}
RANK=0 && echo RANK: ${RANK}
PER_NODE_GPU=1 && echo PER_NODE_GPU: ${PER_NODE_GPU}
WORLD_SIZE=1 && echo WORLD_SIZE: ${WORLD_SIZE}
NODES=1 && echo NODES: ${NODES}
NCCL_DEBUG=INFO
bash test_nltk.sh
python -m torch.distributed.launch --nproc_per_node ${PER_NODE_GPU} --node_rank=${RANK} --nnodes=${NODES} --master_addr=${MASTER_HOST} --master_port=${MASTER_PORT} ../run_test_msg.py \
--model_name_or_path microsoft/codereviewer \
--output_dir ../../save/gen \
--load_model_path ../../save/gen/checkpoint \
--output_dir empty \
--eval_file ref-test.jsonl \
--max_source_length 512 \
--max_target_length 128 \
--eval_batch_size 12 \
--mask_rate 0.15 \
--save_steps 1800 \
--beam_size 10 \
--log_steps 100 \
--train_steps 120000 \
--gpu_per_node=${PER_NODE_GPU} \
--node_index=${RANK} \
--seed 2233 \
--raw_input
|
CodeBERT/CodeReviewer/code/sh/test-msg.sh/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/sh/test-msg.sh",
"repo_id": "CodeBERT",
"token_count": 438
}
| 240 |
import re
from io import StringIO
import tokenize
def remove_comments_and_docstrings(source,lang):
if lang in ['python']:
"""
Returns 'source' minus comments and docstrings.
"""
io_obj = StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
ltext = tok[4]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
if start_col > 0:
out += token_string
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
temp=[]
for x in out.split('\n'):
if x.strip()!="":
temp.append(x)
return '\n'.join(temp)
elif lang in ['ruby']:
return source
else:
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
temp=[]
for x in re.sub(pattern, replacer, source).split('\n'):
if x.strip()!="":
temp.append(x)
return '\n'.join(temp)
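# Example (illustrative): for a C-like language,
#   remove_comments_and_docstrings("int a = 1; // init\nreturn a;", "c")
# replaces the // comment with a single space and keeps string literals intact,
# yielding roughly "int a = 1;\nreturn a;".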
def tree_to_token_index(root_node):
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
return [(root_node.start_point,root_node.end_point)]
else:
code_tokens=[]
for child in root_node.children:
code_tokens+=tree_to_token_index(child)
return code_tokens
def tree_to_variable_index(root_node,index_to_code):
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
index=(root_node.start_point,root_node.end_point)
_,code=index_to_code[index]
if root_node.type!=code:
return [(root_node.start_point,root_node.end_point)]
else:
return []
else:
code_tokens=[]
for child in root_node.children:
code_tokens+=tree_to_variable_index(child,index_to_code)
return code_tokens
def index_to_code_token(index,code):
start_point=index[0]
end_point=index[1]
if start_point[0]==end_point[0]:
s=code[start_point[0]][start_point[1]:end_point[1]]
else:
s=""
s+=code[start_point[0]][start_point[1]:]
for i in range(start_point[0]+1,end_point[0]):
s+=code[i]
s+=code[end_point[0]][:end_point[1]]
return s
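# Example (illustrative): with code = ["x = foo(y)"] (source given as a list of lines),
# index_to_code_token(((0, 4), (0, 7)), code) returns "foo".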
|
CodeBERT/GraphCodeBERT/clonedetection/parser/utils.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/clonedetection/parser/utils.py",
"repo_id": "CodeBERT",
"token_count": 1812
}
| 241 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from tree_sitter import Language, Parser
from .utils import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
def DFG_python(root_node,index_to_code,states):
assignment=['assignment','augmented_assignment','for_in_clause']
if_statement=['if_statement']
for_statement=['for_statement']
while_statement=['while_statement']
do_first_statement=['for_in_clause']
def_statement=['default_parameter']
states=states.copy()
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_python(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
if root_node.type=='for_in_clause':
right_nodes=[root_node.children[-1]]
left_nodes=[root_node.child_by_field_name('left')]
else:
if root_node.child_by_field_name('right') is None:
return [],states
left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']
right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']
if len(right_nodes)!=len(left_nodes):
left_nodes=[root_node.child_by_field_name('left')]
right_nodes=[root_node.child_by_field_name('right')]
if len(left_nodes)==0:
left_nodes=[root_node.child_by_field_name('left')]
if len(right_nodes)==0:
right_nodes=[root_node.child_by_field_name('right')]
DFG=[]
for node in right_nodes:
temp,states=DFG_python(node,index_to_code,states)
DFG+=temp
for left_node,right_node in zip(left_nodes,right_nodes):
left_tokens_index=tree_to_variable_index(left_node,index_to_code)
right_tokens_index=tree_to_variable_index(right_node,index_to_code)
temp=[]
for token1_index in left_tokens_index:
idx1,code1=index_to_code[token1_index]
temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],
[index_to_code[x][0] for x in right_tokens_index]))
states[code1]=[idx1]
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in ['elif_clause','else_clause']:
temp,current_states=DFG_python(child,index_to_code,current_states)
DFG+=temp
else:
temp,new_states=DFG_python(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
if tag is False:
others_states.append(states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for i in range(2):
right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']
left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']
if len(right_nodes)!=len(left_nodes):
left_nodes=[root_node.child_by_field_name('left')]
right_nodes=[root_node.child_by_field_name('right')]
if len(left_nodes)==0:
left_nodes=[root_node.child_by_field_name('left')]
if len(right_nodes)==0:
right_nodes=[root_node.child_by_field_name('right')]
for node in right_nodes:
temp,states=DFG_python(node,index_to_code,states)
DFG+=temp
for left_node,right_node in zip(left_nodes,right_nodes):
left_tokens_index=tree_to_variable_index(left_node,index_to_code)
right_tokens_index=tree_to_variable_index(right_node,index_to_code)
temp=[]
for token1_index in left_tokens_index:
idx1,code1=index_to_code[token1_index]
temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],
[index_to_code[x][0] for x in right_tokens_index]))
states[code1]=[idx1]
DFG+=temp
if root_node.children[-1].type=="block":
temp,states=DFG_python(root_node.children[-1],index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in while_statement:
DFG=[]
for i in range(2):
for child in root_node.children:
temp,states=DFG_python(child,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_python(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_python(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
def DFG_java(root_node,index_to_code,states):
assignment=['assignment_expression']
def_statement=['variable_declarator']
increment_statement=['update_expression']
if_statement=['if_statement','else']
for_statement=['for_statement']
enhanced_for_statement=['enhanced_for_statement']
while_statement=['while_statement']
do_first_statement=[]
states=states.copy()
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_java(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
left_nodes=root_node.child_by_field_name('left')
right_nodes=root_node.child_by_field_name('right')
DFG=[]
temp,states=DFG_java(right_nodes,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(left_nodes,index_to_code)
value_indexs=tree_to_variable_index(right_nodes,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in increment_statement:
DFG=[]
indexs=tree_to_variable_index(root_node,index_to_code)
for index1 in indexs:
idx1,code1=index_to_code[index1]
for index2 in indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
flag=False
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in if_statement and flag is False:
temp,current_states=DFG_java(child,index_to_code,current_states)
DFG+=temp
else:
flag=True
temp,new_states=DFG_java(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
if tag is False:
others_states.append(states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for child in root_node.children:
temp,states=DFG_java(child,index_to_code,states)
DFG+=temp
flag=False
for child in root_node.children:
if flag:
temp,states=DFG_java(child,index_to_code,states)
DFG+=temp
elif child.type=="local_variable_declaration":
flag=True
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in enhanced_for_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
body=root_node.child_by_field_name('body')
DFG=[]
for i in range(2):
temp,states=DFG_java(value,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
temp,states=DFG_java(body,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in while_statement:
DFG=[]
for i in range(2):
for child in root_node.children:
temp,states=DFG_java(child,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_java(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_java(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
def DFG_csharp(root_node,index_to_code,states):
assignment=['assignment_expression']
def_statement=['variable_declarator']
increment_statement=['postfix_unary_expression']
if_statement=['if_statement','else']
for_statement=['for_statement']
enhanced_for_statement=['for_each_statement']
while_statement=['while_statement']
do_first_statement=[]
states=states.copy()
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
if len(root_node.children)==2:
name=root_node.children[0]
value=root_node.children[1]
else:
name=root_node.children[0]
value=None
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_csharp(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
left_nodes=root_node.child_by_field_name('left')
right_nodes=root_node.child_by_field_name('right')
DFG=[]
temp,states=DFG_csharp(right_nodes,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(left_nodes,index_to_code)
value_indexs=tree_to_variable_index(right_nodes,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in increment_statement:
DFG=[]
indexs=tree_to_variable_index(root_node,index_to_code)
for index1 in indexs:
idx1,code1=index_to_code[index1]
for index2 in indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
flag=False
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in if_statement and flag is False:
temp,current_states=DFG_csharp(child,index_to_code,current_states)
DFG+=temp
else:
flag=True
temp,new_states=DFG_csharp(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
if tag is False:
others_states.append(states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for child in root_node.children:
temp,states=DFG_csharp(child,index_to_code,states)
DFG+=temp
flag=False
for child in root_node.children:
if flag:
temp,states=DFG_csharp(child,index_to_code,states)
DFG+=temp
elif child.type=="local_variable_declaration":
flag=True
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in enhanced_for_statement:
name=root_node.child_by_field_name('left')
value=root_node.child_by_field_name('right')
body=root_node.child_by_field_name('body')
DFG=[]
for i in range(2):
temp,states=DFG_csharp(value,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
temp,states=DFG_csharp(body,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in while_statement:
DFG=[]
for i in range(2):
for child in root_node.children:
temp,states=DFG_csharp(child,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_csharp(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_csharp(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
def DFG_ruby(root_node,index_to_code,states):
assignment=['assignment','operator_assignment']
if_statement=['if','elsif','else','unless','when']
for_statement=['for']
while_statement=['while_modifier','until']
do_first_statement=[]
def_statement=['keyword_parameter']
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
states=states.copy()
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_ruby(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']
right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']
if len(right_nodes)!=len(left_nodes):
left_nodes=[root_node.child_by_field_name('left')]
right_nodes=[root_node.child_by_field_name('right')]
if len(left_nodes)==0:
left_nodes=[root_node.child_by_field_name('left')]
if len(right_nodes)==0:
right_nodes=[root_node.child_by_field_name('right')]
if root_node.type=="operator_assignment":
left_nodes=[root_node.children[0]]
right_nodes=[root_node.children[-1]]
DFG=[]
for node in right_nodes:
temp,states=DFG_ruby(node,index_to_code,states)
DFG+=temp
for left_node,right_node in zip(left_nodes,right_nodes):
left_tokens_index=tree_to_variable_index(left_node,index_to_code)
right_tokens_index=tree_to_variable_index(right_node,index_to_code)
temp=[]
for token1_index in left_tokens_index:
idx1,code1=index_to_code[token1_index]
temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],
[index_to_code[x][0] for x in right_tokens_index]))
states[code1]=[idx1]
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in if_statement:
temp,current_states=DFG_ruby(child,index_to_code,current_states)
DFG+=temp
else:
temp,new_states=DFG_ruby(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
if tag is False:
others_states.append(states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for i in range(2):
left_nodes=[root_node.child_by_field_name('pattern')]
right_nodes=[root_node.child_by_field_name('value')]
assert len(right_nodes)==len(left_nodes)
for node in right_nodes:
temp,states=DFG_ruby(node,index_to_code,states)
DFG+=temp
for left_node,right_node in zip(left_nodes,right_nodes):
left_tokens_index=tree_to_variable_index(left_node,index_to_code)
right_tokens_index=tree_to_variable_index(right_node,index_to_code)
temp=[]
for token1_index in left_tokens_index:
idx1,code1=index_to_code[token1_index]
temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],
[index_to_code[x][0] for x in right_tokens_index]))
states[code1]=[idx1]
DFG+=temp
temp,states=DFG_ruby(root_node.child_by_field_name('body'),index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in while_statement:
DFG=[]
for i in range(2):
for child in root_node.children:
temp,states=DFG_ruby(child,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_ruby(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_ruby(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
def DFG_go(root_node,index_to_code,states):
assignment=['assignment_statement',]
def_statement=['var_spec']
increment_statement=['inc_statement']
if_statement=['if_statement','else']
for_statement=['for_statement']
enhanced_for_statement=[]
while_statement=[]
do_first_statement=[]
states=states.copy()
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_go(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
left_nodes=root_node.child_by_field_name('left')
right_nodes=root_node.child_by_field_name('right')
DFG=[]
temp,states=DFG_go(right_nodes,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(left_nodes,index_to_code)
value_indexs=tree_to_variable_index(right_nodes,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in increment_statement:
DFG=[]
indexs=tree_to_variable_index(root_node,index_to_code)
for index1 in indexs:
idx1,code1=index_to_code[index1]
for index2 in indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
flag=False
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in if_statement and flag is False:
temp,current_states=DFG_go(child,index_to_code,current_states)
DFG+=temp
else:
flag=True
temp,new_states=DFG_go(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
if tag is False:
others_states.append(states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in states:
if key not in new_states:
new_states[key]=states[key]
else:
new_states[key]+=states[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for child in root_node.children:
temp,states=DFG_go(child,index_to_code,states)
DFG+=temp
flag=False
for child in root_node.children:
if flag:
temp,states=DFG_go(child,index_to_code,states)
DFG+=temp
elif child.type=="for_clause":
if child.child_by_field_name('update') is not None:
temp,states=DFG_go(child.child_by_field_name('update'),index_to_code,states)
DFG+=temp
flag=True
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_go(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_go(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
def DFG_php(root_node,index_to_code,states):
assignment=['assignment_expression','augmented_assignment_expression']
def_statement=['simple_parameter']
increment_statement=['update_expression']
if_statement=['if_statement','else_clause']
for_statement=['for_statement']
enhanced_for_statement=['foreach_statement']
while_statement=['while_statement']
do_first_statement=[]
states=states.copy()
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('default_value')
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_php(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
left_nodes=root_node.child_by_field_name('left')
right_nodes=root_node.child_by_field_name('right')
DFG=[]
temp,states=DFG_php(right_nodes,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(left_nodes,index_to_code)
value_indexs=tree_to_variable_index(right_nodes,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in increment_statement:
DFG=[]
indexs=tree_to_variable_index(root_node,index_to_code)
for index1 in indexs:
idx1,code1=index_to_code[index1]
for index2 in indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
flag=False
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in if_statement and flag is False:
temp,current_states=DFG_php(child,index_to_code,current_states)
DFG+=temp
else:
flag=True
temp,new_states=DFG_php(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in states:
if key not in new_states:
new_states[key]=states[key]
else:
new_states[key]+=states[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for child in root_node.children:
temp,states=DFG_php(child,index_to_code,states)
DFG+=temp
flag=False
for child in root_node.children:
if flag:
temp,states=DFG_php(child,index_to_code,states)
DFG+=temp
elif child.type=="assignment_expression":
flag=True
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in enhanced_for_statement:
name=None
value=None
for child in root_node.children:
if child.type=='variable_name' and value is None:
value=child
elif child.type=='variable_name' and name is None:
name=child
break
body=root_node.child_by_field_name('body')
DFG=[]
for i in range(2):
temp,states=DFG_php(value,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
temp,states=DFG_php(body,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in while_statement:
DFG=[]
for i in range(2):
for child in root_node.children:
temp,states=DFG_php(child,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_php(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_php(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
def DFG_javascript(root_node,index_to_code,states):
assignment=['assignment_pattern','augmented_assignment_expression']
def_statement=['variable_declarator']
increment_statement=['update_expression']
if_statement=['if_statement','else']
for_statement=['for_statement']
enhanced_for_statement=[]
while_statement=['while_statement']
do_first_statement=[]
states=states.copy()
if (len(root_node.children)==0 or root_node.type=='string') and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
indexs=tree_to_variable_index(name,index_to_code)
for index in indexs:
idx,code=index_to_code[index]
DFG.append((code,idx,'comesFrom',[],[]))
states[code]=[idx]
return sorted(DFG,key=lambda x:x[1]),states
else:
name_indexs=tree_to_variable_index(name,index_to_code)
value_indexs=tree_to_variable_index(value,index_to_code)
temp,states=DFG_javascript(value,index_to_code,states)
DFG+=temp
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in assignment:
left_nodes=root_node.child_by_field_name('left')
right_nodes=root_node.child_by_field_name('right')
DFG=[]
temp,states=DFG_javascript(right_nodes,index_to_code,states)
DFG+=temp
name_indexs=tree_to_variable_index(left_nodes,index_to_code)
value_indexs=tree_to_variable_index(right_nodes,index_to_code)
for index1 in name_indexs:
idx1,code1=index_to_code[index1]
for index2 in value_indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in increment_statement:
DFG=[]
indexs=tree_to_variable_index(root_node,index_to_code)
for index1 in indexs:
idx1,code1=index_to_code[index1]
for index2 in indexs:
idx2,code2=index_to_code[index2]
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))
states[code1]=[idx1]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in if_statement:
DFG=[]
current_states=states.copy()
others_states=[]
flag=False
tag=False
if 'else' in root_node.type:
tag=True
for child in root_node.children:
if 'else' in child.type:
tag=True
if child.type not in if_statement and flag is False:
temp,current_states=DFG_javascript(child,index_to_code,current_states)
DFG+=temp
else:
flag=True
temp,new_states=DFG_javascript(child,index_to_code,states)
DFG+=temp
others_states.append(new_states)
others_states.append(current_states)
if tag is False:
others_states.append(states)
new_states={}
for dic in others_states:
for key in dic:
if key not in new_states:
new_states[key]=dic[key].copy()
else:
new_states[key]+=dic[key]
for key in states:
if key not in new_states:
new_states[key]=states[key]
else:
new_states[key]+=states[key]
for key in new_states:
new_states[key]=sorted(list(set(new_states[key])))
return sorted(DFG,key=lambda x:x[1]),new_states
elif root_node.type in for_statement:
DFG=[]
for child in root_node.children:
temp,states=DFG_javascript(child,index_to_code,states)
DFG+=temp
flag=False
for child in root_node.children:
if flag:
temp,states=DFG_javascript(child,index_to_code,states)
DFG+=temp
elif child.type=="variable_declaration":
flag=True
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
elif root_node.type in while_statement:
DFG=[]
for i in range(2):
for child in root_node.children:
temp,states=DFG_javascript(child,index_to_code,states)
DFG+=temp
dic={}
for x in DFG:
if (x[0],x[1],x[2]) not in dic:
dic[(x[0],x[1],x[2])]=[x[3],x[4]]
else:
dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))
dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))
DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]
return sorted(DFG,key=lambda x:x[1]),states
else:
DFG=[]
for child in root_node.children:
if child.type in do_first_statement:
temp,states=DFG_javascript(child,index_to_code,states)
DFG+=temp
for child in root_node.children:
if child.type not in do_first_statement:
temp,states=DFG_javascript(child,index_to_code,states)
DFG+=temp
return sorted(DFG,key=lambda x:x[1]),states
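# --- Hedged usage sketch (editor addition, not part of the original file) ---
# The DFG_* extractors above expect a tree-sitter root node plus an
# index_to_code mapping from (start_point, end_point) spans to
# (token position, token string). The helpers tree_to_token_index and
# index_to_code_token, the compiled grammar path 'build/my-languages.so', and
# the legacy tree_sitter Language(path, name) constructor are assumptions
# based on the companion parser utilities, not definitions made in this file.
def extract_dataflow_python(code):
    from tree_sitter import Language, Parser
    from utils import tree_to_token_index, index_to_code_token  # assumed companion module
    parser = Parser()
    parser.set_language(Language('build/my-languages.so', 'python'))
    root_node = parser.parse(bytes(code, 'utf8')).root_node
    # Map each token span to its position and surface string.
    tokens_index = tree_to_token_index(root_node)
    code_lines = code.split('\n')
    code_tokens = [index_to_code_token(x, code_lines) for x in tokens_index]
    index_to_code = {index: (i, tok) for i, (index, tok) in enumerate(zip(tokens_index, code_tokens))}
    dfg, _ = DFG_python(root_node, index_to_code, {})  # variable states start out empty
    return dfg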
|
CodeBERT/GraphCodeBERT/refinement/parser/DFG.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/refinement/parser/DFG.py",
"repo_id": "CodeBERT",
"token_count": 28921
}
| 242 |
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.3 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
CodeBERT/SECURITY.md/0
|
{
"file_path": "CodeBERT/SECURITY.md",
"repo_id": "CodeBERT",
"token_count": 701
}
| 243 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
class Seq2Seq(nn.Module):
"""
    Build Sequence-to-Sequence.
Parameters:
* `encoder`- encoder of seq2seq model. e.g. roberta
* `decoder`- decoder of seq2seq model. e.g. transformer
* `config`- configuration of encoder model.
* `beam_size`- beam size for beam search.
* `max_length`- max length of target for beam search.
        * `sos_id`- start-of-sequence symbol id in target for beam search.
        * `eos_id`- end-of-sequence symbol id in target for beam search.
"""
def __init__(self, encoder,decoder, config, beam_size=None, max_length=None, sos_id=None, eos_id=None):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder=decoder
self.config=config
self.register_buffer(
"bias", torch.tril(torch.ones((1024, 1024), dtype=torch.uint8)).view(1,1024, 1024)
)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.lm_head.weight = self.encoder.embeddings.word_embeddings.weight
self.lsm = nn.LogSoftmax(dim=-1)
self.beam_size = beam_size
self.max_length = max_length
self.sos_id = sos_id
self.eos_id = eos_id
def forward(self, source_ids, target_ids=None):
if target_ids is None:
return self.generate(source_ids)
mask = source_ids.ne(1)[:,None,:]*source_ids.ne(1)[:,:,None]
encoder_output = self.encoder(source_ids,attention_mask=mask,use_cache=True)
ids = torch.cat((source_ids,target_ids),-1)
mask = self.bias[:,source_ids.size(-1):ids.size(-1),:ids.size(-1)].bool()
mask = mask & ids[:,None,:].ne(1)
out = self.decoder(target_ids,attention_mask=mask,past_key_values=encoder_output.past_key_values).last_hidden_state
lm_logits = self.lm_head(out)
# Shift so that tokens < n predict n
active_loss = target_ids[..., 1:].ne(1).view(-1)
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = target_ids[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
shift_labels.view(-1)[active_loss])
outputs = loss,loss*active_loss.sum(),active_loss.sum()
return outputs
def generate(self, source_ids):
mask = source_ids.ne(1)[:,None,:]*source_ids.ne(1)[:,:,None]
encoder_output = self.encoder(source_ids,attention_mask=mask,use_cache=True)
preds = []
zero = torch.cuda.LongTensor(1).fill_(0)
source_len = list(source_ids.ne(1).sum(-1).cpu().numpy())
for i in range(source_ids.shape[0]):
context = [[x[i:i+1,:,:source_len[i]].repeat(self.beam_size,1,1,1) for x in y]
for y in encoder_output.past_key_values]
beam = Beam(self.beam_size,self.sos_id,self.eos_id)
input_ids = beam.getCurrentState()
context_ids = source_ids[i:i+1,:source_len[i]].repeat(self.beam_size,1)
for _ in range(self.max_length):
if beam.done():
break
ids = torch.cat((context_ids,input_ids),-1)
mask = self.bias[:,context_ids.size(-1):ids.size(-1),:ids.size(-1)].bool()
mask = mask & ids[:,None,:].ne(1)
out = self.decoder(input_ids,attention_mask=mask,past_key_values=context).last_hidden_state
hidden_states = out[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
input_ids = torch.cat((input_ids,beam.getCurrentState()),-1)
hyp = beam.getHyp(beam.getFinal())
pred = beam.buildTargetTokens(hyp)[:self.beam_size]
pred = [torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds = torch.cat(preds,0)
return preds
class Beam(object):
def __init__(self, size,sos,eos):
self.size = size
self.tt = torch.cuda
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(0)]
self.nextYs[0][0] = sos
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
Given prob over words for every last beam `wordLk` and attention
`attnOut`: Compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `attnOut`- attention at the last step
Returns: True if beam search is complete.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId // numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >=self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
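# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Seq2Seq above shares one transformer as encoder and decoder and beam-searches
# on GPU (Beam allocates torch.cuda tensors). The checkpoint name, the
# "<mask0>" start-of-sequence token, and the exact wiring below are assumptions
# modelled on the UniXcoder downstream scripts, not definitions made here.
#
#   from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
#   config = RobertaConfig.from_pretrained("microsoft/unixcoder-base")
#   config.is_decoder = True
#   tokenizer = RobertaTokenizer.from_pretrained("microsoft/unixcoder-base")
#   encoder = RobertaModel.from_pretrained("microsoft/unixcoder-base", config=config)
#   model = Seq2Seq(encoder=encoder, decoder=encoder, config=config,
#                   beam_size=10, max_length=128,
#                   sos_id=tokenizer.convert_tokens_to_ids(["<mask0>"])[0],
#                   eos_id=tokenizer.sep_token_id).cuda()
#   preds = model(source_ids)                    # no target_ids -> beam-search generation
#   loss, _, _ = model(source_ids, target_ids)   # with targets  -> training loss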
|
CodeBERT/UniXcoder/downstream-tasks/code-generation/model.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-generation/model.py",
"repo_id": "CodeBERT",
"token_count": 4178
}
| 244 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import sys
import argparse
import logging
import os
import pickle
import random
import torch
import json
import numpy as np
from tqdm import tqdm
from model import Model
from torch.nn import CrossEntropyLoss, MSELoss
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
import re
from io import StringIO
import tokenize
logger = logging.getLogger(__name__)
def remove_comments_and_docstrings(source,lang):
if lang in ['python']:
io_obj = StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
ltext = tok[4]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
if start_col > 0:
out += token_string
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
temp=[]
for x in out.split('\n'):
if x.strip()!="":
temp.append(x)
return '\n'.join(temp)
elif lang in ['ruby']:
return source
else:
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
temp=[]
for x in re.sub(pattern, replacer, source).split('\n'):
if x.strip()!="":
temp.append(x)
return '\n'.join(temp)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
code_tokens,
code_ids,
index,
label
):
self.code_tokens = code_tokens
self.code_ids = code_ids
self.index = index
self.label = label
def convert_examples_to_features(js,tokenizer,args,lang):
"""convert examples to token ids"""
if "func" in js:
code = " ".join(remove_comments_and_docstrings(js['func'],lang).split())
else:
code = " ".join(remove_comments_and_docstrings(js['code'],lang).split())
code_tokens = tokenizer.tokenize(code)[:args.code_length-4]
code_tokens =[tokenizer.cls_token,"<encoder-only>",tokenizer.sep_token]+code_tokens+[tokenizer.sep_token]
code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
padding_length = args.code_length - len(code_ids)
code_ids += [tokenizer.pad_token_id]*padding_length
return InputFeatures(code_tokens,code_ids,js["index"],int(js['label']))
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path, lang):
self.examples = []
data = []
with open(file_path) as f:
for i, line in enumerate(f):
line = line.strip()
js = json.loads(line)
data.append(js)
for js in data:
self.examples.append(convert_examples_to_features(js,tokenizer,args,lang))
for idx, example in enumerate(self.examples[:1]):
logger.info("*** Example ***")
logger.info("label: {}".format(example.label))
logger.info("code_tokens: {}".format([x.replace('\u0120','_') for x in example.code_tokens]))
logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids))))
self.label_examples={}
for e in self.examples:
if e.label not in self.label_examples:
self.label_examples[e.label]=[]
self.label_examples[e.label].append(e)
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return (torch.tensor(self.examples[i].code_ids),torch.tensor(self.examples[i].label))
def evaluate(args, model, tokenizer, file_name, candidate_file_name):
query_dataset = TextDataset(tokenizer, args, file_name, args.query_lang)
query_sampler = SequentialSampler(query_dataset)
query_dataloader = DataLoader(query_dataset, sampler=query_sampler, batch_size=args.eval_batch_size,num_workers=4)
candidate_dataset = TextDataset(tokenizer, args, candidate_file_name, args.candidate_lang)
candidate_sampler = SequentialSampler(candidate_dataset)
candidate_dataloader = DataLoader(candidate_dataset, sampler=candidate_sampler, batch_size=args.eval_batch_size, num_workers=4)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num Query = %d", len(query_dataset))
logger.info(" Num Candidate = %d", len(candidate_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
query_vecs = []
query_labels = []
candidate_vecs = []
candidate_labels = []
# Obtain query vectors
for batch in query_dataloader:
code_inputs = batch[0].to(args.device)
label = batch[1].to(args.device)
with torch.no_grad():
code_vec = model(code_inputs=code_inputs)
query_vecs.append(code_vec.cpu().numpy())
query_labels.append(label.cpu().numpy())
# Obtain candidate vectors
for batch in candidate_dataloader:
code_inputs = batch[0].to(args.device)
label = batch[1].to(args.device)
with torch.no_grad():
code_vec = model(code_inputs=code_inputs)
candidate_vecs.append(code_vec.cpu().numpy())
candidate_labels.append(label.cpu().numpy())
model.train()
# Calculate cosine score
query_vecs = np.concatenate(query_vecs,0)
candidate_vecs = np.concatenate(candidate_vecs,0)
query_labels = list(np.concatenate(query_labels,0))
candidate_labels = list(np.concatenate(candidate_labels,0))
candidate_indexs =[candidate_dataset.examples[i].index for i in range(len(candidate_dataset))]
query_indexs = [query_dataset.examples[i].index for i in range(len(query_dataset))]
scores = np.matmul(query_vecs,candidate_vecs.T)
# Calculate MAP score
sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
MAP=[]
results = {}
for i in range(scores.shape[0]):
cont=0
label=int(query_labels[i])
query_index = query_indexs[i]
results[query_index] = [label,candidate_labels[sort_ids[i][0]],candidate_indexs[sort_ids[i][0]]]
Avep = []
for j,index in enumerate(list(sort_ids[i])):
if query_index==candidate_indexs[index]:
cont+=1
continue
if int(candidate_labels[index])==label:
Avep.append((len(Avep)+1)/(j+1-cont))
if len(Avep)!=0:
MAP.append(sum(Avep)/len(Avep))
result = {
"eval_map":float(np.mean(MAP))
}
return result
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--query_data_file", default=None, type=str, required=False,
help="The input training data file (a json file).")
parser.add_argument("--candidate_data_file", default=None, type=str, required=False,
help="The input training data file (a json file).")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--query_lang", default=None, type=str, required=False,
help="Programming language of query.")
parser.add_argument("--candidate_lang", default=None, type=str,
help="Programming language of candidate.")
parser.add_argument("--code_length", default=256, type=int,
help="Optional Code input sequence length after tokenization.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size for evaluation.")
#print arguments
args = parser.parse_args()
#set log
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO )
#set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
logger.info("device: %s, n_gpu: %s",device, args.n_gpu)
#build model
tokenizer = RobertaTokenizer.from_pretrained(args.model_name_or_path)
config = RobertaConfig.from_pretrained(args.model_name_or_path)
model = RobertaModel.from_pretrained(args.model_name_or_path)
model=Model(model)
logger.info("Training/evaluation parameters %s", args)
model.to(args.device)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
result=evaluate(args, model, tokenizer,args.query_data_file,args.candidate_data_file)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key]*100,2)))
if __name__ == "__main__":
main()
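# Hedged example invocation (editor addition). The flags below are taken from the
# argparse definitions above; the model name and data paths are assumptions:
#   python run.py \
#       --model_name_or_path microsoft/unixcoder-base \
#       --query_data_file dataset/ruby_with_func.jsonl \
#       --candidate_data_file dataset/python_with_func.jsonl \
#       --query_lang ruby --candidate_lang python \
#       --code_length 512 --eval_batch_size 256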
|
CodeBERT/UniXcoder/downstream-tasks/zero-shot-search/run.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/zero-shot-search/run.py",
"repo_id": "CodeBERT",
"token_count": 5116
}
| 245 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import pickle
class Tools:
@staticmethod
def load_jsonl(file_path):
json_objects = []
with open(file_path, 'r', encoding='utf8') as f:
for line in f:
json_objects.append(json.loads(line.strip()))
return json_objects
@staticmethod
def load_tasks(task_path):
result = dict()
lines = Tools.load_jsonl(task_path)
for line in lines:
result[line['task_id']] = line
return result
@staticmethod
def dump_pickle(path, content):
with open(path, 'wb') as f:
pickle.dump(content, f)
@staticmethod
def load_pickle(path):
with open(path, 'rb') as f:
return pickle.load(f)
@staticmethod
def write_file(path, content):
with open(path, 'w', encoding='utf8') as f:
f.write(content)
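# Hedged usage sketch (editor addition); the file paths are assumptions:
#   tasks = Tools.load_tasks('data/HumanEval_tasks.jsonl')   # dict keyed by task_id
#   Tools.dump_pickle('tasks.pkl', tasks)
#   assert Tools.load_pickle('tasks.pkl') == tasks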
|
CodeT/CodeT/src/io_utils.py/0
|
{
"file_path": "CodeT/CodeT/src/io_utils.py",
"repo_id": "CodeT",
"token_count": 454
}
| 246 |
$schema: http://azureml/sdk-2-0/CommandComponent.json
name: microsoft.msra.dki.verifier_data_preparing
display_name: Verifier Data Preparing
version: 0.1.8-dev2
type: CommandComponent
is_deterministic: true
description: Verifier Data Preparing
tags: {category: Verifier Data Preparing, contact: [email protected]}
inputs:
generator_result_file:
type: path
optional: false
description: The generation result file generated by GPT-3 onebox or other modules
random_seed:
type: integer
description: Random seed
default: 233
split:
type: enum
description: train / dev. randomly shuffle train dataset.
default: train
enum:
- train
- dev
dataset_name:
type: enum
description: Name of the dataset to be run. GSM8K/CLUTRR/strategyQA
default: GSM8K
enum:
- GSM8K
- CLUTRR
- strategyQA
text_entailment_model_name:
type: string
description: The text entailment model that is used in step labeling, such as roberta-large-mnli, facebook/bart-large-mnli, etc.
default: roberta-large-mnli
text_entailment_batch_size:
type: number
description: text entailment batch size
default: 256
outputs:
output_dir:
type: path
optional: false
description: The output dir that you want to save the output data, which is the verifier training module's input.
environment:
docker:
image: mcr.microsoft.com/azureml/pytorch-1.9-ubuntu18.04-py37-cuda11.0.3-gpu-inference:20220516.v3
os: Linux
conda:
conda_dependencies:
name: project_environment
channels:
- defaults
- pytorch
dependencies:
- python=3.7
- pip=20.0
- pip:
- torch==1.7.0+cu110
- -f https://download.pytorch.org/whl/torch_stable.html
- multiset
- tqdm
- nltk
- transformers==4.6.0
- datasets==1.11.0
- huggingface-hub==0.0.8
successful_return_code: Zero
meta:
requireGpu: False
command: >-
cd src &&
python verifier_data_prepare.py
--generator_result_file {inputs.generator_result_file}
--output_dir {outputs.output_dir}
--split {inputs.split}
--random_seed {inputs.random_seed}
--dataset_name {inputs.dataset_name}
--text_entailment_model_name {inputs.text_entailment_model_name}
--text_entailment_batch_size {inputs.text_entailment_batch_size}
|
CodeT/DIVERSE/code/verifier_data_prepare.yaml/0
|
{
"file_path": "CodeT/DIVERSE/code/verifier_data_prepare.yaml",
"repo_id": "CodeT",
"token_count": 955
}
| 247 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import editdistance
from collections import defaultdict
from utils import Tools
def compute_EM(target, predictions, passk):
target_lines = [line.strip() for line in target.splitlines() if line.strip()]
EM_scores = []
for prediction in predictions[:passk]:
prediction_lines = [line.strip() for line in prediction.splitlines() if line.strip()][:len(target_lines)]
if len(target_lines) != len(prediction_lines):
EM_scores.append(0)
continue
if target_lines == prediction_lines:
EM_scores.append(1)
continue
EM_scores.append(0)
return any(EM_scores)
def compute_ES(target, predictions, passk):
target_lines = [line.strip() for line in target.splitlines() if line.strip()]
target_str = '\n'.join(target_lines)
ES_scores = []
for prediction in predictions[:passk]:
prediction_lines = [line.strip() for line in prediction.splitlines() if line.strip()][:len(target_lines)]
prediction_str = '\n'.join(prediction_lines)
ES_scores.append(
1 - (editdistance.eval(target_str, prediction_str) / max(len(target_str), len(prediction_str)))
)
return max(ES_scores)
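# Hedged illustration (editor addition): on toy strings, compute_EM is a boolean
# exact line-by-line match over the top-k predictions, while compute_ES is
# 1 - normalized Levenshtein distance over the joined lines, e.g.
#   compute_EM("x = 1", ["x = 1", "x = 2"], passk=1)  -> True
#   compute_ES("x = 1", ["x = 2"], passk=1)           -> 0.8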
def compute_score_by_repo_with_metadata(repos, lines, stype, passk=1):
scores = defaultdict(list)
for line in lines:
repo = line['metadata']['task_id'].split('/')[0]
if repo not in repos:
continue
samples = [line['choices'][i]['text'] for i in range(len(line['choices']))]
if stype == 'EM':
score = compute_EM(line['metadata']['ground_truth'], samples, passk)
elif stype == 'ES':
score = compute_ES(line['metadata']['ground_truth'], samples, passk)
scores[repo].append(score)
avg_scores = {repo: round(sum(scores[repo]) / len(scores[repo]), 4) for repo in scores}
repo_count = {repo: len(scores[repo]) for repo in scores}
print(stype)
for repo in avg_scores.keys():
print(f'{avg_scores[repo]}\t{repo_count[repo]}\t{repo}')
if __name__ == '__main__':
repos = [
'huggingface_diffusers',
'nerfstudio-project_nerfstudio',
'awslabs_fortuna',
'huggingface_evaluate',
'google_vizier',
'alibaba_FederatedScope',
'pytorch_rl',
'opendilab_ACE',
]
'''compute single prediction'''
file_path = 'output/line-rgrg-ada-ws-20-ss-2_samples.0.jsonl'
compute_score_by_repo_with_metadata(repos, Tools.load_jsonl(file_path), 'EM', passk=1)
compute_score_by_repo_with_metadata(repos, Tools.load_jsonl(file_path), 'ES', passk=1)
|
CodeT/RepoCoder/compute_score.py/0
|
{
"file_path": "CodeT/RepoCoder/compute_score.py",
"repo_id": "CodeT",
"token_count": 1165
}
| 248 |
#!/bin/zsh
#
# A shell script to setup Codex CLI for zsh
#
# You can pass the following arguments to the script:
# -o: Your OpenAI organization id.
# -k: Your OpenAI API key.
# -e: The OpenAI engine id that provides access to a model.
#
# For example:
# ./zsh_setup.sh -o <YOUR_ORG_ID> -k <YOUR_API_KEY> -e <ENGINE_ID>
#
set -e
# Call OpenAI API with the given settings to verify everything is in order
validateSettings()
{
echo -n "*** Testing Open AI access... "
local TEST=$(curl -s 'https://api.openai.com/v1/engines' -H "Authorization: Bearer $secret" -H "OpenAI-Organization: $orgId" -w '%{http_code}')
local STATUS_CODE=$(echo "$TEST"|tail -n 1)
if [ $STATUS_CODE -ne 200 ]; then
echo "ERROR [$STATUS_CODE]"
echo "Failed to access OpenAI API, result: $STATUS_CODE"
echo "Please check your OpenAI API key (https://beta.openai.com/account/api-keys)"
echo "and Organization ID (https://beta.openai.com/account/org-settings)."
echo "*************"
exit 1
fi
local ENGINE_FOUND=$(echo "$TEST"|grep '"id"'|grep "\"$engineId\"")
if [ -z "$ENGINE_FOUND" ]; then
echo "ERROR"
echo "Cannot find OpenAI engine: $engineId"
echo "Please check the OpenAI engine id (https://beta.openai.com/docs/engines/codex-series-private-beta)."
echo "*************"
exit 1
fi
echo "OK ***"
}
# Append settings and 'Ctrl + G' binding in .zshrc
configureZsh()
{
# Remove previous settings
sed -i '' '/### Codex CLI setup - start/,/### Codex CLI setup - end/d' $zshrcPath
echo "Removed previous settings in $zshrcPath if present"
# Update the latest settings
echo "### Codex CLI setup - start" >> $zshrcPath
echo "export CODEX_CLI_PATH=$CODEX_CLI_PATH" >> $zshrcPath
echo "source \"\$CODEX_CLI_PATH/scripts/zsh_plugin.zsh\"" >> $zshrcPath
echo "bindkey '^G' create_completion" >> $zshrcPath
echo "### Codex CLI setup - end" >> $zshrcPath
echo "Added settings in $zshrcPath"
}
# Store API key and other settings in `openaiapirc`
configureApp()
{
echo "[openai]" > $openAIConfigPath
echo "organization_id=$orgId" >> $openAIConfigPath
echo "secret_key=$secret" >> $openAIConfigPath
echo "engine=$engineId" >> $openAIConfigPath
echo "Updated OpenAI configuration file ($openAIConfigPath) with secrets"
# Change file mode of codex_query.py to allow execution
chmod +x "$CODEX_CLI_PATH/src/codex_query.py"
echo "Allow execution of $CODEX_CLI_PATH/src/codex_query.py"
}
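# For reference, after configureApp runs, the file at $openAIConfigPath contains
# exactly the four lines written above, e.g.:
#   [openai]
#   organization_id=<YOUR_ORG_ID>
#   secret_key=<YOUR_API_KEY>
#   engine=<ENGINE_ID>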
# Start installation
# Use zparseopts to parse parameters
zmodload zsh/zutil
zparseopts -E -D -- \
o:=o_orgId \
e:=o_engineId \
k:=o_key
if (( ${+o_orgId[2]} )); then
orgId=${o_orgId[2]}
else
echo -n 'OpenAI Organization Id: '; read orgId
fi
if (( ${+o_engineId[2]} )); then
engineId=${o_engineId[2]}
else
echo -n 'OpenAI Engine Id: '; read engineId
fi
if (( ${+o_key[2]} )); then
secret=${o_key[2]}
else
# Prompt user for OpenAI access key
read -rs 'secret?OpenAI access key:'
echo -e "\n"
fi
# Detect Codex CLI folder path
CODEX_CLI_PATH="$( cd "$( dirname "$0" )" && cd .. && pwd )"
echo "CODEX_CLI_PATH is $CODEX_CLI_PATH"
validateSettings
openAIConfigPath="$CODEX_CLI_PATH/src/openaiapirc"
zshrcPath="$HOME/.zshrc"
configureZsh
configureApp
echo -e "*** Setup complete! ***\n";
echo "***********************************************"
echo "Open a new zsh terminal, type '#' followed by"
echo "your natural language command and hit Ctrl + G!"
echo "***********************************************"
|
Codex-CLI/scripts/zsh_setup.sh/0
|
{
"file_path": "Codex-CLI/scripts/zsh_setup.sh",
"repo_id": "Codex-CLI",
"token_count": 1449
}
| 249 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: large_person_group_person_face.py
Description: Large Person Group Person Face section of the Cognitive Face API.
"""
from . import util
def add(image,
large_person_group_id,
person_id,
user_data=None,
target_face=None):
"""Add a representative face to a person for identification. The input face
is specified as an image with a `target_face` rectangle. It returns a
`persisted_face_id` representing the added face and this
`persisted_face_id` will not expire.
Args:
image: A URL or a file path or a file-like object represents an image.
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: `person_id` of the target person.
user_data: Optional parameter. User-specified data about the face list
for any purpose. The maximum length is 1KB.
target_face: Optional parameter. A face rectangle to specify the target
face to be added into the face list, in the format of
"left,top,width,height". E.g. "10,10,100,100". If there are more
than one faces in the image, `target_face` is required to specify
which face to add. No `target_face` means there is only one face
detected in the entire image.
Returns:
A new `persisted_face_id`.
"""
url = 'largepersongroups/{}/persons/{}/persistedFaces'.format(
large_person_group_id, person_id)
headers, data, json = util.parse_image(image)
params = {
'userData': user_data,
'targetFace': target_face,
}
return util.request(
'POST', url, headers=headers, params=params, json=json, data=data)
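# Hypothetical usage sketch (illustrative only; assumes the subscription key has
# been configured elsewhere in the package, e.g. via `util.Key.set`):
#   persisted_face_id = add('photo.jpg', 'my_large_group', person_id,
#                           target_face='10,10,100,100')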
def delete(large_person_group_id, person_id, persisted_face_id):
"""Delete a face from a person. Relative image for the persisted face will
also be deleted.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: `person_id` of the target person.
persisted_face_id: The persisted face to remove.
Returns:
An empty response body.
"""
url = 'largepersongroups/{}/persons/{}/persistedFaces/{}'.format(
large_person_group_id, person_id, persisted_face_id)
return util.request('DELETE', url)
def get(large_person_group_id, person_id, persisted_face_id):
"""Retrieve information about a persisted face (specified by
`persisted_face_ids`, `person_id` and its belonging
`large_person_group_id`).
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: `person_id` of the target person.
persisted_face_id: The `persisted_face_id` of the target persisted face
of the person.
Returns:
The target persisted face's information (`persisted_face_id` and
`user_data`).
"""
url = 'largepersongroups/{}/persons/{}/persistedFaces/{}'.format(
large_person_group_id, person_id, persisted_face_id)
return util.request('GET', url)
def update(large_person_group_id, person_id, persisted_face_id, user_data):
"""Update a person persisted face's `user_data` field.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: `person_id` of the target person.
persisted_face_id: The `persisted_face_id` of the target persisted face
of the person.
user_data: Attach `user_data` to person's persisted face. The size
limit is 1KB.
Returns:
An empty response body.
"""
url = 'largepersongroups/{}/persons/{}/persistedFaces/{}'.format(
large_person_group_id, person_id, persisted_face_id)
json = {
'userData': user_data,
}
return util.request('PATCH', url, json=json)
|
Cognitive-Face-Python/cognitive_face/large_person_group_person_face.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/large_person_group_person_face.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1559
}
| 250 |
export CUDA_VISIBLE_DEVICES=3
python t5_run_train.py \
--model_name_or_path t5-base \
--subtask Com \
--method ContrastExp \
--train_file pretrain_contrast \
--max_steps 100000 \
--save_steps 100000 \
--batch_size 8 \
--ebatch_size 16 \
--gas 1 \
--seed 1 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Com_ContrastExp_pretrain.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_ContrastExp_pretrain.sh",
"repo_id": "ContextualSP",
"token_count": 106
}
| 251 |
import absl
import nltk
import numpy
import six
import datasets
import pdb
_CITATION = ""
_DESCRIPTION = ""
_KWARGS_DESCRIPTION = ""
def simple_accuracy(preds, labels):
correct_list = [1. if pred == label else 0. for (pred, label) in zip(preds, labels)]
return sum(correct_list) / len(correct_list)
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class seq_acc(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=[],
reference_urls=[],
)
def _compute(self, predictions, references):
accuracy = simple_accuracy(predictions, references)
result = {'accuracy': accuracy}
return result
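# Hypothetical usage sketch (the local script path is an assumption):
#   metric = datasets.load_metric('./seq_acc')
#   metric.compute(predictions=['walk jump'], references=['walk jump'])
#   # -> {'accuracy': 1.0}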
|
ContextualSP/abstraction_probing/code/t5_code/seq_acc/seq_acc.py/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/seq_acc/seq_acc.py",
"repo_id": "ContextualSP",
"token_count": 487
}
| 252 |
# Copyright (c) Microsoft. All rights reserved.
from enum import IntEnum
class TaskType(IntEnum):
Classification = 1
Regression = 2
Ranking = 3
Span = 4 # squad v1
SpanYN = 5 # squad v2
SeqenceLabeling = 6
MaskLM = 7
SpanSeqenceLabeling = 8
SeqenceGeneration = 9
ClozeChoice = 10
class DataFormat(IntEnum):
PremiseOnly = 1
PremiseAndOneHypothesis = 2
PremiseAndMultiHypothesis = 3
Seqence = 4
MLM = 5
CLUE_CLASSIFICATION = 6
CLUE_SPAN = 7
CLUE_SEQ = 8
CLUE_GEN = 9 # generation
    ClozeChoice = 10
class EncoderModelType(IntEnum):
BERT = 1
ROBERTA = 2
XLNET = 3
SAN = 4
XLM = 5
DEBERTA = 6
ELECTRA = 7
T5 = 8
T5G = 9
|
ContextualSP/adaptershare/data_utils/task_def.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/task_def.py",
"repo_id": "ContextualSP",
"token_count": 333
}
| 253 |
import os
import argparse
import random
from sys import path
path.append(os.getcwd())
from experiments.common_utils import dump_rows
from data_utils.task_def import DataFormat
from data_utils.log_wrapper import create_logger
from experiments.glue.glue_utils import *
logger = create_logger(__name__, to_disk=True, log_file="glue_prepro.log")
def parse_args():
parser = argparse.ArgumentParser(
description="Preprocessing GLUE/SNLI/SciTail dataset."
)
parser.add_argument("--seed", type=int, default=13)
parser.add_argument("--root_dir", type=str, default="data")
parser.add_argument(
"--old_glue",
action="store_true",
help="whether it is old GLUE, refer official GLUE webpage for details",
)
args = parser.parse_args()
return args
def main(args):
is_old_glue = args.old_glue
root = args.root_dir
assert os.path.exists(root)
######################################
# GLUE tasks
######################################
multi_train_path = os.path.join(root, "MNLI/train.tsv")
multi_dev_matched_path = os.path.join(root, "MNLI/dev_matched.tsv")
multi_dev_mismatched_path = os.path.join(root, "MNLI/dev_mismatched.tsv")
multi_test_matched_path = os.path.join(root, "MNLI/test_matched.tsv")
multi_test_mismatched_path = os.path.join(root, "MNLI/test_mismatched.tsv")
mrpc_train_path = os.path.join(root, "MRPC/train.tsv")
mrpc_dev_path = os.path.join(root, "MRPC/dev.tsv")
mrpc_test_path = os.path.join(root, "MRPC/test.tsv")
qnli_train_path = os.path.join(root, "QNLI/train.tsv")
qnli_dev_path = os.path.join(root, "QNLI/dev.tsv")
qnli_test_path = os.path.join(root, "QNLI/test.tsv")
qqp_train_path = os.path.join(root, "QQP/train.tsv")
qqp_dev_path = os.path.join(root, "QQP/dev.tsv")
qqp_test_path = os.path.join(root, "QQP/test.tsv")
rte_train_path = os.path.join(root, "RTE/train.tsv")
rte_dev_path = os.path.join(root, "RTE/dev.tsv")
rte_test_path = os.path.join(root, "RTE/test.tsv")
wnli_train_path = os.path.join(root, "WNLI/train.tsv")
wnli_dev_path = os.path.join(root, "WNLI/dev.tsv")
wnli_test_path = os.path.join(root, "WNLI/test.tsv")
stsb_train_path = os.path.join(root, "STS-B/train.tsv")
stsb_dev_path = os.path.join(root, "STS-B/dev.tsv")
stsb_test_path = os.path.join(root, "STS-B/test.tsv")
sst_train_path = os.path.join(root, "SST-2/train.tsv")
sst_dev_path = os.path.join(root, "SST-2/dev.tsv")
sst_test_path = os.path.join(root, "SST-2/test.tsv")
cola_train_path = os.path.join(root, "CoLA/train.tsv")
cola_dev_path = os.path.join(root, "CoLA/dev.tsv")
cola_test_path = os.path.join(root, "CoLA/test.tsv")
######################################
# Loading DATA
######################################
multinli_train_data = load_mnli(multi_train_path)
multinli_matched_dev_data = load_mnli(multi_dev_matched_path)
multinli_mismatched_dev_data = load_mnli(multi_dev_mismatched_path)
multinli_matched_test_data = load_mnli(multi_test_matched_path, is_train=False)
multinli_mismatched_test_data = load_mnli(
multi_test_mismatched_path, is_train=False
)
logger.info("Loaded {} MNLI train samples".format(len(multinli_train_data)))
logger.info(
"Loaded {} MNLI matched dev samples".format(len(multinli_matched_dev_data))
)
logger.info(
"Loaded {} MNLI mismatched dev samples".format(
len(multinli_mismatched_dev_data)
)
)
logger.info(
"Loaded {} MNLI matched test samples".format(len(multinli_matched_test_data))
)
logger.info(
"Loaded {} MNLI mismatched test samples".format(
len(multinli_mismatched_test_data)
)
)
mrpc_train_data = load_mrpc(mrpc_train_path)
mrpc_dev_data = load_mrpc(mrpc_dev_path)
mrpc_test_data = load_mrpc(mrpc_test_path, is_train=False)
logger.info("Loaded {} MRPC train samples".format(len(mrpc_train_data)))
logger.info("Loaded {} MRPC dev samples".format(len(mrpc_dev_data)))
logger.info("Loaded {} MRPC test samples".format(len(mrpc_test_data)))
qnli_train_data = load_qnli(qnli_train_path)
qnli_dev_data = load_qnli(qnli_dev_path)
qnli_test_data = load_qnli(qnli_test_path, is_train=False)
logger.info("Loaded {} QNLI train samples".format(len(qnli_train_data)))
logger.info("Loaded {} QNLI dev samples".format(len(qnli_dev_data)))
logger.info("Loaded {} QNLI test samples".format(len(qnli_test_data)))
if is_old_glue:
random.seed(args.seed)
qnnli_train_data = load_qnnli(qnli_train_path)
qnnli_dev_data = load_qnnli(qnli_dev_path)
qnnli_test_data = load_qnnli(qnli_test_path, is_train=False)
logger.info("Loaded {} QNLI train samples".format(len(qnnli_train_data)))
logger.info("Loaded {} QNLI dev samples".format(len(qnnli_dev_data)))
logger.info("Loaded {} QNLI test samples".format(len(qnnli_test_data)))
qqp_train_data = load_qqp(qqp_train_path)
qqp_dev_data = load_qqp(qqp_dev_path)
qqp_test_data = load_qqp(qqp_test_path, is_train=False)
logger.info("Loaded {} QQP train samples".format(len(qqp_train_data)))
logger.info("Loaded {} QQP dev samples".format(len(qqp_dev_data)))
logger.info("Loaded {} QQP test samples".format(len(qqp_test_data)))
rte_train_data = load_rte(rte_train_path)
rte_dev_data = load_rte(rte_dev_path)
rte_test_data = load_rte(rte_test_path, is_train=False)
logger.info("Loaded {} RTE train samples".format(len(rte_train_data)))
logger.info("Loaded {} RTE dev samples".format(len(rte_dev_data)))
logger.info("Loaded {} RTE test samples".format(len(rte_test_data)))
wnli_train_data = load_wnli(wnli_train_path)
wnli_dev_data = load_wnli(wnli_dev_path)
wnli_test_data = load_wnli(wnli_test_path, is_train=False)
logger.info("Loaded {} WNLI train samples".format(len(wnli_train_data)))
logger.info("Loaded {} WNLI dev samples".format(len(wnli_dev_data)))
logger.info("Loaded {} WNLI test samples".format(len(wnli_test_data)))
sst_train_data = load_sst(sst_train_path)
sst_dev_data = load_sst(sst_dev_path)
sst_test_data = load_sst(sst_test_path, is_train=False)
logger.info("Loaded {} SST train samples".format(len(sst_train_data)))
logger.info("Loaded {} SST dev samples".format(len(sst_dev_data)))
logger.info("Loaded {} SST test samples".format(len(sst_test_data)))
cola_train_data = load_cola(cola_train_path, header=False)
cola_dev_data = load_cola(cola_dev_path, header=False)
cola_test_data = load_cola(cola_test_path, is_train=False)
logger.info("Loaded {} COLA train samples".format(len(cola_train_data)))
logger.info("Loaded {} COLA dev samples".format(len(cola_dev_data)))
logger.info("Loaded {} COLA test samples".format(len(cola_test_data)))
stsb_train_data = load_stsb(stsb_train_path)
stsb_dev_data = load_stsb(stsb_dev_path)
stsb_test_data = load_stsb(stsb_test_path, is_train=False)
logger.info("Loaded {} STS-B train samples".format(len(stsb_train_data)))
logger.info("Loaded {} STS-B dev samples".format(len(stsb_dev_data)))
logger.info("Loaded {} STS-B test samples".format(len(stsb_test_data)))
canonical_data_suffix = "canonical_data"
canonical_data_root = os.path.join(root, canonical_data_suffix)
if not os.path.isdir(canonical_data_root):
os.mkdir(canonical_data_root)
# BUILD MNLI
multinli_train_fout = os.path.join(canonical_data_root, "mnli_train.tsv")
multinli_matched_dev_fout = os.path.join(
canonical_data_root, "mnli_matched_dev.tsv"
)
multinli_mismatched_dev_fout = os.path.join(
canonical_data_root, "mnli_mismatched_dev.tsv"
)
multinli_matched_test_fout = os.path.join(
canonical_data_root, "mnli_matched_test.tsv"
)
multinli_mismatched_test_fout = os.path.join(
canonical_data_root, "mnli_mismatched_test.tsv"
)
dump_rows(
multinli_train_data, multinli_train_fout, DataFormat.PremiseAndOneHypothesis
)
dump_rows(
multinli_matched_dev_data,
multinli_matched_dev_fout,
DataFormat.PremiseAndOneHypothesis,
)
dump_rows(
multinli_mismatched_dev_data,
multinli_mismatched_dev_fout,
DataFormat.PremiseAndOneHypothesis,
)
dump_rows(
multinli_matched_test_data,
multinli_matched_test_fout,
DataFormat.PremiseAndOneHypothesis,
)
dump_rows(
multinli_mismatched_test_data,
multinli_mismatched_test_fout,
DataFormat.PremiseAndOneHypothesis,
)
logger.info("done with mnli")
mrpc_train_fout = os.path.join(canonical_data_root, "mrpc_train.tsv")
mrpc_dev_fout = os.path.join(canonical_data_root, "mrpc_dev.tsv")
mrpc_test_fout = os.path.join(canonical_data_root, "mrpc_test.tsv")
dump_rows(mrpc_train_data, mrpc_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(mrpc_dev_data, mrpc_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(mrpc_test_data, mrpc_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with mrpc")
qnli_train_fout = os.path.join(canonical_data_root, "qnli_train.tsv")
qnli_dev_fout = os.path.join(canonical_data_root, "qnli_dev.tsv")
qnli_test_fout = os.path.join(canonical_data_root, "qnli_test.tsv")
dump_rows(qnli_train_data, qnli_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(qnli_dev_data, qnli_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(qnli_test_data, qnli_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with qnli")
if is_old_glue:
qnli_train_fout = os.path.join(canonical_data_root, "qnnli_train.tsv")
qnli_dev_fout = os.path.join(canonical_data_root, "qnnli_dev.tsv")
qnli_test_fout = os.path.join(canonical_data_root, "qnnli_test.tsv")
dump_rows(
qnnli_train_data, qnli_train_fout, DataFormat.PremiseAndMultiHypothesis
)
dump_rows(qnnli_dev_data, qnli_dev_fout, DataFormat.PremiseAndMultiHypothesis)
dump_rows(
            qnnli_test_data, qnli_test_fout, DataFormat.PremiseAndMultiHypothesis
)
logger.info("done with qnli")
qqp_train_fout = os.path.join(canonical_data_root, "qqp_train.tsv")
qqp_dev_fout = os.path.join(canonical_data_root, "qqp_dev.tsv")
qqp_test_fout = os.path.join(canonical_data_root, "qqp_test.tsv")
dump_rows(qqp_train_data, qqp_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(qqp_dev_data, qqp_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(qqp_test_data, qqp_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with qqp")
rte_train_fout = os.path.join(canonical_data_root, "rte_train.tsv")
rte_dev_fout = os.path.join(canonical_data_root, "rte_dev.tsv")
rte_test_fout = os.path.join(canonical_data_root, "rte_test.tsv")
dump_rows(rte_train_data, rte_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(rte_dev_data, rte_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(rte_test_data, rte_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with rte")
wnli_train_fout = os.path.join(canonical_data_root, "wnli_train.tsv")
wnli_dev_fout = os.path.join(canonical_data_root, "wnli_dev.tsv")
wnli_test_fout = os.path.join(canonical_data_root, "wnli_test.tsv")
dump_rows(wnli_train_data, wnli_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(wnli_dev_data, wnli_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(wnli_test_data, wnli_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with wnli")
sst_train_fout = os.path.join(canonical_data_root, "sst_train.tsv")
sst_dev_fout = os.path.join(canonical_data_root, "sst_dev.tsv")
sst_test_fout = os.path.join(canonical_data_root, "sst_test.tsv")
dump_rows(sst_train_data, sst_train_fout, DataFormat.PremiseOnly)
dump_rows(sst_dev_data, sst_dev_fout, DataFormat.PremiseOnly)
dump_rows(sst_test_data, sst_test_fout, DataFormat.PremiseOnly)
logger.info("done with sst")
cola_train_fout = os.path.join(canonical_data_root, "cola_train.tsv")
cola_dev_fout = os.path.join(canonical_data_root, "cola_dev.tsv")
cola_test_fout = os.path.join(canonical_data_root, "cola_test.tsv")
dump_rows(cola_train_data, cola_train_fout, DataFormat.PremiseOnly)
dump_rows(cola_dev_data, cola_dev_fout, DataFormat.PremiseOnly)
dump_rows(cola_test_data, cola_test_fout, DataFormat.PremiseOnly)
logger.info("done with cola")
stsb_train_fout = os.path.join(canonical_data_root, "stsb_train.tsv")
stsb_dev_fout = os.path.join(canonical_data_root, "stsb_dev.tsv")
stsb_test_fout = os.path.join(canonical_data_root, "stsb_test.tsv")
dump_rows(stsb_train_data, stsb_train_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(stsb_dev_data, stsb_dev_fout, DataFormat.PremiseAndOneHypothesis)
dump_rows(stsb_test_data, stsb_test_fout, DataFormat.PremiseAndOneHypothesis)
logger.info("done with stsb")
if __name__ == "__main__":
args = parse_args()
main(args)
|
ContextualSP/adaptershare/experiments/glue/glue_prepro.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/glue/glue_prepro.py",
"repo_id": "ContextualSP",
"token_count": 5950
}
| 254 |
boolq:
data_format: PremiseAndOneHypothesis
dropout_p: 0.1
enable_san: false
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
copa:
data_format: PremiseAndMultiHypothesis
enable_san: false
metric_meta:
- ACC
loss: RankCeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 1
task_type: Ranking
cb:
data_format: PremiseAndOneHypothesis
dropout_p: 0.1
enable_san: false
labels:
- contradiction
- neutral
- entailment
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 3
task_type: Classification
record:
data_format: ClozeChoice
enable_san: false
metric_meta:
- EmF1
loss: RankCeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 1
task_type: ClozeChoice
wic:
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
multirc:
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
|
ContextualSP/adaptershare/experiments/superglue/superglue_task_def.yml/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/superglue/superglue_task_def.yml",
"repo_id": "ContextualSP",
"token_count": 504
}
| 255 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
from copy import deepcopy
import sys
import json
import torch
import random
import numpy as np
from shutil import copyfile
from data_utils.task_def import TaskType, DataFormat
from data_utils.task_def import EncoderModelType
import tasks
from torch.utils.data import Dataset, DataLoader, BatchSampler, Sampler
from experiments.exp_def import TaskDef
from experiments.mlm.mlm_utils import truncate_seq_pair, load_loose_json
from experiments.mlm.mlm_utils import (
create_instances_from_document,
create_masked_lm_predictions,
)
UNK_ID = 100
BOS_ID = 101
def search_bin(bins, size):
idx = len(bins) - 1
for i, bin in enumerate(bins):
if size <= bin:
idx = i
break
return idx
def create_bins(bin_size, maxlen):
return [min(i + bin_size, maxlen) for i in range(0, maxlen, bin_size)]
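# Worked example of the two helpers above (values are illustrative):
#   create_bins(bin_size=64, maxlen=200) -> [64, 128, 192, 200]
#   search_bin([64, 128, 192, 200], 100) -> 1  (the bucket covering lengths 65-128)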
class DistMultiTaskBatchSampler(Sampler):
def __init__(
self,
datasets,
batch_size,
mix_opt,
extra_task_ratio,
rank=0,
world_size=1,
drop_last=False,
):
self.rank = rank
self.world_size = world_size
self._datasets = datasets
self._mix_opt = mix_opt
self._extra_task_ratio = extra_task_ratio
self.drop_last = drop_last
train_data_list = []
for dataset in datasets:
train_data_list.append(
self._get_shuffled_index_batches(len(dataset), batch_size)
)
self._train_data_list = train_data_list
@staticmethod
def _get_shuffled_index_batches(dataset_len, batch_size):
index_batches = [
list(range(i, min(i + batch_size, dataset_len)))
for i in range(0, dataset_len, batch_size)
]
random.shuffle(index_batches)
return index_batches
def __len__(self):
return sum(len(train_data) for train_data in self._train_data_list)
def __iter__(self):
all_iters = [iter(item) for item in self._train_data_list]
all_indices = self._gen_task_indices(
self._train_data_list, self._mix_opt, self._extra_task_ratio
)
for local_task_idx in all_indices:
task_id = self._datasets[local_task_idx].get_task_id()
batch = next(all_iters[local_task_idx])
batch = [(task_id, sample_id) for sample_id in batch]
if len(batch) % self.world_size != 0:
if self.drop_last:
break
else:
batch.extend(
[
batch[0]
for _ in range(
self.world_size - len(batch) % self.world_size
)
]
)
chunk_size = len(batch) // self.world_size
yield batch[self.rank * chunk_size : (self.rank + 1) * chunk_size]
@staticmethod
def _gen_task_indices(train_data_list, mix_opt, extra_task_ratio):
all_indices = []
if len(train_data_list) > 1 and extra_task_ratio > 0:
main_indices = [0] * len(train_data_list[0])
extra_indices = []
for i in range(1, len(train_data_list)):
extra_indices += [i] * len(train_data_list[i])
random_picks = int(
min(len(train_data_list[0]) * extra_task_ratio, len(extra_indices))
)
extra_indices = np.random.choice(extra_indices, random_picks, replace=False)
if mix_opt > 0:
extra_indices = extra_indices.tolist()
random.shuffle(extra_indices)
all_indices = extra_indices + main_indices
else:
all_indices = main_indices + extra_indices.tolist()
else:
for i in range(1, len(train_data_list)):
all_indices += [i] * len(train_data_list[i])
if mix_opt > 0:
random.shuffle(all_indices)
all_indices += [0] * len(train_data_list[0])
if mix_opt < 1:
random.shuffle(all_indices)
return all_indices
class DistSingleTaskBatchSampler(Sampler):
def __init__(self, dataset, batch_size, rank=0, world_size=1, drop_last=False):
self.rank = rank
self.world_size = world_size
self._dataset = dataset
self.drop_last = drop_last
self._data = self._get_index_batches(len(dataset), batch_size)
@staticmethod
def _get_index_batches(dataset_len, batch_size):
index_batches = [
list(range(i, min(i + batch_size, dataset_len)))
for i in range(0, dataset_len, batch_size)
]
return index_batches
def __len__(self):
return len(self._data)
def __iter__(self):
indices = iter(self._data)
for batch in indices:
task_id = self._dataset.get_task_id()
batch = [(task_id, sample_id) for sample_id in batch]
yield batch
class MultiTaskBatchSampler(BatchSampler):
def __init__(
self,
datasets,
batch_size,
mix_opt,
extra_task_ratio,
bin_size=64,
bin_on=False,
bin_grow_ratio=0.5,
heldout=False
):
self._datasets = datasets
self._batch_size = batch_size
self._mix_opt = mix_opt
self._extra_task_ratio = extra_task_ratio
self.bin_size = bin_size
self.bin_on = bin_on
self.bin_grow_ratio = bin_grow_ratio
self.heldout = heldout
train_data_list = []
for dataset in datasets:
if bin_on and not heldout:
train_data_list.append(
self._get_shuffled_index_batches_bin(
dataset,
batch_size,
bin_size=bin_size,
bin_grow_ratio=bin_grow_ratio,
)
)
else:
train_data_list.append(
self._get_shuffled_index_batches(len(dataset), batch_size)
)
self._train_data_list = train_data_list
@staticmethod
def _get_shuffled_index_batches(dataset_len, batch_size):
index_batches = [
list(range(i, min(i + batch_size, dataset_len)))
for i in range(0, dataset_len, batch_size)
]
random.shuffle(index_batches)
return index_batches
@staticmethod
def _get_shuffled_index_batches_bin(dataset, batch_size, bin_size, bin_grow_ratio):
maxlen = dataset.maxlen
bins = create_bins(bin_size, maxlen)
data = [[] for i in range(0, len(bins))]
for idx, sample in enumerate(dataset):
bin_idx = search_bin(bins, len(sample["sample"]["token_id"]))
data[bin_idx].append(idx)
index_batches = []
for idx, sub_data in enumerate(data):
if len(sub_data) < 1:
continue
batch_size = 1 if batch_size < 1 else batch_size
sub_dataset_len = len(sub_data)
sub_batches = [
list(range(i, min(i + batch_size, sub_dataset_len)))
for i in range(0, sub_dataset_len, batch_size)
]
index_batches.extend(sub_batches)
batch_size = int(batch_size * bin_grow_ratio)
random.shuffle(index_batches)
return index_batches
def __len__(self):
return sum(len(train_data) for train_data in self._train_data_list)
def __iter__(self):
all_iters = [iter(item) for item in self._train_data_list]
all_indices = self._gen_task_indices(
self._train_data_list, self._mix_opt, self._extra_task_ratio, self.heldout
)
for local_task_idx in all_indices:
task_id = self._datasets[local_task_idx].get_task_id()
batch = next(all_iters[local_task_idx])
yield [(task_id, sample_id) for sample_id in batch]
@staticmethod
def _gen_task_indices(train_data_list, mix_opt, extra_task_ratio, heldout):
all_indices = []
if len(train_data_list) > 1 and extra_task_ratio > 0 and not heldout:
main_indices = [0] * len(train_data_list[0])
extra_indices = []
for i in range(1, len(train_data_list)):
extra_indices += [i] * len(train_data_list[i])
random_picks = int(
min(len(train_data_list[0]) * extra_task_ratio, len(extra_indices))
)
extra_indices = np.random.choice(extra_indices, random_picks, replace=False)
if mix_opt > 0:
extra_indices = extra_indices.tolist()
random.shuffle(extra_indices)
all_indices = extra_indices + main_indices
else:
all_indices = main_indices + extra_indices.tolist()
else:
for i in range(1, len(train_data_list)):
all_indices += [i] * len(train_data_list[i])
if mix_opt > 0 and not heldout:
random.shuffle(all_indices)
all_indices += [0] * len(train_data_list[0])
if mix_opt < 1 and not heldout:
random.shuffle(all_indices)
return all_indices
class TaskIterBatchSampler(BatchSampler):
def __init__(
self,
datasets,
batch_size,
mix_opt,
extra_task_ratio,
bin_size=64,
bin_on=False,
bin_grow_ratio=0.5,
ite_batch_num=500
):
self._datasets = datasets
self.task_num = len(datasets)
self._batch_size = batch_size
self._mix_opt = mix_opt
self._extra_task_ratio = extra_task_ratio
self.bin_size = bin_size
self.bin_on = bin_on
self.bin_grow_ratio = bin_grow_ratio
self.ite_batch_num = ite_batch_num
train_data_list = []
for dataset in datasets:
if bin_on:
train_data_list.append(
self._get_shuffled_index_batches_bin(
dataset,
batch_size,
bin_size=bin_size,
bin_grow_ratio=bin_grow_ratio,
)
)
else:
train_data_list.append(
self._get_shuffled_index_batches(len(dataset), batch_size)
)
max_batch_len = max([len(train_data) for train_data in train_data_list])
flatten_train_data_list = []
for train_data in train_data_list:
tmp_train_data = []
tmp_train_data.extend(train_data)
tmp_len = len(train_data)
if tmp_len < max_batch_len:
external_batch_num = max_batch_len - tmp_len
while external_batch_num > 0:
tmp_train_data.append(train_data[external_batch_num%tmp_len])
external_batch_num -= 1
            flatten_train_data_list.append(tmp_train_data)
self._train_data_list = flatten_train_data_list
self.task_batch_num = max_batch_len
@staticmethod
def _get_shuffled_index_batches(dataset_len, batch_size):
index_batches = [
list(range(i, min(i + batch_size, dataset_len)))
for i in range(0, dataset_len, batch_size)
]
random.shuffle(index_batches)
return index_batches
@staticmethod
def _get_shuffled_index_batches_bin(dataset, batch_size, bin_size, bin_grow_ratio):
maxlen = dataset.maxlen
bins = create_bins(bin_size, maxlen)
data = [[] for i in range(0, len(bins))]
for idx, sample in enumerate(dataset):
bin_idx = search_bin(bins, len(sample["sample"]["token_id"]))
data[bin_idx].append(idx)
index_batches = []
for idx, sub_data in enumerate(data):
if len(sub_data) < 1:
continue
batch_size = 1 if batch_size < 1 else batch_size
sub_dataset_len = len(sub_data)
sub_batches = [
list(range(i, min(i + batch_size, sub_dataset_len)))
for i in range(0, sub_dataset_len, batch_size)
]
index_batches.extend(sub_batches)
batch_size = int(batch_size * bin_grow_ratio)
random.shuffle(index_batches)
return index_batches
def __len__(self):
return sum(len(train_data) for train_data in self._train_data_list)
def __iter__(self):
all_iters = [iter(item) for item in self._train_data_list]
all_indices = self._gen_task_indices(
self._train_data_list, self.task_batch_num, self.ite_batch_num
)
for local_task_idx in all_indices:
task_id = self._datasets[local_task_idx].get_task_id()
batch = next(all_iters[local_task_idx])
yield [(task_id, sample_id) for sample_id in batch]
@staticmethod
def _gen_task_indices(train_data_list, task_batch_num, ite_batch_num):
all_indices = []
        # number of full rounds of `ite_batch_num` batches per task
        iter_num = task_batch_num // ite_batch_num
for _ in range(iter_num):
for i in range(len(train_data_list)):
all_indices += [i] * ite_batch_num
return all_indices
class MultiTaskDataset(Dataset):
def __init__(self, datasets):
self._datasets = datasets
task_id_2_data_set_dic = {}
for dataset in datasets:
task_id = dataset.get_task_id()
assert task_id not in task_id_2_data_set_dic, (
"Duplicate task_id %s" % task_id
)
task_id_2_data_set_dic[task_id] = dataset
self._task_id_2_data_set_dic = task_id_2_data_set_dic
def __len__(self):
return sum(len(dataset) for dataset in self._datasets)
def __getitem__(self, idx):
task_id, sample_id = idx
return self._task_id_2_data_set_dic[task_id][sample_id]
class DistTaskDataset(Dataset):
def __init__(self, dataset, task_id):
self._dataset = dataset
def __len__(self):
return len(self._dataset)
def __getitem__(self, idx):
_, sample_id = idx
return self._dataset[sample_id]
def get_task_id(self):
return self._dataset.get_task_id()
class SingleTaskDataset(Dataset):
def __init__(
self,
path,
is_train=True,
maxlen=512,
factor=1.0,
task_id=0,
task_def: TaskDef = None,
bert_model="bert-base-uncased",
do_lower_case=True,
masked_lm_prob=0.15,
seed=13,
short_seq_prob=0.1,
max_seq_length=512,
max_predictions_per_seq=80,
printable=True,
heldout_scale=10,
heldout_start=-1
):
data, tokenizer = self.load(
path,
is_train,
maxlen,
factor,
task_def,
bert_model,
do_lower_case,
printable=printable,
)
if heldout_start >= 0:
data = data[heldout_start:heldout_start+heldout_scale]
self._data = data
self._tokenizer = tokenizer
self._task_id = task_id
self._task_def = task_def
# below is for MLM
if self._task_def.task_type is TaskType.MaskLM:
assert tokenizer is not None
# init vocab words
self._vocab_words = (
None if tokenizer is None else list(self._tokenizer.vocab.keys())
)
self._masked_lm_prob = masked_lm_prob
self._seed = seed
self._short_seq_prob = short_seq_prob
self._max_seq_length = max_seq_length
self._max_predictions_per_seq = max_predictions_per_seq
self._rng = random.Random(seed)
self.maxlen = maxlen
def get_task_id(self):
return self._task_id
@staticmethod
def load(
path,
is_train=True,
maxlen=512,
factor=1.0,
task_def=None,
bert_model="bert-base-uncased",
do_lower_case=True,
printable=True,
):
task_type = task_def.task_type
assert task_type is not None
if task_type == TaskType.MaskLM:
def load_mlm_data(path):
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(bert_model, cache_dir=".cache")
vocab_words = list(tokenizer.vocab.keys())
data = load_loose_json(path)
docs = []
for doc in data:
paras = doc["text"].split("\n\n")
paras = [para.strip() for para in paras if len(para.strip()) > 0]
tokens = [tokenizer.tokenize(para) for para in paras]
docs.append(tokens)
return docs, tokenizer
return load_mlm_data(path)
with open(path, "r", encoding="utf-8") as reader:
data = []
cnt = 0
for line in reader:
sample = json.loads(line)
sample["factor"] = factor
cnt += 1
if is_train:
task_obj = tasks.get_task_obj(task_def)
if task_obj is not None and not task_obj.input_is_valid_sample(
sample, maxlen
):
continue
if (task_type == TaskType.Ranking) and (
len(sample["token_id"][0]) > maxlen
or len(sample["token_id"][1]) > maxlen
):
continue
if (task_type != TaskType.Ranking) and (
len(sample["token_id"]) > maxlen
):
continue
data.append(sample)
if printable:
print("Loaded {} samples out of {}".format(len(data), cnt))
return data, None
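    # Note: each non-MLM line loaded above is a JSON object carrying at least
    # "uid", "token_id" and "type_id" (plus "label" for training); ranking-style
    # samples carry per-candidate lists instead (see Collater.rebatch below).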
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
if self._task_def.task_type == TaskType.MaskLM:
# create a MLM instance
instances = create_instances_from_document(
self._data,
idx,
self._max_seq_length,
self._short_seq_prob,
self._masked_lm_prob,
self._max_predictions_per_seq,
self._vocab_words,
self._rng,
)
instance_ids = list(range(0, len(instances)))
choice = np.random.choice(instance_ids, 1)[0]
instance = instances[choice]
labels = self._tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
position = instance.masked_lm_positions
labels = [lab if idx in position else -1 for idx, lab in enumerate(labels)]
sample = {
"token_id": self._tokenizer.convert_tokens_to_ids(instance.tokens),
"type_id": instance.segment_ids,
"nsp_lab": 1 if instance.is_random_next else 0,
"position": instance.masked_lm_positions,
"label": labels,
"uid": idx,
}
return {
"task": {"task_id": self._task_id, "task_def": self._task_def},
"sample": sample,
}
else:
return {
"task": {"task_id": self._task_id, "task_def": self._task_def},
"sample": self._data[idx],
}
class Collater:
def __init__(
self,
is_train=True,
dropout_w=0.005,
soft_label=False,
encoder_type=EncoderModelType.BERT,
max_seq_len=512,
do_padding=False,
):
self.is_train = is_train
self.dropout_w = dropout_w
self.soft_label_on = soft_label
self.encoder_type = encoder_type
self.pairwise_size = 1
self.max_seq_len = max_seq_len
self.do_padding = do_padding
def __random_select__(self, arr):
if self.dropout_w > 0:
return [UNK_ID if random.uniform(0, 1) < self.dropout_w else e for e in arr]
else:
return arr
@staticmethod
def patch_data(device, batch_info, batch_data):
if str(device) != "cpu":
for i, part in enumerate(batch_data):
if part is None:
continue
if isinstance(part, torch.Tensor):
batch_data[i] = part.pin_memory().to(device)
elif isinstance(part, tuple):
batch_data[i] = tuple(
sub_part.pin_memory().to(device) for sub_part in part
)
elif isinstance(part, list):
batch_data[i] = [
sub_part.pin_memory().to(device) for sub_part in part
]
else:
raise TypeError("unknown batch data type at %s: %s" % (i, part))
if "soft_label" in batch_info:
batch_info["soft_label"] = (
batch_info["soft_label"].pin_memory().to(device)
)
return batch_info, batch_data
def rebatch(self, batch):
newbatch = []
sizes = []
for sample in batch:
size = len(sample["token_id"])
sizes.append(size)
self.pairwise_size = size
assert size == len(sample["type_id"])
for idx in range(0, size):
token_id = sample["token_id"][idx]
type_id = sample["type_id"][idx]
attention_mask = sample["attention_mask"][idx]
uid = sample["ruid"][idx] if "ruid" in sample else sample["uid"]
olab = sample["olabel"][idx]
new_sample = deepcopy(sample)
new_sample["uid"] = uid
new_sample["token_id"] = token_id
new_sample["type_id"] = type_id
new_sample["attention_mask"] = attention_mask
new_sample["true_label"] = olab
newbatch.append(new_sample)
return newbatch, sizes
def __if_pair__(self, data_type):
return data_type in [
DataFormat.PremiseAndOneHypothesis,
DataFormat.PremiseAndMultiHypothesis,
]
def collate_fn(self, batch):
task_id = batch[0]["task"]["task_id"]
task_def = batch[0]["task"]["task_def"]
new_batch = []
for sample in batch:
assert sample["task"]["task_id"] == task_id
assert sample["task"]["task_def"] == task_def
new_batch.append(sample["sample"])
task_type = task_def.task_type
data_type = task_def.data_type
batch = new_batch
if task_type == TaskType.Ranking or task_type == TaskType.ClozeChoice:
batch, chunk_sizes = self.rebatch(batch)
# prepare model input
batch_info, batch_data = self._prepare_model_input(batch, data_type)
batch_info["task_id"] = task_id # used for select correct decoding head
batch_info["input_len"] = len(batch_data) # used to select model inputs
# select different loss function and other difference in training and testing
        # DataLoader will convert any unknown-type objects to dict; the conversion
        # logic also converts Enum to repr(Enum), which is a string and undesirable.
        # If we convert the object to dict in advance, DataLoader will do nothing.
batch_info["task_def"] = task_def.__dict__
batch_info["pairwise_size"] = self.pairwise_size # need for ranking task
# add label
labels = [sample["label"] if "label" in sample else None for sample in batch]
task_obj = tasks.get_task_obj(task_def)
if self.is_train:
            # in training mode, the label is consumed by PyTorch, so it should be a tensor
if task_obj is not None:
batch_data.append(task_obj.train_prepare_label(labels))
batch_info["label"] = len(batch_data) - 1
elif task_type == TaskType.Ranking or task_type == TaskType.ClozeChoice:
batch_data.append(torch.LongTensor(labels))
batch_info["label"] = len(batch_data) - 1
elif task_type == TaskType.Span:
# support multi positions
start, end = [], []
for sample in batch:
if type(sample["start_position"]) is list and type(
sample["end_position"]
):
idx = random.choice(range(0, len(sample["start_position"])))
start.append(sample["start_position"][idx])
end.append(sample["end_position"][idx])
else:
start.append(sample["start_position"])
end.append(sample["end_position"])
batch_data.append((torch.LongTensor(start), torch.LongTensor(end)))
# unify to one type of label
batch_info["label"] = len(batch_data) - 1
elif task_type == TaskType.SpanYN:
# start = [sample['start_position'] for sample in batch]
# end = [sample['end_position'] for sample in batch]
start, end = [], []
for sample in batch:
if type(sample["start_position"]) is list and type(
sample["end_position"]
):
idx = random.choice(range(0, len(sample["start_position"])))
start.append(sample["start_position"][idx])
end.append(sample["end_position"][idx])
else:
start.append(sample["start_position"])
end.append(sample["end_position"])
# start, end, yes/no
batch_data.append(
(
torch.LongTensor(start),
torch.LongTensor(end),
torch.LongTensor(labels),
)
)
# unify to one type of label
batch_info["label"] = len(batch_data) - 1
elif task_type == TaskType.SeqenceLabeling:
batch_size = self._get_batch_size(batch)
tok_len = self._get_max_len(batch, key="token_id")
tlab = torch.LongTensor(batch_size, tok_len).fill_(-1)
for i, label in enumerate(labels):
ll = len(label)
tlab[i, :ll] = torch.LongTensor(label)
batch_data.append(tlab)
batch_info["label"] = len(batch_data) - 1
elif task_type == TaskType.MaskLM:
batch_size = self._get_batch_size(batch)
tok_len = self._get_max_len(batch, key="token_id")
tlab = torch.LongTensor(batch_size, tok_len).fill_(-1)
for i, label in enumerate(labels):
ll = len(label)
tlab[i, :ll] = torch.LongTensor(label)
labels = torch.LongTensor([sample["nsp_lab"] for sample in batch])
batch_data.append((tlab, labels))
batch_info["label"] = len(batch_data) - 1
elif task_type == TaskType.SeqenceGeneration:
batch_size = self._get_batch_size(batch)
y_idxs = torch.LongTensor([sample["label"][:-1] for sample in batch])
label = torch.LongTensor([sample["label"][1:] for sample in batch])
label.masked_fill_(label == 0, -1)
batch_data.append(y_idxs)
batch_info["y_token_id"] = len(batch_data) - 1
batch_data.append(label)
batch_info["label"] = len(batch_data) - 1
# soft label generated by ensemble models for knowledge distillation
if self.soft_label_on and "softlabel" in batch[0]:
sortlabels = [sample["softlabel"] for sample in batch]
sortlabels = task_obj.train_prepare_soft_labels(sortlabels)
batch_info["soft_label"] = sortlabels
else:
            # in test mode, the label is used for evaluation
if task_obj is not None:
task_obj.test_prepare_label(batch_info, labels)
else:
batch_info["label"] = labels
if task_type == TaskType.Ranking:
batch_info["true_label"] = [
sample["true_label"] for sample in batch
]
if task_type == TaskType.ClozeChoice:
batch_info["answer"] = [
sample["answer"] for sample in batch
]
batch_info["choice"] = [
sample["choice"] for sample in batch
]
batch_info["pairwise_size"] = chunk_sizes
if task_type == TaskType.Span or task_type == TaskType.SpanYN:
batch_info["offset_mapping"] = [
sample["offset_mapping"] for sample in batch
]
batch_info["token_is_max_context"] = [
sample.get("token_is_max_context", None) for sample in batch
]
batch_info["context"] = [sample["context"] for sample in batch]
batch_info["answer"] = [sample["answer"] for sample in batch]
batch_info["label"] = [
sample["label"] if "label" in sample else None
for sample in batch
]
if task_type == TaskType.SeqenceGeneration:
batch_info["answer"] = [sample["answer"] for sample in batch]
batch_info["uids"] = [sample["uid"] for sample in batch] # used in scoring
return batch_info, batch_data
def _get_max_len(self, batch, key="token_id"):
tok_len = max(len(x[key]) for x in batch)
tok_len = self.max_seq_len if self.do_padding else tok_len
return tok_len
def _get_batch_size(self, batch):
return len(batch)
def _prepare_model_input(self, batch, data_type):
batch_size = self._get_batch_size(batch)
tok_len = self._get_max_len(batch, key="token_id")
if self.encoder_type == EncoderModelType.ROBERTA:
token_ids = torch.LongTensor(batch_size, tok_len).fill_(1)
type_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
masks = torch.LongTensor(batch_size, tok_len).fill_(0)
else:
token_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
type_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
masks = torch.LongTensor(batch_size, tok_len).fill_(0)
if self.__if_pair__(data_type):
hypothesis_masks = torch.BoolTensor(batch_size, tok_len).fill_(1)
premise_masks = torch.BoolTensor(batch_size, tok_len).fill_(1)
for i, sample in enumerate(batch):
select_len = min(len(sample["token_id"]), tok_len)
tok = sample["token_id"]
if self.is_train:
tok = self.__random_select__(tok)
token_ids[i, :select_len] = torch.LongTensor(tok[:select_len])
type_ids[i, :select_len] = torch.LongTensor(sample["type_id"][:select_len])
masks[i, :select_len] = torch.LongTensor([1] * select_len)
if self.__if_pair__(data_type):
plen = len(sample["type_id"]) - sum(sample["type_id"])
premise_masks[i, :plen] = torch.LongTensor([0] * plen)
for j in range(plen, select_len):
hypothesis_masks[i, j] = 0
if self.__if_pair__(data_type):
batch_info = {
"token_id": 0,
"segment_id": 1,
"mask": 2,
"premise_mask": 3,
"hypothesis_mask": 4,
}
batch_data = [token_ids, type_ids, masks, premise_masks, hypothesis_masks]
else:
batch_info = {"token_id": 0, "segment_id": 1, "mask": 2}
batch_data = [token_ids, type_ids, masks]
return batch_info, batch_data
|
ContextualSP/adaptershare/mt_dnn/batcher.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/batcher.py",
"repo_id": "ContextualSP",
"token_count": 17154
}
| 256 |
# Because we don't pin exact software versions in the Dockerfile, the train loss
# can differ when you rebuild the image, so we hide this test. It is still useful
# for developers who constantly work in exactly the same environment
# (Docker image, hardware).
import os
import shutil
import subprocess
import re
TRAIN_LOSS_RE = re.compile(r"train loss\[[\d\.]+\]")
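# e.g. matches progress lines such as: train loss[0.6931]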
def assert_file_equal(output_file, expected_file):
output = open(output_file).read()
expected = open(expected_file).read()
assert output == expected, "file diff: %s != %s" % (output_file, expected_file)
def compare_output(output_dir, expected_dir):
config = open(os.path.join(output_dir, "config.json")).read()
expected_config = open(os.path.join(expected_dir, "config.json")).read()
assert config == expected_config, "Config diff"
train_loss = TRAIN_LOSS_RE.findall(open(os.path.join(output_dir, "log.txt")).read())
expected_train_loss = TRAIN_LOSS_RE.findall(open(os.path.join(expected_dir, "log.txt")).read())
assert train_loss == expected_train_loss, "Train loss diff:\n\ttrain_loss is %s\n\texpected_train_loss is %s\n" % (
train_loss, expected_train_loss
)
for file_name in ("mnli_matched_dev_scores_0.json", "mnli_matched_test_scores_0.json",
"mnli_mismatched_dev_scores_0.json", "mnli_mismatched_test_scores_0.json"):
assert_file_equal(os.path.join(output_dir, file_name), os.path.join(expected_dir, file_name))
def test_train():
OUTPUT_DIR = r"run_test/checkpoint"
EXPECTED_DIR = r"tests/sample_data/checkpoint"
if os.access("./run_test", os.F_OK):
shutil.rmtree("./run_test")
os.mkdir("./run_test")
shutil.copytree("./sample_data", "./run_test/sample_data")
os.mkdir("./run_test/checkpoint")
subprocess.check_output("python train.py --epoch 1 --log_per_updates 1 --data_dir run_test/sample_data/output --output_dir %(OUTPUT_DIR)s 2>&1 > %(OUTPUT_DIR)s/log.txt"
% {"OUTPUT_DIR": OUTPUT_DIR}, stderr=subprocess.STDOUT, shell=True)
compare_output(OUTPUT_DIR, EXPECTED_DIR)
|
ContextualSP/adaptershare/tests/_test_train.py/0
|
{
"file_path": "ContextualSP/adaptershare/tests/_test_train.py",
"repo_id": "ContextualSP",
"token_count": 846
}
| 257 |
# README
The official code of paper [Awakening Latent Grounding from Pretrained Language Models for Semantic Parsing](https://aclanthology.org/2021.findings-acl.100.pdf).
# Install Dependencies
Please first install [PyTorch](https://pytorch.org/), and then install all the dependencies by running:
```bash
pip install -r requirements.txt
```
Please remember to unzip `json.zip` in the `data/wtq_grounding` folder. The resulting file structure should look like this:
```bash
data/wtq_grounding
├── json
│ ├── 202.json
│ ├── 203.json
│ ├── ...
├── dev.json
├── test.json
└── ...
```
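For reference, one minimal way to produce that layout (assuming `json.zip` unpacks into a `json/` folder next to the split files) is:

```bash
cd data/wtq_grounding
unzip json.zip
cd -
```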
# Train Grounding Model
## Train Grounding Model on Spider
Please run the script `train_spider_ground.sh` to train the grounding model on Spider dataset.
## Train Grounding Model on WTQ
Please run the script `train_wtq_ground.sh` to train the grounding model on WTQ dataset.
# Evaluate Grounding Model
## Evaluate Grounding Model on Spider
Please run the script `eval_spider_ground.sh` to evaluate the grounding model on Spider dataset. Note that you should replace the model checkpoint `checkpoints/spider_grounding_model/model.pt` with yours.
You should get the following results after following the training script:
```bash
avg loss = 0.2189
table accuracy = 0.8453
column accuracy = 0.7602
value accuracy = 0.9449
overall accuracy = 0.7050
table P = 0.847, R = 0.857, F1 = 0.852
column P = 0.842, R = 0.838, F1 = 0.840
value P = 0.948, R = 0.932, F1 = 0.940
average F1 = 0.8773
```
## Evaluate Grounding Model on WTQ
Please run the script `eval_wtq_ground.sh` to evaluate the grounding model on WTQ dataset. Note that you should replace the model checkpoint `checkpoints/wtq_grounding_model/model.pt` with yours.
|
ContextualSP/awakening_latent_grounding/README.md/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/README.md",
"repo_id": "ContextualSP",
"token_count": 1332
}
| 258 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# Adapted from The Annotated Transformer
class MultiHeadedAttentionWithRelations(nn.Module):
def __init__(self, num_heads, hidden_size, dropout):
super(MultiHeadedAttentionWithRelations, self).__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
assert hidden_size % num_heads == 0
self.head_dim = hidden_size // num_heads
self.linears = nn.ModuleList([nn.Linear(hidden_size, hidden_size) for _ in range(4)])
self.dropout = nn.Dropout(dropout)
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, relation_k, relation_v, mask=None):
# query shape: [batch_size, query_length, hidden_size]
# key shape: [batch_size, kv_length, hidden_size]
# value shape: [batch_size, kv_length, hidden_size]
# relation_k shape: [batch_size, query_length, kv_length, hidden_size // num_heads]
# relation_v shape: [batch_size, query_length, kv_length, hidden_size // num_heads]
batch_size = query.size(0)
# [batch_size, num_heads, query_length, head_dim]
query = self.linears[0](query).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
key = self.linears[1](key).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
value = self.linears[2](value).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
attn_outputs, attn_weights = self.attention_with_relations(query, key, value, relation_k, relation_v, mask=mask)
attn_outputs = attn_outputs.transpose(1, 2).contiguous().view(batch_size, -1, self.hidden_size)
attn_outputs = self.linears[-1](attn_outputs)
return attn_outputs #, attn_weights
def attention_with_relations(self, query, key, value, relation_k, relation_v, mask=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = self.relative_attention_logits(query, key, relation_k)
if mask is not None:
scores.masked_fill_(mask == 0, -1000)
p_attn_orig = F.softmax(scores, dim=-1)
# print(p_attn_orig.shape, value.shape, relation_v.shape)
#if self.dropout is not None:
p_attn = self.dropout(p_attn_orig)
return self.relative_attention_values(p_attn, value, relation_v), p_attn_orig
def relative_attention_logits(self, query, key, relation):
# We can't reuse the same logic as tensor2tensor because we don't share relation vectors across the batch.
# In this version, relation vectors are shared across heads.
# query: [batch, heads, num queries, depth].
# key: [batch, heads, num kvs, depth].
# relation: [batch, num queries, num kvs, depth].
# qk_matmul is [batch, heads, num queries, num kvs]
qk_matmul = torch.matmul(query, key.transpose(-2, -1))
# q_t is [batch, num queries, heads, depth]
q_t = query.permute(0, 2, 1, 3)
# r_t is [batch, num queries, depth, num kvs]
r_t = relation.transpose(-2, -1)
# [batch, num queries, heads, depth]
# * [batch, num queries, depth, num kvs]
# = [batch, num queries, heads, num kvs]
# For each batch and query, we have a query vector per head.
# We take its dot product with the relation vector for each kv.
q_tr_t_matmul = torch.matmul(q_t, r_t)
# qtr_t_matmul_t is [batch, heads, num queries, num kvs]
q_tr_tmatmul_t = q_tr_t_matmul.permute(0, 2, 1, 3)
# [batch, heads, num queries, num kvs]
return (qk_matmul + q_tr_tmatmul_t) / math.sqrt(query.shape[-1])
def relative_attention_values(self, weight, value, relation):
# In this version, relation vectors are shared across heads.
# weight: [batch, heads, num queries, num kvs].
# value: [batch, heads, num kvs, depth].
# relation: [batch, num queries, num kvs, depth].
# wv_matmul is [batch, heads, num queries, depth]
wv_matmul = torch.matmul(weight, value)
# w_t is [batch, num queries, heads, num kvs]
w_t = weight.permute(0, 2, 1, 3)
# [batch, num queries, heads, num kvs]
# * [batch, num queries, num kvs, depth]
# = [batch, num queries, heads, depth]
w_tr_matmul = torch.matmul(w_t, relation)
# w_tr_matmul_t is [batch, heads, num queries, depth]
w_tr_matmul_t = w_tr_matmul.permute(0, 2, 1, 3)
return wv_matmul + w_tr_matmul_t
# Adapted from The Annotated Transformer
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
# Adapted from The Annotated Transformer
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class RelationalEncoderLayer(nn.Module):
def __init__(self, num_heads, hidden_size, num_relations, dropout):
super(RelationalEncoderLayer, self).__init__()
assert hidden_size % num_heads == 0
self.self_attn = MultiHeadedAttentionWithRelations(num_heads, hidden_size, dropout)
self.feed_forward = PositionwiseFeedForward(hidden_size, hidden_size * 4, dropout)
self.sub_layers = nn.ModuleList([SublayerConnection(hidden_size, dropout) for _ in range(2)])
self.relation_k_embeddings = nn.Embedding(num_relations, hidden_size // num_heads)
self.relation_v_embeddings = nn.Embedding(num_relations, hidden_size // num_heads)
self.dropout = nn.Dropout(dropout)
def forward(self, inputs, relation_ids, mask=None):
# Map relation id to embedding
relation_k = self.dropout(self.relation_k_embeddings(relation_ids))
relation_v = self.dropout(self.relation_v_embeddings(relation_ids))
inputs = self.sub_layers[0](inputs, lambda x: self.self_attn(x, x, x, relation_k, relation_v, mask))
return self.sub_layers[1](inputs, self.feed_forward)
class RelationalEncoder(nn.Module):
def __init__(self, num_layers, hidden_size, num_heads, num_relations, dropout_prob):
super(RelationalEncoder, self).__init__()
self.encode_layers = nn.ModuleList([RelationalEncoderLayer(num_heads, hidden_size, num_relations, dropout_prob) for _ in range(num_layers)])
self.layer_norm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, inputs, relations, mask=None) -> torch.Tensor:
for layer in self.encode_layers:
inputs = layer(inputs, relations, mask)
return self.layer_norm(inputs)
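# A minimal usage sketch (sizes below are assumptions, not part of the module):
#   enc = RelationalEncoder(num_layers=2, hidden_size=256, num_heads=8,
#                           num_relations=50, dropout_prob=0.1)
#   inputs = torch.randn(2, 10, 256)                # [batch, seq, hidden]
#   relations = torch.randint(0, 50, (2, 10, 10))   # relation id per token pair
#   out = enc(inputs, relations)                    # -> [2, 10, 256]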
|
ContextualSP/awakening_latent_grounding/models/nn_layers.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/models/nn_layers.py",
"repo_id": "ContextualSP",
"token_count": 3118
}
| 259 |
from .data_types import *
from .data_iter import *
from .evaluator import *
from .nlp_utils import *
from .sql_parser import *
from .schema_linker import *
|
ContextualSP/awakening_latent_grounding/utils/__init__.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/utils/__init__.py",
"repo_id": "ContextualSP",
"token_count": 50
}
| 260 |
import argparse
import os
import random
import time
import unicodedata
from functools import partial
import torch
from torch import nn
from tqdm import tqdm
from model import HRLModel, PAD_token, EOS_token
from utils import AverageMeter
from utils import VisualizeLogger
from utils import get_logger
import numpy as np
USE_CUDA = torch.cuda.is_available()
global_step = 0
class Lang:
def __init__(self, name):
self.name = name
self.trimmed = False
self.word2index = {"x1": 3, "x2": 4, "x3": 5, "x4": 6}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS", 3: "x1", 4: "x2", 5: "x3", 6: "x4"}
self.n_words = 7 # Count default tokens
def vocab_size(self):
return len(self.word2index.keys())
def index_words(self, sentence):
for word in sentence.split(' '):
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Remove words below a certain count threshold
def trim(self, min_count):
if self.trimmed: return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
keep_words.append(k)
print('keep_words %s / %s = %.4f' % (
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
self.n_words = 3 # Count default tokens
for word in keep_words:
self.index_word(word)
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def normalize_string(s):
s = unicode_to_ascii(s.lower().strip())
return s
def read_data(lang1, lang2, task_name):
print("Reading dataset from task {}...".format(task_name))
lines_train = open('./data/tasks_train_{}.txt'.format(task_name), encoding='utf-8'). \
read().strip().split('\n')
lines_test = open('./data/tasks_test_{}.txt'.format(task_name), encoding='utf-8'). \
read().strip().split('\n')
pairs_train = [[normalize_string(s) for s in l.lstrip('IN: ').split(' OUT: ')] for l in lines_train]
pairs_test = [[normalize_string(s) for s in l.lstrip('IN: ').split(' OUT: ')] for l in lines_test]
_input_lang = Lang(lang1)
_output_lang = Lang(lang2)
return _input_lang, _output_lang, pairs_train, pairs_test
def prepare_dataset(lang1, lang2, task_name):
global input_lang
global output_lang
input_lang, output_lang, pairs_train, pairs_test = read_data(lang1, lang2, task_name)
for pair in pairs_train:
input_lang.index_words(pair[0])
output_lang.index_words(pair[1])
if task_name == "addjump":
# remove duplicated JUMP command
pairs_train = list(set([tuple(item) for item in pairs_train]))
pairs_train = [list(item) for item in pairs_train]
return input_lang, output_lang, pairs_train, pairs_test
def get_bound_idx(pairs, length):
    # Assumes `pairs` is sorted by input length; returns the number of leading
    # pairs whose input has at most `length` tokens. If every pair satisfies
    # the bound, the loop falls through and the function returns None, which
    # callers rely on as an open-ended slice (data[:None] == data).
    index = 0
    for i, pair in enumerate(pairs):
        if len(pair[0].split()) <= length:
            index = i
        else:
            return index + 1
def random_batch(pair):
input_seqs = []
target_seqs = []
input_seqs.append(indexes_from_sentence(input_lang, pair[0], 'input'))
target_seqs.append(indexes_from_sentence(output_lang, pair[1], 'output'))
# Zip into pairs, sort by length (descending), unzip
seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True)
input_seqs, target_seqs = zip(*seq_pairs)
# For input and target sequences, get array of lengths and pad with 0s to max length
input_lengths = [len(s) for s in input_seqs]
input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
target_lengths = [len(s) for s in target_seqs]
target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]
input_mask = torch.zeros((len(input_lengths), max(input_lengths)), dtype=torch.float32)
for idx, length in enumerate(input_lengths):
input_mask[idx, :length] = 1
target_mask = torch.zeros((len(target_lengths), max(target_lengths)), dtype=torch.float32)
for idx, length in enumerate(target_lengths):
target_mask[idx, :length] = 1
input_var = torch.LongTensor(input_padded)
target_var = torch.LongTensor(target_padded)
if USE_CUDA:
input_var = input_var.cuda()
target_var = target_var.cuda()
return input_var, input_lengths, input_mask, target_var, target_lengths
def indexes_from_sentence(lang, sentence, type):
if type == 'input':
return [lang.word2index[word] for word in sentence.split(' ')]
if type == 'output':
return [lang.word2index[word] for word in sentence.split(' ')] + [EOS_token]
def pad_seq(seq, max_length):
seq += [PAD_token for i in range(max_length - len(seq))]
return seq
def make_path_preparations(args, run_mode):
seed = args.random_seed
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
if run_mode == 'train':
log_dir = os.path.split(args.logs_path)[0]
if not os.path.exists(log_dir):
os.makedirs(log_dir)
_logger = get_logger(f"{args.logs_path}.log")
print(f"{args.logs_path}.log")
_logger.info(f"random seed: {seed}")
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
_logger.info(f"checkpoint's dir is: {args.model_dir}")
_visualizer = VisualizeLogger(summary_dir=args.model_dir)
else:
_logger = None
_visualizer = None
return _logger, _visualizer
def prepare_optimisers(args, logger, policy_parameters, environment_parameters):
if args.env_optimizer == "adam":
env_opt_class = torch.optim.Adam
elif args.env_optimizer == "amsgrad":
env_opt_class = partial(torch.optim.Adam, amsgrad=True)
elif args.env_optimizer == "adadelta":
env_opt_class = torch.optim.Adadelta
else:
env_opt_class = torch.optim.SGD
if args.pol_optimizer == "adam":
pol_opt_class = torch.optim.Adam
elif args.pol_optimizer == "amsgrad":
pol_opt_class = partial(torch.optim.Adam, amsgrad=True)
elif args.pol_optimizer == "adadelta":
pol_opt_class = torch.optim.Adadelta
else:
pol_opt_class = torch.optim.SGD
optimizer = {"policy": pol_opt_class(params=policy_parameters, lr=args.pol_lr, weight_decay=args.l2_weight),
"env": env_opt_class(params=environment_parameters, lr=args.env_lr, weight_decay=args.l2_weight)}
return optimizer
def perform_env_optimizer_step(optimizer, model, args):
if args.clip_grad_norm > 0:
nn.utils.clip_grad_norm_(parameters=model.get_environment_parameters(),
max_norm=args.clip_grad_norm,
norm_type=float("inf"))
optimizer["env"].step()
optimizer["env"].zero_grad()
def perform_policy_optimizer_step(optimizer, model, args):
if args.clip_grad_norm > 0:
nn.utils.clip_grad_norm_(parameters=model.get_policy_parameters(),
max_norm=args.clip_grad_norm,
norm_type=float("inf"))
optimizer["policy"].step()
optimizer["policy"].zero_grad()
def visualize_tree(seq, tree_actions_batch, sr_actions_batch, swr_actions_batch):
seq_list = seq.split()
assert len(seq_list) == len(swr_actions_batch)
for idx, swr_action in enumerate(swr_actions_batch):
if swr_action[0, 0] == 1:
seq_list[idx] = "[" + seq_list[idx] + "]"
for tree_action_batch, sr_action_batch in zip(tree_actions_batch, sr_actions_batch):
if tree_action_batch is None:
break
tree_action = tree_action_batch[0]
sr_action = sr_action_batch[0]
merge_idx = tree_action.tolist().index(1)
sr_idx = sr_action.tolist().index(1)
if sr_idx == 1:
seq_list = seq_list[:merge_idx] + ['(' + ' '.join(seq_list[merge_idx:merge_idx + 2]) + ')'] + seq_list[
merge_idx + 2:]
else:
seq_list = seq_list[:merge_idx] + ['[' + ' '.join(seq_list[merge_idx:merge_idx + 2]) + ']'] + seq_list[
merge_idx + 2:]
return seq_list[0]
def evaluate(test_data, model, device):
loading_time_meter = AverageMeter()
ce_loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
n_entropy_meter = AverageMeter()
model.eval()
start = time.time()
debug_info = {}
with torch.no_grad():
progress_bar = tqdm(range(len(test_data)))
for idx in progress_bar:
test_data_example = test_data[idx]
tokens, tokens_length, mask, labels, labels_length = random_batch(test_data_example)
tokens = tokens.to(device=device)
mask = mask.to(device=device)
loading_time_meter.update(time.time() - start)
pred_labels, tree_sr_log_prob, tree_sr_rewards, decoder_log_probs, decode_rewards, tree_actions, sr_actions, swr_actions, normalized_entropy = \
model(test_data_example, tokens, mask, debug_info=debug_info)
normalized_entropy = normalized_entropy.mean()
accuracy = [1. if (pred_labels == test_data_example[1]) else 0.]
accuracy = torch.tensor(accuracy).mean()
ce_loss = accuracy
n = mask.shape[0]
accuracy_meter.update(accuracy.item(), n)
ce_loss_meter.update(ce_loss.item(), n)
n_entropy_meter.update(normalized_entropy.item(), n)
progress_bar.set_description("Test Acc {:.1f}%".format(accuracy_meter.avg * 100))
return accuracy_meter.avg
def validate(valid_data, model, epoch, device, logger):
loading_time_meter = AverageMeter()
batch_time_meter = AverageMeter()
ce_loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
n_entropy_meter = AverageMeter()
if len(valid_data) > 1000:
# to accelerate
valid_data = [random.choice(valid_data) for _ in range(1000)]
visualizer.update_validate_size(len(valid_data))
model.eval()
start = time.time()
debug_info = {}
with torch.no_grad():
for idx, valid_data_example in enumerate(valid_data):
tokens, tokens_length, mask, labels, labels_length = random_batch(valid_data_example)
tokens = tokens.to(device=device)
mask = mask.to(device=device)
loading_time_meter.update(time.time() - start)
pred_labels, tree_sr_log_prob, tree_sr_rewards, decoder_log_probs, decode_rewards, tree_actions, sr_actions, swr_actions, normalized_entropy = \
model(valid_data_example, tokens, mask, debug_info=debug_info)
"""
logging into visualizer
"""
debug_info['tree_sr_rewards'] = tree_sr_rewards
debug_info['decode_rewards'] = decode_rewards
seq = " ".join([input_lang.index2word[token.data.item()] for token in tokens[0]])
tree = visualize_tree(seq, tree_actions, sr_actions, swr_actions)
visualizer.log_text(valid_data_example[1], tree, pred_labels, seq, debug_info)
visualizer.update_step()
normalized_entropy = normalized_entropy.mean()
accuracy = [1. if (pred_labels == valid_data_example[1]) else 0.]
accuracy = torch.tensor(accuracy).mean()
ce_loss = accuracy
n = mask.shape[0]
accuracy_meter.update(accuracy.item(), n)
ce_loss_meter.update(ce_loss.item(), n)
n_entropy_meter.update(normalized_entropy.item(), n)
batch_time_meter.update(time.time() - start)
start = time.time()
visualizer.log_performance(accuracy_meter.avg)
visualizer.update_epoch()
logger.info(f"Valid: epoch: {epoch} ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
f"n_entropy: {n_entropy_meter.avg:.4f} "
f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
model.train()
return accuracy_meter.avg
def train(train_data, valid_data, model, optimizer, epoch, args, logger,
total_batch_num, data_len, regular_weight):
loading_time_meter = AverageMeter()
batch_time_meter = AverageMeter()
ce_loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
n_entropy_meter = AverageMeter()
prob_ratio_meter = AverageMeter()
reward_std_meter = AverageMeter()
device = args.gpu_id
model.train()
start = time.time()
    # simple data augmentation so that epochs last longer on MiniSCAN
if len(train_data) < 100:
train_data = [pair for pair in train_data for _ in range(8)]
elif len(train_data) < 500:
train_data = [pair for pair in train_data for _ in range(2)]
random.shuffle(train_data)
batch_size = args.accumulate_batch_size
if len(train_data) % batch_size == 0:
batch_num = len(train_data) // batch_size
else:
batch_num = len(train_data) // batch_size + 1
val_accuracy = 0.
for batch_idx in range(batch_num):
if (batch_idx + 1) * batch_size < len(train_data):
train_pairs = train_data[batch_idx * batch_size:(batch_idx + 1) * batch_size]
else:
train_pairs = train_data[batch_idx * batch_size:]
batch_size = len(train_pairs)
total_batch_num += batch_size
loading_time_meter.update(time.time() - start)
normalized_entropy_samples = []
ts_log_prob_samples = []
decode_log_prob_samples = []
ts_rewards_samples = []
decode_rewards_samples = []
rewards_all = []
root_rewards_all = []
accuracy_samples = []
sample_num = 10
for example_idx in range(batch_size):
for sample_idx in range(sample_num):
train_pair = train_pairs[example_idx]
tokens, tokens_length, mask, labels, labels_length = random_batch(train_pair)
tokens = tokens.to(device=device)
mask = mask.to(device=device)
pred_labels, tree_sr_log_prob, tree_sr_rewards, decoder_log_probs, decode_rewards, tree_actions, sr_actions, swr_actions, normalized_entropy = \
model(train_pair, tokens, mask, is_test=False, epoch=epoch)
accuracy = 1. if (pred_labels == train_pair[1]) else 0.
normalized_entropy_samples.append(normalized_entropy)
ts_log_prob_samples.append(tree_sr_log_prob)
ts_rewards_samples.append(tree_sr_rewards)
decode_log_prob_samples.append(decoder_log_probs)
decode_rewards_samples.append(decode_rewards)
rewards_all = rewards_all + decode_rewards
accuracy_samples.append(accuracy)
root_rewards_all.append(decode_rewards[-1])
normalized_entropy_samples = torch.cat(normalized_entropy_samples, dim=0)
accuracy_samples = torch.tensor(accuracy_samples).cuda()
rewards_all = torch.tensor(rewards_all).cuda()
baseline = rewards_all.mean()
accuracy = accuracy_samples.mean()
loss_all = []
for idy, ts_rewards in enumerate(ts_rewards_samples):
ts_actions_log_prob = torch.cat(ts_log_prob_samples[idy], dim=0)
ts_rewards = torch.tensor(ts_rewards).cuda()
if baseline:
ts_rewards = ts_rewards - baseline
ts_prob_ratio = (ts_actions_log_prob - ts_actions_log_prob.detach()).exp()
ts_loss = (ts_prob_ratio * ts_rewards).mean().unsqueeze(0)
decode_rewards = decode_rewards_samples[idy]
decode_actions_log_prob = torch.cat(decode_log_prob_samples[idy], dim=0)
decode_rewards = torch.tensor(decode_rewards).cuda()
if baseline:
decode_rewards = decode_rewards - baseline
decode_prob_ratio = (decode_actions_log_prob - decode_actions_log_prob.detach()).exp()
decode_loss = (decode_prob_ratio * decode_rewards).mean().unsqueeze(0)
loss_all.append(ts_loss + decode_loss)
loss_avg = torch.cat(loss_all, dim=0).mean()
loss = loss_avg - regular_weight * normalized_entropy_samples.mean()
loss.backward()
perform_policy_optimizer_step(optimizer, model, args)
perform_env_optimizer_step(optimizer, model, args)
normalized_entropy = normalized_entropy.mean()
n = mask.shape[0]
ce_loss = rewards_all.mean()
accuracy_meter.update(accuracy.item(), n)
ce_loss_meter.update(ce_loss.item(), n)
reward_std_meter.update(rewards_all.std().item(), n)
n_entropy_meter.update(normalized_entropy.item(), n)
prob_ratio_meter.update((1.0 - loss_avg.detach()).abs().mean().item(), n)
batch_time_meter.update(time.time() - start)
global global_step
global_step += 1
if batch_num <= 500:
val_num = batch_num
else:
val_num = 250
if (batch_idx + 1) % (val_num) == 0:
logger.info(f"Train: epoch: {epoch} batch_idx: {batch_idx + 1} ce_loss: {ce_loss_meter.avg:.4f} "
f"reward_std: {reward_std_meter.avg:.4f} "
f"n_entropy: {n_entropy_meter.avg:.4f} loading_time: {loading_time_meter.avg:.4f} "
f"batch_time: {batch_time_meter.avg:.4f}")
logger.info(f"total_batch_num: {total_batch_num} cir: {data_len}")
val_accuracy = validate(valid_data, model, epoch, device, logger)
global best_model_path
logger.info("saving model...")
best_model_path = f"{args.model_dir}/{epoch}-{batch_idx}.mdl"
torch.save({"epoch": epoch, "batch_idx": batch_idx, "state_dict": model.state_dict()}, best_model_path)
model.train()
start = time.time()
if val_accuracy >= 0.99:
break
return val_accuracy, total_batch_num
def train_model(args, task_name, logger):
global input_lang
global output_lang
input_lang, output_lang, pairs_train, _ = prepare_dataset('nl', 'action', task_name)
index = [i for i in range(len(pairs_train))]
random.shuffle(index)
train_size = int(0.8 * len(pairs_train))
dev_size = len(pairs_train) - train_size
train_idxs, dev_idxs = torch.utils.data.random_split(index, [train_size, dev_size])
train_pairs_all = [pairs_train[idx] for idx in train_idxs]
dev_pairs_all = [pairs_train[idx] for idx in dev_idxs]
for pair in dev_pairs_all:
if len(pair[0].split()) <= 4:
train_pairs_all.append(pair)
train_data, dev_data = train_pairs_all, dev_pairs_all
train_data.sort(key=lambda p: len(p[0].split()))
maximum_lesson = len(train_data[-1][0].split())
dev_data = list(set([tuple(item) for item in dev_data]))
dev_data.sort(key=lambda p: len(p[0].split()))
dev_data = [list(item) for item in dev_data]
print(random.choice(train_pairs_all))
print(random.choice(dev_pairs_all))
args.vocab_size = input_lang.n_words
args.label_size = output_lang.n_words
model = HRLModel(x_ratio_rate=args.simplicity_ratio,
encode_mode=args.encode_mode,
decay_r=args.decay_r,
vocab_size=args.vocab_size,
word_dim=args.word_dim,
hidden_dim=args.hidden_dim,
label_dim=args.label_size,
composer_leaf=args.composer_leaf,
composer_trans_hidden=args.composer_trans_hidden,
input_lang=input_lang,
output_lang=output_lang).cuda(args.gpu_id)
optimizer = prepare_optimisers(args, logger,
policy_parameters=model.get_policy_parameters(),
environment_parameters=model.get_environment_parameters())
data_len = 3
epoch_count = 0
    # number of training epochs per lesson (lessons not listed below default to 1)
cir_epoch_dict = {
3: 30,
4: 30,
5: 20,
6: 10,
7: 5
}
regular_weight = args.init_regular_weight
print('Start lesson ', data_len)
total_batch_num = 0
for epoch in range(args.max_epoch):
if data_len in cir_epoch_dict:
# training epochs
cir_epoch_num = cir_epoch_dict[data_len]
else:
cir_epoch_num = 1
train_lesson_idx = get_bound_idx(train_data, data_len)
dev_lesson_idx = get_bound_idx(dev_data, data_len)
val_accuracy, total_batch_num = train(train_data[:train_lesson_idx],
dev_data[:dev_lesson_idx], model, optimizer,
epoch, args, logger,
total_batch_num, data_len, regular_weight)
if data_len == maximum_lesson and val_accuracy >= 0.99:
print("Finish Training. Training Succeed :)")
break
epoch_count += 1
if epoch_count == cir_epoch_num or val_accuracy >= 0.99:
# validate on all dev data
if val_accuracy >= 0.99:
val_accuracy_all = validate(dev_data, model, epoch, args.gpu_id, logger)
if val_accuracy_all >= 0.99:
print("Early Stopped. Training Succeed :)")
break
if data_len < maximum_lesson:
print('Lesson ', data_len, 'completed at', epoch)
data_len += 1
regular_weight = max(args.regular_decay_rate * regular_weight, args.regular_weight)
epoch_count = 0
print('Start lesson:', data_len)
def evaluate_model(args, task_name, logger):
global input_lang
global output_lang
input_lang, output_lang, _, pairs_test = prepare_dataset('nl', 'action', task_name)
test_data = pairs_test
test_data.sort(key=lambda p: len(p[0].split()))
args.vocab_size = input_lang.n_words
args.label_size = output_lang.n_words
model = HRLModel(x_ratio_rate=args.simplicity_ratio,
encode_mode=args.encode_mode,
decay_r=args.decay_r,
vocab_size=args.vocab_size,
word_dim=args.word_dim,
hidden_dim=args.hidden_dim,
label_dim=args.label_size,
composer_leaf=args.composer_leaf,
composer_trans_hidden=args.composer_trans_hidden,
input_lang=input_lang,
output_lang=output_lang).cuda(args.gpu_id)
checkpoint_file = args.checkpoint
print("loading", checkpoint_file)
checkpoint = torch.load(checkpoint_file)
model.load_state_dict(checkpoint["state_dict"])
print("loading finished...")
print("Start testing ..")
test_acc = evaluate(test_data, model, args.gpu_id)
print("Test Acc: {} %".format(test_acc * 100))
def prepare_arguments(checkpoint_folder, parser):
composer_lr = 1.0
solver_lr = 0.1
accumulate_batch_size = 4
regular_weight = 1e-4
regular_decay_rate = 0.5
hidden_size = 128
encode_mode = 'seq'
args = {"word-dim": hidden_size,
"hidden-dim": hidden_size,
"composer_leaf": "no_transformation",
"composer-trans-hidden": hidden_size,
"regular-weight": regular_weight, # 0.0001
"clip-grad-norm": 0.5,
"env-optimizer": "adadelta", # adadelta
"pol-optimizer": "adadelta", # adadelta
"env-lr": composer_lr, # 1.
"pol-lr": solver_lr, # 0.1
"l2-weight": 0.0001,
            # TODO: currently the batch size must be set to 1,
            # since our implementation requires it.
            # If you want to accumulate gradients, please use accumulate_batch_size.
"batch-size": 1,
"accumulate-batch-size": accumulate_batch_size,
"max-epoch": 300,
"gpu-id": 0,
"model-dir": "checkpoint/models/" + checkpoint_folder,
"logs-path": "checkpoint/logs/" + checkpoint_folder,
"encode-mode": encode_mode,
"regular-decay-rate": regular_decay_rate}
parser.add_argument("--word-dim", required=False, default=args["word-dim"], type=int)
parser.add_argument("--hidden-dim", required=False, default=args["hidden-dim"], type=int)
parser.add_argument("--composer_leaf", required=False, default=args["composer_leaf"],
choices=["no_transformation", "lstm_transformation",
"bi_lstm_transformation", "conv_transformation"])
parser.add_argument("--composer-trans-hidden", required=False, default=args["composer-trans-hidden"], type=int)
parser.add_argument("--clip-grad-norm", default=args["clip-grad-norm"], type=float,
help="If the value is less or equal to zero clipping is not performed.")
parser.add_argument("--env-optimizer", required=False, default=args["env-optimizer"],
choices=["adam", "amsgrad", "sgd", "adadelta"])
parser.add_argument("--pol-optimizer", required=False, default=args["pol-optimizer"],
choices=["adam", "amsgrad", "sgd", "adadelta"])
parser.add_argument("--env-lr", required=False, default=args["env-lr"], type=float)
parser.add_argument("--pol-lr", required=False, default=args["pol-lr"], type=float)
parser.add_argument("--l2-weight", required=False, default=args["l2-weight"], type=float)
parser.add_argument("--batch-size", required=False, default=args["batch-size"], type=int)
parser.add_argument("--accumulate-batch-size", required=False, default=args["accumulate-batch-size"], type=int)
parser.add_argument("--max-epoch", required=False, default=args["max-epoch"], type=int)
parser.add_argument("--gpu-id", required=False, default=args["gpu-id"], type=int)
parser.add_argument("--model-dir", required=False, default=args["model-dir"], type=str)
parser.add_argument("--logs-path", required=False, default=args["logs-path"], type=str)
parser.add_argument("--encode-mode", required=False, default=args["encode-mode"], type=str)
parser.add_argument("--regular-weight", default=args["regular-weight"], type=float)
parser.add_argument("--regular-decay-rate", required=False, default=args["regular-decay-rate"], type=float)
parser.add_argument("--init-regular-weight", required=False, default=1e-1, type=float)
# default no reward decay
parser.add_argument("--decay-r", required=False, default=1.0, type=str)
return parser.parse_args()
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--mode", required=True, default='train',
choices=['train', 'test'], type=str,
help="Determine whether to train a model or test using a trained weight file")
arg_parser.add_argument("--checkpoint", required=True, type=str,
help="When training, it is the folder to store model weights; "
"Otherwise it is the weight path to be loaded.")
arg_parser.add_argument("--task", required=True, type=str,
choices=["addjump", "around_right", "simple", "length",
"extend", "mcd1", "mcd2", "mcd3"],
help="All tasks on SCAN, the task name is used to load train or test file")
arg_parser.add_argument("--random-seed", required=False, default=1, type=int)
arg_parser.add_argument("--simplicity-ratio", required=False, default=0.0, type=float)
parsed_args = arg_parser.parse_args()
if parsed_args.mode == 'train':
args = prepare_arguments(parsed_args.checkpoint, arg_parser)
logger, visualizer = make_path_preparations(args, parsed_args.mode)
train_model(args, parsed_args.task, logger)
else:
args = prepare_arguments(parsed_args.checkpoint, arg_parser)
logger, visualizer = make_path_preparations(args, parsed_args.mode)
evaluate_model(args, parsed_args.task, logger)
|
ContextualSP/compositional_generalization/main.py/0
|
{
"file_path": "ContextualSP/compositional_generalization/main.py",
"repo_id": "ContextualSP",
"token_count": 13402
}
| 261 |
# Incomplete Utterance Rewriting <img src="https://pytorch.org/assets/images/logo-dark.svg" height = "25" align=center />
[中文版](README_zh.md)
The official pytorch implementation of our paper [Incomplete Utterance Rewriting as Semantic Segmentation](https://arxiv.org/pdf/2009.13166.pdf).
If you find our code useful for you, please consider citing our paper:
```bib
@inproceedings{qian2020incomplete,
title={Incomplete Utterance Rewriting as Semantic Segmentation},
author={Liu, Qian and Chen, Bei and Lou, Jian-Guang and Zhou, Bin and Zhang, Dongmei},
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
year={2020}
}
```
## Content
- [Install Dependencies](#requirement)
- [Download and Preprocess Dataset](#data)
- [Train Model](#train)
- [Evaluate Model](#evaluate)
- [Predict Model](#predict)
- [Pre-trained Models](#pre-trained-models)
## Requirement
### Python Environment
First of all, you should set up a Python environment. This code base has been tested under Python 3.x, and we officially support Python 3.7.
After installing Python 3.7, we strongly recommend using `virtualenv` (a tool to create isolated Python environments) to manage it. You could use the following commands to create an environment.
```bash
python -m pip install virtualenv
virtualenv venv
```
### Activate Virtual Environment
Then you should activate the environment before installing the dependencies. You could do so with the commands below (please change $ENV_FOLDER to your own virtualenv folder path, e.g. venv).
```bash
$ENV_FOLDER\Scripts\activate.bat (Windows)
source $ENV_FOLDER/bin/activate (Linux)
```
### Install Libraries
The most important requirements of our code base are the following:
- pytorch >= 1.2.0 (not tested on other versions, though 1.0.0 may work)
- allennlp == 0.9.0
Other dependencies can be installed by
```console
pip install -r requirement.txt
```
## Data
### Prepare Dataset
Although we cannot provide dataset resources in our repo (copyright issues), we provide `download.sh` to automatically download and preprocess the datasets used in our paper.
> Here the preprocessing does not include exporting the distant supervision, a.k.a. the word-level edit matrix, used in our paper. Anyone interested in the distant supervision can refer to the dataset reader file `src/data_reader.py` (lines 178-200).
### Prepare Glove
If you want to train models on English datasets (i.e. `Task` and `CANARD`), please download [Glove 6B](http://nlp.stanford.edu/data/glove.6B.zip). Unzip and move the `glove.6B.100d.txt` file into the folder `glove`.
## Train
You could train models on different datasets using `*.sh` files under the `src` folder.
For example, you could train `RUN + BERT` on `Multi` by running the following command under the `src` folder:
```console
./train_multi_bert.sh
```
### Configs Table
| Config | Model in Paper |
| :--- | :---: |
| canard.jsonnet | RUN on CANARD (Elgohary et al. 2019) |
| multi.jsonnet | RUN on Multi (Pan et al. 2019) |
| multi_bert.jsonnet | RUN + BERT on Multi (Pan et al. 2019) |
| rewrite.jsonnet | RUN on Rewrite (Su et al. 2019) |
| rewrite_bert.jsonnet | RUN + BERT on Rewrite (Su et al. 2019) |
| task.jsonnet | RUN on Task (Quan et al. 2019) |
### Tips for training
1. If you do not rely on the `BLEU` metric to pick the best weight file on the dev set, you could disable it for faster evaluation.
2. By default we do not calculate any metric on the train set to save training time, but you could enable it by setting `enable_training_log` to `True` in `*.jsonnet` (see `task.jsonnet` for an example).
3. All configs were tested successfully on a `Tesla M40 (24GB)`. If you hit an error such as `CUDA Out Of Memory`, you could reduce the hyper-parameter `batch_size` in `*.jsonnet`; in our experience, this does not hurt performance by a large margin.
## Evaluate
Once a model is trained, `allennlp` saves a compressed model archive, usually named `model.tar.gz`, under the checkpoint folder. Our evaluation is based on it.
We provide an evaluation script under the `src` folder, and you could evaluate a model file by running the following command:
```console
python evaluate.py --model_file model.tar.gz --test_file ../dataset/Multi/test.txt
```
The above script will generate a file `model.tar.gz.json` which records the detailed performance. For example, the performance of `RUN + BERT` on `Rewrite` is:
```json
{
"ROUGE": 0.9394040084189113,
"_ROUGE1": 0.961865057419486,
"_ROUGE2": 0.9113051224617216,
"EM": 0.688,
"_P1": 0.9451903332806824,
"_R1": 0.8668694770389685,
"F1": 0.9043373129817137,
"_P2": 0.8648273949812838,
"_R2": 0.7989241803278688,
"F2": 0.8305705345849144,
"_P3": 0.8075098814229249,
"_R3": 0.7449860216360763,
"F3": 0.774988935954985,
"_BLEU1": 0.9405510823944796,
"_BLEU2": 0.9172718486250105,
"_BLEU3": 0.8932687251641028,
"BLEU4": 0.8691863201601382,
"loss": 0.2084200546145439
}
```
Below we provide all pre-trained models to reproduce the results reported in our paper. We recommend downloading them into the folder `pretrained_weights` and running commands like the one below:
```console
python evaluate.py --model_file ../pretrained_weights/rewrite.tar.gz --test_file ../dataset/Multi/test.txt
```
## Predict
We provide an easy function call in `src/predict.py` to predict a rewritten sentence given a specific dialogue context. You could follow it to customize your own usage.
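For orientation, a minimal sketch of what such a call might look like with `allennlp` 0.9.0 is shown below; the predictor name (`"rewrite"`) and the input key (`"context"`) are placeholders, not the project's confirmed API, so please consult `src/predict.py` for the exact interface.
```python
# Illustrative sketch only -- run from the src folder so the project modules
# are importable; mirrors the --include-package flags of the training scripts.
import data_reader  # noqa: registers the dataset reader with allennlp
import model        # noqa: registers the model with allennlp

from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor

archive = load_archive("../pretrained_weights/rewrite.tar.gz")
predictor = Predictor.from_archive(archive, "rewrite")  # hypothetical predictor name

# "context" is a placeholder field; the real input format lives in src/predict.py.
print(predictor.predict_json({"context": "..."}))
```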
## Pre-trained Models
| Dataset | BERT | Config | EM | Rewriting F1 | BLEU4 | Pretrained_Weights |
| :---: | :---: |:--- | :---: | :---: | :---: | :---: |
| Rewrite | No | rewrite.jsonnet | 53.6 | 81.3 | 79.6 | [rewrite.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/rewrite/rewrite.tar.gz)|
| Rewrite | Yes | rewrite_bert.jsonnet | 68.8 | 90.4 | 86.9 | [rewrite_bert.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/rewrite.bert/rewrite_bert.tar.gz)|
| CANARD | No | canard.jsonnet | 18.3 | 44.2 | 49.8 | [canard.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/canard/canard.tar.gz) |
| Multi | No | multi.jsonnet | 43.3 | 60.7 | 81.1 | [multi.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/multi/multi.tar.gz) |
| Multi | Yes | multi_bert.jsonnet | 49.3 | 69.5 | 83.7 | [multi_bert.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/multi.bert/multi_bert.tar.gz) |
|
ContextualSP/incomplete_utterance_rewriting/README.md/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/README.md",
"repo_id": "ContextualSP",
"token_count": 2173
}
| 262 |
{
"ROUGE": 0.8954699040374693,
"_ROUGE1": 0.9248370079585566,
"_ROUGE2": 0.8548729804396925,
"EM": 0.4933385579937304,
"_P1": 0.7443478260869565,
"_R1": 0.6512335615693946,
"F1": 0.694684366123703,
"_P2": 0.6040515653775322,
"_R2": 0.5369713506139154,
"F2": 0.5685396504405605,
"_P3": 0.515867089789061,
"_R3": 0.4619306310071041,
"F3": 0.48741126151946734,
"_BLEU1": 0.9203424772022362,
"_BLEU2": 0.8919446800461631,
"_BLEU3": 0.8644065076063657,
"BLEU4": 0.836555297206264,
"loss": 0.012869063752786444
}
|
ContextualSP/incomplete_utterance_rewriting/log/multi_bert.tar.gz.json/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/log/multi_bert.tar.gz.json",
"repo_id": "ContextualSP",
"token_count": 335
}
| 263 |
#!/usr/bin/env bash
export model_file=../checkpoints/run_multi
export config_file=../configs/multi.jsonnet
export train_data_path=../dataset/Multi/train.txt
export validation_data_path=../dataset/Multi/valid.txt
export seed=1
allennlp train -s ${model_file} ${config_file} \
--include-package data_reader \
--include-package model \
-o "{\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\", \"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\"}"
|
ContextualSP/incomplete_utterance_rewriting/src/train_multi.sh/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/train_multi.sh",
"repo_id": "ContextualSP",
"token_count": 182
}
| 264 |
# coding: utf-8
from enum import Enum
import json
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from spacy.symbols import ORTH, LEMMA
from src.context.converter import SQLConverter
from src.context.db_context import SparcDBContext
from src.utils.spider_evaluate.convert_sql_to_detail import convert_sql_to_detail
from src.utils.wikisql_lib.query import Query as WikiSQLQuery
from src.utils.semql_tree_util import parse_sql_tree
class SpecialSymbol(Enum):
history_start = '@@BEG@@'
history_end = '@@END@@'
class SemQLConverter(object):
def __init__(self):
spacy_tokenizer = SpacyWordSplitter(pos_tags=True)
spacy_tokenizer.spacy.tokenizer.add_special_case(u'id', [{ORTH: u'id', LEMMA: u'id'}])
for token in SpecialSymbol.__members__.values():
token_uni = u'{}'.format(token.value)
spacy_tokenizer.spacy.tokenizer.add_special_case(token_uni, [{ORTH: token_uni, LEMMA: token_uni}])
self._tokenizer = WordTokenizer(spacy_tokenizer)
self.log = {}
def convert_example(self, example):
raise NotImplementedError
class SpiderSemQLConverter(SemQLConverter):
def __init__(self):
super().__init__()
def convert_example(self, example):
db_id = example['db_id']
utterance = example['question']
sql_converter = self._get_converter(db_id, utterance)
semql_statements = sql_converter.translate_to_intermediate(example['sql'])
return semql_statements
def convert_sql_to_semql(self, db_info, utterance, sql):
db_id = db_info['db_id']
sql_converter = self._get_converter(db_id, utterance)
db_info['column_index'] = {name.lower(): idx for idx, (_, name) in enumerate(db_info['column_names_original'])}
db_info['table_index'] = {name.lower(): idx for idx, name in enumerate(db_info['table_names_original'])}
sql_detail = convert_sql_to_detail(sql, db_id, db_info, db_dir='data/spider/database')
sql_detail['select'] = list(sql_detail['select'])
semql_statements = sql_converter.translate_to_intermediate(sql_detail)
return semql_statements
def _get_converter(self, db_id, utterance, table_file='data/spider/tables.json', database_path='data/spider/database'):
db_context = SparcDBContext(db_id=db_id, utterance=utterance, tokenizer=self._tokenizer,
tables_file=table_file, database_path=database_path)
sql_converter = SQLConverter(db_context=db_context)
return sql_converter
class WikiSQLConverter(SemQLConverter):
def __init__(self, table_file):
super().__init__()
table_infos = [json.loads(line) for line in open(table_file, 'r', encoding='utf-8')]
self.table_infos = {table_info['id']: table_info for table_info in table_infos}
def convert_example(self, example):
table_id = example['table_id']
table_info = self.table_infos[table_id]
table_col_names = table_info['header']
table_name = 'table'
utterance = example['question']
sql_detail = example['sql']
cond_statements_raw = []
conds = sql_detail['conds']
for (col_idx, operator_idx, condition_value) in conds:
col_name = '_'.join(table_col_names[col_idx].split()).lower()
operator = WikiSQLQuery.cond_ops[operator_idx]
cond_statements_raw.append([f'Filter -> {operator} A', f'A -> none C T', f'C -> {col_name}', f'T -> {table_name}'])
self.log['n_conditions'] = self.log.get('n_conditions', {})
self.log['n_conditions'][len(cond_statements_raw)] = self.log['n_conditions'].get(len(cond_statements_raw), 0) + 1
cond_statements = []
if len(cond_statements_raw) == 1:
cond_statements = cond_statements_raw[0]
elif len(cond_statements_raw) >= 2: # add subfilter statement
for i in range(len(cond_statements_raw) - 1):
cond_statements.append(f'Filter -> Filter and Filter')
cond_statements += cond_statements_raw[i]
cond_statements += cond_statements_raw[-1]
assert isinstance(sql_detail['sel'], int), f'Selected column index is not int, which actually is {sql_detail["sel"]}'
sel_col_name = '_'.join(table_col_names[sql_detail['sel']].split()).lower()
agg_op = WikiSQLQuery.agg_ops[sql_detail['agg']].lower() # ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
if not agg_op:
agg_op = 'none'
sel_statements = ['Select -> A', f'A -> {agg_op} C T', f'C -> {sel_col_name}', f'T -> {table_name}']
semql_statements = ['Statement -> Root', 'Root -> Select Filter' if cond_statements else 'Root -> Select'] \
+ sel_statements + cond_statements
return semql_statements
if __name__ == '__main__':
# wikisql_converter = WikiSQLConverter('data/wikisql/data/test.tables.jsonl')
# examples = [json.loads(line) for line in open('data/wikisql/data/test.jsonl', 'r', encoding='utf-8')]
# semql_statements = wikisql_converter.convert_example(examples[0])
# print(semql_statements)
# sql_tree, depth = parse_sql_tree(semql_statements)
# print(sql_tree.restatement(with_table=False))
spider_converter = SpiderSemQLConverter()
examples = json.load(open('data/spider/dev.json', 'r'))
semql_states = spider_converter.convert_example(examples[0])
print(semql_states)
|
ContextualSP/interactive_text_to_sql/src/utils/semql_converter.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/utils/semql_converter.py",
"repo_id": "ContextualSP",
"token_count": 2357
}
| 265 |
import inspect
import os
import signal
import sys
import time
from collections.abc import Mapping
from contextlib import contextmanager
import faulthandler
import line_profiler
from tqdm import tqdm, tqdm_notebook
from gtd.log import in_ipython
class Profiling(object):
@staticmethod
def start():
"""Enable the default profiler and reset its logging."""
Profiler.default().enable().reset()
@staticmethod
def report(*args, **kwargs):
Profiler.default().report(*args, **kwargs)
class Profiler(object):
"""Just a wrapper around line_profiler.
Supports some extra functionality like resetting.
"""
@classmethod
def default(cls):
if not hasattr(cls, '_default'):
profiler = Profiler()
profiler.enable_by_count()
profiler.disable()
cls._default = profiler
return cls._default
def __init__(self):
self._line_prof = line_profiler.LineProfiler()
def report(self, *args, **kwargs):
self.stats.report(*args, **kwargs)
def enable(self):
self._line_prof.enable()
self._enable = True
return self
def disable(self):
self._line_prof.disable()
self._enable = False
return self
def enable_by_count(self):
self._line_prof.enable_by_count()
self._enable_by_count = True
return self
def disable_by_count(self):
self._line_prof.disable_by_count()
self._enable_by_count = False
return self
def add_function(self, fxn):
self._line_prof.add_function(fxn)
return self
def add_module(self, mod):
"""Profile all functions and class methods inside this module.
NOTE: This includes functions that are imported into the module.
"""
from inspect import isclass, isfunction
for item in list(mod.__dict__.values()):
if isclass(item):
for k, v in list(item.__dict__.items()):
if isinstance(v, staticmethod) or isinstance(v, classmethod):
underlying_fxn = v.__get__(item)
self.add_function(underlying_fxn)
if isfunction(v):
self.add_function(v)
elif isfunction(item):
self.add_function(item)
return self
def add_this_module(self):
try:
frame = inspect.currentframe()
mod_name = frame.f_back.f_globals['__name__']
finally:
del frame # failing to delete the frame can cause garbage collection problems, due to reference counting
mod = sys.modules[mod_name]
return self.add_module(mod)
@property
def stats(self):
return ProfilerStats(self._line_prof.get_stats(), self.functions)
def reset(self):
functions = self.functions
line_prof = line_profiler.LineProfiler()
# copy settings
if self._enable:
line_prof.enable()
else:
line_prof.disable()
if self._enable_by_count:
line_prof.enable_by_count()
else:
line_prof.disable_by_count()
# add previously registered functions
for fxn in functions:
line_prof.add_function(fxn)
self._line_prof = line_prof
return self
@property
def functions(self):
return self._line_prof.functions
def function_label(fxn):
"""Return a (filename, first_lineno, func_name) tuple for a given code object.
This is the same labelling as used by the cProfile module in Python 2.5.
"""
code = fxn.__code__
if isinstance(code, str):
return ('~', 0, code) # built-in functions ('~' sorts at the end)
else:
return (code.co_filename, code.co_firstlineno, code.co_name)
class ProfilerStats(Mapping):
"""Wrapper around line_profiler.LineStats"""
def __init__(self, line_stats, functions):
"""Create a ProfilerStats object.
Args:
line_stats (LineStats): a LineStats object returned by LineProfiler
"""
self._line_stats = line_stats
self._functions = functions
def __getitem__(self, fxn):
"""Get stats for a particular fxn.
Args:
fxn: a Python function
Returns:
FunctionStats
"""
label = function_label(fxn)
return FunctionStats(fxn, self._line_stats.timings[label], self._line_stats.unit)
def __iter__(self):
return iter(self._functions)
def __len__(self):
return len(self._functions)
def report(self, fxns=None):
if fxns is None:
fxns = list(self.keys())
fxn_stats = [self[f] for f in fxns]
fxn_stats = sorted(fxn_stats, key=lambda stats: stats.total_time, reverse=True)
for stats in fxn_stats:
if stats.empty: continue
print(stats)
class FunctionStats(object):
def __init__(self, function, timing, unit):
"""Create a FunctionStats object.
Args:
function: a Python function
timing (list[(int, int, int)]): a list of (lineno, nhits, total_time) tuples, one per line
unit: unit of time (e.g. seconds)
"""
self._function = function
self._timing = timing
self._unit = unit
@property
def function(self):
return self._function
@property
def _line_stats_in_seconds(self):
"""Line stats in seconds.
Returns:
list[(int, int, float)]: a list of (line_number, number_of_hits, total_time_in_seconds) tuples, one per line
"""
return [(lineno, nhits, total_time * self._unit) for (lineno, nhits, total_time) in self._timing]
def __repr__(self):
label = function_label(self.function)
timings = {label: self._line_stats_in_seconds} # format needed for show_text
unit = 1.
class Stream(object):
def __init__(self):
self.items = []
def write(self, s):
self.items.append(s)
def get_value(self):
return ''.join(self.items)
output = Stream()
line_profiler.show_text(timings, unit, output)
s = output.get_value()
return s
@property
def empty(self):
return len(self._timing) == 0
@property
def total_time(self):
"""Total time spent by this function, in seconds."""
return sum([t for _, _, t in self._line_stats_in_seconds], 0)
def profile(f):
"""A decorator for functions you want to profile"""
Profiler.default().add_function(f)
return f
@contextmanager
def timer(name='unnamed'):
print('Start: {}'.format(name))
sys.stdout.flush()
start = time.time()
yield
stop = time.time()
print('Finish: {} ({} s)'.format(name, stop - start))
sys.stdout.flush()
def verboserate(iterable, *args, **kwargs):
"""Iterate verbosely.
Args:
desc (str): prefix for the progress bar
total (int): total length of the iterable
See more options for tqdm.tqdm.
"""
progress = tqdm_notebook if in_ipython() else tqdm
for val in progress(iterable, *args, **kwargs):
yield val
class Pulse(object):
def __init__(self, wait):
self.wait = wait
self.prev = time.time()
def __call__(self):
"""Check if it's time to pulse.
If enough time has passed since previous pulse, return True and reset timer.
Otherwise, return False (don't reset timer).
"""
now = time.time()
long_enough = now - self.prev > self.wait
if long_enough:
self.prev = now
return long_enough
def reset(self):
"""Force reset the timer."""
self.prev = time.time()
class TimeoutException(Exception):
pass
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException('Timed out!')
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
def monitor_call_stack():
if in_ipython():
# see this issue for why: https://github.com/ipython/ipykernel/issues/91
f = sys.__stderr__
else:
f = sys.stderr
faulthandler.register(signal.SIGUSR1, file=f)
print('To monitor call stack, type this at command line: kill -USR1 {}'.format(os.getpid()))
    print('Call stack will be printed to stderr '
          '(in IPython Notebook, this will show in the terminal where you launched the notebook.)')
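# Usage sketch (illustrative only, not part of the original module):
#
#   @profile                 # register slow_fn with the default profiler
#   def slow_fn(n):
#       return sum(i * i for i in range(n))
#
#   Profiling.start()        # enable the default profiler and reset its stats
#   slow_fn(10000)
#   Profiling.report()       # print per-line timings of registered functions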
|
ContextualSP/lemon/executor/gtd/chrono.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/chrono.py",
"repo_id": "ContextualSP",
"token_count": 3776
}
| 266 |
"""
Helper functions for plotting
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from gtd.io import makedirs
from gtd.log import in_ipython
def hinton(matrix, max_weight=None, ax=None, xtick=None, ytick=None, inverted_color=False):
"""Draw Hinton diagram for visualizing a weight matrix.
Copied from: http://matplotlib.org/examples/specialty_plots/hinton_demo.html
"""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
if inverted_color:
color = 'black' if w > 0 else 'white'
else:
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w))
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
if xtick:
ax.set_xticks(np.arange(matrix.shape[0]))
ax.set_xticklabels(xtick)
if ytick:
ax.set_yticks(np.arange(matrix.shape[1]))
ax.set_yticklabels(ytick)
return ax
def show(title, directory=''):
"""If in IPython, show, otherwise, save to file."""
import matplotlib.pyplot as plt
if in_ipython():
plt.show()
else:
# ensure directory exists
makedirs(directory)
plt.savefig(os.path.join(directory, title) + '.png')
# close all figures to conserve memory
plt.close('all')
def plot_pdf(x, cov_factor=None, *args, **kwargs):
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
density = gaussian_kde(x)
xgrid = np.linspace(min(x), max(x), 200)
if cov_factor is not None:
density.covariance_factor = lambda: cov_factor
density._compute_covariance()
y = density(xgrid)
plt.plot(xgrid, y, *args, **kwargs)
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
|
ContextualSP/lemon/executor/gtd/plot.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/plot.py",
"repo_id": "ContextualSP",
"token_count": 990
}
| 267 |
import re
import logging
import numpy as np
from gtd.utils import memoize
@memoize
def get_spacy():
"""
Loads the spaCy english processor.
Tokenizing, Parsing, and NER are enabled. All other features are disabled.
Returns:
A spaCy Language object for English
"""
logging.info('Loading spaCy...')
import spacy.en
nlp = spacy.en.English(tagger=False, parser=True, matcher=False)
return nlp
class NER(object):
def __init__(self):
self.processor = get_spacy()
def __call__(self, text):
"""Given a unicode string, return a tuple of the named entities found inside."""
if not isinstance(text, str):
text = str(text)
doc = self.processor(text)
return doc.ents
class Trie(object):
def __init__(self, token, parent, sink=False):
self.token = token
self.parent = parent
self.sink = sink
self.children = {}
def __contains__(self, phrase):
if phrase[0] == self.token:
if len(phrase) == 1:
# On our last word. Must be a sink to match.
return self.sink
else:
# doesn't match
return False
suffix = phrase[1:]
for child in list(self.children.values()):
if suffix in child:
return True
def ancestors(self):
if self.parent is None:
return []
anc = self.parent.ancestors()
anc.append(self.token)
return anc
class PhraseMatcher(object):
def __init__(self, phrases):
"""Construct a phrase matcher.
Args:
phrases (List[Tuple[str]]): a list of phrases to match, where each phrase is a tuple of strings
"""
# construct Trie
root = Trie('ROOT', None)
for phrase in phrases:
current = root
for token in phrase:
if token not in current.children:
current.children[token] = Trie(token, current)
current = current.children[token]
current.sink = True # mark last node as a sink
self.root = root
self.phrases = phrases
def has_phrase(self, phrase):
"""Check if a particular phrase is matched by the matcher.
Args:
phrase (tuple[str])
"""
        return ['ROOT'] + list(phrase) in self.root
def match(self, tokens):
"""A list of matches.
Args:
tokens (list[str]): a list of tokens
Returns:
list[tuple[str, int, int]]: A list of (match, start, end) triples. Each `match` is a tuple of tokens.
`start` and `end` are word offsets.
"""
root = self.root
candidates = [root]
matches = []
for i, token in enumerate(tokens):
# extend candidates or prune failed candidates
new_candidates = []
for cand in candidates:
if token in cand.children:
new_candidates.append(cand.children[token]) # move to child
candidates = new_candidates
candidates.append(root) # always add root
for cand in candidates:
if cand.sink:
match = tuple(cand.ancestors())
end = i + 1
start = end - len(match)
matches.append((match, start, end))
return matches
# first_cap_re = re.compile('(.)([A-Z][a-z]+)')
first_cap_re = re.compile('([^-_])([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camel_to_snake_case(name):
"""Convert camelCase to snake_case (Python)."""
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def longest_common_subsequence(X, Y):
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_subsequence#Computing_the_length_of_the_LCS
def LCS(X, Y):
m = len(X)
n = len(Y)
# An (m+1) times (n+1) matrix
C = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if X[i - 1] == Y[j - 1]:
C[i][j] = C[i - 1][j - 1] + 1
else:
C[i][j] = max(C[i][j - 1], C[i - 1][j])
return C
def backTrack(C, X, Y, i, j):
if i == 0 or j == 0:
return []
elif X[i - 1] == Y[j - 1]:
return backTrack(C, X, Y, i - 1, j - 1) + [X[i - 1]]
else:
if C[i][j - 1] > C[i - 1][j]:
return backTrack(C, X, Y, i, j - 1)
else:
return backTrack(C, X, Y, i - 1, j)
m = len(X)
n = len(Y)
C = LCS(X, Y)
return backTrack(C, X, Y, m, n)
def get_ngrams(s, n):
"""Get n-grams for s.
>>> s = [1, 2, 3, 4]
>>> get_ngrams(s, 2)
[(1, 2), (2, 3), (3, 4)]
>>> get_ngrams(s, 1)
[(1,), (2,), (3,), (4,)]
>>> get_ngrams(s, 4)
[(1, 2, 3, 4)]
"""
assert n <= len(s)
assert n >= 1
return [tuple(s[k:k + n]) for k in range(len(s) + 1 - n)]
def ngram_precision_recall(reference, candidate, n=None):
if n is None:
# Take the average over 1 through 4 grams.
prs = []
for m in [1, 2, 3, 4]:
prs.append(ngram_precision_recall(reference, candidate, m))
ps, rs = list(zip(*prs))
return np.mean(ps), np.mean(rs)
ref_set = set(get_ngrams(reference, n))
can_set = set(get_ngrams(candidate, n))
correct = float(len(ref_set & can_set))
rec = correct / len(ref_set)
prec = correct / len(can_set)
return prec, rec
|
ContextualSP/lemon/executor/gtd/text.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/text.py",
"repo_id": "ContextualSP",
"token_count": 2801
}
| 268 |
from abc import ABCMeta
class PathChecker(object, metaclass=ABCMeta):
"""Check whether a ParsePath should be included in the beam.
This is used to control the search space especially when the parameters
are not well initialized.
"""
def __init__(self, config):
"""Initialize the PathChecker.
Args:
config (Config): The decoder.prune section of the configuration.
"""
self.config = config
def __call__(self, path):
"""Check whether the path should be added to the beam.
Args:
path (ParsePath)
Returns:
True if the path should be included; False if it should be pruned.
"""
raise NotImplementedError
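# Illustrative subclass sketch (not part of the original file). The config
# attribute `max_length` and the use of len(path) are assumptions for
# illustration only:
#
#   class MaxLengthPathChecker(PathChecker):
#       def __call__(self, path):
#           # prune paths that grow beyond the configured maximum length
#           return len(path) <= self.config.max_length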
|
ContextualSP/lemon/executor/strongsup/path_checker.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/path_checker.py",
"repo_id": "ContextualSP",
"token_count": 283
}
| 269 |
from strongsup.predicate import Predicate
class RLongPredicate(Predicate):
"""Predicates for the RLong domain.
Conventions:
- colors are single characters (y, g, ...)
- numbers are integers, positive or negative (1, -2, ...)
- fractions start with X (X1/2, X2/3, ...)
- properties start with P (PColor, PHatColor, ...)
- actions start with A (ADrain, AMove, ...)
- built-in predicates include:
all-objects, index, argmin, argmax
- history slots start with H (H0, H1, ...)
"""
CACHE = {}
def __new__(cls, name, original_string=None):
if name not in cls.CACHE:
types = cls._compute_types(name)
# pred = super(RLongPredicate, cls).__new__(
# cls, name, original_string=original_string, types=types)
pred = super(RLongPredicate, cls).__new__(cls)
cls.CACHE[name] = pred
return cls.CACHE[name]
@classmethod
def _compute_types(cls, name):
assert isinstance(name, str)
types = []
if len(name) == 1 and name[0].isalpha():
types.append(RLongPredicateType.COLOR)
elif name[0] == '-' or name[0].isdigit():
types.append(RLongPredicateType.NUMBER)
elif name[0] == 'X':
types.append(RLongPredicateType.FRACTION)
elif name[0] == 'P':
types.append(RLongPredicateType.PROPERTY)
elif name[0] == 'D':
types.append(RLongPredicateType.DOUBLE_PROPERTY)
elif name[0] == 'A':
types.append(RLongPredicateType.ACTION)
elif name in BUILTIN_NAMES:
types.append(RLongPredicateType.BUILTIN)
elif name[0] == 'H':
types.append(RLongPredicateType.HISTORY_SLOT)
else:
raise ValueError('Unknown predicate: {}'.format(name))
return tuple(types)
@property
def types_vector(self):
"""Return the types as a k-hot vector.
Returns:
list[boolean]
"""
return [x in self.types for x in RLongPredicateType.ALL_TYPES]
BUILTIN_NAMES = ['all-objects', 'index', 'argmin', 'argmax']
class RLongPredicateType(object):
COLOR = 'color'
NUMBER = 'number'
FRACTION = 'fraction'
PROPERTY = 'property'
DOUBLE_PROPERTY = 'double_property'
ACTION = 'action'
BUILTIN = 'builtin'
HISTORY_SLOT = 'history_slot'
ALL_TYPES = (COLOR, NUMBER, FRACTION, PROPERTY,
DOUBLE_PROPERTY, ACTION, BUILTIN, HISTORY_SLOT)
|
ContextualSP/lemon/executor/strongsup/rlong/predicate.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/rlong/predicate.py",
"repo_id": "ContextualSP",
"token_count": 1160
}
| 270 |
# Copied from the official WikiTableQuestions evaluator, version 1.0
from math import isnan, isinf
from strongsup.value import Value
from strongsup.tables.utils import normalize
class StringValue(Value):
def __init__(self, content):
assert isinstance(content, str)
self._normalized = normalize(content)
self._hash = hash(self._normalized)
def __eq__(self, other):
return (isinstance(other, StringValue)
and self._normalized == other._normalized)
def __hash__(self):
return self._hash
def __str__(self):
return 'S' + str([self._normalized])
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
return self._normalized == other._normalized
class NumberValue(Value):
def __init__(self, amount, original_string=None):
assert isinstance(amount, (int, float))
if abs(amount - round(amount)) < 1e-6:
self._amount = int(amount)
else:
self._amount = float(amount)
if not original_string:
self._normalized = str(self._amount)
else:
self._normalized = normalize(original_string)
self._hash = hash(self._amount)
def __eq__(self, other):
return (isinstance(other, NumberValue)
and self._amount == other._amount)
def __hash__(self):
return self._hash
def match(self, other):
assert isinstance(other, Value)
if self._normalized == other._normalized:
return True
if isinstance(other, NumberValue):
return abs(self._amount - other._amount) < 1e-6
return False
def __str__(self):
return ('N(%f)' % self._amount) + str([self._normalized])
__repr__ = __str__
@staticmethod
def parse(text):
"""Try to parse into a number.
Return:
the number (int or float) if successful; otherwise None.
"""
try:
return int(text)
except:
try:
amount = float(text)
assert not isnan(amount) and not isinf(amount)
return amount
except:
return None
class DateValue(Value):
def __init__(self, year, month, day, original_string=None):
"""Create a new DateValue. Placeholders are marked as -1."""
assert isinstance(year, int)
assert isinstance(month, int) and (month == -1 or 1 <= month <= 12)
assert isinstance(day, int) and (day == -1 or 1 <= day <= 31)
assert not (year == month == day == -1)
self._year = year
self._month = month
self._day = day
if not original_string:
self._normalized = '{}-{}-{}'.format(
year if year != -1 else 'xx',
month if month != -1 else 'xx',
                day if day != -1 else 'xx')
else:
self._normalized = normalize(original_string)
self._hash = hash((self._year, self._month, self._day))
def __eq__(self, other):
return (isinstance(other, DateValue)
and self._year == other._year
and self._month == other._month
and self._day == other._day)
def __hash__(self):
return self._hash
def __str__(self):
return (('D(%d,%d,%d)' % (self._year, self._month, self._day))
+ str([self._normalized]))
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
if self._normalized == other._normalized:
return True
if isinstance(other, DateValue):
return (self._year == other._year
and self._month == other._month
and self._day == other._day)
return False
@staticmethod
def parse(text):
"""Try to parse into a date.
Return:
tuple (year, month, date) if successful; otherwise None.
"""
try:
ymd = text.lower().split('-')
assert len(ymd) == 3
year = -1 if ymd[0] in ('xx', 'xxxx') else int(ymd[0])
month = -1 if ymd[1] == 'xx' else int(ymd[1])
day = -1 if ymd[2] == 'xx' else int(ymd[2])
assert not (year == month == day == -1)
assert month == -1 or 1 <= month <= 12
assert day == -1 or 1 <= day <= 31
return (year, month, day)
except:
return None
################ Value Instantiation ################
def to_value(original_string, corenlp_value=None):
"""Convert the string to Value object.
Args:
original_string (basestring): Original string
corenlp_value (basestring): Optional value returned from CoreNLP
Returns:
Value
"""
if isinstance(original_string, Value):
# Already a Value
return original_string
if not corenlp_value:
corenlp_value = original_string
# Number?
amount = NumberValue.parse(corenlp_value)
if amount is not None:
return NumberValue(amount, original_string)
# Date?
ymd = DateValue.parse(corenlp_value)
if ymd is not None:
if ymd[1] == ymd[2] == -1:
return NumberValue(ymd[0], original_string)
else:
return DateValue(ymd[0], ymd[1], ymd[2], original_string)
# String.
return StringValue(original_string)
def to_value_list(original_strings, corenlp_values=None):
"""Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
"""
assert isinstance(original_strings, (list, tuple, set))
if corenlp_values is not None:
assert isinstance(corenlp_values, (list, tuple, set))
assert len(original_strings) == len(corenlp_values)
return list(set(to_value(x, y) for (x, y)
in zip(original_strings, corenlp_values)))
else:
return list(set(to_value(x) for x in original_strings))
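# Illustrative behaviour of to_value, based on the classes above (a sketch for
# clarity, not part of the original evaluator; exact normalized strings depend
# on `normalize`, defined earlier in this file):
#   to_value("3.5")         -> NumberValue(3.5, "3.5")
#   to_value("2014-01-23")  -> DateValue(2014, 1, 23, "2014-01-23")
#   to_value("2014-xx-xx")  -> NumberValue(2014, "2014-xx-xx")  # year-only dates collapse to numbers
#   to_value("hello")       -> StringValue("hello")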
|
ContextualSP/lemon/executor/strongsup/tables/value.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tables/value.py",
"repo_id": "ContextualSP",
"token_count": 2727
}
| 271 |
import math
import operator
from numpy.testing import assert_allclose
from strongsup.utils import (
epsilon_greedy_sample,
softmax, softmax_with_alpha_beta,
)
from functools import reduce
def test_epsilon_greedy_sample():
num_choices = 8
num_iters = 100000
to_sample = 4
epsilon = 0.9
def expected_count():
expected_count = epsilon * (num_choices - 1)/num_choices
expected_count *= reduce(
operator.mul,
(1 - epsilon * 1/(num_choices - num) for num in range(
1, to_sample)),
1)
expected_count = (1 - expected_count) * num_iters
return expected_count
choices = list(range(num_choices))
counts = [0] * (num_choices + 1)
for i in range(num_iters):
sample = epsilon_greedy_sample(choices, to_sample, epsilon)
for val in sample:
counts[val] += 1
expected = expected_count()
assert(0.98 * expected <= counts[1] <= 1.02 * expected)
#test_epsilon_greedy_sample()
def test_softmax():
stuff = [-1, -2, -3, -20, -400]
exped = [math.exp(x) for x in stuff]
target = [x / sum(exped) for x in exped]
assert_allclose(target, softmax(stuff))
assert_allclose(target, softmax_with_alpha_beta(stuff, 1, 1))
def test_softmax_with_alpha_beta():
for alpha in (0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0):
for beta in (0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0):
for stuff in [
[-1, -2, -3, -20, -400],
[-30, -30.4, -30.2, -31],
[float('-inf'), -30.4, -30.2, float('-inf'), float('-inf'), -31]]:
exped = [math.exp(x) for x in stuff]
exped_with_beta = [math.exp(x * beta) if x != float('-inf') else 0. for x in stuff]
target = [x / sum(exped_with_beta) * sum(exped)**(1-alpha) for x in exped_with_beta]
actual = softmax_with_alpha_beta(stuff, alpha, beta)
assert_allclose(target, actual)
|
ContextualSP/lemon/executor/strongsup/tests/test_utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/test_utils.py",
"repo_id": "ContextualSP",
"token_count": 1005
}
| 272 |
python lemon/run_model_pretrain.py train \
--dataset-dir lemon_data/pretraining_corpus/DATASET_PREFIX/bin_large \
--exp-dir OUTPUT_PATH \
--model-path BART_MODEL_PATH \
--model-arch bart_large \
--total-num-update 10000 \
--max-tokens 1800 \
--gradient-accumulation 8 \
--warmup-steps 1500
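# DATASET_PREFIX, OUTPUT_PATH and BART_MODEL_PATH above are placeholders:
# replace them with the pretraining-corpus folder name, a directory for
# checkpoints/logs, and the path to a downloaded BART-large checkpoint
# before running this script.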
|
ContextualSP/lemon/pretrain.sh/0
|
{
"file_path": "ContextualSP/lemon/pretrain.sh",
"repo_id": "ContextualSP",
"token_count": 139
}
| 273 |
## AI2 Reasoning Challenge (ARC) Evaluator
This script evaluates predictions for multiple-choice questions against correct answers and produces an accuracy score.
## Example
```bash
% python3 evaluator.py -qa questions.jsonl -p predictions.csv -o metrics.json
% cat metrics.json
{"accuracy": 0.85}
```
## Usage
The script takes two input files and produces one output file.
### Input question-answers
A question-answers file has question ids and answers in JSONL format. For example:
```bash
% cat questions.jsonl
{ "id": "question1", "answerKey": "C" }
{ "id": "question2", "answerKey": "B" }
{ "id": "question3", "answerKey": "C" }
{ "id": "question4", "answerKey": "D" }
{ "id": "question5", "answerKey": "D" }
```
(Attributes besides `id` and `answerKey` in each object are ignored.)
### Input predictions
A predictions file has one prediction per line in CSV format. For example:
```bash
% cat predictions.csv
question1,A;B;C;D
question2,B
question3,C
question4,D
question5,D
```
### Output metrics
A JSON file that has an accuracy score in the range 0.0 to 1.0. For example:
```bash
% cat metrics.json
{"accuracy": 0.85}
```
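The example score of 0.85 is consistent with a partial-credit rule for multi-answer predictions: a prediction listing `n` answers separated by `;` earns `1/n` credit when the correct answer is among them. The sketch below reproduces the number from the example files; it is an illustration implied by the example, not the evaluator's source code.
```python
# Reproduce the example accuracy under the assumed partial-credit rule.
answers = {"question1": "C", "question2": "B", "question3": "C",
           "question4": "D", "question5": "D"}
predictions = {"question1": "A;B;C;D", "question2": "B", "question3": "C",
               "question4": "D", "question5": "D"}

total = 0.0
for qid, key in answers.items():
    choices = predictions[qid].split(";")
    total += 1.0 / len(choices) if key in choices else 0.0

print(round(total / len(answers), 2))  # (0.25 + 1 + 1 + 1 + 1) / 5 = 0.85
```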
## Development
### Unit tests
Run unit tests with `python3 test_evaluator.py`.
### Docker
Ultimately this evaluator is run in a Docker container. To test that it works there, run `test.sh`.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/README.md",
"repo_id": "ContextualSP",
"token_count": 431
}
| 274 |
import numpy as np
import sklearn.metrics
from sklearn.metrics import roc_curve
class F1MeasureCustomRetrievalEval:
def __init__(self, pos_label=1) -> None:
self._predictions = []
self._gt = []
self._pos_label = pos_label
self._probs = []
def __call__(self, label, score):
"""
Parameters
----------
predictions : ``torch.Tensor``, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : ``torch.Tensor``, required.
A tensor of integer class label of shape (batch_size, ...). It must be the same
shape as the ``predictions`` tensor without the ``num_classes`` dimension.
mask: ``torch.Tensor``, optional (default = None).
A masking tensor the same size as ``gold_labels``.
"""
self._gt.append(label)
self._probs.append(score)
def get_metric(self, reset: bool = False, given_thresh=None): # -> Dict[str,Float]:
probs = np.array(self._probs)
gt = np.array(self._gt)
threshold_max = None
f1_score_given_thresh = None
if reset and len(probs) > 0:
fpr, tpr, thresholds = roc_curve(gt, probs)
f1_scores = []
for thresh in thresholds:
f1_scores.append(
sklearn.metrics.f1_score(
gt, [1 if m > thresh else 0 for m in probs]
)
)
f1_scores = np.array(f1_scores)
f1_scores_max = np.max(f1_scores)
threshold_max = thresholds[np.argmax(f1_scores)]
auc_roc = sklearn.metrics.roc_auc_score(gt, probs)
if given_thresh is not None:
f1_score_given_thresh = sklearn.metrics.f1_score(
gt, [1 if m > given_thresh else 0 for m in probs]
)
else:
auc_roc = 0
f1_scores_max = 0
if reset:
self.reset()
return {
"auc_roc": auc_roc,
"f1_scores_max": f1_scores_max,
"threshold_max": threshold_max,
"f1_score_given_thresh": f1_score_given_thresh,
}
def reset(self):
self._gt = []
self._probs = []
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/training/metrics/confusion_matrix.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/training/metrics/confusion_matrix.py",
"repo_id": "ContextualSP",
"token_count": 1191
}
| 275 |
FROM python:3.7.0-alpine3.8
WORKDIR /app
COPY evaluator.py /app/evaluator.py
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/openbookqa/evaluator/Dockerfile/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/openbookqa/evaluator/Dockerfile",
"repo_id": "ContextualSP",
"token_count": 38
}
| 276 |
#!/usr/bin/env python3
import argparse
import json
from typing import Dict
from evaluation import Evaluation
from process import sentences_from_sentences_file, ActionFile
from scoring import QuestionScores
from errors import corrupted_action_file, corrupted_sentences_file
def main(answers_file: str, predictions_file: str, output_file: str, diagnostics_file: str, sentences_file: str):
# create diagnostics file if needed
diagnostics = None
sentences = None
if diagnostics_file:
diagnostics = open(diagnostics_file, mode='w')
print(f"Writing diagnostics to file {diagnostics_file}")
diagnostics.write(
f"Diagnostics of evaluation of predictions in {predictions_file} against answers in {answers_file}\n")
diagnostics.write("\n")
if sentences_file:
sentences = sentences_from_sentences_file(sentences_file)
# Step 1 and 2. Read and summarize answers and predictions
predictions = ActionFile.from_file(predictions_file)
answers = ActionFile.from_file(answers_file)
# Abort if there are differences
diff_report = answers.diff_participants(predictions)
if diff_report:
print(f"Participants in predictions file {predictions_file} are not exact matches to participants")
print(f"in {answers_file}. Detailed report:")
print()
print("\n".join(diff_report))
print()
corrupted_action_file(
filename=predictions_file,
details=f"Some participants are missing or unexpected."
)
predictions_summary = predictions.summarize()
answers_summary = answers.summarize()
# Step 3. Calculate per-process scores
scores_by_process = dict() # type: Dict[int, QuestionScores]
for process_id, answer_summary in answers_summary.items():
if process_id not in predictions_summary:
corrupted_action_file(
filename=predictions_file,
details=f"Prediction for process_id {answer_summary.process_id} is missing."
)
prediction_summary = predictions_summary[process_id]
score = QuestionScores.from_summaries(answer_summary, prediction_summary)
scores_by_process[process_id] = score
if diagnostics:
diag_struct = {
"prediction_summary": prediction_summary.diagnostics(),
"answer_summary": answer_summary.diagnostics(),
"score": {
"process_id": process_id,
"inputs": score.inputs.diagnostics(),
"outputs": score.outputs.diagnostics(),
"conversions": score.conversions.diagnostics(),
"moves": score.moves.diagnostics(),
}
}
if sentences:
if process_id not in sentences:
corrupted_sentences_file(
filename=sentences_file,
details=f"Sentences for process {process_id} not found."
)
sentences_for_diag = []
for i, text in enumerate(sentences[process_id]):
sentences_for_diag.append({
"step_number": 1 + i,
"text": text,
})
diag_struct["sentences"] = sentences_for_diag # type: ignore
diagnostics.write(json.dumps(diag_struct, indent=4))
diagnostics.write("\n")
# Step 4. Calculate a final evaluation
evaluation = Evaluation(scores_by_process)
# Step 5. Print a report and generate output file
report(evaluation, len(predictions_summary), len(answers_summary))
overall_scores = {
"precision": round(evaluation.overall.precision, 3),
"recall": round(evaluation.overall.recall, 3),
"f1": round(evaluation.overall.F1(), 3)
}
if output_file:
print("Writing results to file: %s" % output_file)
with open(output_file, "wt", encoding="UTF-8") as output:
output.write(json.dumps(overall_scores))
if diagnostics:
diag_struct = {"overall_scores": overall_scores}
diagnostics.write(json.dumps(diag_struct, indent=4))
diagnostics.write("\n")
# close diagnostics file
if diagnostics:
diagnostics.close()
def report(e: Evaluation, num_predictions: int, num_answers: int):
i = e.inputs
o = e.outputs
c = e.conversions
m = e.moves
overall = e.overall
print("=================================================")
print("Question Avg. Precision Avg. Recall Avg. F1")
print("-------------------------------------------------")
print("Inputs %4.3f %4.3f %4.3f" % (i.precision, i.recall, i.F1()))
print("Outputs %4.3f %4.3f %4.3f" % (o.precision, o.recall, o.F1()))
print("Conversions %4.3f %4.3f %4.3f" % (c.precision, c.recall, c.F1()))
print("Moves %4.3f %4.3f %4.3f" % (m.precision, m.recall, m.F1()))
print("-------------------------------------------------")
print("Overall Precision %4.3f " % overall.precision)
print("Overall Recall %4.3f " % overall.recall)
print("Overall F1 %4.3f " % overall.F1())
print("=================================================")
print()
print(f"Evaluated {num_predictions} predictions against {num_answers} answers.")
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluator for ProPara Leaderboard')
parser.add_argument('--predictions', '-p',
action='store',
dest='predictions_file',
required=True,
help='Path to file with predictions')
parser.add_argument('--answers', '-a',
action='store',
dest='answers_file',
required=True,
help='Path to file with answers')
parser.add_argument('--output', '-o',
action='store',
dest='output_file',
help='Output results to this file.')
parser.add_argument('--diagnostics', '-d',
action='store',
dest='diagnostics_file',
help='Write diagnostics to this file.')
parser.add_argument('--sentences', '-s',
action='store',
dest='sentences_file',
help='Path to file with sentences.')
args = parser.parse_args()
main(args.answers_file, args.predictions_file, args.output_file, args.diagnostics_file, args.sentences_file)
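# Example invocation (a sketch; the file names are placeholders):
#   python3 evaluator.py -p predictions.tsv -a answers.tsv -o metrics.json
# Optionally add -d diagnostics.txt and -s sentences.tsv to also write
# per-process diagnostics.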
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/evaluator.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/evaluator.py",
"repo_id": "ContextualSP",
"token_count": 3149
}
| 277 |
## Test case: Prediction has an invalid action.
* answers.tsv is the answer to process 1167 from the training set.
* predictions.tsv is a prediction with an invalid action.
An evaluation on this prediction should abort.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-6/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-6/README.md",
"repo_id": "ContextualSP",
"token_count": 54
}
| 278 |
import json
import argparse
from pydoc import doc
import collections
import os
def get_col_states(input_str):
col_and_state = input_str.replace('state : ', '').split(' | ')
return col_and_state
def get_col_states_start(input_str):
col_and_state = input_str.split(' states : ')
cols = col_and_state[0].replace('col : ', '').split(' | ')
states = col_and_state[1].split(' | ')
states[-1] = states[-1].split(' SEP ')[0]
return cols, states
def get_action(location_before, location_after):
location_before = location_before.replace("states : ", '')
location_after = location_after.replace("states : ", "")
if location_before == location_after:
return "NONE",location_before, location_after
if location_before == '-' and location_after != '-':
return "CREATE",location_before, location_after
if location_after == '-' and location_before != '-':
return "DESTROY",location_before, location_after
if location_before != '-' and location_after != '-':
return "MOVE",location_before, location_after
def process(id_path, generate_valid_path, dummy_path, if_answer=False):
target_idx = 2 if if_answer else 1
error_num = 0
id_file = open(id_path, 'r', encoding='utf8')
pre = open(generate_valid_path, 'r', encoding='utf8')
out = open(dummy_path, 'w', encoding='utf8')
linenum_to_colandstate = {}
pre_lines = pre.readlines()[1:]
for line in pre_lines:
elements = line.rstrip().split('\t')
line_id = int(elements[-1])
col_and_state = elements
linenum_to_colandstate[line_id] = col_and_state
current_case = -1
pre_states = {}
id_lines = id_file.readlines()
step_num = 0
action_matrix = collections.OrderedDict()
for line_id, case_id in enumerate(id_lines):
case_id, step_id = case_id.rstrip().split('-') # '4-1' -> [4, 1]
if case_id != current_case:
for key in action_matrix.keys():
for step_idx in range(step_num):
try:
line_out = str(current_case) + '\t' + str(step_idx + 1) + '\t' + key + '\t' + action_matrix[key][
step_idx][0] + '\t' + action_matrix[key][step_idx][1] + '\t' + action_matrix[key][step_idx][2] + '\t'
out.write(line_out + '\n')
except:
line_out = str(current_case) + '\t' + str(step_idx + 1) + '\t' + key + '\t' + 'NONE' + '\t' + '-' + '\t' + '-' + '\t'
out.write(line_out + '\n')
action_matrix = {}
step_num = 0
current_case = case_id
start_col_and_state = linenum_to_colandstate[line_id][-2]
pre_cols, pre_states = get_col_states_start(start_col_and_state) # get the init state
for key in pre_cols:
action_matrix[key] = [] # init the action matrix
step_num += 1
col_and_state = linenum_to_colandstate[line_id][target_idx] # get the first state (after the first action)
current_states = get_col_states(col_and_state) # current_states : List : ['state1', 'state2', 'state3', 'state4']
if len(current_states) != len(pre_states):
error_num += 1
col_list = list(action_matrix.keys())
for col_idx in range(len(col_list)):
try:
action_matrix[col_list[col_idx]].append((get_action(pre_states[col_idx], current_states[col_idx])))
except:
right_col = col_list[col_idx]
pre_state = '-' if col_idx >= len(pre_states) else pre_states[col_idx]
current_state = '-' if col_idx >= len(current_states) else current_states[col_idx]
error_action = (get_action(pre_state, current_state))
action_matrix[right_col].append(error_action)
pre_states = current_states
for key in action_matrix.keys():
for step_idx in range(step_num):
try:
line_out = str(current_case) + '\t' + str(step_idx + 1) + '\t' + key + '\t' + action_matrix[key][
step_idx][0] + '\t' + action_matrix[key][step_idx][1] + '\t' + action_matrix[key][step_idx][2] + '\t'
out.write(line_out + '\n')
except:
line_out = str(current_case) + '\t' + str(step_idx + 1) + '\t' + key + '\t' + 'NONE' + '\t' + '-' + '\t' + '-' + '\t'
out.write(line_out + '\n')
print('error_num', error_num)
def eval_recipes_stage2(prediction_file, answer_file, predict_target_file, answer_target_file):
predict_list = open(prediction_file, 'r', encoding='utf8').readlines()
answer_list = open(answer_file, 'r', encoding='utf8').readlines()
predict_dict = dict()
answer_dict = dict()
for predict, answer in zip(predict_list, answer_list):
predict_item = predict.strip().split('\t')
answer_item = answer.strip().split('\t')
assert predict_item[0] == answer_item[0]
assert predict_item[1] == answer_item[1]
assert predict_item[2] == answer_item[2]
doc_id = predict_item[0]
sentence_id = predict_item[1]
entity = predict_item[2]
predicted_action = predict_item[3]
answer_action = answer_item[3]
if (doc_id, entity) not in predict_dict:
predict_dict[(doc_id, entity)] = []
if predicted_action != 'NONE':
if not (predicted_action == 'CREATE' and predict_item[5] == '?'):
predict_dict[(doc_id, entity)].append({'step':int(sentence_id)-1, 'location': predict_item[5]})
if (doc_id, entity) not in answer_dict:
answer_dict[(doc_id, entity)] = []
if answer_action != 'NONE':
if not (answer_action == 'CREATE' and answer_item[5] == '?'):
answer_dict[(doc_id, entity)].append({'step':int(sentence_id)-1, 'location': answer_item[5]})
predict_json_lines = []
for item in predict_dict:
predict_json_lines.append({'id':int(item[0]),
'entity':item[1],
'loc_change':predict_dict[item]})
json.dump(predict_json_lines, open(predict_target_file, 'w', encoding='utf8'), indent=4, ensure_ascii=False)
answer_json_lines = []
for item in answer_dict:
answer_json_lines.append({'id':int(item[0]),
'entity':item[1],
'loc_change':answer_dict[item]})
json.dump(answer_json_lines, open(answer_target_file, 'w', encoding='utf8'), indent=4, ensure_ascii=False)
def eval_recipes_stage3(prediction_file, answer_file):
predict_list = json.load(open(prediction_file, 'r', encoding='utf8'))
answer_list = json.load(open(answer_file, 'r', encoding='utf8'))
assert len(predict_list) == len(answer_list)
num_data = len(answer_list)
total_pred, total_ans, total_correct = 0, 0, 0
for idx in range(num_data):
prediction = predict_list[idx]
answer = answer_list[idx]
assert prediction['id'] == answer['id'] and prediction['entity'] == answer['entity']
pred_loc = prediction['loc_change']
ans_loc = answer['loc_change']
num_pred = len(pred_loc)
num_ans = len(ans_loc)
if num_pred == 0 or num_ans == 0:
num_correct = 0
else:
num_correct = len([loc for loc in pred_loc if loc in ans_loc])
total_pred += num_pred
total_ans += num_ans
total_correct += num_correct
precision = total_correct / total_pred
recall = total_correct / total_ans
if (precision + recall) != 0:
f1 = 2 * precision * recall / (precision + recall)
else:
f1 = 0.0
print(f'{num_data} instances evaluated.')
print(f'Total predictions: {total_pred}, total answers: {total_ans}, total correct predictions: {total_correct}')
print(f'Precision: {precision*100:.2f}, Recall: {recall*100:.2f}, F1: {f1*100:.2f}')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
def eval_all(root_path, stage):
id_path = os.path.join('./before_pretraining_tsv/before-pretraining-1/', stage+'.id')
generate_prediction_file = os.path.join(root_path, 'generate-'+stage+'.txt.eval')
prediction_file = os.path.join(root_path, stage+'-predictions.tsv')
answer_file = os.path.join(root_path, stage+'-answers.tsv')
predict_target_file = os.path.join(root_path, stage+'_predict_loc.json')
answer_target_file = os.path.join(root_path, stage+'_answer_loc.json')
process(id_path, generate_prediction_file, prediction_file, False)
process(id_path, generate_prediction_file, answer_file, True)
eval_recipes_stage2(prediction_file, answer_file, predict_target_file, answer_target_file)
eval_recipes_stage3(predict_target_file, answer_target_file)
if __name__ == '__main__':
# eval_dir = 'CHECKPOINT-DIR'
# stage = 'valid'
# eval_all(eval_dir, stage)
eval_dir = '/mnt/v-qshi/project/amlk8s/LEMON/models/finetune-recipes-after-pretraining-without-destroy-seed-44/checkpoint_115_7500'
stage = 'test'
eval_all(eval_dir, stage)
|
ContextualSP/lemon/recipes_eval.py/0
|
{
"file_path": "ContextualSP/lemon/recipes_eval.py",
"repo_id": "ContextualSP",
"token_count": 4166
}
| 279 |
import json
import re
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--start_index', type=int)
parser.add_argument('--end_index', type=int)
parser.add_argument('--indicator_type')
args = parser.parse_args()
with open(f"./{args.indicator_type}.jsonl", "r") as f :
lines = f.readlines()
start_index = max(args.start_index, 0)
end_index = None if args.end_index == -1 or args.end_index >= len(lines) else args.end_index
indicator_type = args.indicator_type
stopwords = []
with open("../data/Indicators/stopwords.txt", "r") as f:
for l in f.readlines():
l = l.strip().lower()
if f == "": continue
stopwords.append(l)
rexp = re.compile("|".join([r"(\b{}\b)".format(ind) for ind in stopwords]))
with open(f"./filter_{indicator_type}/{indicator_type}_{start_index}_{end_index}.jsonl", "w") as f:
for l in tqdm(lines[start_index:end_index]):
try:
dic = json.loads(l)
stripped_output = re.sub(rexp, "", dic["output"])
stripped_output = re.sub(r"\W+", " ", stripped_output).strip().split()
if len(stripped_output) <= 3 or len(stripped_output) >= 15: continue
dic["output"] = dic["output"].replace("_", "")
json.dump(dic, f)
f.write("\n")
except: continue
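# Example invocation (a sketch; the indicator type is a placeholder and must
# match an existing ./<indicator_type>.jsonl file):
#   python filter.py --start_index 0 --end_index -1 --indicator_type premise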
|
ContextualSP/logigan/corpus_construction/mlm_corpus/filter.py/0
|
{
"file_path": "ContextualSP/logigan/corpus_construction/mlm_corpus/filter.py",
"repo_id": "ContextualSP",
"token_count": 583
}
| 280 |
# MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing
In this work, we present MultiSpider, a multilingual text-to-SQL dataset which covers seven languages (English, German, French, Spanish, Japanese, Chinese, and Vietnamese).
Please find more details on [paper](https://arxiv.org/pdf/2212.13492.pdf), [code](https://github.com/longxudou/multispider) and [data](https://huggingface.co/datasets/dreamerdeo/multispider).
## Citation
If you use our dataset or codebase, please cite our paper:
```
@inproceedings{Dou2022MultiSpiderTB,
title={MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing},
author={Longxu Dou and Yan Gao and Mingyang Pan and Dingzirui Wang and Wanxiang Che and Dechen Zhan and Jian-Guang Lou},
booktitle={AAAI Conference on Artificial Intelligence},
year={2023},
url={https://ojs.aaai.org/index.php/AAAI/article/view/26499/26271}
}
```
|
ContextualSP/multilingual_text_to_sql/README.md/0
|
{
"file_path": "ContextualSP/multilingual_text_to_sql/README.md",
"repo_id": "ContextualSP",
"token_count": 283
}
| 281 |
#!/usr/bin/env bash
split=mcd1
data_path=./data/$split/
key=$split-sketch
model_path=./model/sketch_prediction-$key
output_file=./output/$key-output
echo $output_file
WORK_DIR=$(readlink -f "./")/sketch_prediction/
echo $WORK_DIR
CUDA_VISIBLE_DEVICES=5 python3 $WORK_DIR/main.py \
--src_path $data_path/train/train_encode.txt --trg_path $data_path/train/train_sketch.txt \
--src_vocabulary $data_path/vocab.cfq.tokens.src --trg_vocabulary $data_path/vocab.cfq.tokens.sketch \
--embedding_size 300 --batch_size 1 --validate_batch_size 1 \
--save_path $model_path/ --save_interval 500 --log_interval 500 --cuda \
--validation_src_path $data_path/test/test_encode.txt --validation_trg_path $data_path/test/test_sketch.txt \
--inference_output $model_path/test --type inference \
--model_init_path $model_path/parser_model_best.pt \
--inference_output $output_file
|
ContextualSP/poset_decoding/sketch_prediction/evaluate.sh/0
|
{
"file_path": "ContextualSP/poset_decoding/sketch_prediction/evaluate.sh",
"repo_id": "ContextualSP",
"token_count": 340
}
| 282 |
Documentation Checking Process (Only for the developers)
==========================================================
# Why
All developers must regenerate the rst files so that the documentation can be checked against the current code.
# When
1. You add a new function to one of the scripts in {MatchZoo/matchzoo} or its subdirs
1. You add a new script to {MatchZoo/matchzoo} or its subdirs
1. You add a new directory to {MatchZoo/matchzoo} or its subdirs
# How
## Make sure you have installed sphinx
1. Enter the docs directory
```
cd {MatchZoo/docs}
```
2. Generate the rst files
```
sphinx-apidoc -f -o source ../matchzoo
```
3. Commit
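To verify the result, you can also build the HTML locally before committing. This assumes the docs directory ships the standard Sphinx `Makefile`; adjust the command if your setup differs.
```
make html
```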
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/DOCCHECK.md/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/DOCCHECK.md",
"repo_id": "ContextualSP",
"token_count": 225
}
| 283 |
import typing
import numpy as np
import matchzoo as mz
from matchzoo.engine.base_metric import BaseMetric
from .tuner import Tuner
def tune(
params: 'mz.ParamTable',
optimizer: str = 'adam',
trainloader: mz.dataloader.DataLoader = None,
validloader: mz.dataloader.DataLoader = None,
embedding: np.ndarray = None,
fit_kwargs: dict = None,
metric: typing.Union[str, BaseMetric] = None,
mode: str = 'maximize',
num_runs: int = 10,
verbose=1
):
"""
Tune model hyper-parameters.
A simple shorthand for using :class:`matchzoo.auto.Tuner`.
    `model.params.hyper_space` represents the model's hyper-parameters
search space, which is the cross-product of individual hyper parameter's
hyper space. When a `Tuner` builds a model, for each hyper parameter in
`model.params`, if the hyper-parameter has a hyper-space, then a sample
will be taken in the space. However, if the hyper-parameter does not
have a hyper-space, then the default value of the hyper-parameter will
be used.
See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage.
:param params: A completed parameter table to tune. Usually `model.params`
of the desired model to tune. `params.completed()` should be `True`.
:param optimizer: Str or `Optimizer` class. Optimizer for optimizing model.
:param trainloader: Training data to use. Should be a `DataLoader`.
:param validloader: Testing data to use. Should be a `DataLoader`.
:param embedding: Embedding used by model.
:param fit_kwargs: Extra keyword arguments to pass to `fit`.
(default: `dict(epochs=10, verbose=0)`)
:param metric: Metric to tune upon. Must be one of the metrics in
`model.params['task'].metrics`. (default: the first metric in
        `params['task'].metrics`.)
:param mode: Either `maximize` the metric or `minimize` the metric.
(default: 'maximize')
:param num_runs: Number of runs. Each run takes a sample in
`params.hyper_space` and build a model based on the sample.
(default: 10)
:param callbacks: A list of callbacks to handle. Handled sequentially
at every callback point.
:param verbose: Verbosity. (default: 1)
Example:
>>> import matchzoo as mz
>>> import numpy as np
>>> train = mz.datasets.toy.load_data('train')
>>> valid = mz.datasets.toy.load_data('dev')
>>> prpr = mz.models.DenseBaseline.get_default_preprocessor()
>>> train = prpr.fit_transform(train, verbose=0)
>>> valid = prpr.transform(valid, verbose=0)
>>> trainset = mz.dataloader.Dataset(train)
>>> validset = mz.dataloader.Dataset(valid)
>>> padding = mz.models.DenseBaseline.get_default_padding_callback()
>>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding)
>>> validloader = mz.dataloader.DataLoader(validset, callback=padding)
>>> model = mz.models.DenseBaseline()
>>> model.params['task'] = mz.tasks.Ranking()
>>> optimizer = 'adam'
>>> embedding = np.random.uniform(-0.2, 0.2,
... (prpr.context['vocab_size'], 100))
>>> tuner = mz.auto.Tuner(
... params=model.params,
... optimizer=optimizer,
... trainloader=trainloader,
... validloader=validloader,
... embedding=embedding,
... num_runs=1,
... verbose=0
... )
>>> results = tuner.tune()
>>> sorted(results['best'].keys())
['#', 'params', 'sample', 'score']
"""
tuner = Tuner(
params=params,
optimizer=optimizer,
trainloader=trainloader,
validloader=validloader,
embedding=embedding,
fit_kwargs=fit_kwargs,
metric=metric,
mode=mode,
num_runs=num_runs,
verbose=verbose
)
return tuner.tune()
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/tuner/tune.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/tuner/tune.py",
"repo_id": "ContextualSP",
"token_count": 1598
}
| 284 |
from .load_data import load_data
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/cfq/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/cfq/__init__.py",
"repo_id": "ContextualSP",
"token_count": 9
}
| 285 |
from .rank_cross_entropy_loss import RankCrossEntropyLoss
from .rank_hinge_loss import RankHingeLoss
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/losses/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/losses/__init__.py",
"repo_id": "ContextualSP",
"token_count": 33
}
| 286 |
"""An implementation of ArcII Model."""
import typing
import torch
import torch.nn as nn
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.base_callback import BaseCallback
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.modules import Matching
from matchzoo.dataloader import callbacks
from matchzoo.utils import parse_activation
class ArcII(BaseModel):
"""
ArcII Model.
Examples:
>>> model = ArcII()
>>> model.params['embedding_output_dim'] = 300
>>> model.params['kernel_1d_count'] = 32
>>> model.params['kernel_1d_size'] = 3
>>> model.params['kernel_2d_count'] = [16, 32]
>>> model.params['kernel_2d_size'] = [[3, 3], [3, 3]]
>>> model.params['pool_2d_size'] = [[2, 2], [2, 2]]
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(name='left_length', value=10,
desc='Length of left input.'))
params.add(Param(name='right_length', value=100,
desc='Length of right input.'))
params.add(Param(name='kernel_1d_count', value=32,
desc="Kernel count of 1D convolution layer."))
params.add(Param(name='kernel_1d_size', value=3,
desc="Kernel size of 1D convolution layer."))
params.add(Param(name='kernel_2d_count', value=[32],
desc="Kernel count of 2D convolution layer in"
"each block"))
params.add(Param(name='kernel_2d_size', value=[(3, 3)],
desc="Kernel size of 2D convolution layer in"
" each block."))
params.add(Param(name='activation', value='relu',
desc="Activation function."))
params.add(Param(name='pool_2d_size', value=[(2, 2)],
desc="Size of pooling layer in each block."))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = 10,
fixed_length_right: int = 100,
pad_word_value: typing.Union[int, str] = 0,
pad_word_mode: str = 'pre',
with_ngram: bool = False,
fixed_ngram_length: int = None,
pad_ngram_value: typing.Union[int, str] = 0,
pad_ngram_mode: str = 'pre'
) -> BaseCallback:
"""
Model default padding callback.
The padding callback's on_batch_unpacked would pad a batch of data to
a fixed length.
:return: Default padding callback.
"""
return callbacks.BasicPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_word_value=pad_word_value,
pad_word_mode=pad_word_mode,
with_ngram=with_ngram,
fixed_ngram_length=fixed_ngram_length,
pad_ngram_value=pad_ngram_value,
pad_ngram_mode=pad_ngram_mode
)
def build(self):
"""
Build model structure.
ArcII has the desirable property of letting two sentences meet before
their own high-level representations mature.
"""
self.embedding = self._make_default_embedding_layer()
# Phrase level representations
self.conv1d_left = nn.Sequential(
nn.ConstantPad1d((0, self._params['kernel_1d_size'] - 1), 0),
nn.Conv1d(
in_channels=self._params['embedding_output_dim'],
out_channels=self._params['kernel_1d_count'],
kernel_size=self._params['kernel_1d_size']
)
)
self.conv1d_right = nn.Sequential(
nn.ConstantPad1d((0, self._params['kernel_1d_size'] - 1), 0),
nn.Conv1d(
in_channels=self._params['embedding_output_dim'],
out_channels=self._params['kernel_1d_count'],
kernel_size=self._params['kernel_1d_size']
)
)
# Interaction
self.matching = Matching(matching_type='plus')
# Build conv
activation = parse_activation(self._params['activation'])
in_channel_2d = [
self._params['kernel_1d_count'],
*self._params['kernel_2d_count'][:-1]
]
conv2d = [
self._make_conv_pool_block(ic, oc, ks, activation, ps)
for ic, oc, ks, ps in zip(in_channel_2d,
self._params['kernel_2d_count'],
self._params['kernel_2d_size'],
self._params['pool_2d_size'])
]
self.conv2d = nn.Sequential(*conv2d)
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
left_length = self._params['left_length']
right_length = self._params['right_length']
for ps in self._params['pool_2d_size']:
left_length = left_length // ps[0]
for ps in self._params['pool_2d_size']:
right_length = right_length // ps[1]
# Build output
self.out = self._make_output_layer(
left_length * right_length * self._params['kernel_2d_count'][-1]
)
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# F = number of filters
# P = pool size
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
input_left, input_right = inputs['text_left'], inputs['text_right']
# Process left and right input.
# shape = [B, D, L]
# shape = [B, D, R]
embed_left = self.embedding(input_left.long()).transpose(1, 2)
embed_right = self.embedding(input_right.long()).transpose(1, 2)
# shape = [B, L, F1]
# shape = [B, R, F1]
conv1d_left = self.conv1d_left(embed_left).transpose(1, 2)
conv1d_right = self.conv1d_right(embed_right).transpose(1, 2)
# Compute matching signal
# shape = [B, L, R, F1]
embed_cross = self.matching(conv1d_left, conv1d_right)
# Convolution
# shape = [B, F2, L // P, R // P]
conv = self.conv2d(embed_cross.permute(0, 3, 1, 2))
# shape = [B, F2 * (L // P) * (R // P)]
embed_flat = self.dropout(torch.flatten(conv, start_dim=1))
# shape = [B, *]
out = self.out(embed_flat)
return out
@classmethod
def _make_conv_pool_block(
cls,
in_channels: int,
out_channels: int,
kernel_size: tuple,
activation: nn.Module,
pool_size: tuple,
) -> nn.Module:
"""Make conv pool block."""
return nn.Sequential(
# Same padding
nn.ConstantPad2d(
(0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0
),
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size
),
activation,
nn.MaxPool2d(kernel_size=pool_size)
)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/arcii.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/arcii.py",
"repo_id": "ContextualSP",
"token_count": 3863
}
| 287 |
"""An implementation of Match-SRNN Model."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.modules import MatchingTensor
from matchzoo.modules import SpatialGRU
class MatchSRNN(BaseModel):
"""
Match-SRNN Model.
Examples:
>>> model = MatchSRNN()
>>> model.params['channels'] = 4
>>> model.params['units'] = 10
>>> model.params['dropout'] = 0.2
>>> model.params['direction'] = 'lt'
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=False
)
params.add(Param(name='channels', value=4,
desc="Number of word interaction tensor channels"))
params.add(Param(name='units', value=10,
desc="Number of SpatialGRU units"))
params.add(Param(name='direction', value='lt',
desc="Direction of SpatialGRU scanning"))
params.add(Param(
'dropout', 0.2,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
def build(self):
"""Build model structure."""
self.embedding = self._make_default_embedding_layer()
self.dropout = nn.Dropout(p=self._params['dropout'])
self.matching_tensor = MatchingTensor(
self._params['embedding_output_dim'],
channels=self._params["channels"])
self.spatial_gru = SpatialGRU(
units=self._params['units'],
direction=self._params['direction'])
self.out = self._make_output_layer(self._params['units'])
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# C = number of channels
# Left input and right input
# query = [B, L]
# doc = [B, R]
query, doc = inputs["text_left"].long(), inputs["text_right"].long()
# Process left and right input
# query = [B, L, D]
# doc = [B, R, D]
query = self.embedding(query)
doc = self.embedding(doc)
# query = [B, L, D]
# doc = [B, R, D]
query = self.dropout(query)
doc = self.dropout(doc)
# Get matching tensor
# matching_tensor = [B, C, L, R]
matching_tensor = self.matching_tensor(query, doc)
# Apply spatial GRU to the word level interaction tensor
# h_ij = [B, U]
h_ij = self.spatial_gru(matching_tensor)
# h_ij = [B, U]
h_ij = self.dropout(h_ij)
# Make output layer
out = self.out(h_ij)
return out
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/match_srnn.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/match_srnn.py",
"repo_id": "ContextualSP",
"token_count": 1487
}
| 288 |
import torch
import torch.nn as nn
from torch.nn import functional as F
class StackedBRNN(nn.Module):
"""
Stacked Bi-directional RNNs.
Differs from standard PyTorch library in that it has the option to save
and concat the hidden states between layers. (i.e. the output hidden size
for each sequence input is num_layers * hidden_size).
Examples:
>>> import torch
>>> rnn = StackedBRNN(
... input_size=10,
... hidden_size=10,
... num_layers=2,
... dropout_rate=0.2,
... dropout_output=True,
... concat_layers=False
... )
>>> x = torch.randn(2, 5, 10)
>>> x.size()
torch.Size([2, 5, 10])
>>> x_mask = (torch.ones(2, 5) == 1)
>>> rnn(x, x_mask).shape
torch.Size([2, 5, 20])
"""
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
concat_layers=False):
"""Stacked Bidirectional LSTM."""
super().__init__()
self.dropout_output = dropout_output
self.dropout_rate = dropout_rate
self.num_layers = num_layers
self.concat_layers = concat_layers
self.rnns = nn.ModuleList()
for i in range(num_layers):
input_size = input_size if i == 0 else 2 * hidden_size
self.rnns.append(rnn_type(input_size, hidden_size,
num_layers=1,
bidirectional=True))
    def forward(self, x, x_mask):
        """Encode either padded or non-padded sequences."""
        # Padded and unpadded inputs are currently handled identically, so the
        # (mask-agnostic) unpadded encoder is run exactly once.
        output = self._forward_unpadded(x, x_mask)
        return output.contiguous()
def _forward_unpadded(self, x, x_mask):
"""Faster encoding that ignores any padding."""
# Transpose batch and sequence dims
x = x.transpose(0, 1)
# Encode all layers
outputs = [x]
for i in range(self.num_layers):
rnn_input = outputs[-1]
# Apply dropout to hidden input
if self.dropout_rate > 0:
rnn_input = F.dropout(rnn_input,
p=self.dropout_rate,
training=self.training)
# Forward
rnn_output = self.rnns[i](rnn_input)[0]
outputs.append(rnn_output)
# Concat hidden layers
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
else:
output = outputs[-1]
# Transpose back
output = output.transpose(0, 1)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = F.dropout(output,
p=self.dropout_rate,
training=self.training)
return output
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/stacked_brnn.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/stacked_brnn.py",
"repo_id": "ContextualSP",
"token_count": 1557
}
| 289 |
import re
from .unit import Unit
class PuncRemoval(Unit):
"""Process unit for remove punctuations."""
_MATCH_PUNC = re.compile(r'[^\w\s]')
def transform(self, input_: list) -> list:
"""
        Pass tokens through unchanged (punctuation removal is intentionally
        disabled here; the original filtering logic is kept below for reference).

        :param input_: list of tokens.
        :return rv: the input tokens, unchanged.
"""
# return [token for token in input_ if
# not self._MATCH_PUNC.search(token)]
return input_
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/punc_removal.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/punc_removal.py",
"repo_id": "ContextualSP",
"token_count": 209
}
| 290 |
"""Average meter."""
class AverageMeter(object):
"""
Computes and stores the average and current value.
Examples:
>>> am = AverageMeter()
>>> am.update(1)
>>> am.avg
1.0
>>> am.update(val=2.5, n=2)
>>> am.avg
2.0
"""
def __init__(self):
"""Average meter constructor."""
self.reset()
def reset(self):
"""Reset AverageMeter."""
self._val = 0.
self._avg = 0.
self._sum = 0.
self._count = 0.
def update(self, val, n=1):
"""Update value."""
self._val = val
self._sum += val * n
self._count += n
self._avg = self._sum / self._count
@property
def avg(self):
"""Get avg."""
return self._avg
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/average_meter.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/average_meter.py",
"repo_id": "ContextualSP",
"token_count": 395
}
| 291 |
import pytest
import shutil
import matchzoo as mz
from matchzoo.engine.base_preprocessor import BasePreprocessor
@pytest.fixture
def base_preprocessor():
BasePreprocessor.__abstractmethods__ = set()
base_processor = BasePreprocessor()
return base_processor
def test_save_load(base_preprocessor):
dirpath = '.tmpdir'
base_preprocessor.save(dirpath)
assert mz.load_preprocessor(dirpath)
shutil.rmtree(dirpath)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/engine/test_base_preprocessor.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/engine/test_base_preprocessor.py",
"repo_id": "ContextualSP",
"token_count": 154
}
| 292 |
<jupyter_start><jupyter_code>import torch
import numpy as np
import pandas as pd
import matchzoo as mz
print('matchzoo version', mz.__version__)
classification_task = mz.tasks.Classification(num_classes=2)
classification_task.metrics = ['acc']
print("`classification_task` initialized with metrics", classification_task.metrics)
print('data loading ...')
train_pack_raw = mz.datasets.quora_qp.load_data('train', task=classification_task)
dev_pack_raw = mz.datasets.quora_qp.load_data('dev', task=classification_task)
test_pack_raw = mz.datasets.quora_qp.load_data('test', task=classification_task)
print('data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`')
preprocessor = mz.models.ESIM.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='point'
)
devset = mz.dataloader.Dataset(
data_pack=dev_pack_processed,
mode='point'
)
padding_callback = mz.models.ESIM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=40,
stage='train',
sort=False,
callback=padding_callback
)
devloader = mz.dataloader.DataLoader(
dataset=devset,
batch_size=40,
stage='dev',
sort=False,
callback=padding_callback
)
model = mz.models.ESIM()
model.params['task'] = classification_task
model.params['embedding'] = embedding_matrix
model.params['mask_value'] = 0
model.params['dropout'] = 0.2
model.params['hidden_size'] = 200
model.params['lstm_layer'] = 1
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adam(model.parameters(),lr=1e-5)
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=devloader,
validate_interval=None,
epochs=5
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/classification/esim.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/classification/esim.ipynb",
"repo_id": "ContextualSP",
"token_count": 897
}
| 293 |
<jupyter_start><jupyter_code>%run init.ipynb
preprocessor = mz.models.MatchSRNN.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
padding_callback = mz.models.MatchSRNN.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
callback=padding_callback
)
model = mz.models.MatchSRNN()
model.params['task'] = ranking_task
model.params['embedding'] = embedding_matrix
model.params['channels'] = 4
model.params['units'] = 10
model.params['dropout'] = 0.2
model.params['direction'] = 'lt'
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/match_srnn.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/match_srnn.ipynb",
"repo_id": "ContextualSP",
"token_count": 703
}
| 294 |
#!/usr/bin/env bash
export seed=1
export config_file=train_configs_bert/concat.none.jsonnet
export model_file=checkpoints_sparc/sparc_bert_concat_none_model
export tables_file=dataset_sparc/tables.json
export database_path=dataset_sparc/database
export dataset_path=dataset_sparc
export train_data_path=dataset_sparc/train.json
export validation_data_path=dataset_sparc/dev.json
allennlp train -s ${model_file} ${config_file} \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
-o "{\"model.serialization_dir\":\"${model_file}\",\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\",\"dataset_reader.tables_file\":\"${tables_file}\",\"dataset_reader.database_path\":\"${database_path}\",\"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\",\"model.dataset_path\":\"${dataset_path}\"}"
|
ContextualSP/semantic_parsing_in_context/bash_files/linux/train_sparc_bert.bash/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/linux/train_sparc_bert.bash",
"repo_id": "ContextualSP",
"token_count": 331
}
| 295 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import logging
import os
from queue import Empty
from typing import List, Iterable, Iterator, Optional
import numpy as np
from allennlp.data.instance import Instance
from torch.multiprocessing import Process, Queue, Value, log_to_stderr
class logger:
"""
multiprocessing.log_to_stderr causes some output in the logs
even when we don't use this dataset reader. This is a small hack
to instantiate the stderr logger lazily only when it's needed
(which is only when using the MultiprocessDatasetReader)
"""
_logger = None
@classmethod
def info(cls, message: str) -> None:
if cls._logger is None:
cls._logger = log_to_stderr()
cls._logger.setLevel(logging.INFO)
cls._logger.info(message)
def _worker(
call_back,
input_queue: Queue,
output_queue: Queue,
num_active_workers: Value,
num_inflight_items: Value,
worker_id: int,
) -> None:
"""
    A worker that pulls filenames off the input queue, uses the supplied
    call_back to read them, and places the resulting instances on the output queue. When
there are no filenames left on the input queue, it decrements
num_active_workers to signal completion.
"""
logger.info(f"Reader worker: {worker_id} PID: {os.getpid()}")
# Keep going until you get a file_path that's None.
while True:
file_path = input_queue.get()
if file_path is None:
# It's important that we close and join the queue here before
# decrementing num_active_workers. Otherwise our parent may join us
# before the queue's feeder thread has passed all buffered items to
# the underlying pipe resulting in a deadlock.
#
# See:
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#pipes-and-queues
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#programming-guidelines
output_queue.close()
output_queue.join_thread()
# Decrementing is not atomic.
# See https://docs.python.org/2/library/multiprocessing.html#multiprocessing.Value.
with num_active_workers.get_lock():
num_active_workers.value -= 1
logger.info(f"Reader worker {worker_id} finished")
break
logger.info(f"reading instances from {file_path}")
instance = call_back(file_path)
with num_inflight_items.get_lock():
num_inflight_items.value += 1
output_queue.put(instance)
class QIterable(Iterable[Instance]):
"""
You can't set attributes on Iterators, so this is just a dumb wrapper
that exposes the output_queue.
"""
def __init__(self, output_queue_size, epochs_per_read, num_workers, call_back, file_path) -> None:
self.output_queue = Queue(output_queue_size)
self.epochs_per_read = epochs_per_read
self.num_workers = num_workers
self.file_path = file_path
self.call_back = call_back
# Initialized in start.
self.input_queue: Optional[Queue] = None
self.processes: List[Process] = []
# The num_active_workers and num_inflight_items counts in conjunction
# determine whether there could be any outstanding instances.
self.num_active_workers: Optional[Value] = None
self.num_inflight_items: Optional[Value] = None
def __iter__(self) -> Iterator[Instance]:
self.start()
# Keep going as long as not all the workers have finished or there are items in flight.
while self.num_active_workers.value > 0 or self.num_inflight_items.value > 0:
# Inner loop to minimize locking on self.num_active_workers.
while True:
try:
# Non-blocking to handle the empty-queue case.
yield self.output_queue.get(block=False, timeout=1.0)
with self.num_inflight_items.get_lock():
self.num_inflight_items.value -= 1
except Empty:
# The queue could be empty because the workers are
# all finished or because they're busy processing.
# The outer loop distinguishes between these two
# cases.
break
self.join()
def start(self) -> None:
shards = glob.glob(self.file_path)
# Ensure a consistent order before shuffling for testing.
shards.sort()
num_shards = len(shards)
# If we want multiple epochs per read, put shards in the queue multiple times.
self.input_queue = Queue(num_shards * self.epochs_per_read + self.num_workers)
for _ in range(self.epochs_per_read):
np.random.shuffle(shards)
for shard in shards:
self.input_queue.put(shard)
# Then put a None per worker to signify no more files.
for _ in range(self.num_workers):
self.input_queue.put(None)
assert (
not self.processes
), "Process list non-empty! You must call QIterable.join() before restarting."
self.num_active_workers = Value("i", self.num_workers)
self.num_inflight_items = Value("i", 0)
for worker_id in range(self.num_workers):
process = Process(
target=_worker,
args=(
self.call_back,
self.input_queue,
self.output_queue,
self.num_active_workers,
self.num_inflight_items,
worker_id,
),
)
logger.info(f"starting worker {worker_id}")
process.start()
self.processes.append(process)
def join(self) -> None:
for process in self.processes:
process.join()
self.processes.clear()
def __del__(self) -> None:
"""
Terminate processes if the user hasn't joined. This is necessary as
leaving stray processes running can corrupt shared state. In brief,
we've observed shared memory counters being reused (when the memory was
free from the perspective of the parent process) while the stray
workers still held a reference to them.
For a discussion of using destructors in Python in this manner, see
https://eli.thegreenplace.net/2009/06/12/safely-using-destructors-in-python/.
"""
for process in self.processes:
process.terminate()
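# Minimal usage sketch (hypothetical; `read_shard` and the glob pattern are
# placeholders, not part of this module):
#   def read_shard(path):
#       ...  # read one shard file and return the object to put on the queue
#
#   iterable = QIterable(
#       output_queue_size=1000,
#       epochs_per_read=1,
#       num_workers=4,
#       call_back=read_shard,
#       file_path="data/shards/*.json",
#   )
#   for item in iterable:
#       ...  # consume items as worker processes produce them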
|
ContextualSP/semantic_parsing_in_context/dataset_reader/reader_queue.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/dataset_reader/reader_queue.py",
"repo_id": "ContextualSP",
"token_count": 2878
}
| 296 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Mainly borrowed from allennlp package
"""
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple
from overrides import overrides
import torch
from torch.nn.modules.rnn import LSTM, LSTMCell
from torch.nn.modules.linear import Linear
from allennlp.modules import Attention
from allennlp.nn import util, Activation
from models.states_machine.rnn_statelet import RnnStatelet
from models.states_machine.grammar_based_state import GrammarBasedState
from allennlp.state_machines.transition_functions.transition_function import TransitionFunction
class BasicTransitionFunction(TransitionFunction[GrammarBasedState]):
"""
This is a typical transition function for a state-based decoder. We use an LSTM to track
decoder state, and at every timestep we compute an attention over the input question/utterance
to help in selecting the action. All actions have an embedding, and we use a dot product
between a predicted action embedding and the allowed actions to compute a distribution over
actions at each timestep.
We allow the first action to be predicted separately from everything else. This is optional,
and is because that's how the original WikiTableQuestions semantic parser was written. The
intuition is that maybe you want to predict the type of your output program outside of the
typical LSTM decoder (or maybe Jayant just didn't realize this could be treated as another
action...).
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
predict_start_type_separately : ``bool``, optional (default=True)
If ``True``, we will predict the initial action (which is typically the base type of the
logical form) using a different mechanism than our typical action decoder. We basically
just do a projection of the hidden state, and don't update the decoder RNN.
num_start_types : ``int``, optional (default=None)
If ``predict_start_type_separately`` is ``True``, this is the number of start types that
are in the grammar. We need this so we can construct parameters with the right shape.
This is unused if ``predict_start_type_separately`` is ``False``.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
dropout : ``float`` (optional, default=0.0)
num_layers: ``int``, (optional, default=1)
The number of layers in the decoder LSTM.
"""
def __init__(self,
encoder_output_dim: int,
decoder_input_dim: int,
action_embedding_dim: int,
input_attention: Attention,
sql_attention: Attention = None,
sql_output_dim: int = 100,
activation: Activation = Activation.by_name('relu')(),
predict_start_type_separately: bool = True,
num_start_types: int = None,
add_action_bias: bool = True,
dropout: float = 0.0,
num_layers: int = 1) -> None:
super().__init__()
self._input_attention = input_attention
if sql_attention:
self._sql_attention = sql_attention
self._hidden_to_sql = Linear(encoder_output_dim, sql_output_dim)
self._add_action_bias = add_action_bias
self._activation = activation
self._num_layers = num_layers
self._predict_start_type_separately = predict_start_type_separately
if predict_start_type_separately:
self._start_type_predictor = Linear(encoder_output_dim, num_start_types)
self._num_start_types = num_start_types
else:
self._start_type_predictor = None
self._num_start_types = None
# Decoder output dim needs to be the same as the encoder output dim since we initialize the
# hidden state of the decoder with the final hidden state of the encoder.
output_dim = encoder_output_dim
input_dim = output_dim
        # Our decoder input will be the concatenation of the attended question (plus the attended
        # SQL encoding, if used) and the previous action embedding, and we'll project that down to
        # the decoder's `input_dim`
        # [attention-based utterance; attention-based sql query; previous action embedding]
self._input_projection_layer = Linear(decoder_input_dim + action_embedding_dim, input_dim)
# Before making a prediction, we'll compute an attention over the input given our updated
# hidden state. Then we concatenate those with the decoder state and project to
# `action_embedding_dim` to make a prediction.
self._output_projection_layer = Linear(output_dim + decoder_input_dim, action_embedding_dim)
if self._num_layers > 1:
self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
else:
# We use a ``LSTMCell`` if we just have one layer because it is slightly faster since we are
# just running the LSTM for one step each time.
self._decoder_cell = LSTMCell(input_dim, output_dim)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
@overrides
def take_step(self,
state: GrammarBasedState,
max_actions: int = None,
allowed_actions: List[Set[int]] = None) -> List[GrammarBasedState]:
if self._predict_start_type_separately and not state.action_history[0]:
# The wikitables parser did something different when predicting the start type, which
# is our first action. So in this case we break out into a different function. We'll
# ignore max_actions on our first step, assuming there aren't that many start types.
return self._take_first_step(state, allowed_actions)
# Taking a step in the decoder consists of three main parts. First, we'll construct the
# input to the decoder and update the decoder's hidden state. Second, we'll use this new
# hidden state (and maybe other information) to predict an action. Finally, we will
# construct new states for the next step. Each new state corresponds to one valid action
# that can be taken from the current state, and they are ordered by their probability of
# being selected.
updated_state = self._update_decoder_state(state)
batch_results = self._compute_action_probabilities(state,
updated_state['hidden_state'],
updated_state['attention_weights'],
updated_state['predicted_action_embeddings'])
new_states = self._construct_next_states(state,
updated_state,
batch_results,
max_actions,
allowed_actions)
return new_states
def _update_decoder_state(self, state: GrammarBasedState) -> Dict[str, torch.Tensor]:
# For updating the decoder, we're doing a bunch of tensor operations that can be batched
# without much difficulty. So, we take all group elements and batch their tensors together
# before doing these decoder operations.
group_size = len(state.batch_indices)
attended_question = torch.stack([rnn_state.attended_input for rnn_state in state.rnn_state])
use_sql_attention = False
# use sql encoding attention
if state.rnn_state[0].attended_sql_input is not None:
use_sql_attention = True
if use_sql_attention:
attended_sql_input = torch.stack([rnn_state.attended_sql_input for rnn_state in state.rnn_state])
else:
attended_sql_input = [None] * group_size
if self._num_layers > 1:
hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state], 1)
memory_cell = torch.stack([rnn_state.memory_cell for rnn_state in state.rnn_state], 1)
else:
hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state])
memory_cell = torch.stack([rnn_state.memory_cell for rnn_state in state.rnn_state])
previous_action_embedding = torch.stack([rnn_state.previous_action_embedding
for rnn_state in state.rnn_state])
if use_sql_attention:
decoder_input = torch.cat([attended_question, attended_sql_input, previous_action_embedding], -1)
else:
decoder_input = torch.cat([attended_question, previous_action_embedding], -1)
# (group_size, decoder_input_dim)
projected_input = self._input_projection_layer(decoder_input)
decoder_input = self._activation(projected_input)
if self._num_layers > 1:
_, (hidden_state, memory_cell) = self._decoder_cell(decoder_input.unsqueeze(0),
(hidden_state, memory_cell))
else:
hidden_state, memory_cell = self._decoder_cell(decoder_input, (hidden_state, memory_cell))
hidden_state = self._dropout(hidden_state)
# (group_size, encoder_output_dim)
encoder_outputs = torch.stack([state.rnn_state[0].encoder_outputs[i] for i in state.batch_indices])
encoder_output_mask = torch.stack([state.rnn_state[0].encoder_output_mask[i] for i in state.batch_indices])
if self._num_layers > 1:
decoder_memory = [hidden_state[-1]]
else:
decoder_memory = [hidden_state]
if use_sql_attention:
sql_outputs = torch.stack([state.rnn_state[0].sql_outputs[i] for i in state.batch_indices])
sql_output_mask = torch.stack([state.rnn_state[0].sql_output_mask[i] for i in state.batch_indices])
if self._num_layers > 1:
query_vec = self._hidden_to_sql(hidden_state[-1])
attended_sql_input = self.attend_on_sql(query_vec,
sql_outputs,
sql_output_mask)
else:
query_vec = self._hidden_to_sql(hidden_state)
attended_sql_input = self.attend_on_sql(query_vec,
sql_outputs,
sql_output_mask)
decoder_memory.append(attended_sql_input)
if self._num_layers > 1:
attended_question, attention_weights = self.attend_on_question(hidden_state[-1],
encoder_outputs,
encoder_output_mask)
else:
attended_question, attention_weights = self.attend_on_question(hidden_state,
encoder_outputs,
encoder_output_mask)
decoder_memory.append(attended_question)
action_query = torch.cat(decoder_memory, dim=-1)
# (group_size, action_embedding_dim)
projected_query = self._activation(self._output_projection_layer(action_query))
predicted_action_embeddings = self._dropout(projected_query)
if self._add_action_bias:
# NOTE: It's important that this happens right before the dot product with the action
# embeddings. Otherwise this isn't a proper bias. We do it here instead of right next
# to the `.mm` below just so we only do it once for the whole group.
ones = predicted_action_embeddings.new([[1] for _ in range(group_size)])
predicted_action_embeddings = torch.cat([predicted_action_embeddings, ones], dim=-1)
return {
'hidden_state': hidden_state,
'memory_cell': memory_cell,
'attended_question': attended_question,
'attention_weights': attention_weights,
'predicted_action_embeddings': predicted_action_embeddings,
'attended_sql_input': attended_sql_input
}
def _compute_action_probabilities(self,
state: GrammarBasedState,
hidden_state: torch.Tensor,
attention_weights: torch.Tensor,
predicted_action_embeddings: torch.Tensor
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
# We take a couple of extra arguments here because subclasses might use them.
# pylint: disable=unused-argument,no-self-use
# In this section we take our predicted action embedding and compare it to the available
# actions in our current state (which might be different for each group element). For
# computing action scores, we'll forget about doing batched / grouped computation, as it
# adds too much complexity and doesn't speed things up, anyway, with the operations we're
# doing here. This means we don't need any action masks, as we'll only get the right
# lengths for what we're computing.
group_size = len(state.batch_indices)
actions = state.get_valid_actions()
batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
for group_index in range(group_size):
instance_actions = actions[group_index]
predicted_action_embedding = predicted_action_embeddings[group_index]
action_embeddings, output_action_embeddings, action_ids = instance_actions['global']
# This is just a matrix product between a (num_actions, embedding_dim) matrix and an
# (embedding_dim, 1) matrix.
action_logits = action_embeddings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(-1)
current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)
# This is now the total score for each state after taking each action. We're going to
# sort by this later, so it's important that this is the total score, not just the
# score for the current action.
log_probs = state.score[group_index] + current_log_probs
batch_results[state.batch_indices[group_index]].append((group_index,
log_probs,
current_log_probs,
output_action_embeddings,
action_ids))
return batch_results
def _construct_next_states(self,
state: GrammarBasedState,
updated_rnn_state: Dict[str, torch.Tensor],
batch_action_probs: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]],
max_actions: int,
allowed_actions: List[Set[int]]):
# pylint: disable=no-self-use
# We'll yield a bunch of states here that all have a `group_size` of 1, so that the
# learning algorithm can decide how many of these it wants to keep, and it can just regroup
# them later, as that's a really easy operation.
#
# We first define a `make_state` method, as in the logic that follows we want to create
# states in a couple of different branches, and we don't want to duplicate the
# state-creation logic. This method creates a closure using variables from the method, so
# it doesn't make sense to pull it out of here.
# Each group index here might get accessed multiple times, and doing the slicing operation
# each time is more expensive than doing it once upfront. These three lines give about a
# 10% speedup in training time.
group_size = len(state.batch_indices)
chunk_index = 1 if self._num_layers > 1 else 0
hidden_state = [x.squeeze(chunk_index)
for x in updated_rnn_state['hidden_state'].chunk(group_size, chunk_index)]
memory_cell = [x.squeeze(chunk_index)
for x in updated_rnn_state['memory_cell'].chunk(group_size, chunk_index)]
attended_question = [x.squeeze(0) for x in updated_rnn_state['attended_question'].chunk(group_size, 0)]
# TODO:
if updated_rnn_state['attended_sql_input'][0] is not None:
attended_sql_input = [x.squeeze(0) for x in updated_rnn_state['attended_sql_input'].chunk(group_size, 0)]
else:
attended_sql_input = updated_rnn_state['attended_sql_input']
def make_state(group_index: int,
action: int,
new_score: torch.Tensor,
action_embedding: torch.Tensor) -> GrammarBasedState:
new_rnn_state = RnnStatelet(hidden_state[group_index],
memory_cell[group_index],
action_embedding,
attended_question[group_index],
state.rnn_state[group_index].encoder_outputs,
state.rnn_state[group_index].encoder_output_mask,
attended_sql_input[group_index],
state.rnn_state[group_index].sql_outputs,
state.rnn_state[group_index].sql_output_mask)
batch_index = state.batch_indices[group_index]
for i, _, current_log_probs, _, actions in batch_action_probs[batch_index]:
if i == group_index:
considered_actions = actions
probabilities = current_log_probs.exp().cpu()
break
return state.new_state_from_group_index(group_index,
action,
new_score,
new_rnn_state,
considered_actions,
probabilities,
updated_rnn_state['attention_weights'])
new_states = []
for _, results in batch_action_probs.items():
if allowed_actions and not max_actions:
# If we're given a set of allowed actions, and we're not just keeping the top k of
# them, we don't need to do any sorting, so we can speed things up quite a bit.
for group_index, log_probs, _, action_embeddings, actions in results:
for log_prob, action_embedding, action in zip(log_probs, action_embeddings, actions):
if action in allowed_actions[group_index]:
new_states.append(make_state(group_index, action, log_prob, action_embedding))
else:
# In this case, we need to sort the actions. We'll do that on CPU, as it's easier,
# and our action list is on the CPU, anyway.
group_indices = []
group_log_probs: List[torch.Tensor] = []
group_action_embeddings = []
group_actions = []
for group_index, log_probs, _, action_embeddings, actions in results:
if not actions:
continue
group_indices.extend([group_index] * len(actions))
group_log_probs.append(log_probs)
group_action_embeddings.append(action_embeddings)
group_actions.extend(actions)
if len(group_log_probs) == 0:
continue
log_probs = torch.cat(group_log_probs, dim=0)
action_embeddings = torch.cat(group_action_embeddings, dim=0)
log_probs_cpu = log_probs.data.cpu().numpy().tolist()
batch_states = [(log_probs_cpu[i],
group_indices[i],
log_probs[i],
action_embeddings[i],
group_actions[i])
for i in range(len(group_actions))
if (not allowed_actions or
group_actions[i] in allowed_actions[group_indices[i]])]
# We use a key here to make sure we're not trying to compare anything on the GPU.
batch_states.sort(key=lambda x: x[0], reverse=True)
if max_actions:
batch_states = batch_states[:max_actions]
for _, group_index, log_prob, action_embedding, action in batch_states:
new_states.append(make_state(group_index, action, log_prob, action_embedding))
return new_states
def _take_first_step(self,
state: GrammarBasedState,
allowed_actions: List[Set[int]] = None) -> List[GrammarBasedState]:
# We'll just do a projection from the current hidden state (which was initialized with the
# final encoder output) to the number of start actions that we have, normalize those
# logits, and use that as our score. We end up duplicating some of the logic from
# `_compute_new_states` here, but we do things slightly differently, and it's easier to
# just copy the parts we need than to try to re-use that code.
# (group_size, hidden_dim)
hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state])
# (group_size, num_start_type)
start_action_logits = self._start_type_predictor(hidden_state)
log_probs = torch.nn.functional.log_softmax(start_action_logits, dim=-1)
sorted_log_probs, sorted_actions = log_probs.sort(dim=-1, descending=True)
sorted_actions = sorted_actions.detach().cpu().numpy().tolist()
if state.debug_info is not None:
probs_cpu = log_probs.exp().detach().cpu().numpy().tolist()
else:
probs_cpu = [None] * len(state.batch_indices)
        # state.get_valid_actions() will return a list that is consistently sorted, so as long as
# the set of valid start actions never changes, we can just match up the log prob indices
# above with the position of each considered action, and we're good.
valid_actions = state.get_valid_actions()
considered_actions = [actions['global'][2] for actions in valid_actions]
if len(considered_actions[0]) != self._num_start_types:
raise RuntimeError("Calculated wrong number of initial actions. Expected "
f"{self._num_start_types}, found {len(considered_actions[0])}.")
best_next_states: Dict[int, List[Tuple[int, int, int]]] = defaultdict(list)
for group_index, (batch_index, group_actions) in enumerate(zip(state.batch_indices, sorted_actions)):
for action_index, action in enumerate(group_actions):
# `action` is currently the index in `log_probs`, not the actual action ID. To get
# the action ID, we need to go through `considered_actions`.
action = considered_actions[group_index][action]
if allowed_actions is not None and action not in allowed_actions[group_index]:
# This happens when our _decoder trainer_ wants us to only evaluate certain
# actions, likely because they are the gold actions in this state. We just skip
# emitting any state that isn't allowed by the trainer, because constructing the
# new state can be expensive.
continue
best_next_states[batch_index].append((group_index, action_index, action))
new_states = []
for batch_index, best_states in sorted(best_next_states.items()):
for group_index, action_index, action in best_states:
# We'll yield a bunch of states here that all have a `group_size` of 1, so that the
# learning algorithm can decide how many of these it wants to keep, and it can just
# regroup them later, as that's a really easy operation.
new_score = state.score[group_index] + sorted_log_probs[group_index, action_index]
# This part is different from `_compute_new_states` - we're just passing through
# the previous RNN state, as predicting the start type wasn't included in the
# decoder RNN in the original model.
new_rnn_state = state.rnn_state[group_index]
new_state = state.new_state_from_group_index(group_index,
action,
new_score,
new_rnn_state,
considered_actions[group_index],
probs_cpu[group_index],
None)
new_states.append(new_state)
return new_states
def attend_on_question(self,
query: torch.Tensor,
encoder_outputs: torch.Tensor,
encoder_output_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given a query (which is typically the decoder hidden state), compute an attention over the
output of the question encoder, and return a weighted sum of the question representations
given this attention. We also return the attention weights themselves.
This is a simple computation, but we have it as a separate method so that the ``forward``
method on the main parser module can call it on the initial hidden state, to simplify the
logic in ``take_step``.
"""
# (group_size, question_length)
question_attention_weights = self._input_attention(query,
encoder_outputs,
encoder_output_mask)
# (group_size, encoder_output_dim)
attended_question = util.weighted_sum(encoder_outputs, question_attention_weights)
return attended_question, question_attention_weights
def attend_on_sql(self,
query: torch.Tensor,
sql_outputs: torch.Tensor,
sql_output_mask: torch.Tensor) -> torch.Tensor:
# (group_size, question_length)
assert self._sql_attention is not None
question_attention_weights = self._sql_attention(query,
sql_outputs,
sql_output_mask)
# (group_size, encoder_output_dim)
attended_question = util.weighted_sum(sql_outputs, question_attention_weights)
return attended_question
|
ContextualSP/semantic_parsing_in_context/models/transition_functions/basic_transition_function.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/transition_functions/basic_transition_function.py",
"repo_id": "ContextualSP",
"token_count": 13252
}
| 297 |
{
"random_seed": 42,
"numpy_seed": 42,
"pytorch_seed": 42,
"dataset_reader": {
"type": "sparc",
"lazy": false,
"loading_limit": -1,
"context_mode": "none"
},
"model": {
"type": "sparc",
"loss_mask": 8,
"serialization_dir": "",
"text_embedder": {
"tokens": {
"type": "embedding",
"embedding_dim": 100,
"trainable": true,
"padding_index": 0
}
},
"action_embedding_dim": 100,
"entity_embedding_dim": 100,
"text_encoder": {
"type": "lstm",
"input_size": 200,
"hidden_size": 200,
"bidirectional": true,
"num_layers": 1
},
"decoder_beam_search": {
"beam_size": 10
},
"training_beam_size": 1,
"max_decoding_steps": 100,
"input_attention": {
"type": "dot_product"
},
"use_feature_score": true,
"use_schema_encoder": true,
"use_linking_embedding": true,
"use_attend_over_history": true,
"use_context_gate": true,
"gate_attn_size": 100,
"attn_on_self": true,
"use_sigmoid_gate": false,
"use_unk_candidate": true,
"dropout_rate": 0.5
},
"iterator": {
"type": "basic",
"batch_size": 8
},
"validation_iterator": {
"type": "basic",
"batch_size": 1
},
"trainer": {
"num_epochs": 100,
"cuda_device": 0,
"patience": 10,
"validation_metric": "+sql_exact_match",
"optimizer": {
"type": "adam",
"lr": 1e-3
},
"num_serialized_models_to_keep": 10,
"should_log_learning_rate": true
}
}
|
ContextualSP/semantic_parsing_in_context/train_configs/none.gate.jsonnet/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/train_configs/none.gate.jsonnet",
"repo_id": "ContextualSP",
"token_count": 691
}
| 298 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and Microsoft Corporation.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
# from .bpe_utils import get_encoder
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
"""
Helper script to encode raw text with the GPT-2 BPE using multiple processes.
The encoder.json and vocab.bpe files can be obtained here:
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--encoder-json",
help="path to encoder.json",
)
parser.add_argument(
"--vocab-bpe",
type=str,
help="path to vocab.bpe",
)
# parser.add_argument(
# "--special-token",
# type=str,
# help="path to special tokens split by \n"
# )
parser.add_argument(
"--inputs",
nargs="+",
default=["-"],
help="input files to filter/encode",
)
parser.add_argument(
"--outputs",
nargs="+",
default=["-"],
help="path to save encoded outputs",
)
parser.add_argument(
"--keep-empty",
action="store_true",
help="keep empty lines",
)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
encoder = MultiprocessingEncoder(args)
pool = Pool(args.workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)
stats = Counter()
for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
if filt == "PASS":
for enc_line, output_h in zip(enc_lines, outputs):
print(enc_line, file=output_h)
else:
stats["num_filtered_" + filt] += 1
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
for k, v in stats.most_common():
print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
# bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe, self.args.special_token)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids))
def decode(self, tokens):
global bpe
return bpe.decode(tokens)
def encode_lines(self, lines):
"""
Encode a set of lines. All lines will be encoded together.
"""
enc_lines = []
for line in lines:
line = line.strip()
if len(line) == 0 and not self.args.keep_empty:
return ["EMPTY", None]
tokens = self.encode(line)
enc_lines.append(" ".join(tokens))
return ["PASS", enc_lines]
def decode_lines(self, lines):
dec_lines = []
for line in lines:
tokens = map(int, line.strip().split())
dec_lines.append(self.decode(tokens))
return ["PASS", dec_lines]
if __name__ == "__main__":
main()
|
ContextualSP/unified_parser_text_to_sql/multiprocessing_bpe_encoder.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/multiprocessing_bpe_encoder.py",
"repo_id": "ContextualSP",
"token_count": 1855
}
| 299 |
# Spider: A Large-Scale Human-Labeled Dataset for Complex and Cross-Domain Semantic Parsing and Text-to-SQL Task
Spider is a large human-labeled dataset for complex and cross-domain semantic parsing and text-to-SQL task (natural language interfaces for relational databases). It is released along with our EMNLP 2018 paper: [Spider: A Large-Scale Human-Labeled Dataset for Complex and Cross-Domain Semantic Parsing and Text-to-SQL Task](https://arxiv.org/abs/1809.08887). This repo contains all code for evaluation, preprocessing, and all baselines used in our paper. Please refer to [the task site](https://yale-lily.github.io/spider) for more general introduction and the leaderboard.
### Changelog
- `06/07/2020` We corrected some annotation errors and label mismatches (not errors) in Spider dev and test sets (~4% of dev examples updated, click [here](https://github.com/taoyds/spider/commit/25fcd85d9b6e94acaeb5e9172deadeefeed83f5e#diff-18b0a730a7b0d29b0a78a5070d971d49) for more details). Please download the Spider dataset from [the page](https://yale-lily.github.io/spider) again.
- `01/16/2020` For value prediction (in order to compute the execution accuracy), your model should be able to 1) copy from the question inputs, 2) retrieve from the database content (database content is available), or 3) generate numbers (e.g. 3 in "LIMIT 3").
- `1/14/2019` The submission tutorial is ready! Please follow it to get your results on the unreleased test data.
- `12/17/2018` We updated 7 sqlite database files. Please download the Spider data from the official website again. Please refer to [the issue 14](https://github.com/taoyds/spider/issues/14) for more details.
- `10/25/2018`: evaluation script is updated so that the table in `count(*)` cases will be evaluated as well. Please check out [the issue 5](https://github.com/taoyds/spider/issues/5) for more info. Results of all baselines and [syntaxSQL](https://github.com/taoyds/syntaxSQL) in the paper are updated as well.
- `10/25/2018`: to get the latest SQL parsing results (a few small bugs fixed), please use `preprocess/parse_raw_json.py` to update. Please refer to [the issue 3](https://github.com/taoyds/spider/issues/3) for more details.
### Citation
The dataset is annotated by 11 college students. When you use the Spider dataset, we would appreciate it if you cite the following:
```
@inproceedings{Yu&al.18c,
title = {Spider: A Large-Scale Human-Labeled Dataset for Complex and Cross-Domain Semantic Parsing and Text-to-SQL Task},
  author    = {Tao Yu and Rui Zhang and Kai Yang and Michihiro Yasunaga and Dongxu Wang and Zifan Li and James Ma and Irene Li and Qingning Yao and Shanelle Roman and Zilin Zhang and Dragomir Radev},
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
year = 2018
}
```
### Installation
`evaluation.py` and `process_sql.py` are written in Python 3. Environment setup for each baseline is in the README under each baseline directory.
### Data Content and Format
#### Question, SQL, and Parsed SQL
Each example in `train.json` and `dev.json` contains the following fields:
- `question`: the natural language question
- `question_toks`: the natural language question tokens
- `db_id`: the database id to which this question is addressed.
- `query`: the SQL query corresponding to the question.
- `query_toks`: the SQL query tokens corresponding to the question.
- `sql`: parsed results of this SQL query using `process_sql.py`. Please refer to `parsed_sql_examples.sql` in the `preprocess` directory for the detailed documentation. A minimal loading sketch follows the example below.
```
{
"db_id": "world_1",
"query": "SELECT avg(LifeExpectancy) FROM country WHERE Name NOT IN (SELECT T1.Name FROM country AS T1 JOIN countrylanguage AS T2 ON T1.Code = T2.CountryCode WHERE T2.Language = \"English\" AND T2.IsOfficial = \"T\")",
"query_toks": ["SELECT", "avg", "(", "LifeExpectancy", ")", "FROM", ...],
"question": "What is average life expectancy in the countries where English is not the official language?",
"question_toks": ["What", "is", "average", "life", ...],
"sql": {
"except": null,
"from": {
"conds": [],
"table_units": [
...
},
"groupBy": [],
"having": [],
"intersect": null,
"limit": null,
"orderBy": [],
"select": [
...
],
"union": null,
"where": [
[
true,
...
{
"except": null,
"from": {
"conds": [
[
false,
2,
[
...
},
"groupBy": [],
"having": [],
"intersect": null,
"limit": null,
"orderBy": [],
"select": [
false,
...
"union": null,
"where": [
[
false,
2,
[
0,
...
}
},
```
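A minimal sketch (standard library only; the file name follows the README and should be adjusted to wherever your copy of the data lives) for loading the examples and inspecting these fields:
```
import json
with open('train.json') as f:
    examples = json.load(f)
ex = examples[0]
print(ex['db_id'])        # database this question is asked against
print(ex['question'])     # natural language question
print(ex['query'])        # gold SQL string
print(sorted(ex['sql']))  # parsed clauses: except, from, groupBy, having, ...
```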
#### Tables
`tables.json` contains the following information for each database:
- `db_id`: database id
- `table_names_original`: original table names stored in the database.
- `table_names`: cleaned and normalized table names. We make sure the table names are meaningful. [to be changed]
- `column_names_original`: original column names stored in the database. Each column looks like: `[0, "id"]`. `0` is the index (in `table_names`) of the table this column belongs to, which is `city` in this case. `"id"` is the column name.
- `column_names`: cleaned and normalized column names. We make sure the column names are meaningful. [to be changed]
- `column_types`: data type of each column
- `foreign_keys`: foreign keys in the database. `[3, 8]` is a pair of column indices into `column_names`; the two columns form a foreign-key link between two different tables (see the sketch after the example below).
- `primary_keys`: primary keys in the database. Each number is the index of `column_names`.
```
{
"column_names": [
[
0,
"id"
],
[
0,
"name"
],
[
0,
"country code"
],
[
0,
"district"
],
.
.
.
],
"column_names_original": [
[
0,
"ID"
],
[
0,
"Name"
],
[
0,
"CountryCode"
],
[
0,
"District"
],
.
.
.
],
"column_types": [
"number",
"text",
"text",
"text",
.
.
.
],
"db_id": "world_1",
"foreign_keys": [
[
3,
8
],
[
23,
8
]
],
"primary_keys": [
1,
8,
23
],
"table_names": [
"city",
"sqlite sequence",
"country",
"country language"
],
"table_names_original": [
"city",
"sqlite_sequence",
"country",
"countrylanguage"
]
}
```
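As a rough illustration (not part of the official preprocessing code), the indices above can be resolved back to readable `table.column` names like this:
```
import json
with open('tables.json') as f:
    schemas = {db['db_id']: db for db in json.load(f)}
db = schemas['world_1']
def qualified_name(col_idx):
    table_idx, col = db['column_names_original'][col_idx]
    return db['table_names_original'][table_idx] + '.' + col
for col_idx, ref_idx in db['foreign_keys']:
    print(qualified_name(col_idx), 'references', qualified_name(ref_idx))
```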
#### Databases
All table contents are contained in corresponding SQLite3 database files.
### Evaluation
Our evaluation metrics include Component Matching, Exact Matching, and Execution Accuracy. For component and exact matching evaluation, instead of simply conducting string comparison between the predicted and gold SQL queries, we decompose each SQL into several clauses, and conduct set comparison in each SQL clause.
For Execution Accuracy, our current models do not predict any value in SQL conditions, so we do not report execution accuracy. However, we encourage you to report it in future submissions. For value prediction, you can assume that a list of gold values for each question is given. Your model has to fill them into the right slots in the SQL.
Please refer to [our paper](https://arxiv.org/abs/1809.08887) and [this page](https://github.com/taoyds/spider/tree/master/evaluation) for more details and examples; a toy sketch of the clause-level set comparison follows the usage block below.
```
python evaluation.py --gold [gold file] --pred [predicted file] --etype [evaluation type] --db [database dir] --table [table file]
arguments:
[gold file] gold.sql file where each line is `a gold SQL \t db_id`
[predicted file] predicted sql file where each line is a predicted SQL
[evaluation type] "match" for exact set matching score, "exec" for execution score, and "all" for both
[database dir] directory which contains sub-directories where each SQLite3 database is stored
  [table file]       tables.json file which includes foreign key info of each database
```
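As a toy sketch of the clause-level idea only (the official logic lives in `evaluation.py` and additionally normalizes aliases, handles nesting, etc.), an order-insensitive comparison of a single parsed clause can be written as:
```
import json
def clause_as_set(clause):
    # serialize each element so nested lists can be used as set members
    return {json.dumps(item, sort_keys=True) for item in clause}
def where_clause_match(gold_sql, pred_sql):
    # gold_sql / pred_sql are the parsed `sql` dicts described above
    return clause_as_set(gold_sql['where']) == clause_as_set(pred_sql['where'])
```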
### FAQ
|
ContextualSP/unified_parser_text_to_sql/third_party/spider/README.md/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/third_party/spider/README.md",
"repo_id": "ContextualSP",
"token_count": 3773
}
| 300 |
SUPERNET:
MLP_RATIO: 4.0
NUM_HEADS: 10
EMBED_DIM: 640
DEPTH: 16
SEARCH_SPACE:
MLP_RATIO:
- 3.0
- 3.5
- 4.0
NUM_HEADS:
- 9
- 10
DEPTH:
- 14
- 15
- 16
EMBED_DIM:
- 528
- 576
- 624
|
Cream/AutoFormer/experiments/supernet/supernet-B.yaml/0
|
{
"file_path": "Cream/AutoFormer/experiments/supernet/supernet-B.yaml",
"repo_id": "Cream",
"token_count": 155
}
| 301 |
import torch
import math
import warnings
from itertools import repeat
try:
    from torch._six import container_abcs
except ImportError:  # torch._six was removed in newer PyTorch releases
    import collections.abc as container_abcs
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
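# Illustrative usage (not part of the original file): in training mode, DropPath zeroes each
# sample with probability drop_prob and rescales the survivors by 1 / (1 - drop_prob), so the
# expected value of the output matches the input; in eval mode it is the identity.
#   dp = DropPath(drop_prob=0.1)
#   dp.train()
#   y = dp(torch.randn(8, 197, 768))   # each of the 8 samples is zeroed with probability 0.1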
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
|
Cream/AutoFormer/model/utils.py/0
|
{
"file_path": "Cream/AutoFormer/model/utils.py",
"repo_id": "Cream",
"token_count": 1585
}
| 302 |
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
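# Illustrative round-trip check (not part of the original file): when H and W are divisible by
# window_size, window_reverse(window_partition(x, ws), ws, H, W) reconstructs x exactly.
#   x = torch.randn(2, 56, 56, 96)           # (B, H, W, C)
#   windows = window_partition(x, 7)         # (2 * 8 * 8, 7, 7, 96)
#   assert torch.equal(window_reverse(windows, 7, 56, 56), x)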
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = 32
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, num_heads * 32 * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
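# Illustrative shapes (not part of the original file): head_dim is fixed to 32 here, so with
# dim=96 and num_heads=3 the qkv and output projections line up, and the module maps
# (num_windows * B, window_size * window_size, dim) to the same shape.
#   attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
#   out = attn(torch.randn(8, 49, 96))       # -> (8, 49, 96)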
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, out_dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, out_dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, out_dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads[i], window_size=window_size[i],
shift_size=0 if (i % 2 == 0) else window_size[i] // 2,
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
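# Illustrative shapes (not part of the original file): with the defaults, a 224x224 RGB image is
# split into 56 * 56 = 3136 non-overlapping 4x4 patches, each projected to 96 channels.
#   patch_embed = PatchEmbed()
#   tokens = patch_embed(torch.randn(1, 3, 224, 224))   # -> (1, 3136, 96)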
class SSSTransformer(nn.Module):
r"""
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=[96, 192, 384, 768], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=[7, 7, 7, 7], mlp_ratio=[4., 4., 4., 4.], qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim[-1])
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim[0],
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0]))  # embed_dim is a per-stage list; the positional embedding must match the first-stage dim produced by patch_embed
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim[i_layer]),
out_dim=int(embed_dim[i_layer+1]) if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size[i_layer],
mlp_ratio=self.mlp_ratio[i_layer],
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
|
Cream/AutoFormerV2/model/SSS.py/0
|
{
"file_path": "Cream/AutoFormerV2/model/SSS.py",
"repo_id": "Cream",
"token_count": 11447
}
| 303 |
from .io import imread, imwrite, imfrombytes
from .transforms import (bgr2gray, gray2bgr, bgr2rgb, rgb2bgr, bgr2hsv,
hsv2bgr, bgr2hls, hls2bgr, iminvert, imflip, imrotate,
imcrop, impad, impad_to_multiple, imnormalize,
imdenormalize, imresize, imresize_like, imrescale)
__all__ = [
'imread', 'imwrite', 'imfrombytes', 'bgr2gray', 'gray2bgr', 'bgr2rgb',
'rgb2bgr', 'bgr2hsv', 'hsv2bgr', 'bgr2hls', 'hls2bgr', 'iminvert',
'imflip', 'imrotate', 'imcrop', 'impad', 'impad_to_multiple',
'imnormalize', 'imdenormalize', 'imresize', 'imresize_like', 'imrescale'
]
|
Cream/CDARTS/CDARTS_detection/mmcv/image/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/image/__init__.py",
"repo_id": "Cream",
"token_count": 340
}
| 304 |
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
import torch
import torchvision
from terminaltables import AsciiTable
from torch.utils import model_zoo
import mmcv
from .utils import get_dist_info
open_mmlab_model_urls = {
} # yapf: disable
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
shape_mismatch_pairs = []
own_state = module.state_dict()
for name, param in state_dict.items():
if name not in own_state:
unexpected_keys.append(name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
if param.size() != own_state[name].size():
shape_mismatch_pairs.append(
[name, own_state[name].size(),
param.size()])
continue
own_state[name].copy_(param)
all_missing_keys = set(own_state.keys()) - set(state_dict.keys())
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
err_msg = []
if unexpected_keys:
err_msg.append('unexpected key in source state_dict: {}\n'.format(
', '.join(unexpected_keys)))
if missing_keys:
err_msg.append('missing keys in source state_dict: {}\n'.format(
', '.join(missing_keys)))
if shape_mismatch_pairs:
mismatch_info = 'these keys have mismatched shape:\n'
header = ['key', 'expected shape', 'loaded shape']
table_data = [header] + shape_mismatch_pairs
table = AsciiTable(table_data)
err_msg.append(mismatch_info + table.table)
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def load_url_dist(url):
""" In distributed setting, this function only download checkpoint at
local rank 0 """
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(url)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(url)
return checkpoint
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module('torchvision.models.{}'.format(name))
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
# load checkpoint from modelzoo or file or url
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_urls = get_torchvision_models()
model_name = filename[11:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('torchvision://'):
model_urls = get_torchvision_models()
model_name = filename[14:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_name = filename[13:]
checkpoint = load_url_dist(open_mmlab_model_urls[model_name])
elif filename.startswith(('http://', 'https://')):
checkpoint = load_url_dist(filename)
else:
if not osp.isfile(filename):
raise IOError('{} is not a checkpoint file'.format(filename))
checkpoint = torch.load(filename, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
# strip prefix of state_dict
"""2019/11/15 for resume one-stage module"""
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()}
# load state_dict
if hasattr(model, 'module'):
load_state_dict(model.module, state_dict, strict, logger)
else:
load_state_dict(model, state_dict, strict, logger)
return checkpoint
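# A minimal usage sketch for load_checkpoint (the model variable and checkpoint
# locations below are assumed for illustration, not taken from this repo):
#
#   model = torchvision.models.resnet50()
#   load_checkpoint(model, 'torchvision://resnet50')            # from the torchvision model zoo
#   load_checkpoint(model, '/path/to/epoch_12.pth',             # or from a local file
#                   map_location='cpu', strict=False, logger=None)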
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError('meta must be a dict or None, but got {}'.format(
type(meta)))
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
mmcv.mkdir_or_exist(osp.dirname(filename))
if hasattr(model, 'module'):
model = model.module
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(model.state_dict())
}
if optimizer is not None:
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, filename)
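# A minimal usage sketch for save_checkpoint (path and optimizer are assumed for
# illustration):
#
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
#   save_checkpoint(model, '/tmp/work_dir/epoch_1.pth', optimizer=optimizer,
#                   meta=dict(epoch=1, iter=5000))
#   # the saved file holds 'meta', 'state_dict' (moved to CPU) and 'optimizer'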
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/checkpoint.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/checkpoint.py",
"repo_id": "Cream",
"token_count": 3123
}
| 305 |
from collections import OrderedDict
import numpy as np
class LogBuffer(object):
def __init__(self):
self.val_history = OrderedDict()
self.n_history = OrderedDict()
self.output = OrderedDict()
self.ready = False
def clear(self):
self.val_history.clear()
self.n_history.clear()
self.clear_output()
def clear_output(self):
self.output.clear()
self.ready = False
def update(self, vars, count=1):
assert isinstance(vars, dict)
for key, var in vars.items():
if key not in self.val_history:
self.val_history[key] = []
self.n_history[key] = []
self.val_history[key].append(var)
self.n_history[key].append(count)
def average(self, n=0):
"""Average latest n values or all values"""
assert n >= 0
for key in self.val_history:
values = np.array(self.val_history[key][-n:])
nums = np.array(self.n_history[key][-n:])
avg = np.sum(values * nums) / np.sum(nums)
self.output[key] = avg
self.ready = True
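# A minimal usage sketch for LogBuffer (values are illustrative): averages are
# weighted by the counts passed to update().
#
#   buf = LogBuffer()
#   buf.update({'loss': 1.0}, count=2)
#   buf.update({'loss': 0.5}, count=2)
#   buf.average()                      # n=0 averages the whole history
#   assert buf.output['loss'] == 0.75 and buf.ready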
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/log_buffer.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/log_buffer.py",
"repo_id": "Cream",
"token_count": 552
}
| 306 |
#include "flow_warp.hpp"
void FlowWarp(double* img, double* flow, double* out, const int height,
const int width, const int channels, const int filling_value = 0,
const int interpolateMode = 0) {
for (int h = 0; h < height; h++) {
for (int w = 0; w < width; w++) {
int offset_cur = h * width + w;
int offset_img = offset_cur * channels;
int offset_flow = offset_cur * 2;
double x, y;
x = h + flow[offset_flow + 1];
y = w + flow[offset_flow];
if (x < 0 || x >= height - 1 || y < 0 || y >= width - 1) {
for (int k = 0; k < channels; k++) {
out[offset_img + k] = filling_value;
}
continue;
}
if (interpolateMode == 0)
BilinearInterpolate(img, width, height, channels, x, y,
out + offset_img);
else if (interpolateMode == 1)
NNInterpolate(img, width, height, channels, x, y, out + offset_img);
else
throw "Not Implemented Interpolation Method";
}
}
}
void BilinearInterpolate(const double* img, int width, int height, int channels,
double x, double y, double* out) {
int xx, yy, m, n, u, v, offset, offset_img, l;
xx = x;
yy = y;
double dx, dy, s;
dx = __max__(__min__(x - xx, double(1)), double(0));
dy = __max__(__min__(y - yy, double(1)), double(0));
for (m = 0; m <= 1; m++)
for (n = 0; n <= 1; n++) {
u = EnforceRange(yy + n, width);
v = EnforceRange(xx + m, height);
offset = v * width + u;
offset_img = offset * channels;
s = fabs(1 - m - dx) * fabs(1 - n - dy);
for (l = 0; l < channels; l++) out[l] += img[offset_img + l] * s;
}
}
void NNInterpolate(const double* img, int width, int height, int channels,
double x, double y, double* out) {
int xx, yy, m, n, u, v, offset, offset_img, l;
xx = x;
yy = y;
double dx, dy;
dx = __max__(__min__(x - xx, double(1)), double(0));
dy = __max__(__min__(y - yy, double(1)), double(0));
m = (dx < 0.5) ? 0 : 1;
n = (dy < 0.5) ? 0 : 1;
u = EnforceRange(yy + n, width);
v = EnforceRange(xx + m, height);
offset = v * width + u;
offset_img = offset * channels;
for (l = 0; l < channels; l++) out[l] = img[offset_img + l];
}
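// A minimal usage sketch (buffer sizes are illustrative). Note the flow layout this
// implementation assumes: flow[..., 0] displaces along the width (added to w) and
// flow[..., 1] along the height (added to h); img and out are H x W x C row-major,
// and out should be zero-initialised because bilinear interpolation accumulates into it.
//
//   std::vector<double> img(h * w * c), flow(h * w * 2), out(h * w * c, 0.0);
//   FlowWarp(img.data(), flow.data(), out.data(), h, w, c,
//            /*filling_value=*/0, /*interpolateMode=*/0);  // 0 = bilinear, 1 = nearest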
|
Cream/CDARTS/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.cpp",
"repo_id": "Cream",
"token_count": 1051
}
| 307 |
from .env import get_root_logger, init_dist, set_random_seed
from .inference import (inference_detector, init_detector, show_result,
show_result_pyplot)
from .train import train_detector
__all__ = [
'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector',
'init_detector', 'inference_detector', 'show_result', 'show_result_pyplot'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/apis/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/apis/__init__.py",
"repo_id": "Cream",
"token_count": 152
}
| 308 |
import torch
from .transforms import bbox2delta
from ..utils import multi_apply
def bbox_target(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def expand_target(bbox_targets, bbox_weights, labels, num_classes):
bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),
4 * num_classes))
bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),
4 * num_classes))
for i in torch.nonzero(labels > 0).squeeze(-1):
start, end = labels[i] * 4, (labels[i] + 1) * 4
bbox_targets_expand[i, start:end] = bbox_targets[i, :]
bbox_weights_expand[i, start:end] = bbox_weights[i, :]
return bbox_targets_expand, bbox_weights_expand
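# A worked sketch of expand_target (shapes are illustrative): with num_classes=4 and a
# positive sample labelled 2, its four regression targets land in columns 8:12 of the
# class-expanded tensor, leaving the other class slots zeroed.
#
#   bbox_targets = torch.tensor([[0.1, 0.2, 0.3, 0.4]])
#   bbox_weights = torch.ones_like(bbox_targets)
#   labels = torch.tensor([2])
#   t, w = expand_target(bbox_targets, bbox_weights, labels, num_classes=4)
#   # t.shape == (1, 16); t[0, 8:12] equals bbox_targets[0]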
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/bbox_target.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/bbox_target.py",
"repo_id": "Cream",
"token_count": 1558
}
| 309 |
import os
import os.path as osp
import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import collate, scatter
from mmcv.runner import Hook
from torch.utils.data import Dataset
class DistEvalHook(Hook):
def __init__(self, dataset, interval=1, **eval_kwargs):
from mmdet import datasets
if isinstance(dataset, Dataset):
self.dataset = dataset
elif isinstance(dataset, dict):
self.dataset = datasets.build_dataset(dataset, {'test_mode': True})
else:
raise TypeError(
'dataset must be a Dataset object or a dict, not {}'.format(
type(dataset)))
self.interval = interval
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
if runner.rank == 0:
prog_bar = mmcv.ProgressBar(len(self.dataset))
for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data_gpu = scatter(
collate([data], samples_per_gpu=1),
[torch.cuda.current_device()])[0]
# compute output
with torch.no_grad():
result = runner.model(
return_loss=False, rescale=True, **data_gpu)
results[idx] = result
batch_size = runner.world_size
if idx % 200 == 0:
if runner.rank == 0:
for _ in range(0, batch_size, 10):
prog_bar.update()
if runner.rank == 0:
print('\n')
dist.barrier()
for i in range(1, runner.world_size):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
self.evaluate(runner, results)
else:
tmp_file = osp.join(runner.work_dir,
'temp_{}.pkl'.format(runner.rank))
mmcv.dump(results, tmp_file)
dist.barrier()
dist.barrier()
def evaluate(self, runner, results):
eval_res = self.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
|
Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/eval_hooks.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/eval_hooks.py",
"repo_id": "Cream",
"token_count": 1347
}
| 310 |
from .custom import CustomDataset
from .cityscapes import CityscapesDataset
from .xml_style import XMLDataset
from .coco import CocoDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .loader import GroupSampler, DistributedGroupSampler, build_dataloader, build_dataloader_arch
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .registry import DATASETS
from .builder import build_dataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset',
'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler',
'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'WIDERFaceDataset',
'DATASETS', 'build_dataset', 'build_dataloader_arch'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/__init__.py",
"repo_id": "Cream",
"token_count": 260
}
| 311 |
import mmcv
import numpy as np
import torch
__all__ = [
'ImageTransform', 'BboxTransform', 'MaskTransform', 'SegMapTransform',
'Numpy2Tensor'
]
class ImageTransform(object):
"""Preprocess an image.
1. rescale the image to expected size
2. normalize the image
3. flip the image (if needed)
4. pad the image (if needed)
5. transpose to (c, h, w)
"""
def __init__(self,
mean=(0, 0, 0),
std=(1, 1, 1),
to_rgb=True,
size_divisor=None):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
self.size_divisor = size_divisor
def __call__(self, img, scale, flip=False, keep_ratio=True):
if keep_ratio:
img, scale_factor = mmcv.imrescale(img, scale, return_scale=True)
else:
img, w_scale, h_scale = mmcv.imresize(
img, scale, return_scale=True)
scale_factor = np.array(
[w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
img_shape = img.shape
img = mmcv.imnormalize(img, self.mean, self.std, self.to_rgb)
if flip:
img = mmcv.imflip(img)
if self.size_divisor is not None:
img = mmcv.impad_to_multiple(img, self.size_divisor)
pad_shape = img.shape
else:
pad_shape = img_shape
img = img.transpose(2, 0, 1)
return img, img_shape, pad_shape, scale_factor
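# A minimal usage sketch for ImageTransform (mean/std values and the input image,
# an HWC BGR numpy array, are assumed for illustration):
#
#   transform = ImageTransform(mean=(123.675, 116.28, 103.53),
#                              std=(58.395, 57.12, 57.375),
#                              to_rgb=True, size_divisor=32)
#   img, img_shape, pad_shape, scale_factor = transform(
#       img, scale=(1333, 800), flip=False, keep_ratio=True)
#   # img comes back as CHW float32, padded so each side is a multiple of 32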
def bbox_flip(bboxes, img_shape):
"""Flip bboxes horizontally.
Args:
bboxes(ndarray): shape (..., 4*k)
img_shape(tuple): (height, width)
"""
assert bboxes.shape[-1] % 4 == 0
w = img_shape[1]
flipped = bboxes.copy()
flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
return flipped
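# A worked example of bbox_flip (numbers are illustrative): for an image of width 200,
# the box [10, 20, 50, 60] mirrors to [200-50-1, 20, 200-10-1, 60] = [149, 20, 189, 60].
#
#   boxes = np.array([[10., 20., 50., 60.]])
#   bbox_flip(boxes, img_shape=(100, 200))   # -> [[149., 20., 189., 60.]]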
class BboxTransform(object):
"""Preprocess gt bboxes.
1. rescale bboxes according to image size
2. flip bboxes (if needed)
3. pad the first dimension to `max_num_gts`
"""
def __init__(self, max_num_gts=None):
self.max_num_gts = max_num_gts
def __call__(self, bboxes, img_shape, scale_factor, flip=False):
gt_bboxes = bboxes * scale_factor
if flip:
gt_bboxes = bbox_flip(gt_bboxes, img_shape)
gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
if self.max_num_gts is None:
return gt_bboxes
else:
num_gts = gt_bboxes.shape[0]
padded_bboxes = np.zeros((self.max_num_gts, 4), dtype=np.float32)
padded_bboxes[:num_gts, :] = gt_bboxes
return padded_bboxes
class MaskTransform(object):
"""Preprocess masks.
1. resize masks to expected size and stack to a single array
2. flip the masks (if needed)
3. pad the masks (if needed)
"""
def __call__(self, masks, pad_shape, scale_factor, flip=False):
masks = [
mmcv.imrescale(mask, scale_factor, interpolation='nearest')
for mask in masks
]
if flip:
masks = [mask[:, ::-1] for mask in masks]
padded_masks = [
mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
]
padded_masks = np.stack(padded_masks, axis=0)
return padded_masks
class SegMapTransform(object):
"""Preprocess semantic segmentation maps.
    1. rescale the segmentation map to expected size
    2. flip the segmentation map (if needed)
    3. pad the segmentation map (if needed)
"""
def __init__(self, size_divisor=None):
self.size_divisor = size_divisor
def __call__(self, img, scale, flip=False, keep_ratio=True):
if keep_ratio:
img = mmcv.imrescale(img, scale, interpolation='nearest')
else:
img = mmcv.imresize(img, scale, interpolation='nearest')
if flip:
img = mmcv.imflip(img)
if self.size_divisor is not None:
img = mmcv.impad_to_multiple(img, self.size_divisor)
return img
class Numpy2Tensor(object):
def __init__(self):
pass
def __call__(self, *args):
if len(args) == 1:
return torch.from_numpy(args[0])
else:
return tuple([torch.from_numpy(np.array(array)) for array in args])
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/transforms.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/transforms.py",
"repo_id": "Cream",
"token_count": 2153
}
| 312 |
import torch
import logging
import math
import re
from collections import OrderedDict
from copy import deepcopy
from typing import Tuple, Optional, List
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from functools import partial
from itertools import repeat
from torch._six import container_abcs
# from timm.models.efficientnet_blocks import *
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
def hard_swish(x, inplace: bool = False):
    """HardSwish - x * relu6(x + 3) / 6, described in: https://arxiv.org/abs/1905.02244"""
    inner = F.relu6(x + 3.).div_(6.)
    return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
    def __init__(self, inplace: bool = False):
        super(HardSwish, self).__init__()
        self.inplace = inplace
    def forward(self, x):
        return hard_swish(x, self.inplace)
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
def get_same_padding(x: int, k: int, s: int, d: int):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
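# A worked example of get_same_padding (values are illustrative): for an input extent
# x=7 with kernel k=3, stride s=2, dilation d=1, the output extent is ceil(7/2)=4, so the
# total padding needed is (4-1)*2 + (3-1)*1 + 1 - 7 = 2, split left/right by pad_same below.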
def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
return x
def conv2d_same(
x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
tup_pair = _ntuple(2)
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == 'same':
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == 'valid':
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic
class CondConv2d(nn.Module):
""" Conditionally Parameterized Convolution
Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
https://github.com/pytorch/pytorch/issues/17983
"""
__constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding']
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
super(CondConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = tup_pair(kernel_size)
self.stride = tup_pair(stride)
padding_val, is_padding_dynamic = get_padding_value(
padding, kernel_size, stride=stride, dilation=dilation)
self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript
self.padding = tup_pair(padding_val)
self.dilation = tup_pair(dilation)
self.groups = groups
self.num_experts = num_experts
self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight_num_param = 1
for wd in self.weight_shape:
weight_num_param *= wd
self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
if bias:
self.bias_shape = (self.out_channels,)
self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init_weight = get_condconv_initializer(
partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
init_weight(self.weight)
if self.bias is not None:
fan_in = np.prod(self.weight_shape[1:])
bound = 1 / math.sqrt(fan_in)
init_bias = get_condconv_initializer(
partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
init_bias(self.bias)
def forward(self, x, routing_weights):
B, C, H, W = x.shape
weight = torch.matmul(routing_weights, self.weight)
new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight = weight.view(new_weight_shape)
bias = None
if self.bias is not None:
bias = torch.matmul(routing_weights, self.bias)
bias = bias.view(B * self.out_channels)
# move batch elements with channels so each batch element can be efficiently convolved with separate kernel
x = x.view(1, B * C, H, W)
if self.dynamic_padding:
out = conv2d_same(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
else:
out = F.conv2d(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
# Literal port (from TF definition)
# x = torch.split(x, 1, 0)
# weight = torch.split(weight, 1, 0)
# if self.bias is not None:
# bias = torch.matmul(routing_weights, self.bias)
# bias = torch.split(bias, 1, 0)
# else:
# bias = [None] * B
# out = []
# for xi, wi, bi in zip(x, weight, bias):
# wi = wi.view(*self.weight_shape)
# if bi is not None:
# bi = bi.view(*self.bias_shape)
# out.append(self.conv_fn(
# xi, wi, bi, stride=self.stride, padding=self.padding,
# dilation=self.dilation, groups=self.groups))
# out = torch.cat(out, 0)
return out
def get_condconv_initializer(initializer, num_experts, expert_shape):
def condconv_initializer(weight):
"""CondConv initializer function."""
num_params = np.prod(expert_shape)
if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
weight.shape[1] != num_params):
raise (ValueError(
'CondConv variables must have shape [num_experts, num_params]'))
for i in range(num_experts):
initializer(weight[i].view(expert_shape))
return condconv_initializer
# TF-style BatchNorm defaults (assumed to mirror the common TF EfficientNet / timm values)
_BN_ARGS_TF = dict(momentum=0.01, eps=1e-3)
def get_bn_args_tf():
    return _BN_ARGS_TF.copy()
def resolve_bn_args(kwargs):
    bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {}
    bn_momentum = kwargs.pop('bn_momentum', None)
    if bn_momentum is not None:
        bn_args['momentum'] = bn_momentum
    bn_eps = kwargs.pop('bn_eps', None)
    if bn_eps is not None:
        bn_args['eps'] = bn_eps
    return bn_args
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
"""Round number of filters based on depth multiplier."""
if not multiplier:
return channels
channels *= multiplier
return make_divisible(channels, divisor, channel_min)
def _parse_ksize(ss):
if ss.isdigit():
return int(ss)
else:
return [int(k) for k in ss.split('.')]
def make_divisible(v, divisor=8, min_value=None):
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
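# Worked examples of make_divisible / round_channels (values are illustrative):
#   make_divisible(30)         # -> 32 (nearest multiple of 8, ties round up)
#   make_divisible(8)          # -> 8
#   round_channels(32, 1.25)   # -> 40 (scale by the width multiplier, then round)
# The guard bumps the result up by one divisor whenever rounding would drop below 0.9 * v.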
class DepthwiseSeparableConv(nn.Module):
""" DepthwiseSeparable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None,
norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
super(DepthwiseSeparableConv, self).__init__()
norm_kwargs = norm_kwargs or {}
has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
self.drop_path_rate = drop_path_rate
self.conv_dw = create_conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
self.bn1 = norm_layer(in_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Squeeze-and-excitation
if has_se:
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
else:
self.se = None
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_layer(out_chs, **norm_kwargs)
self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
def feature_info(self, location):
if location == 'expansion':
# no expansion in this block, use depthwise, before SE
info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels)
elif location == 'depthwise': # after SE
info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck'
info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
return info
def forward(self, x):
residual = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.act1(x)
if self.se is not None:
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
x = self.act2(x)
if self.has_residual:
x += residual
return x
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE and CondConv routing"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__()
norm_kwargs = norm_kwargs or {}
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_layer(mid_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
padding=pad_type, depthwise=True, **conv_kwargs)
self.bn2 = norm_layer(mid_chs, **norm_kwargs)
self.act2 = act_layer(inplace=True)
# Squeeze-and-excitation
if has_se:
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
else:
self.se = None
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_layer(out_chs, **norm_kwargs)
def feature_info(self, location):
if location == 'expansion':
info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels)
elif location == 'depthwise': # after SE
info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck'
info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
return info
def forward(self, x):
residual = x
# Point-wise expansion
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
x += residual
return x
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop('padding', '')
kwargs.setdefault('bias', False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
def create_conv2d(in_chs, out_chs, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
assert 'groups' not in kwargs # only use 'depthwise' bool arg
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
        # Only lists are used to define MixedConv2d kernel groups; ints, tuples and other
        # iterables are passed through to a normal conv and interpreted as (h, w).
m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
groups = out_chs if depthwise else 1
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
return m
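# A minimal usage sketch for create_conv2d (channel counts are illustrative):
#
#   conv = create_conv2d(32, 64, 3, stride=2)          # plain nn.Conv2d, symmetric padding=1
#   dw   = create_conv2d(64, 64, 3, depthwise=True)    # groups == out_chs
#   mix  = create_conv2d(64, 64, [3, 5, 7])            # list kernel -> MixedConv2d
#   cond = create_conv2d(64, 64, 3, num_experts=4)     # num_experts > 0 -> CondConv2d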
def resolve_se_args(kwargs, in_chs, act_layer=None):
se_kwargs = kwargs.copy() if kwargs is not None else {}
# fill in args that aren't specified with the defaults
for k, v in _SE_ARGS_DEFAULT.items():
se_kwargs.setdefault(k, v)
    # some models, like MobileNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch
if not se_kwargs.pop('reduce_mid'):
se_kwargs['reduced_base_chs'] = in_chs
# act_layer override, if it remains None, the containing block's act_layer will be used
if se_kwargs['act_layer'] is None:
assert act_layer is not None
se_kwargs['act_layer'] = act_layer
return se_kwargs
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
_SE_ARGS_DEFAULT = dict(
gate_fn=sigmoid,
act_layer=None,
reduce_mid=False,
divisor=1)
def _decode_block_str(block_str):
""" Decode block definition string
Gets a list of block arg (dicts) through a string notation of arguments.
E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
All args can exist in any order with the exception of the leading string which
is assumed to indicate the block type.
leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
r - number of repeat blocks,
k - kernel size,
s - strides (1-9),
e - expansion ratio,
c - output channels,
se - squeeze/excitation ratio
n - activation fn ('re', 'r6', 'hs', or 'sw')
Args:
block_str: a string representation of block arguments.
Returns:
A list of block args (dicts)
Raises:
        ValueError: if the string definition is not properly specified (TODO)
"""
assert isinstance(block_str, str)
ops = block_str.split('_')
block_type = ops[0] # take the block type off the front
ops = ops[1:]
options = {}
noskip = False
for op in ops:
# string options being checked on individual basis, combine if they grow
if op == 'noskip':
noskip = True
elif op.startswith('n'):
# activation fn
key = op[0]
v = op[1:]
if v == 're':
value = nn.ReLU
elif v == 'r6':
value = nn.ReLU6
elif v == 'hs':
value = HardSwish
elif v == 'sw':
value = Swish
else:
continue
options[key] = value
else:
# all numeric options
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# if act_layer is None, the model default (passed to model init) will be used
act_layer = options['n'] if 'n' in options else None
exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def
num_repeat = int(options['r'])
# each type of block has different valid arguments, fill accordingly
if block_type == 'ir':
block_args = dict(
block_type=block_type,
dw_kernel_size=_parse_ksize(options['k']),
exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
exp_ratio=float(options['e']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_layer=act_layer,
noskip=noskip,
)
if 'cc' in options:
block_args['num_experts'] = int(options['cc'])
elif block_type == 'ds' or block_type == 'dsa':
block_args = dict(
block_type=block_type,
dw_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_layer=act_layer,
pw_act=block_type == 'dsa',
noskip=block_type == 'dsa' or noskip,
)
elif block_type == 'er':
block_args = dict(
block_type=block_type,
exp_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
exp_ratio=float(options['e']),
fake_in_chs=fake_in_chs,
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_layer=act_layer,
noskip=noskip,
)
elif block_type == 'cn':
block_args = dict(
block_type=block_type,
kernel_size=int(options['k']),
out_chs=int(options['c']),
stride=int(options['s']),
act_layer=act_layer,
)
else:
assert False, 'Unknown block type (%s)' % block_type
return block_args, num_repeat
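# A worked example of _decode_block_str (the string is illustrative):
#
#   _decode_block_str('ir_r2_k3_s2_e6_c40_se0.25')
#   # -> (dict(block_type='ir', dw_kernel_size=3, exp_kernel_size=1, pw_kernel_size=1,
#   #          out_chs=40, exp_ratio=6.0, se_ratio=0.25, stride=2, act_layer=None,
#   #          noskip=False), 2)
#   # i.e. an InvertedResidual definition repeated twice by decode_arch_def below.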
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
class ConvBnAct(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(ConvBnAct, self).__init__()
norm_kwargs = norm_kwargs or {}
self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
self.bn1 = norm_layer(out_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
def feature_info(self, location):
if location == 'expansion' or location == 'depthwise':
# no expansion or depthwise this block, use act after conv
info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck'
info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
return info
def forward(self, x):
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
return x
def adaptive_pool_feat_mult(pool_type='avg'):
if pool_type == 'catavgmax':
return 2
else:
return 1
def modify_block_args(block_args, kernel_size, exp_ratio):
# kernel_size: 3,5,7
# exp_ratio: 4,6
block_type = block_args['block_type']
# each type of block has different valid arguments, fill accordingly
if block_type == 'cn':
block_args['kernel_size'] = kernel_size
elif block_type == 'er':
block_args['exp_kernel_size'] = kernel_size
else:
block_args['dw_kernel_size'] = kernel_size
if block_type == 'ir' or block_type == 'er':
block_args['exp_ratio'] = exp_ratio
return block_args
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
""" Per-stage depth scaling
Scales the block repeats in each stage. This depth scaling impl maintains
compatibility with the EfficientNet scaling method, while allowing sensible
scaling for other models that may have multiple block arg definitions in each stage.
"""
# We scale the total repeat count for each stage, there may be multiple
# block arg defs per stage so we need to sum.
num_repeat = sum(repeats)
if depth_trunc == 'round':
# Truncating to int by rounding allows stages with few repeats to remain
# proportionally smaller for longer. This is a good choice when stage definitions
# include single repeat stages that we'd prefer to keep that way as long as possible
num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
else:
# The default for EfficientNet truncates repeats to int via 'ceil'.
# Any multiplier > 1.0 will result in an increased depth for every stage.
num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
# Proportionally distribute repeat count scaling to each block definition in the stage.
# Allocation is done in reverse as it results in the first block being less likely to be scaled.
# The first block makes less sense to repeat in most of the arch definitions.
repeats_scaled = []
for r in repeats[::-1]:
rs = max(1, round((r / num_repeat * num_repeat_scaled)))
repeats_scaled.append(rs)
num_repeat -= r
num_repeat_scaled -= rs
repeats_scaled = repeats_scaled[::-1]
# Apply the calculated scaling to each block arg in the stage
sa_scaled = []
for ba, rep in zip(stack_args, repeats_scaled):
sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
return sa_scaled
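# A worked example of _scale_stage_depth (numbers are illustrative): with per-block
# repeats [1, 2] and depth_multiplier=1.4, the stage total 3 scales to ceil(4.2)=5.
# Allocating in reverse gives the second block round(2/3 * 5) = 3 repeats and the first
# block the remaining 2, so the stage expands to [ba0, ba0, ba1, ba1, ba1].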
def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1):
arch_args = []
for stack_idx, block_strings in enumerate(arch_def):
assert isinstance(block_strings, list)
stack_args = []
repeats = []
for block_str in block_strings:
assert isinstance(block_str, str)
ba, rep = _decode_block_str(block_str)
if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
ba['num_experts'] *= experts_multiplier
stack_args.append(ba)
repeats.append(rep)
arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc))
return arch_args
class ChildNetBuilder:
""" Build Trunk Blocks
"""
def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
output_stride=32, pad_type='', act_layer=None, se_kwargs=None,
norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='',
verbose=False):
self.channel_multiplier = channel_multiplier
self.channel_divisor = channel_divisor
self.channel_min = channel_min
self.output_stride = output_stride
self.pad_type = pad_type
self.act_layer = act_layer
self.se_kwargs = se_kwargs
self.norm_layer = norm_layer
self.norm_kwargs = norm_kwargs
self.drop_path_rate = drop_path_rate
self.feature_location = feature_location
assert feature_location in ('pre_pwl', 'post_exp', '')
self.verbose = verbose
# state updated during build, consumed by model
self.in_chs = None
self.features = OrderedDict()
def _round_channels(self, chs):
return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)
def _make_block(self, ba, block_idx, block_count):
drop_path_rate = self.drop_path_rate * block_idx / block_count
bt = ba.pop('block_type')
ba['in_chs'] = self.in_chs
ba['out_chs'] = self._round_channels(ba['out_chs'])
if 'fake_in_chs' in ba and ba['fake_in_chs']:
# FIXME this is a hack to work around mismatch in origin impl input filters
ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
ba['norm_layer'] = self.norm_layer
ba['norm_kwargs'] = self.norm_kwargs
ba['pad_type'] = self.pad_type
# block act fn overrides the model default
ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
assert ba['act_layer'] is not None
if bt == 'ir':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)))
if ba.get('num_experts', 0) > 0:
block = CondConvResidual(**ba)
else:
block = InvertedResidual(**ba)
elif bt == 'ds' or bt == 'dsa':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)))
block = DepthwiseSeparableConv(**ba)
elif bt == 'er':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
logging.info(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)))
block = EdgeResidual(**ba)
elif bt == 'cn':
if self.verbose:
logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)))
block = ConvBnAct(**ba)
else:
            assert False, 'Unknown block type (%s) while building model.' % bt
self.in_chs = ba['out_chs'] # update in_chs for arg of next block
return block
def __call__(self, in_chs, model_block_args):
""" Build the blocks
Args:
in_chs: Number of input-channels passed to first block
model_block_args: A list of lists, outer list defines stages, inner
list contains strings defining block configuration(s)
Return:
List of block stacks (each stack wrapped in nn.Sequential)
"""
if self.verbose:
logging.info('Building model trunk with %d stages...' % len(model_block_args))
self.in_chs = in_chs
total_block_count = sum([len(x) for x in model_block_args])
total_block_idx = 0
current_stride = 2
current_dilation = 1
feature_idx = 0
stages = []
# outer list of block_args defines the stacks ('stages' by some conventions)
for stage_idx, stage_block_args in enumerate(model_block_args):
last_stack = stage_idx == (len(model_block_args) - 1)
if self.verbose:
logging.info('Stack: {}'.format(stage_idx))
assert isinstance(stage_block_args, list)
blocks = []
# each stack (stage) contains a list of block arguments
for block_idx, block_args in enumerate(stage_block_args):
last_block = block_idx == (len(stage_block_args) - 1)
extract_features = '' # No features extracted
if self.verbose:
logging.info(' Block: {}'.format(block_idx))
# Sort out stride, dilation, and feature extraction details
assert block_args['stride'] in (1, 2)
if block_idx >= 1:
# only the first block in any stack can have a stride > 1
block_args['stride'] = 1
do_extract = False
if self.feature_location == 'pre_pwl':
if last_block:
next_stage_idx = stage_idx + 1
if next_stage_idx >= len(model_block_args):
do_extract = True
else:
do_extract = model_block_args[next_stage_idx][0]['stride'] > 1
elif self.feature_location == 'post_exp':
if block_args['stride'] > 1 or (last_stack and last_block) :
do_extract = True
if do_extract:
extract_features = self.feature_location
next_dilation = current_dilation
if block_args['stride'] > 1:
next_output_stride = current_stride * block_args['stride']
if next_output_stride > self.output_stride:
next_dilation = current_dilation * block_args['stride']
block_args['stride'] = 1
if self.verbose:
logging.info(' Converting stride to dilation to maintain output_stride=={}'.format(
self.output_stride))
else:
current_stride = next_output_stride
block_args['dilation'] = current_dilation
if next_dilation != current_dilation:
current_dilation = next_dilation
# create the block
block = self._make_block(block_args, total_block_idx, total_block_count)
blocks.append(block)
# stash feature module name and channel info for model feature extraction
if extract_features:
feature_module = block.feature_module(extract_features)
if feature_module:
feature_module = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_module
feature_channels = block.feature_channels(extract_features)
self.features[feature_idx] = dict(
name=feature_module,
num_chs=feature_channels
)
feature_idx += 1
total_block_idx += 1 # incr global block idx (across all stacks)
stages.append(nn.Sequential(*blocks))
return stages
def _init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None):
""" Weight initialization as per Tensorflow official implementations.
Args:
m (nn.Module): module to init
n (str): module name
fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs
Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc:
* https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
* https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
"""
if isinstance(m, CondConv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if fix_group_fanout:
fan_out //= m.groups
init_weight_fn = get_condconv_initializer(
lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
init_weight_fn(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if fix_group_fanout:
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        if last_bn and n in last_bn:
            # zero-init the last BN gamma of each block so residual branches start as identity
            m.weight.data.zero_()
            m.bias.data.zero_()
        else:
            m.weight.data.fill_(1.0)
            m.bias.data.zero_()
elif isinstance(m, nn.Linear):
fan_out = m.weight.size(0) # fan-out
fan_in = 0
if 'routing_fn' in n:
fan_in = m.weight.size(1)
init_range = 1.0 / math.sqrt(fan_in + fan_out)
m.weight.data.uniform_(-init_range, init_range)
m.bias.data.zero_()
def efficientnet_init_weights(model: nn.Module, init_fn=None, zero_gamma=False):
last_bn = []
if zero_gamma:
prev_n = ''
for n, m in model.named_modules():
if isinstance(m, nn.BatchNorm2d):
if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]):
last_bn.append(prev_n)
prev_n = n
last_bn.append(prev_n)
init_fn = init_fn or _init_weight_goog
for n, m in model.named_modules():
init_fn(m, n, last_bn=last_bn)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/builder.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/builder.py",
"repo_id": "Cream",
"token_count": 16979
}
| 313 |
import logging
import torch
from collections import OrderedDict
def load_checkpoint(model,
filename,
strict=False,
logger=None):
checkpoint = torch.load(filename)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# load state_dict
if hasattr(model, 'module'):
load_state_dict(model.module, state_dict, strict, logger)
else:
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
Args:
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
own_state = module.state_dict()
state_dict_modify = state_dict.copy()
for name, param in state_dict.items():
''' for mobilenet v2
if 'features' in name:
name = name.replace('features.','features')
'''
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
if 'conv2' in name and 'layer4.0.conv2_d2.weight' in own_state.keys():
d1 = name.replace('conv2', 'conv2_d1')
d1_c = own_state[d1].size(0)
own_state[d1].copy_(param[:d1_c,:,:,:])
state_dict_modify[d1] = param[:d1_c,:,:,:]
d2 = name.replace('conv2', 'conv2_d2')
d2_c = own_state[d2].size(0)
own_state[d2].copy_(param[d1_c:d1_c+d2_c,:,:,:])
state_dict_modify[d2] = param[d1_c:d1_c+d2_c,:,:,:]
d3 = name.replace('conv2', 'conv2_d3')
own_state[d3].copy_(param[d1_c+d2_c:,:,:,:])
state_dict_modify[d3] = param[d1_c+d2_c:,:,:,:]
else:
if name not in own_state:
unexpected_keys.append(name)
continue
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError(
'While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'.format(
name, own_state[name].size(), param.size()))
missing_keys = set(own_state.keys()) - set(state_dict_modify.keys())
'''
if 'layer4.0.conv2_d2.weight' in own_state.keys():
missing_keys = set(own_state.keys()) - set(state_dict_modify.keys())
else:
# for mobilenetv2
own_state_set = []
for name in set(own_state.keys()):
own_state_set.append(name.replace('features','features.'))
missing_keys = set(own_state_set) - set(state_dict.keys())
'''
err_msg = []
if unexpected_keys:
err_msg.append('unexpected key in source state_dict: {}\n'.format(
', '.join(unexpected_keys)))
if missing_keys:
err_msg.append('missing keys in source state_dict: {}\n'.format(
', '.join(missing_keys)))
err_msg = '\n'.join(err_msg)
if err_msg:
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
            logger.warning(err_msg)
else:
print(err_msg)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/utils.py",
"repo_id": "Cream",
"token_count": 1798
}
| 314 |
from .single_stage import SingleStageDetector
from ..registry import DETECTORS
@DETECTORS.register_module
class FCOS(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/fcos.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/fcos.py",
"repo_id": "Cream",
"token_count": 255
}
| 315 |
import torch
import torch.nn as nn
from mmdet.core import bbox_overlaps
from .utils import weighted_loss
from ..registry import LOSSES
@weighted_loss
def iou_loss(pred, target, eps=1e-6):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
Tensor: Loss tensor.
"""
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
loss = -ious.log()
return loss
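# A worked example of iou_loss (boxes are illustrative): identical boxes have IoU 1 and
# loss -log(1) = 0, while a pair overlapping with IoU 0.5 gives -log(0.5) ~= 0.693.
#
#   pred = torch.tensor([[0., 0., 10., 10.]])
#   target = torch.tensor([[0., 0., 10., 10.]])
#   iou_loss(pred, target)   # -> tensor(0.) with the default 'mean' reduction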
@weighted_loss
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
"""Improving Object Localization with Fitness NMS and Bounded IoU Loss,
https://arxiv.org/abs/1711.00164.
Args:
pred (tensor): Predicted bboxes.
target (tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0] + 1
pred_h = pred[:, 3] - pred[:, 1] + 1
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0] + 1
target_h = target[:, 3] - target[:, 1] + 1
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max(
(target_w - 2 * dx.abs()) /
(target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max(
(target_h - 2 * dy.abs()) /
(target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
(target_w + eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
(target_h + eps))
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
dim=-1).view(loss_dx.size(0), -1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
return loss
@LOSSES.register_module
class IoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(IoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * iou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module
class BoundedIoULoss(nn.Module):
def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
super(BoundedIoULoss, self).__init__()
self.beta = beta
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * bounded_iou_loss(
pred,
target,
weight,
beta=self.beta,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/iou_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/iou_loss.py",
"repo_id": "Cream",
"token_count": 2151
}
| 316 |
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
# For toy experiments
class MBBlock(nn.Module):
def __init__(self, in_channels, out_channels, expansion, stride, kernel_size, dilation=1, groups=1):
super(MBBlock, self).__init__()
self.in_channels = in_channels
        self.out_channels = out_channels
self.stride = stride
self.groups = groups
mid_channels = in_channels * expansion
padding = (kernel_size - 1) * dilation // 2
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups),
nn.SyncBatchNorm(mid_channels),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(mid_channels, mid_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False, groups=mid_channels),
nn.SyncBatchNorm(mid_channels),
nn.ReLU(inplace=True)
)
self.conv3 = nn.Sequential(
nn.Conv2d(mid_channels, out_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups),
nn.SyncBatchNorm(out_channels)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
if isinstance(m, nn.SyncBatchNorm):
m._specify_ddp_gpu_num(1)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.in_channels == self.out_channels and self.stride == 1:
out = out + x
return out
@NECKS.register_module
class FPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
activation=None,
fpn_kernel=3,
lateral_kernel=1,
depthwise=None,
toy_replace=None,
dense_add=None):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.fp16_enabled = False
self.fpn_kernel = fpn_kernel
self.lateral_kernel = lateral_kernel
self.dense_add = dense_add
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
lateral_kernel,
padding=(lateral_kernel-1)//2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
if depthwise is not None:
if depthwise == 'sep':
fpn_conv = nn.Conv2d(out_channels, out_channels, self.fpn_kernel,
padding=int((self.fpn_kernel-1)/2), groups=out_channels)
elif depthwise == 'sep-depth':
fpn_conv = nn.Sequential(
nn.Conv2d(out_channels, out_channels, self.fpn_kernel,
padding=int((self.fpn_kernel-1)/2), groups=out_channels),
nn.Conv2d(out_channels, out_channels, 1, padding=0))
else:
if toy_replace is not None and i == toy_replace.get('stage', 30):
if toy_replace.get('block', 'res') == 'ir':
fpn_conv = MBBlock(
out_channels, out_channels, 1, 1,
toy_replace.get('conv_kernel'), dilation=toy_replace.get('dilation'), groups=1)
else:
fpn_conv = ConvModule(
out_channels,
out_channels,
toy_replace.get('conv_kernel'),
padding=(toy_replace.get('conv_kernel')-1) * toy_replace.get('dilation') // 2,
dilation=toy_replace.get('dilation'),
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
else:
fpn_conv = ConvModule(
out_channels,
out_channels,
self.fpn_kernel,
padding=int((self.fpn_kernel-1)/2),
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
if self.dense_add is not None:
if self.dense_add == 'no':
laterals = laterals
elif self.dense_add == 'all':
laterals_ = [0 for i in range(len(laterals))]
for i in range(used_backbone_levels - 1, -1, -1):
h, w = laterals[i].size(2), laterals[i].size(3)
for j in range(len(laterals)):
for k in range(i-j):
if k == 0:
tmp_lateral = F.max_pool2d(laterals[j], 2, stride=2)
else:
tmp_lateral = F.max_pool2d(tmp_lateral, 2, stride=2)
if i > j:
laterals_[i] += F.interpolate(tmp_lateral, size=(h,w), mode='bilinear', align_corners=True)
else:
laterals_[i] += F.interpolate(laterals[j], size=(h,w), mode='bilinear', align_corners=True)
laterals = laterals_
elif self.dense_add == 'top-down':
laterals_ = [0 for i in range(len(laterals))]
for i in range(used_backbone_levels - 1, -1, -1):
h, w = laterals[i].size(2), laterals[i].size(3)
for j in range(used_backbone_levels - 1, i-1, -1):
laterals_[i] += F.interpolate(laterals[j], size=(h,w), mode='nearest')
laterals = laterals_
elif self.dense_add == 'bottom-up-nearest':
for i in range(0, used_backbone_levels-1, 1):
laterals[i+1] += F.max_pool2d(laterals[i], 1, stride=2)
elif self.dense_add == 'bottom-up':
laterals_ = [0 for i in range(len(laterals))]
for i in range(used_backbone_levels - 1, -1, -1):
h, w = laterals[i].size(2), laterals[i].size(3)
for j in range(i+1):
for k in range(i-j):
if k == 0:
tmp_lateral = F.max_pool2d(laterals[j], 2, stride=2)
else:
tmp_lateral = F.max_pool2d(tmp_lateral, 2, stride=2)
if i > j:
laterals_[i] += F.interpolate(tmp_lateral, size=(h,w), mode='bilinear', align_corners=True)
else:
laterals_[i] += F.interpolate(laterals[j], size=(h,w), mode='bilinear', align_corners=True)
laterals = laterals_
else:
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i - 1] += F.interpolate(
laterals[i], scale_factor=2, mode='nearest')
# build outputs
# part 1: from original levels
if self.fpn_kernel == 1 or self.fpn_kernel == 3:
outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
else:
outs = [laterals[i] for i in range(used_backbone_levels)]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
else:
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
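if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: a 4-level FPN with the
    # default settings. Channel counts and feature-map sizes are assumptions.
    import torch
    fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
    fpn.init_weights()
    feats = [torch.randn(1, c, s, s)
             for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
    outs = fpn(feats)
    print([tuple(o.shape) for o in outs])  # five (1, 256, ., .) maps, 64x64 down to 4x4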
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/fpn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/fpn.py",
"repo_id": "Cream",
"token_count": 6732
}
| 317 |
import torch.nn as nn
norm_cfg = {
# format: layer_type: (abbreviation, module)
'BN': ('bn', nn.BatchNorm2d),
'SyncBN': ('bn', nn.SyncBatchNorm),
'GN': ('gn', nn.GroupNorm),
# and potentially 'SN'
}
def build_norm_layer(cfg, num_features, postfix=''):
""" Build normalization layer
Args:
cfg (dict): cfg should contain:
type (str): identify norm layer type.
layer args: args needed to instantiate a norm layer.
            requires_grad (bool): [optional] whether to stop gradient updates
num_features (int): number of channels from input.
postfix (int, str): appended into norm abbreviation to
create named layer.
Returns:
name (str): abbreviation + postfix
layer (nn.Module): created norm layer
"""
assert isinstance(cfg, dict) and 'type' in cfg
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in norm_cfg:
raise KeyError('Unrecognized norm type {}'.format(layer_type))
else:
abbr, norm_layer = norm_cfg[layer_type]
if norm_layer is None:
raise NotImplementedError
assert isinstance(postfix, (int, str))
name = abbr + str(postfix)
requires_grad = cfg_.pop('requires_grad', True)
cfg_.setdefault('eps', 1e-5)
if layer_type != 'GN':
layer = norm_layer(num_features, **cfg_)
if layer_type == 'SyncBN':
layer._specify_ddp_gpu_num(1)
else:
assert 'num_groups' in cfg_
layer = norm_layer(num_channels=num_features, **cfg_)
for param in layer.parameters():
param.requires_grad = requires_grad
return name, layer
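if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: build a BN layer with
    # frozen affine parameters and a 32-group GroupNorm layer from config dicts.
    name, bn = build_norm_layer(dict(type='BN', requires_grad=False), 64, postfix=1)
    print(name, bn)  # 'bn1' BatchNorm2d(64, eps=1e-05, ...)
    name, gn = build_norm_layer(dict(type='GN', num_groups=32), 64)
    print(name, gn)  # 'gn' GroupNorm(32, 64, eps=1e-05, affine=True)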
|
Cream/CDARTS/CDARTS_detection/mmdet/models/utils/norm.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/utils/norm.py",
"repo_id": "Cream",
"token_count": 705
}
| 318 |
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
/***************** Adapted by Charles Shang *********************/
// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <algorithm>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename scalar_t>
__device__ scalar_t bilinear_interp(
const scalar_t *data,
const scalar_t x,
const scalar_t y,
const int width,
const int height)
{
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
scalar_t dist_x = (scalar_t)(x - x1);
scalar_t dist_y = (scalar_t)(y - y1);
scalar_t value11 = data[y1 * width + x1];
scalar_t value12 = data[y2 * width + x1];
scalar_t value21 = data[y1 * width + x2];
scalar_t value22 = data[y2 * width + x2];
scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
return value;
}
template <typename scalar_t>
__global__ void DeformablePSROIPoolForwardKernel(
const int count,
const scalar_t *bottom_data,
const scalar_t spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const scalar_t *bottom_rois, const scalar_t *bottom_trans,
const int no_trans,
const scalar_t trans_std,
const int sample_per_part,
const int output_dim,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class,
scalar_t *top_data,
scalar_t *top_count)
{
CUDA_KERNEL_LOOP(index, count)
{
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height);
scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width);
scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part);
scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part);
int part_h = floor((scalar_t)(ph) / pooled_height * part_size);
int part_w = floor((scalar_t)(pw) / pooled_width * part_size);
int class_id = ctop / channels_each_class;
scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
scalar_t sum = 0;
int count = 0;
int gw = floor((scalar_t)(pw)*group_size / pooled_width);
int gh = floor((scalar_t)(ph)*group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (int ih = 0; ih < sample_per_part; ih++)
{
for (int iw = 0; iw < sample_per_part; iw++)
{
scalar_t w = wstart + iw * sub_bin_size_w;
scalar_t h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
{
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_size + gh) * group_size + gw;
scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? (scalar_t)(0) : sum / count;
top_count[index] = count;
}
}
template <typename scalar_t>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count,
const scalar_t *top_diff,
const scalar_t *top_count,
const int num_rois,
const scalar_t spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff,
const scalar_t *bottom_data,
const scalar_t *bottom_rois,
const scalar_t *bottom_trans,
const int no_trans,
const scalar_t trans_std,
const int sample_per_part,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class)
{
CUDA_KERNEL_LOOP(index, count)
{
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height);
scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width);
scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part);
scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part);
int part_h = floor((scalar_t)(ph) / pooled_height * part_size);
int part_w = floor((scalar_t)(pw) / pooled_width * part_size);
int class_id = ctop / channels_each_class;
scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0)
{
continue;
}
scalar_t diff_val = top_diff[index] / top_count[index];
const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
int gw = floor((scalar_t)(pw)*group_size / pooled_width);
int gh = floor((scalar_t)(ph)*group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
for (int ih = 0; ih < sample_per_part; ih++)
{
for (int iw = 0; iw < sample_per_part; iw++)
{
scalar_t w = wstart + iw * sub_bin_size_w;
scalar_t h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
{
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_size + gh) * group_size + gw;
// backward on feature
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
scalar_t dist_x = w - x0, dist_y = h - y0;
scalar_t q00 = (1 - dist_x) * (1 - dist_y);
scalar_t q01 = (1 - dist_x) * dist_y;
scalar_t q10 = dist_x * (1 - dist_y);
scalar_t q11 = dist_x * dist_y;
int bottom_index_base = c * height * width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
if (no_trans)
{
continue;
}
scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
diff_x *= roi_width;
scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
diff_y *= roi_height;
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);
}
}
}
}
void DeformablePSROIPoolForward(const at::Tensor data,
const at::Tensor bbox,
const at::Tensor trans,
at::Tensor out,
at::Tensor top_count,
const int batch,
const int channels,
const int height,
const int width,
const int num_bbox,
const int channels_trans,
const int no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std)
{
const int pooled_height = pooled_size;
const int pooled_width = pooled_size;
const int count = num_bbox * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data.type(), "deformable_psroi_pool_forward", ([&] {
const scalar_t *bottom_data = data.data<scalar_t>();
const scalar_t *bottom_rois = bbox.data<scalar_t>();
const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>();
scalar_t *top_data = out.data<scalar_t>();
scalar_t *top_count_data = top_count.data<scalar_t>();
DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width,
bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim,
group_size, part_size, num_classes, channels_each_class, top_data, top_count_data);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err));
}
}
void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad,
const at::Tensor data,
const at::Tensor bbox,
const at::Tensor trans,
const at::Tensor top_count,
at::Tensor in_grad,
at::Tensor trans_grad,
const int batch,
const int channels,
const int height,
const int width,
const int num_bbox,
const int channels_trans,
const int no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std)
{
// LOG(INFO) << "DeformablePSROIPoolBackward";
const int num_rois = num_bbox;
const int pooled_height = pooled_size;
const int pooled_width = pooled_size;
const int count = num_bbox * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] {
const scalar_t *top_diff = out_grad.data<scalar_t>();
const scalar_t *bottom_data = data.data<scalar_t>();
const scalar_t *bottom_rois = bbox.data<scalar_t>();
const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>();
scalar_t *bottom_data_diff = in_grad.data<scalar_t>();
scalar_t *bottom_trans_diff = no_trans ? NULL : trans_grad.data<scalar_t>();
const scalar_t *top_count_data = top_count.data<scalar_t>();
DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,
bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part,
group_size, part_size, num_classes, channels_each_class);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err));
}
}
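// Illustrative call-site sketch (not part of the original file). Shapes follow the
// kernels above: `data` is (batch, channels, height, width); `bbox` is (num_bbox, 5)
// with rows (batch_index, x1, y1, x2, y2); `trans` is
// (num_bbox, channels_trans, part_size, part_size); `out` and `top_count` are
// (num_bbox, output_dim, pooled_size, pooled_size) buffers filled by
// DeformablePSROIPoolForward.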
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu",
"repo_id": "Cream",
"token_count": 7753
}
| 319 |
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
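// Illustrative call-site sketch (not part of the original file): `boxes` is an
// N x 5 CUDA float tensor with rows (x1, y1, x2, y2, score); the returned tensor
// holds the indices, in the original ordering, of the boxes kept after
// suppression, e.g.
//   at::Tensor keep = nms_cuda(boxes, 0.5f);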
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/nms_kernel.cu/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/nms_kernel.cu",
"repo_id": "Cream",
"token_count": 2166
}
| 320 |
import torch
from torch.autograd import gradcheck
import os.path as osp
import sys
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_pool import RoIPool # noqa: E402
feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda()
rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55],
[1, 67, 40, 110, 120]]).cuda()
inputs = (feat, rois)
print('Gradcheck for roi pooling...')
test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3)
print(test)
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/gradcheck.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/gradcheck.py",
"repo_id": "Cream",
"token_count": 224
}
| 321 |
# coding: utf-8
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""
Async context manager that waits for work to complete on
given CUDA streams.
"""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time)
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
|
Cream/CDARTS/CDARTS_detection/mmdet/utils/contextmanagers.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/utils/contextmanagers.py",
"repo_id": "Cream",
"token_count": 1902
}
| 322 |
import argparse
import re
from collections import OrderedDict
import torch
def convert(in_file, out_file):
"""Convert keys in checkpoints.
There can be some breaking changes during the development of mmdetection,
and this tool is used for upgrading checkpoints trained with old versions
to the latest one.
"""
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
for key, val in in_state_dict.items():
# Use ConvModule instead of nn.Conv2d in RetinaNet
# cls_convs.0.weight -> cls_convs.0.conv.weight
m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
if m is not None:
param = m.groups()[1]
new_key = key.replace(param, 'conv.{}'.format(param))
out_state_dict[new_key] = val
continue
out_state_dict[key] = val
checkpoint['state_dict'] = out_state_dict
torch.save(checkpoint, out_file)
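# Illustrative example of the renaming performed above (not part of the original
# file): a key such as 'bbox_head.cls_convs.0.weight' matches the regex and is
# rewritten to 'bbox_head.cls_convs.0.conv.weight', while keys that do not match
# are copied unchanged.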
def main():
parser = argparse.ArgumentParser(description='Upgrade model version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
Cream/CDARTS/CDARTS_detection/tools/upgrade_model_version.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/tools/upgrade_model_version.py",
"repo_id": "Cream",
"token_count": 514
}
| 323 |
from __future__ import print_function, division
import os
import numpy as np
import scipy.io
import torch.utils.data as data
from PIL import Image
from torchvision import transforms
from dataloaders import custom_transforms as tr
class SBDSegmentation(data.Dataset):
NUM_CLASSES = 21
def __init__(self,
args,
base_dir,
split='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
super().__init__()
self._base_dir = base_dir
self._dataset_dir = os.path.join(self._base_dir, 'dataset')
self._image_dir = os.path.join(self._dataset_dir, 'img')
self._cat_dir = os.path.join(self._dataset_dir, 'cls')
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.args = args
# Get list of all images from the split and check that the files exist
self.im_ids = []
self.images = []
self.categories = []
for splt in self.split:
with open(os.path.join(self._dataset_dir, splt + '.txt'), "r") as f:
lines = f.read().splitlines()
for line in lines:
_image = os.path.join(self._image_dir, line + ".jpg")
                _categ = os.path.join(self._cat_dir, line + ".mat")
assert os.path.isfile(_image)
assert os.path.isfile(_categ)
self.im_ids.append(line)
self.images.append(_image)
self.categories.append(_categ)
assert (len(self.images) == len(self.categories))
# Display stats
print('Number of images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
_img, _target = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target}
return self.transform(sample)
def __len__(self):
return len(self.images)
def _make_img_gt_point_pair(self, index):
_img = Image.open(self.images[index]).convert('RGB')
_target = Image.fromarray(scipy.io.loadmat(self.categories[index])["GTcls"][0]['Segmentation'][0])
return _img, _target
def transform(self, sample):
composed_transforms = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def __str__(self):
return 'SBDSegmentation(split=' + str(self.split) + ')'
if __name__ == '__main__':
from dataloaders.dataloader_utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
    sbd_train = SBDSegmentation(args, base_dir='/path/to/benchmark_RELEASE', split='train')  # base_dir is a placeholder; point it at the SBD dataset root
dataloader = DataLoader(sbd_train, batch_size=2, shuffle=True, num_workers=2)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/sbd.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/sbd.py",
"repo_id": "Cream",
"token_count": 1950
}
| 324 |
from .build import (
build_dataset_from_cfg, build_train_loader_from_cfg, build_test_loader_from_cfg)
|
Cream/CDARTS/CDARTS_segmentation/segmentation/data/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/data/__init__.py",
"repo_id": "Cream",
"token_count": 40
}
| 325 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/coco_evaluation.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import logging
from collections import OrderedDict
import os
import glob
import copy
import json
import numpy as np
from fvcore.common.file_io import PathManager
import pycocotools.mask as mask_util
class COCOInstanceEvaluator:
"""
Evaluate COCO instance segmentation
"""
def __init__(self, output_dir=None, train_id_to_eval_id=None,
gt_dir='./datasets/coco/annotations/instances_val2017.json'):
"""
Args:
output_dir (str): an output directory to dump results.
train_id_to_eval_id (list): maps training id to evaluation id.
gt_dir (str): path to ground truth annotations.
"""
if output_dir is None:
            raise ValueError('Must provide an output directory.')
self._output_dir = output_dir
if self._output_dir:
PathManager.mkdirs(self._output_dir)
self._train_id_to_eval_id = train_id_to_eval_id
self._predictions = []
self._predictions_json = os.path.join(output_dir, 'predictions.json')
self._logger = logging.getLogger(__name__)
self._gt_dir = gt_dir
def update(self, instances, image_filename=None):
if image_filename is None:
raise ValueError('Need to provide image_filename.')
num_instances = len(instances)
for i in range(num_instances):
pred_class = instances[i]['pred_class']
if self._train_id_to_eval_id is not None:
pred_class = self._train_id_to_eval_id[pred_class]
image_id = int(os.path.basename(image_filename).split('.')[0])
score = instances[i]['score']
mask = instances[i]['pred_mask'].astype("uint8")
# use RLE to encode the masks, because they are too large and takes memory
# since this evaluator stores outputs of the entire dataset
mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
self._predictions.append(
{
'image_id': image_id,
'category_id': pred_class,
'segmentation': mask_rle,
'score': float(score)
}
)
def evaluate(self):
"""
Returns:
dict: has a key "segm", whose value is a dict of "AP" and "AP50".
"""
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
if self._gt_dir is None:
raise ValueError('Must provide coco gt path for evaluation.')
self._logger.info("Evaluating results under {} ...".format(self._output_dir))
coco_gt = COCO(self._gt_dir)
coco_results = copy.deepcopy(self._predictions)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
with PathManager.open(self._predictions_json, "w") as f:
f.write(json.dumps(coco_results))
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = COCOeval(coco_gt, coco_dt, 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
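if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: register one synthetic
    # instance prediction. The file name follows the 12-digit COCO convention
    # that update() parses into an integer image id; the output path is arbitrary.
    evaluator = COCOInstanceEvaluator(output_dir='./tmp_instance_eval')
    mask = np.zeros((480, 640), dtype=np.uint8)
    mask[100:200, 150:300] = 1
    evaluator.update(
        [{'pred_class': 0, 'score': 0.9, 'pred_mask': mask}],
        image_filename='000000000139.jpg')
    # evaluator.evaluate() would then score the stored predictions against the
    # ground-truth json pointed to by `gt_dir`.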
|
Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/coco_instance.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/coco_instance.py",
"repo_id": "Cream",
"token_count": 1796
}
| 326 |
# ------------------------------------------------------------------------------
# DeepLabV3 decoder.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import OrderedDict
from torch import nn
from .aspp import ASPP
__all__ = ["DeepLabV3Decoder"]
class DeepLabV3Decoder(nn.Module):
def __init__(self, in_channels, feature_key, decoder_channels, atrous_rates, num_classes):
super(DeepLabV3Decoder, self).__init__()
self.aspp = ASPP(in_channels, out_channels=decoder_channels, atrous_rates=atrous_rates)
self.feature_key = feature_key
self.classifier = nn.Sequential(
nn.Conv2d(decoder_channels, decoder_channels, 3, padding=1, bias=False),
nn.BatchNorm2d(decoder_channels),
nn.ReLU(),
nn.Conv2d(decoder_channels, num_classes, 1)
)
def set_image_pooling(self, pool_size):
self.aspp.set_image_pooling(pool_size)
def forward(self, features):
pred = OrderedDict()
res5 = features[self.feature_key]
x = self.aspp(res5)
x = self.classifier(x)
pred['semantic'] = x
return pred
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/deeplabv3.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/deeplabv3.py",
"repo_id": "Cream",
"token_count": 498
}
| 327 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/solver/lr_scheduler.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import math
from bisect import bisect_right
from typing import List
import torch
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. "
                "Got {}".format(milestones)
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
# Different definitions of half-cosine with warmup are possible. For
# simplicity we multiply the standard half-cosine schedule by the warmup
# factor. An alternative is to start the period of the cosine at warmup_iters
# instead of at 0. In the case that warmup_iters << max_iters the two are
# very close to each other.
return [
base_lr
* warmup_factor
* 0.5
* (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
power: float = 0.9,
constant_ending: float = 0.
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.power = power
self.constant_ending = constant_ending
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
if self.constant_ending > 0 and warmup_factor == 1.:
# Constant ending lr.
if math.pow((1.0 - self.last_epoch / self.max_iters), self.power) < self.constant_ending:
return [
base_lr
* self.constant_ending
for base_lr in self.base_lrs
]
return [
base_lr
* warmup_factor
* math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
def _get_warmup_factor_at_iter(
method: str, iter: int, warmup_iters: int, warmup_factor: float
) -> float:
"""
Return the learning rate warmup factor at a specific iteration.
See https://arxiv.org/abs/1706.02677 for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iteration at which to calculate the warmup factor.
warmup_iters (int): the number of warmup iterations.
warmup_factor (float): the base warmup factor (the meaning changes according
to the method used).
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
elif method == "linear":
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
else:
raise ValueError("Unknown warmup method: {}".format(method))
|
Cream/CDARTS/CDARTS_segmentation/segmentation/solver/lr_scheduler.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/solver/lr_scheduler.py",
"repo_id": "Cream",
"token_count": 2563
}
| 328 |
from .camvid import CamVid
__all__ = ['CamVid']
|
Cream/CDARTS/CDARTS_segmentation/tools/datasets/camvid/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/datasets/camvid/__init__.py",
"repo_id": "Cream",
"token_count": 19
}
| 329 |
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.function import Function
from torch.nn import functional as F
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class BatchNorm2d(torch.nn.BatchNorm2d):
"""
For torch < 1.4
A wrapper around :class:`torch.nn.BatchNorm2d` to support zero-size tensor.
"""
def forward(self, x):
if x.numel() > 0:
return super(BatchNorm2d, self).forward(x)
# get output shape
output_shape = x.shape
return _NewEmptyTensorOp.apply(x, output_shape)
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
# Use allgather instead of allreduce since I don't trust in-place operations ..
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class NaiveSyncBatchNorm(BatchNorm2d):
"""
In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
when the batch size on each worker is different.
(e.g., when scale augmentation is used, or when it is applied to mask head).
This is a slower but correct alternative to `nn.SyncBatchNorm`.
Note:
There isn't a single definition of Sync BatchNorm.
When ``stats_mode==""``, this module computes overall statistics by using
statistics of each worker with equal weight. The result is true statistics
of all samples (as if they are all on one worker) only when all workers
have the same (N, H, W). This mode does not support inputs with zero batch size.
When ``stats_mode=="N"``, this module computes overall statistics by weighting
the statistics of each worker by their ``N``. The result is true statistics
of all samples (as if they are all on one worker) only when all workers
have the same (H, W). It is slower than ``stats_mode==""``.
Even though the result of this module may not be the true statistics of all samples,
    it may still be reasonable because it might be preferable to assign equal weights
to all workers, regardless of their (H, W) dimension, instead of putting larger weight
on larger images. From preliminary experiments, little difference is found between such
a simplified implementation and an accurate computation of overall mean & variance.
"""
def __init__(self, *args, stats_mode="", **kwargs):
super().__init__(*args, **kwargs)
assert stats_mode in ["", "N"]
self._stats_mode = stats_mode
def forward(self, input):
if not self.training:
return super().forward(input)
if dist.get_world_size() == 1:
return super().forward(input)
B, C = input.shape[0], input.shape[1]
mean = torch.mean(input, dim=[0, 2, 3])
meansqr = torch.mean(input * input, dim=[0, 2, 3])
if self._stats_mode == "":
assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
momentum = self.momentum
else:
if B == 0:
vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
vec = vec + input.sum() # make sure there is gradient w.r.t input
else:
vec = torch.cat(
[mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
)
vec = AllReduce.apply(vec * B)
total_batch = vec[-1].detach()
momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0
total_batch = torch.max(total_batch, torch.ones_like(total_batch)) # avoid div-by-zero
mean, meansqr, _ = torch.split(vec / total_batch, C)
var = meansqr - mean * mean
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
self.running_mean += momentum * (mean.detach() - self.running_mean)
self.running_var += momentum * (var.detach() - self.running_var)
return input * scale + bias
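if __name__ == '__main__':
    # Illustrative sketch, not part of the original file. In eval mode the layer
    # never touches torch.distributed, so this runs in a single process; during
    # distributed training it would all-reduce the batch statistics instead.
    bn = NaiveSyncBatchNorm(16)
    bn.eval()
    x = torch.randn(2, 16, 8, 8)
    print(bn(x).shape)  # torch.Size([2, 16, 8, 8])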
|
Cream/CDARTS/CDARTS_segmentation/train/layers.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/layers.py",
"repo_id": "Cream",
"token_count": 1988
}
| 330 |
import torch
import torch.nn as nn
from utils import utils
from datasets import data_utils
from models.loss import CrossEntropyLabelSmooth
def train(train_loader, model, optimizer, epoch, writer, logger, config):
device = torch.device("cuda")
if config.label_smooth > 0:
criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device)
else:
criterion = nn.CrossEntropyLoss().to(device)
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
step_num = len(train_loader)
cur_step = epoch*step_num
cur_lr = optimizer.param_groups[0]['lr']
if config.local_rank == 0:
logger.info("Train Epoch {} LR {}".format(epoch, cur_lr))
writer.add_scalar('train/lr', cur_lr, cur_step)
model.train()
for step, (X, y) in enumerate(train_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True)
optimizer.zero_grad()
logits, logits_aux = model(X)
# loss = criterion(logits, y)
loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam)
if config.aux_weight > 0:
# loss_aux = criterion(logits_aux, y)
loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam)
loss = loss + config.aux_weight * loss_aux
if config.use_amp:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
torch.cuda.synchronize()
        if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num - 1):
logger.info(
"Train: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step,
step_num, losses=losses, top1=top1, top5=top5))
if config.local_rank == 0:
writer.add_scalar('train/loss', reduced_loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
cur_step += 1
if config.local_rank == 0:
logger.info("Train: Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
epoch+1, config.epochs, top1.avg))
def validate(valid_loader, model, epoch, cur_step, writer, logger, config):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
device = torch.device("cuda")
criterion = nn.CrossEntropyLoss().to(device)
with torch.no_grad():
for step, (X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
logits, _ = model(X)
loss = criterion(logits, y)
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
if config.distributed:
reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
prec1 = utils.reduce_tensor(prec1, config.world_size)
prec5 = utils.reduce_tensor(prec5, config.world_size)
else:
reduced_loss = loss.data
losses.update(reduced_loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
torch.cuda.synchronize()
step_num = len(valid_loader)
if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0:
logger.info(
"Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, config.epochs, step, step_num,
losses=losses, top1=top1, top5=top5))
if config.local_rank == 0:
writer.add_scalar('val/loss', losses.avg, cur_step)
writer.add_scalar('val/top1', top1.avg, cur_step)
writer.add_scalar('val/top5', top5.avg, cur_step)
logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
epoch+1, config.epochs, top1.avg))
return top1.avg, top5.avg
|
Cream/CDARTS/benchmark201/core/augment_function.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/core/augment_function.py",
"repo_id": "Cream",
"token_count": 2516
}
| 331 |
import torch
import numpy as np
import torchvision.datasets as dset
import torchvision.transforms as transforms
from lib.datasets.data_utils import SubsetDistributedSampler
from lib.datasets.data_utils import ImageNetPolicy
def get_search_datasets(config):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_data = dset.ImageFolder(
config.train_dir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize,
]))
test_data = dset.ImageFolder(
config.test_dir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
num_train = len(train_data)
indices = list(range(num_train))
split_mid = int(np.floor(0.5 * num_train))
train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid])
valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train])
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=config.batch_size,
sampler=train_sampler,
pin_memory=True, num_workers=config.workers)
valid_loader = torch.utils.data.DataLoader(
train_data, batch_size=config.batch_size,
sampler=valid_sampler,
pin_memory=True, num_workers=config.workers)
return [train_loader, valid_loader], [train_sampler, valid_sampler]
def get_augment_datasets(config):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if config.use_aa:
train_data = dset.ImageFolder(
config.train_dir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
ImageNetPolicy(),
transforms.ToTensor(),
normalize,
]))
else:
train_data = dset.ImageFolder(
config.train_dir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize,
]))
test_data = dset.ImageFolder(
config.test_dir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=config.batch_size,
sampler=train_sampler,
pin_memory=True, num_workers=config.workers)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=config.batch_size,
sampler=test_sampler,
pin_memory=True, num_workers=config.workers)
return [train_loader, test_loader], [train_sampler, test_sampler]
|
Cream/CDARTS/lib/datasets/imagenet.py/0
|
{
"file_path": "Cream/CDARTS/lib/datasets/imagenet.py",
"repo_id": "Cream",
"token_count": 1655
}
| 332 |