python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
import utils.modelZoo as modelZoo
from utils.load_utils import *
DATA_PATHS = {
#'video_data/Oliver/train/':1,
#'video_data/Chemistry/train/':2,
'video_data/Seth/train/':5,
#'video_data/Conan/train/':6,
}
#######################################################
## main training function
#######################################################
def main(args):
## variables
learning_rate = args.learning_rate
pipeline = args.pipeline
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
feature_in_dim, feature_out_dim = FEATURE_MAP[pipeline]
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
currBestLoss = 1e3
rng = np.random.RandomState(23456)
torch.manual_seed(23456)
torch.cuda.manual_seed(23456)
## DONE variables
## set up generator model
args.model = 'regressor_fcn_bn_32'
generator = getattr(modelZoo, args.model)()
generator.build_net(feature_in_dim, feature_out_dim, require_image=args.require_image)
generator.cuda()
reg_criterion = nn.L1Loss()
g_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate, weight_decay=1e-5)
generator.train()
## set up discriminator model
args.model = 'regressor_fcn_bn_discriminator'
discriminator = getattr(modelZoo, args.model)()
discriminator.build_net(feature_out_dim)
discriminator.cuda()
gan_criterion = nn.MSELoss()
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate, weight_decay=1e-5)
discriminator.train()
## DONE model
## load data from saved files
data_tuple = load_data(args, rng)
if args.require_image:
train_X, train_Y, test_X, test_Y, train_ims, test_ims = data_tuple
else:
train_X, train_Y, test_X, test_Y = data_tuple
train_ims, test_ims = None, None
## DONE: load data from saved files
## training job
kld_weight = 0.05
prev_save_epoch = 0
patience = 20
for epoch in range(args.num_epochs):
args.epoch = epoch
## train discriminator
if epoch > 100 and (epoch - prev_save_epoch) > patience:
print('early stopping at:', epoch)
break
if epoch > 0 and epoch % 3 == 0:
train_discriminator(args, rng, generator, discriminator, gan_criterion, d_optimizer, train_X, train_Y, train_ims=train_ims)
else:
train_generator(args, rng, generator, discriminator, reg_criterion, gan_criterion, g_optimizer, train_X, train_Y, train_ims=train_ims)
currBestLoss = val_generator(args, generator, discriminator, reg_criterion, g_optimizer, test_X, test_Y, currBestLoss, test_ims=test_ims)
#######################################################
## local helper methods
#######################################################
## function to load data from external files
def load_data(args, rng):
gt_windows = None
quant_windows = None
p0_paths = None
hand_ims = None
## load from external files
for key, value in DATA_PATHS.items():
key = os.path.join(args.base_path, key)
curr_p0, curr_p1, curr_paths, _ = load_windows(key, args.pipeline, require_image=args.require_image)
if gt_windows is None:
if args.require_image:
hand_ims = curr_p0[1]
curr_p0 = curr_p0[0]
gt_windows = curr_p0
quant_windows = curr_p1
p0_paths = curr_paths
else:
if args.require_image:
hand_ims = np.concatenate((hand_ims, curr_p0[1]), axis=0)
curr_p0 = curr_p0[0]
gt_windows = np.concatenate((gt_windows, curr_p0), axis=0)
quant_windows = np.concatenate((quant_windows, curr_p1), axis=0)
p0_paths = np.concatenate((p0_paths, curr_paths), axis=0)
print('===> in/out', gt_windows.shape, quant_windows.shape)
if args.require_image:
print "===> hand_ims", hand_ims.shape
## DONE load from external files
## shuffle and set train/validation
N = gt_windows.shape[0]
train_N = int(N * 0.7)
idx = np.random.permutation(N)
train_idx, test_idx = idx[:train_N], idx[train_N:]
train_X, test_X = gt_windows[train_idx, :, :], gt_windows[test_idx, :, :]
train_Y, test_Y = quant_windows[train_idx, :, :], quant_windows[test_idx, :, :]
if args.require_image:
train_ims, test_ims = hand_ims[train_idx,:,:], hand_ims[test_idx,:,:]
train_ims = train_ims.astype(np.float32)
test_ims = test_ims.astype(np.float32)
print "====> train/test", train_X.shape, test_X.shape
train_X = np.swapaxes(train_X, 1, 2).astype(np.float32)
train_Y = np.swapaxes(train_Y, 1, 2).astype(np.float32)
test_X = np.swapaxes(test_X, 1, 2).astype(np.float32)
test_Y = np.swapaxes(test_Y, 1, 2).astype(np.float32)
body_mean_X, body_std_X, body_mean_Y, body_std_Y = calc_standard(train_X, train_Y, args.pipeline)
np.savez_compressed(args.model_path + '{}{}_preprocess_core.npz'.format(args.tag, args.pipeline),
body_mean_X=body_mean_X, body_std_X=body_std_X,
body_mean_Y=body_mean_Y, body_std_Y=body_std_Y)
train_X = (train_X - body_mean_X) / body_std_X
test_X = (test_X - body_mean_X) / body_std_X
train_Y = (train_Y - body_mean_Y) / body_std_Y
test_Y = (test_Y - body_mean_Y) / body_std_Y
print("=====> standardization done")
# Data shuffle
I = np.arange(len(train_X))
rng.shuffle(I)
train_X = train_X[I]
train_Y = train_Y[I]
if args.require_image:
train_ims = train_ims[I]
return (train_X, train_Y, test_X, test_Y, train_ims, test_ims)
## DONE shuffle and set train/validation
return (train_X, train_Y, test_X, test_Y)
## calc temporal deltas within sequences
def calc_motion(tensor):
res = tensor[:,:,:1] - tensor[:,:,:-1]  # note: first frame minus all-but-last frames; consecutive-frame deltas would be tensor[:,:,1:] - tensor[:,:,:-1]
return res
## training discriminator function
def train_discriminator(args, rng, generator, discriminator, gan_criterion, d_optimizer, train_X, train_Y, train_ims=None):
generator.eval()
discriminator.train()
batchinds = np.arange(train_X.shape[0] // args.batch_size)
totalSteps = len(batchinds)
rng.shuffle(batchinds)
for bii, bi in enumerate(batchinds):
## setting batch data
idxStart = bi * args.batch_size
inputData_np = train_X[idxStart:(idxStart + args.batch_size), :, :]
outputData_np = train_Y[idxStart:(idxStart + args.batch_size), :, :]
inputData = Variable(torch.from_numpy(inputData_np)).cuda()
outputGT = Variable(torch.from_numpy(outputData_np)).cuda()
imsData = None
if args.require_image:
imsData_np = train_ims[idxStart:(idxStart + args.batch_size), :, :]
imsData = Variable(torch.from_numpy(imsData_np)).cuda()
## DONE setting batch data
with torch.no_grad():
fake_data = generator(inputData, image_=imsData).detach()
fake_motion = calc_motion(fake_data)
real_motion = calc_motion(outputGT)
fake_score = discriminator(fake_motion)
real_score = discriminator(real_motion)
d_loss = gan_criterion(fake_score, torch.zeros_like(fake_score)) + gan_criterion(real_score, torch.ones_like(real_score))
d_optimizer.zero_grad()
d_loss.backward()
d_optimizer.step()
## training generator function
def train_generator(args, rng, generator, discriminator, reg_criterion, gan_criterion, g_optimizer, train_X, train_Y, train_ims=None):
discriminator.eval()
generator.train()
batchinds = np.arange(train_X.shape[0] // args.batch_size)
totalSteps = len(batchinds)
rng.shuffle(batchinds)
avgLoss = 0.
for bii, bi in enumerate(batchinds):
## setting batch data
idxStart = bi * args.batch_size
inputData_np = train_X[idxStart:(idxStart + args.batch_size), :, :]
outputData_np = train_Y[idxStart:(idxStart + args.batch_size), :, :]
inputData = Variable(torch.from_numpy(inputData_np)).cuda()
outputGT = Variable(torch.from_numpy(outputData_np)).cuda()
imsData = None
if args.require_image:
imsData_np = train_ims[idxStart:(idxStart + args.batch_size), :, :]
imsData = Variable(torch.from_numpy(imsData_np)).cuda()
## DONE setting batch data
output = generator(inputData, image_=imsData)
fake_motion = calc_motion(output)
with torch.no_grad():
fake_score = discriminator(fake_motion)
fake_score = fake_score.detach()
g_loss = reg_criterion(output, outputGT) + gan_criterion(fake_score, torch.ones_like(fake_score))
g_optimizer.zero_grad()
g_loss.backward()
g_optimizer.step()
avgLoss += g_loss.item() * args.batch_size
if bii % args.log_step == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'.format(args.epoch, args.num_epochs, bii, totalSteps,
avgLoss / (totalSteps * args.batch_size),
np.exp(avgLoss / (totalSteps * args.batch_size))))
## validating generator function
def val_generator(args, generator, discriminator, reg_criterion, g_optimizer, test_X, test_Y, currBestLoss, test_ims=None):
testLoss = 0
generator.eval()
discriminator.eval()
batchinds = np.arange(test_X.shape[0] // args.batch_size)
totalSteps = len(batchinds)
for bii, bi in enumerate(batchinds):
## setting batch data
idxStart = bi * args.batch_size
inputData_np = test_X[idxStart:(idxStart + args.batch_size), :, :]
outputData_np = test_Y[idxStart:(idxStart + args.batch_size), :, :]
inputData = Variable(torch.from_numpy(inputData_np)).cuda()
outputGT = Variable(torch.from_numpy(outputData_np)).cuda()
imsData = None
if args.require_image:
imsData_np = test_ims[idxStart:(idxStart + args.batch_size), :, :]
imsData = Variable(torch.from_numpy(imsData_np)).cuda()
## DONE setting batch data
output = generator(inputData, image_=imsData)
g_loss = reg_criterion(output, outputGT)
testLoss += g_loss.item() * args.batch_size
testLoss /= totalSteps * args.batch_size
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'.format(args.epoch, args.num_epochs, bii, totalSteps,
testLoss,
np.exp(testLoss)))
print('----------------------------------')
if testLoss < currBestLoss:
prev_save_epoch = args.epoch
checkpoint = {'epoch': args.epoch,
'state_dict': generator.state_dict(),
'g_optimizer': g_optimizer.state_dict()}
fileName = args.model_path + '/{}{}_checkpoint_e{}_loss{:.4f}.pth'.format(args.tag, args.pipeline, args.epoch, testLoss)
torch.save(checkpoint, fileName)
currBestLoss = testLoss
return currBestLoss
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--base_path', type=str, required=True, help='path to the directory where the data files are stored')
parser.add_argument('--pipeline', type=str, default='arm2wh', help='pipeline specifying which input/output joints to use')
parser.add_argument('--num_epochs', type=int, default=100, help='number of training epochs')
parser.add_argument('--batch_size', type=int, default=64, help='batch size for training')
parser.add_argument('--learning_rate', type=float, default=1e-3, help='learning rate for training G and D')
parser.add_argument('--require_image', action='store_true', help='use additional image feature or not')
parser.add_argument('--model_path', type=str, required=True , help='path for saving trained models')
parser.add_argument('--log_step', type=int, default=100, help='step size for printing log info')
parser.add_argument('--tag', type=str, default='', help='prefix for naming purposes')
args = parser.parse_args()
print(args)
main(args)
| body2hands-main | train_gan.py |
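A minimal, self-contained sketch of the alternating GAN schedule used above (one discriminator step every third epoch, an L1 regression term plus an LSGAN-style MSE term on temporal motion deltas). The ToyGenerator/ToyDiscriminator modules and all shapes are hypothetical stand-ins, not the modelZoo networks; unlike the script above, the adversarial term here backpropagates through the discriminator rather than detaching the score, which is the more standard formulation.
import torch
from torch import nn
class ToyGenerator(nn.Module):
    def __init__(self, in_dim=6, out_dim=42):
        super().__init__()
        self.net = nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1)
    def forward(self, x):
        return self.net(x)
class ToyDiscriminator(nn.Module):
    def __init__(self, in_dim=42):
        super().__init__()
        self.net = nn.Conv1d(in_dim, 1, kernel_size=3, padding=1)
    def forward(self, x):
        return self.net(x)
def motion(t):
    # consecutive-frame deltas along the time axis of a (batch, feat, time) tensor
    return t[:, :, 1:] - t[:, :, :-1]
G, D = ToyGenerator(), ToyDiscriminator()
g_opt = torch.optim.Adam(G.parameters(), lr=1e-3)
d_opt = torch.optim.Adam(D.parameters(), lr=1e-3)
l1, mse = nn.L1Loss(), nn.MSELoss()
x = torch.randn(4, 6, 64)   # toy body (arm) feature windows
y = torch.randn(4, 42, 64)  # toy target hand feature windows
for epoch in range(6):
    if epoch > 0 and epoch % 3 == 0:
        # discriminator step: real vs. generated motion (temporal deltas)
        fake = G(x).detach()
        d_fake, d_real = D(motion(fake)), D(motion(y))
        d_loss = mse(d_fake, torch.zeros_like(d_fake)) + mse(d_real, torch.ones_like(d_real))
        d_opt.zero_grad()
        d_loss.backward()
        d_opt.step()
    else:
        # generator step: L1 regression plus an adversarial term on motion
        out = G(x)
        score = D(motion(out))
        g_loss = l1(out, y) + mse(score, torch.ones_like(score))
        g_opt.zero_grad()
        g_loss.backward()
        g_opt.step()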
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import json
import numpy as np
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
import utils.modelZoo as modelZoo
from utils.load_utils import *
def main(args):
## variable initializations
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
rng = np.random.RandomState(23456)
torch.manual_seed(23456)
torch.cuda.manual_seed(23456)
pipeline = args.pipeline
feature_in_dim, feature_out_dim = FEATURE_MAP[pipeline]
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
## DONE variable initializations
## set up model/ load pretrained model
args.model = 'regressor_fcn_bn_32'
model = getattr(modelZoo,args.model)()
model.build_net(feature_in_dim, feature_out_dim, require_image=args.require_image)
pretrain_model = args.checkpoint
loaded_state = torch.load(pretrain_model, map_location=lambda storage, loc: storage)
model.load_state_dict(loaded_state['state_dict'], strict=False)
model = model.eval()
model.cuda()
criterion = nn.MSELoss()
## DONE set up model/ load pretrained model
## load/prepare data from external files
test_X, test_Y, test_Y_paths, _ = load_windows(args.data_dir, args.pipeline, require_image=args.require_image)
if args.require_image:
test_ims = test_X[1].astype(np.float32)
test_X = test_X[0]
test_X = np.swapaxes(test_X, 1, 2).astype(np.float32)
test_Y = np.swapaxes(test_Y, 1, 2).astype(np.float32)
# standardize
checkpoint_dir = os.path.split(pretrain_model)[0]
model_tag = os.path.basename(args.checkpoint).split(args.pipeline)[0]
preprocess = np.load(os.path.join(checkpoint_dir,'{}{}_preprocess_core.npz'.format(model_tag, args.pipeline)))
body_mean_X = preprocess['body_mean_X']
body_std_X = preprocess['body_std_X']
body_mean_Y = preprocess['body_mean_Y']
body_std_Y = preprocess['body_std_Y']
test_X = (test_X - body_mean_X) / body_std_X
test_Y = (test_Y - body_mean_Y) / body_std_Y
## DONE load/prepare data from external files
## pass loaded data into training
inputData = Variable(torch.from_numpy(test_X)).cuda()
outputGT = Variable(torch.from_numpy(test_Y)).cuda()
imsData = None
if args.require_image:
imsData = Variable(torch.from_numpy(test_ims)).cuda()
output = model(inputData, image_=imsData)
error = criterion(output, outputGT).data
print(">>> TOTAL ERROR: ", error)
print('----------------------------------')
## DONE pass loaded data into training
## preparing output for saving
output_np = output.data.cpu().numpy()
output_gt = outputGT.data.cpu().numpy()
output_np = output_np * body_std_Y + body_mean_Y
output_gt = output_gt * body_std_Y + body_mean_Y
output_np = np.swapaxes(output_np, 1, 2).astype(np.float32)
output_gt = np.swapaxes(output_gt, 1, 2).astype(np.float32)
save_results(test_Y_paths, output_np, args.pipeline, args.base_path, tag=args.tag)
## DONE preparing output for saving
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, required=True, help='path to checkpoint file (pretrained model)')
parser.add_argument('--base_path', type=str, required=True, help='absolute path to the base directory where all of the data is stored')
parser.add_argument('--data_dir', type=str, required=True, help='path to test data directory')
parser.add_argument('--pipeline', type=str, default='arm2wh', help='pipeline specifying which input/output joints to use')
parser.add_argument('--require_image', action='store_true', help='use additional image feature or not')
parser.add_argument('--tag', type=str, default='', help='prefix for naming purposes')
args = parser.parse_args()
print(args)
main(args)
| body2hands-main | sample.py |
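sample.py standardizes its inputs with the statistics saved at training time and, after inference, undoes that standardization and swaps the axes back before saving. A minimal numpy sketch of that round trip, with toy statistics standing in for the saved *_preprocess_core.npz arrays (shapes are illustrative):
import numpy as np
mean_Y = np.random.randn(1, 42, 1).astype(np.float32)                # toy per-channel mean
std_Y = np.abs(np.random.randn(1, 42, 1)).astype(np.float32) + 1e-6  # toy per-channel std
pred_std = np.random.randn(2, 42, 64).astype(np.float32)             # pretend network output, (batch, feat, time)
pred = pred_std * std_Y + mean_Y                                      # undo standardization
pred = np.swapaxes(pred, 1, 2).astype(np.float32)                     # back to (batch, time, feat) for saving
print(pred.shape)  # (2, 64, 42)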
import tensorflow as tf
import os
import sys
from nets.CPM import CPM
from data.DomeReader import DomeReader
from data.TsimonDBReader import TsimonDBReader
from data.RHDReader import RHDReader
from data.STBReader import STBReader
from data.MultiDataset import combineMultiDataset
from data.GAneratedReader import GAneratedReader
import utils.general
import utils.PAF
from utils.multigpu import average_gradients
from tensorflow.python.client import device_lib
num_gpu = sum([_.device_type == 'GPU' for _ in device_lib.list_local_devices()])
fine_tune = False
already_trained = 50000
train_para = {'lr': [1e-4, 1e-5],
'lr_iter': [80000],
'max_iter': 160000,
'show_loss_freq': 10,
'snapshot_freq': 5000,
'snapshot_dir': 'snapshots/Final_qual_hand_clear_zoom',
'finetune_dir': 'snapshots/Final_qual_hand_clear',
'loss_weight_PAF': 1.0,
}
PATH_TO_SNAPSHOTS = './{}/model-{}'.format(train_para['finetune_dir'], already_trained) # only used when USE_RETRAINED is true
ignore_PAF_2D = False
with tf.Graph().as_default(), tf.device('/cpu:0'):
domereader = DomeReader(mode='training', batch_size=5, shuffle=True, objtype=1, crop_noise=True)
domereader.crop_scale_noise_sigma = 0.4
domereader.crop_offset_noise_sigma = 0.2
rhdreader = RHDReader(mode='training', batch_size=2, shuffle=True, objtype=1, crop_noise=True)
rhdreader.crop_scale_noise_sigma = 0.4
rhdreader.crop_offset_noise_sigma = 0.2
tsimonreader = TsimonDBReader(mode='training', batch_size=1, shuffle=True, objtype=1, crop_noise=True)
tsimonreader.crop_scale_noise_sigma = 0.4
tsimonreader.crop_offset_noise_sigma = 0.2
# ganeratedReader = GAneratedReader(mode='training', batch_size=2, shuffle=True, objtype=1, crop_noise=True)
data = combineMultiDataset([
domereader.get(),
rhdreader.get(),
tsimonreader.get(),
# ganeratedReader.get(),
],
name_wanted=['image_crop', 'scoremap2d', 'hand_valid', 'PAF', 'PAF_type', 'mask_crop'])
# data = domereader.get()
# stbreader = STBReader(mode='training', batch_size=4, shuffle=True, objtype=1, crop_noise=True)
# data = stbreader.get()
for k, v in data.items():
data[k] = tf.split(v, num_gpu, 0)
if fine_tune:
global_step = tf.Variable(already_trained + 1, trainable=False, name="global_step")
else:
global_step = tf.Variable(0, trainable=False, name="global_step")
lr_scheduler = utils.general.LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
lr = lr_scheduler.get_lr(global_step)
opt = tf.train.AdamOptimizer(lr)
tower_grads = []
tower_losses = []
tower_losses_PAF = []
tower_losses_2d = []
with tf.variable_scope(tf.get_variable_scope()):
for ig in range(num_gpu):
with tf.device('/gpu:%d' % ig):
# build network
net = CPM(out_chan=22, numPAF=20, crop_size=368, withPAF=True, PAFdim=3)
predicted_scoremaps, _, predicted_PAFs = net.inference(data['image_crop'][ig], train=True)
# Loss
assert len(predicted_scoremaps) == 6
s = data['scoremap2d'][ig].get_shape().as_list()
valid = tf.concat([data['hand_valid'][ig], tf.ones((s[0], 1), dtype=tf.bool)], axis=1)
valid = tf.cast(valid, tf.float32)
mask_scoremap = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
loss_2d = 0.0
# multiply mask_scoremap to mask out the invalid areas
for ip, predicted_scoremap in enumerate(predicted_scoremaps):
resized_scoremap = tf.image.resize_images(predicted_scoremap, (s[1], s[2]))
mean_over_pixel = tf.reduce_sum(tf.square((resized_scoremap - data['scoremap2d'][ig]) * mask_scoremap), [1, 2]) / (tf.reduce_sum(mask_scoremap, [1, 2]) + 1e-6)
loss_2d_ig = tf.reduce_sum(valid * mean_over_pixel) / (tf.reduce_sum(valid) + 1e-6)
loss_2d += loss_2d_ig
loss_2d /= len(predicted_scoremaps)
assert 'PAF' in data
loss_PAF = 0.0
valid_PAF = tf.cast(utils.PAF.getValidPAF(data['hand_valid'][ig], 1, PAFdim=3), tf.float32)
# multiply mask_PAF to mask out the invalid areas
s = data['PAF'][ig].get_shape().as_list()
mask_PAF = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1, 3]) # detach x, y, z
if ignore_PAF_2D:
mask_PAF2D = mask_PAF * tf.constant([0, 0, 0], dtype=tf.float32)
else:
mask_PAF2D = mask_PAF * tf.constant([1, 1, 0], dtype=tf.float32) # for the 2D case
mask_PAF = tf.where(data['PAF_type'][ig], mask_PAF, mask_PAF2D) # take out corresponding mask by PAF type
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1])
for ip, pred_PAF in enumerate(predicted_PAFs):
resized_PAF = tf.image.resize_images(pred_PAF, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
channelWisePAF = tf.reshape(resized_PAF, [s[0], s[1], s[2], -1, 3])
PAF_x2y2 = tf.sqrt(tf.reduce_sum(tf.square(channelWisePAF[:, :, :, :, 0:2]), axis=4)) + 1e-6
PAF_normed_x = channelWisePAF[:, :, :, :, 0] / PAF_x2y2
PAF_normed_y = channelWisePAF[:, :, :, :, 1] / PAF_x2y2
PAF_normed_z = tf.zeros(PAF_normed_x.get_shape(), dtype=tf.float32)
normed_PAF = tf.stack([PAF_normed_x, PAF_normed_y, PAF_normed_z], axis=4)
normed_PAF = tf.reshape(normed_PAF, [s[0], s[1], s[2], -1])
normed_PAF = tf.where(tf.logical_and(tf.not_equal(data['PAF'][ig], 0.0), tf.not_equal(resized_PAF, 0.0)),
normed_PAF, tf.zeros((s[0], s[1], s[2], s[3]), dtype=tf.float32)) # use normed_PAF only in pixels where PAF is not zero
final_PAF = tf.where(data['PAF_type'][ig], resized_PAF, normed_PAF)
# mean_over_pixel = tf.reduce_sum(tf.square((resized_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
mean_over_pixel = tf.reduce_sum(tf.square((final_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
loss_PAF_ig = tf.reduce_sum(valid_PAF * mean_over_pixel) / (tf.reduce_sum(valid_PAF) + 1e-6)
loss_PAF += loss_PAF_ig
loss_PAF /= len(predicted_PAFs)
loss = loss_2d + loss_PAF * train_para['loss_weight_PAF']
tf.get_variable_scope().reuse_variables()
tower_losses.append(loss)
tower_losses_PAF.append(loss_PAF)
tower_losses_2d.append(loss_2d)
grad = opt.compute_gradients(loss)
tower_grads.append(grad)
total_loss = tf.reduce_mean(tower_losses)
total_loss_PAF = tf.reduce_mean(tower_losses_PAF)
total_loss_2d = tf.reduce_mean(tower_losses_2d)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
tf.summary.scalar('loss', total_loss)
tf.summary.scalar('loss_PAF', total_loss_PAF)
tf.summary.scalar('loss_2d', total_loss_2d)
# init weights
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(train_para['snapshot_dir'] + '/train', sess.graph)
if not fine_tune:
start_iter = 0
net.init_vgg(sess)
else:
saver.restore(sess, PATH_TO_SNAPSHOTS)
start_iter = already_trained + 1
# snapshot dir
if not os.path.exists(train_para['snapshot_dir']):
os.mkdir(train_para['snapshot_dir'])
print('Created snapshot dir:', train_para['snapshot_dir'])
# Training loop
print('Starting to train ...')
for i in range(start_iter, train_para['max_iter']):
summary, _, loss_v, loss_2d_v, loss_PAF_v = sess.run([merged, apply_gradient_op, total_loss, total_loss_2d, total_loss_PAF])
train_writer.add_summary(summary, i)
if (i % train_para['show_loss_freq']) == 0:
print('Iteration %d\t Loss %.1e, Loss_2d %.1e, Loss_PAF %.1e' % (i, loss_v, loss_2d_v, loss_PAF_v))
sys.stdout.flush()
if (i % train_para['snapshot_freq']) == 0:
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=i)
print('Saved a snapshot.')
sys.stdout.flush()
print('Training finished. Saving final snapshot.')
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=train_para['max_iter'])
| body2hands-main | visualization/POF/training_PAF_hand.py |
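The 2D scoremap loss in the script above masks out invalid crop regions and averages only over valid keypoint channels: a per-channel squared error normalized by the mask area, followed by a validity-weighted mean over channels. A standalone numpy sketch of that normalization (shapes are illustrative; the 1e-6 epsilon mirrors the one used above):
import numpy as np
def masked_scoremap_loss(pred, gt, mask, valid, eps=1e-6):
    # pred, gt, mask: (B, H, W, C); valid: (B, C); mask and valid are 0/1 floats
    per_channel = np.sum(((pred - gt) * mask) ** 2, axis=(1, 2)) / (np.sum(mask, axis=(1, 2)) + eps)
    return np.sum(valid * per_channel) / (np.sum(valid) + eps)
B, H, W, C = 2, 368, 368, 22
pred = np.random.rand(B, H, W, C).astype(np.float32)
gt = np.random.rand(B, H, W, C).astype(np.float32)
mask = np.ones((B, H, W, C), dtype=np.float32)
valid = np.ones((B, C), dtype=np.float32)
print(masked_scoremap_loss(pred, gt, mask, valid))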
import os
import numpy as np
import numpy.linalg as nl
import json
import pickle
import argparse
map_body25_to_body19 = list(range(8)) + list(range(9, 25)) # total of 24
parser = argparse.ArgumentParser()
parser.add_argument('--seqName', '-n', type=str)
parser.add_argument('--rootDir', '-r', type=str)
parser.add_argument('--count', '-c', type=int)
args = parser.parse_args()
seqName = args.seqName
root = args.rootDir
calib_file = os.path.join(root, 'calib.json')
with open(calib_file) as f:
calib_data = json.load(f)
frameRange = range(1, args.count + 1)
person_idx = -1
bs = []
ls = []
rs = []
fs = []
img_dirs = []
frame_indices = []
for i in frameRange:
img_file = os.path.join(root, "raw_image", '{}_{:08d}.png'.format(seqName, i))
assert os.path.isfile(img_file)
annot_2d = os.path.join(root, 'openpose_result', '{}_{:08d}_keypoints.json'.format(seqName, i))
assert os.path.exists(annot_2d)
with open(annot_2d) as f:
data = json.load(f)
# ideally there should be only one person
assert len(data['people']) == 1
ip = 0
joint2d = np.array(data["people"][ip]["pose_keypoints_2d"]).reshape(-1, 3)
left_hand2d = np.array(data["people"][ip]["hand_left_keypoints_2d"]).reshape(-1, 3)
right_hand2d = np.array(data["people"][ip]["hand_right_keypoints_2d"]).reshape(-1, 3)
face2d = np.array(data["people"][ip]["face_keypoints_2d"]).reshape(-1, 3)
bs.append(joint2d[map_body25_to_body19])
fs.append(face2d)
ls.append(left_hand2d)
rs.append(right_hand2d)
img_dirs.append(img_file)
frame_indices.append(i)
img_dirs = np.array(img_dirs)
bs = np.array(bs)
ls = np.array(ls)
rs = np.array(rs)
fs = np.array(fs)
frame_indices = np.array(frame_indices)
print('Openpose output collected: data dimension:')
print((len(ls), len(rs), len(fs), len(bs), len(img_dirs), len(frame_indices)))
with open('{}/{}.pkl'.format(root, seqName), 'wb') as f:
pickle.dump((bs, ls, rs, fs, img_dirs, calib_data, frame_indices), f)
| body2hands-main | visualization/POF/collect_openpose.py |
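collect_openpose.py depends on OpenPose's JSON layout, where each keypoint array is a flat [x, y, confidence] list per joint (25 body, 21 per hand, 70 face). A minimal parsing sketch against a fabricated in-memory record rather than a real OpenPose output file:
import json
import numpy as np
fake_json = json.dumps({
    "people": [{
        "pose_keypoints_2d": [100.0, 200.0, 0.9] * 25,        # 25 BODY_25 joints, flattened x, y, confidence
        "hand_left_keypoints_2d": [0.0, 0.0, 0.0] * 21,
        "hand_right_keypoints_2d": [50.0, 60.0, 0.8] * 21,
        "face_keypoints_2d": [0.0, 0.0, 0.0] * 70,
    }]
})
data = json.loads(fake_json)
joint2d = np.array(data["people"][0]["pose_keypoints_2d"]).reshape(-1, 3)              # (25, 3)
right_hand2d = np.array(data["people"][0]["hand_right_keypoints_2d"]).reshape(-1, 3)   # (21, 3)
print(joint2d.shape, right_hand2d.shape)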
import tensorflow as tf
import os
import sys
from nets.CPM import CPM
from nets.Hourglass import Hourglass
from data.DomeReader import DomeReader
from data.HumanReader import HumanReader
from data.MultiDataset import combineMultiDataset
from data.COCOReader import COCOReader
import pickle
import utils.general
import utils.PAF
from utils.multigpu import average_gradients
from tensorflow.python.client import device_lib
num_gpu = sum([_.device_type == 'GPU' for _ in device_lib.list_local_devices()])
fine_tune = True
already_trained = 100000
train_para = {'lr': [1e-4, 1e-5],
'lr_iter': [100000],
'max_iter': 200000,
'show_loss_freq': 10,
'snapshot_freq': 5000,
'snapshot_dir': 'snapshots/PAF_body_headtop_domehumanCOCO_chest_noPAF',
'finetune_dir': 'snapshots/PAF_body_headtop_domehumanCOCO_chest_noPAF',
'loss_weight_PAF': 1.0,
}
PATH_TO_SNAPSHOTS = './{}/model-{}'.format(train_para['finetune_dir'], already_trained) # only used when USE_RETRAINED is true
numStage = 5
ignore_PAF_2D = True
with tf.Graph().as_default(), tf.device('/cpu:0'):
domereader = DomeReader(mode='training', batch_size=1, shuffle=True, objtype=0, crop_noise=True, full_only=False, head_top=True)
# domereader.rotate_augmentation = True
human36reader = HumanReader(mode='training', batch_size=2, shuffle=True, objtype=0, crop_noise=True)
# mpi3dreader = HumanReader(mode='training', name='MPI_INF_3DHP', batch_size=2, shuffle=True, objtype=0, crop_noise=True)
cocoreader = COCOReader(mode='training', batch_size=1, shuffle=True, objtype=0, crop_noise=True)
# cocoreader.rotate_augmentation = True
# upreader = HumanReader(mode='training', name='UP', batch_size=1, shuffle=True, objtype=0, crop_noise=True)
# surrealreader = HumanReader(mode='training', name='SURREAL', batch_size=3, shuffle=True, objtype=0, crop_noise=True)
# domereader.crop_size = 512
# human36reader.crop_size = 512
# cocoreader.crop_size = 512
data = combineMultiDataset([
domereader.get(),
human36reader.get(),
cocoreader.get(),
# mpi3dreader.get()
# upreader.get(),
# surrealreader.get()
],
name_wanted=['image_crop', 'scoremap2d', 'body_valid', 'PAF', 'PAF_type', 'mask_crop'])
for k, v in data.items():
data[k] = tf.split(v, num_gpu, 0)
if fine_tune:
global_step = tf.Variable(already_trained + 1, trainable=False, name="global_step")
else:
global_step = tf.Variable(0, trainable=False, name="global_step")
lr_scheduler = utils.general.LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
lr = lr_scheduler.get_lr(global_step)
opt = tf.train.AdamOptimizer(lr)
tower_grads = []
tower_losses = []
tower_losses_PAF = []
tower_losses_2d = []
with tf.variable_scope(tf.get_variable_scope()):
for ig in range(num_gpu):
with tf.device('/gpu:%d' % ig):
# build network
net = CPM(out_chan=21, crop_size=368, withPAF=True, PAFdim=3, numPAF=23, numStage=numStage)
predicted_scoremaps, _, predicted_PAFs = net.inference(data['image_crop'][ig], train=True)
# with tf.variable_scope('hourglass'):
# net = Hourglass(num_output_channel=20, PAF_dim=3, num_PAF=20, num_hourglass=numStage)
# predicted_scoremaps, predicted_PAFs = net.inference(data['image_crop'][ig])
# Loss
s = data['scoremap2d'][ig].get_shape().as_list()
valid = tf.concat([data['body_valid'][ig], tf.zeros((s[0], 1), dtype=tf.bool)], axis=1)
valid = tf.cast(valid, tf.float32)
mask_scoremap = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
loss_2d = 0.0
# multiply mask_scoremap to mask out the invalid areas
for ip, predicted_scoremap in enumerate(predicted_scoremaps):
resized_scoremap = tf.image.resize_images(predicted_scoremap, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
mean_over_pixel = tf.reduce_sum(tf.square((resized_scoremap - data['scoremap2d'][ig]) * mask_scoremap), [1, 2]) / (tf.reduce_sum(mask_scoremap, [1, 2]) + 1e-6)
loss_2d_ig = tf.reduce_sum(valid * mean_over_pixel) / (tf.reduce_sum(valid) + 1e-6)
loss_2d += loss_2d_ig
loss_2d /= len(predicted_scoremaps)
assert 'PAF' in data
loss_PAF = 0.0
valid_PAF = tf.cast(utils.PAF.getValidPAF(data['body_valid'][ig], 0, PAFdim=3), tf.float32)
# multiply mask_PAF to mask out the invalid areas
s = data['PAF'][ig].get_shape().as_list()
mask_PAF = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1, 3]) # detach x, y, z
if ignore_PAF_2D:
mask_PAF2D = mask_PAF * tf.constant([0, 0, 0], dtype=tf.float32)
else:
mask_PAF2D = mask_PAF * tf.constant([1, 1, 0], dtype=tf.float32) # for the 2D case
mask_PAF = tf.where(data['PAF_type'][ig], mask_PAF, mask_PAF2D) # take out corresponding mask by PAF type
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1])
for ip, pred_PAF in enumerate(predicted_PAFs):
resized_PAF = tf.image.resize_images(pred_PAF, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
channelWisePAF = tf.reshape(resized_PAF, [s[0], s[1], s[2], -1, 3])
PAF_x2y2 = tf.sqrt(tf.reduce_sum(tf.square(channelWisePAF[:, :, :, :, 0:2]), axis=4)) + 1e-6
PAF_normed_x = channelWisePAF[:, :, :, :, 0] / PAF_x2y2
PAF_normed_y = channelWisePAF[:, :, :, :, 1] / PAF_x2y2
PAF_normed_z = tf.zeros(PAF_normed_x.get_shape(), dtype=tf.float32)
normed_PAF = tf.stack([PAF_normed_x, PAF_normed_y, PAF_normed_z], axis=4)
normed_PAF = tf.reshape(normed_PAF, [s[0], s[1], s[2], -1])
normed_PAF = tf.where(tf.logical_and(tf.not_equal(data['PAF'][ig], 0.0), tf.not_equal(resized_PAF, 0.0)),
normed_PAF, tf.zeros((s[0], s[1], s[2], s[3]), dtype=tf.float32)) # use normed_PAF only in pixels where PAF is not zero
final_PAF = tf.where(data['PAF_type'][ig], resized_PAF, normed_PAF)
# mean_over_pixel = tf.reduce_sum(tf.square((resized_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
mean_over_pixel = tf.reduce_sum(tf.square((final_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
loss_PAF_ig = tf.reduce_sum(valid_PAF * mean_over_pixel) / (tf.reduce_sum(valid_PAF) + 1e-6)
loss_PAF += loss_PAF_ig
loss_PAF /= len(predicted_PAFs)
loss = loss_2d + loss_PAF * train_para['loss_weight_PAF']
tf.get_variable_scope().reuse_variables()
tower_losses.append(loss)
tower_losses_PAF.append(loss_PAF)
tower_losses_2d.append(loss_2d)
grad = opt.compute_gradients(loss)
tower_grads.append(grad)
total_loss = tf.reduce_mean(tower_losses)
total_loss_PAF = tf.reduce_mean(tower_losses_PAF)
total_loss_2d = tf.reduce_mean(tower_losses_2d)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
tf.summary.scalar('loss', total_loss)
tf.summary.scalar('loss_PAF', total_loss_PAF)
tf.summary.scalar('loss_2d', total_loss_2d)
# init weights
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(train_para['snapshot_dir'] + '/train', sess.graph)
if not fine_tune:
start_iter = 0
if net.name == 'CPM':
net.init('./weights/openpose_body_3DPAF_randomz_headtop_chest.npy', sess)
# net.init('./weights/openpose_body_expanded_PAF.npy', sess)
elif net.name == 'Hourglass':
from tensorflow.contrib.framework import assign_from_values_fn
with open('weights/Hourglass_weights_processed.pkl', 'rb') as f:
hg_data = pickle.load(f)
map_trainable_variables = {i.name.replace('hourglass', 'my_model').replace(':0', ''): i.name for i in tf.trainable_variables()}
dic = dict()
for i, j in map_trainable_variables.items():
if i in hg_data:
dic[j] = hg_data[i]
init_fn = assign_from_values_fn(dic)
assert init_fn is not None
init_fn(sess)
else:
raise NotImplementedError
# net.init_vgg(sess)
else:
from utils.load_ckpt import load_weights_from_snapshot
load_weights_from_snapshot(sess, PATH_TO_SNAPSHOTS)
# saver.restore(sess, PATH_TO_SNAPSHOTS)
start_iter = already_trained + 1
# snapshot dir
if not os.path.exists(train_para['snapshot_dir']):
os.mkdir(train_para['snapshot_dir'])
print('Created snapshot dir:', train_para['snapshot_dir'])
# Training loop
print('Starting to train ...')
for i in range(start_iter, train_para['max_iter']):
# V = sess.run([resized_PAF, mask_PAF, PAF_x2y2, PAF_normed_x, PAF_normed_y, PAF_normed_z, normed_PAF, final_PAF, mean_over_pixel, loss_PAF_ig])
# import pdb
# pdb.set_trace()
summary, _, loss_v, loss_2d_v, loss_PAF_v = sess.run([merged, apply_gradient_op, total_loss, total_loss_2d, total_loss_PAF])
train_writer.add_summary(summary, i)
if (i % train_para['show_loss_freq']) == 0:
print('Iteration %d\t Loss %.1e, Loss_2d %.1e, Loss_PAF %.1e' % (i, loss_v, loss_2d_v, loss_PAF_v))
sys.stdout.flush()
if (i % train_para['snapshot_freq']) == 0 and i > start_iter:
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=i)
print('Saved a snapshot.')
sys.stdout.flush()
print('Training finished. Saving final snapshot.')
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=train_para['max_iter'])
| body2hands-main | visualization/POF/training_e2e_PAF.py |
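Both PAF training scripts handle samples with 2D-only supervision by renormalizing the predicted x, y PAF components to unit length and zeroing the z component before the masked loss. A standalone numpy sketch of that renormalization (array shapes are illustrative, not the graph tensors):
import numpy as np
def normalize_paf_xy(paf, eps=1e-6):
    # paf: (H, W, num_limbs, 3); keep only a unit-length 2D direction, zero out z
    xy_norm = np.sqrt(np.sum(paf[..., 0:2] ** 2, axis=-1, keepdims=True)) + eps
    out = np.zeros_like(paf)
    out[..., 0:2] = paf[..., 0:2] / xy_norm
    return out
paf = np.random.randn(46, 46, 23, 3).astype(np.float32)
unit_xy = normalize_paf_xy(paf)
print(np.linalg.norm(unit_xy[..., 0:2], axis=-1).mean())  # close to 1.0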
from __future__ import print_function, unicode_literals
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import matplotlib.pyplot as plt
import matplotlib.patches
from mpl_toolkits.mplot3d import Axes3D
import argparse
import cv2
import os
from time import time
import json
from nets.CPM import CPM
from utils.load_ckpt import load_weights_from_snapshot
import utils.general
import utils.keypoint_conversion
import utils.PAF
import pickle
from utils.smoothing import savitzky_golay
body_zoom = 1.8
# hand_zoom = 2.5 # dslr_hands5, dslr_hands6, youtube_talkshow1
hand_zoom = 1.5 # youtube_conduct4
TRACK_HAND = True
BACK_TRACK_THRESH = 2.0
# evaluate both hands and body
parser = argparse.ArgumentParser()
parser.add_argument('--visualize', '-v', action='store_true')
parser.add_argument('--seqName', '-s', type=str)
parser.add_argument('--path', '-p', type=str)
parser.add_argument('--start-from', type=int, default=1)
parser.add_argument('--end-index', type=int, default=-1)
parser.add_argument('--width', type=int, default=1920) # to determine whether a keypoint is out of image
parser.add_argument('--height', type=int, default=1080)
parser.add_argument('--save-image', action='store_true')
parser.add_argument('--freeze', '-f', action='store_true') # upperbody only
args = parser.parse_args()
assert os.path.isdir(args.path)
if not os.path.isdir(os.path.join(args.path, 'net_output')):
os.makedirs(os.path.join(args.path, 'net_output'))
assert os.path.isdir(os.path.join(args.path, 'net_output'))
if args.save_image:
for folder in ['/body_2d', '/lhand_2d', '/rhand_2d', '/paf_xy_body', '/paf_z_body', '/paf_xy_lhand', '/paf_z_lhand', '/paf_xy_rhand', '/paf_z_rhand', '/heatmap']:
try:
os.makedirs(args.path + folder)
except Exception as e:
print ('Folder {} exists'.format(args.path + folder))
start_from = args.start_from
end_index = args.end_index
image_root = os.path.join(args.path, 'raw_image')
pkl_file = os.path.join(args.path, '{}.pkl'.format(args.seqName))
with open(pkl_file, 'rb') as f:
pkl_data = pickle.load(f)
num_samples = len(pkl_data[0]) # number of frames collected in pkl
K = np.array(pkl_data[5]['K'], dtype=np.float32)
s = [1, 368, 368, 3]
assert s[1] == s[2]
data = {
'bimage_crop': tf.placeholder_with_default(tf.zeros([s[0], s[1], s[2], 3], dtype=tf.float32),
shape=[s[0], s[1], s[2], 3]),
'limage_crop': tf.placeholder_with_default(tf.zeros([s[0], s[1], s[2], 3], dtype=tf.float32),
shape=[s[0], s[1], s[2], 3]),
'rimage_crop': tf.placeholder_with_default(tf.zeros([s[0], s[1], s[2], 3], dtype=tf.float32),
shape=[s[0], s[1], s[2], 3])
}
bcrop_center2d_origin = np.zeros((num_samples, 2), dtype=np.float32)
bscale2d_origin = np.zeros((num_samples,), dtype=np.float32)
# precompute the body bounding box for smoothing
for i in range(num_samples):
openpose_body = pkl_data[0][i, list(range(18)) + [1, 1], :2].astype(np.float32) # duplicate neck for headtop and chest
openpose_body_score = pkl_data[0][i, list(range(18)) + [0, 0], 2].astype(np.float32)
openpose_body_valid = (openpose_body_score > 0.01)
if not openpose_body_valid.any():
# no bounding box
if i > 0:
bcrop_center2d_origin[i] = bcrop_center2d_origin[i - 1]
bscale2d_origin[i] = bscale2d_origin[i - 1]
min_coord = np.amin(openpose_body[openpose_body_valid], axis=0)
max_coord = np.amax(openpose_body[openpose_body_valid], axis=0)
bcrop_center2d_origin[i] = 0.5 * (min_coord + max_coord)
fit_size = np.amax(np.maximum(max_coord - bcrop_center2d_origin[i], bcrop_center2d_origin[i] - min_coord))
# if (not openpose_body_valid[9]) and (not openpose_body_valid[10]) and (not openpose_body_valid[12]) and (not openpose_body_valid[13]):
if args.freeze or ((not openpose_body_valid[9]) and (not openpose_body_valid[10]) and (not openpose_body_valid[12]) and (not openpose_body_valid[13])):
# upper body only (detected by openpose)
# crop_size_best = 2 * fit_size * 3 # youtube_talkshow1
crop_size_best = 2 * fit_size * 4
else:
crop_size_best = 2 * fit_size * body_zoom
bscale2d_origin[i] = float(s[1]) / crop_size_best
bcrop_center2d_smooth = np.stack((savitzky_golay(bcrop_center2d_origin[:, 0], 21, 3), savitzky_golay(bcrop_center2d_origin[:, 1], 21, 3)), axis=1)
bscale2d_smooth = savitzky_golay(bscale2d_origin, 21, 3)
####
print('set bscale2d constant')
# bscale2d_smooth[1:] = bscale2d_smooth[0]
bscale2d_smooth[:-1] = bscale2d_smooth[-1]
if args.visualize:
plt.plot(bcrop_center2d_origin[:, 0])
plt.plot(bcrop_center2d_smooth[:, 0])
plt.show()
plt.plot(bcrop_center2d_origin[:, 1])
plt.plot(bcrop_center2d_smooth[:, 1])
plt.show()
plt.plot(bscale2d_origin)
plt.plot(bscale2d_smooth)
plt.show()
max_rsize = 0.0
max_lsize = 0.0
rhand_ref_frame = -1
lhand_ref_frame = -1
for i in range(num_samples):
openpose_rhand = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_rhand_score = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand_score'], 2].astype(np.float32)
openpose_rhand_valid = (openpose_rhand_score > 0.01)
if openpose_rhand_valid.any():
min_coord = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rfit_size = np.amax(max_coord - min_coord) / 2
if rfit_size > max_rsize:
max_rsize = rfit_size
rhand_ref_frame = i
openpose_lhand = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_lhand_score = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand_score'], 2].astype(np.float32)
openpose_lhand_valid = (openpose_lhand_score > 0.01)
if openpose_lhand_valid.any():
min_coord = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lfit_size = np.amax(max_coord - min_coord) / 2
if lfit_size > max_lsize:
max_lsize = lfit_size
lhand_ref_frame = i
assert max_rsize > 0
assert max_lsize > 0
rscale2d_ref = float(s[1]) / (2 * max_rsize * hand_zoom)
lscale2d_ref = float(s[1]) / (2 * max_lsize * hand_zoom)
bodynet = CPM(out_chan=21, crop_size=368, withPAF=True, PAFdim=3, numPAF=23)
handnet = CPM(out_chan=22, numPAF=20, crop_size=368, withPAF=True, PAFdim=3)
with tf.variable_scope('body'):
# feed through network
bheatmap_2d, _, bPAF = bodynet.inference(data['bimage_crop'], train=False)
with tf.variable_scope('hand', reuse=tf.AUTO_REUSE):
lheatmap_2d, _, lPAF = handnet.inference(data['limage_crop'], train=False)
# rheatmap_2d, _, rPAF = handnet.inference(data['rimage_crop'], train=False)
rheatmap_2d, _, rPAF = handnet.inference(data['rimage_crop'][:, :, ::-1, :], train=False) # flip right to left
s = data['bimage_crop'].get_shape().as_list()
data['bheatmap_2d'] = tf.image.resize_images(bheatmap_2d[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
data['bPAF'] = tf.image.resize_images(bPAF[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
s = data['limage_crop'].get_shape().as_list()
data['lheatmap_2d'] = tf.image.resize_images(lheatmap_2d[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
data['lPAF'] = tf.image.resize_images(lPAF[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
s = data['rimage_crop'].get_shape().as_list()
data['rheatmap_2d'] = tf.image.resize_images(rheatmap_2d[-1][:, :, ::-1, :], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC) # flip back to right hand
data['rPAF'] = tf.image.resize_images(rPAF[-1][:, :, ::-1, :], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
data['rPAF'] = data['rPAF'] * tf.constant([-1, 1, 1] * (60 // 3), dtype=tf.float32)
# Start TF
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.35)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
cpt = './snapshots/Final_qual_domeCOCO_chest_noPAF2D/model-390000'
load_weights_from_snapshot(sess, cpt, discard_list=['Adam', 'global_step', 'beta'], rename_dict={'CPM': 'body/CPM'})
cpt = './snapshots/Final_qual_hand_clear_zoom/model-160000'
load_weights_from_snapshot(sess, cpt, discard_list=['Adam', 'global_step', 'beta'], rename_dict={'CPM': 'hand/CPM'})
eval_list = ['bimage_crop', 'image', 'bcrop_center2d', 'bscale2d', 'bheatmap_2d', 'bPAF', 'body_uv_local', 'img_dir']
eval_list += ['limage_crop', 'lcrop_center2d', 'lscale2d', 'lheatmap_2d', 'lPAF', 'lhand_uv_local']
eval_list += ['rimage_crop', 'rcrop_center2d', 'rscale2d', 'rheatmap_2d', 'rPAF', 'rhand_uv_local']
eval_list += ['K', 'openpose_face', 'body_valid', 'left_hand_valid', 'right_hand_valid', 'openpose_body_score', 'openpose_lhand_score', 'openpose_rhand_score', 'openpose_face_score']
eval_list += ['openpose_foot', 'openpose_foot_score']
BODY_PAF_SELECT_INDEX = np.concatenate([np.arange(9), np.arange(10, 13), np.arange(14, 23)], axis=0)
lcrop_center2d_origin = np.zeros((num_samples, 2), dtype=np.float32)
lscale2d_origin = np.zeros((num_samples), dtype=np.float32)
rcrop_center2d_origin = np.zeros((num_samples, 2), dtype=np.float32)
rscale2d_origin = np.zeros((num_samples), dtype=np.float32)
frame_indices = pkl_data[6]
for i, frame_index in enumerate(frame_indices):
if frame_index < start_from:
continue
if args.end_index > 0 and frame_index > args.end_index:
break
if frame_index == start_from:
start_i = i
print('Start running frame No. {:08d}'.format(frame_index))
# read the data here
filename = os.path.join(image_root, pkl_data[4][i])
image_v = cv2.imread(filename)[:, :, ::-1] # convert to RGB order
val_dict = {}
openpose_body = pkl_data[0][i, list(range(18)) + [1, 1], :2].astype(np.float32) # duplicate neck for headtop and chest
openpose_body_score = pkl_data[0][i, list(range(18)) + [0, 0], 2].astype(np.float32)
openpose_body_valid = (openpose_body_score > 0)
val_dict['openpose_body'] = openpose_body
val_dict['openpose_body_score'] = openpose_body_score
val_dict['openpose_body_valid'] = openpose_body_valid
val_dict['openpose_face'] = pkl_data[3][i, :, :2]
val_dict['openpose_face_score'] = pkl_data[3][i, :, 2]
val_dict['openpose_foot'] = pkl_data[0][i, 18:, :2]
val_dict['openpose_foot_score'] = pkl_data[0][i, 18:, 2]
"""
crop body and feed into network
"""
val_dict['bcrop_center2d'] = bcrop_center2d_smooth[i]
val_dict['bscale2d'] = bscale2d_smooth[i]
bcrop_center2d = bcrop_center2d_smooth[i]
bscale2d = bscale2d_smooth[i]
# compute the Homography
bH = np.array([[bscale2d, 0, s[2] / 2 - bscale2d * bcrop_center2d[0]], [0, bscale2d, s[1] / 2 - bscale2d * bcrop_center2d[1]]], dtype=np.float32)
bimage_crop_v = cv2.warpAffine(image_v, bH, (s[2], s[1]), flags=cv2.INTER_LANCZOS4)
bimage_crop_v_feed = np.expand_dims((bimage_crop_v / 255 - 0.5), axis=0)
bheatmap_2d, bPAF = [np.squeeze(_) for _ in sess.run([data['bheatmap_2d'], data['bPAF']], feed_dict={data['bimage_crop']: bimage_crop_v_feed})]
val_dict['bheatmap_2d'] = bheatmap_2d
val_dict['bPAF'] = bPAF
# store the wrist coordinate of previous frame, to help verify hand bounding boxes
if frame_index > start_from:
lwrist_last = borigin[7]
lwrist_valid_last = body_valid[7]
rwrist_last = borigin[4]
rwrist_valid_last = body_valid[4]
# 2D body detection
if frame_index == start_from or args.seqName == 'qualitative':
body2d_pred_v, bscore = utils.PAF.detect_keypoints2d_PAF(val_dict['bheatmap_2d'], val_dict['bPAF'])
else:
body2d_pred_v, bscore = utils.PAF.detect_keypoints2d_PAF(val_dict['bheatmap_2d'], val_dict['bPAF'], prev_frame=prev_frame)
prev_frame = body2d_pred_v
body2d_pred_v = body2d_pred_v[:20, :] # with chest
body_valid = (bscore > 0.30)
body2d_pred_v[np.logical_not(body_valid)] = 0 # must do this, otherwise PAF_to_3D error
borigin = (body2d_pred_v - 184) / val_dict['bscale2d'] + val_dict['bcrop_center2d']
bout = (borigin[:, 0] < 0) + (borigin[:, 1] < 0) + (borigin[:, 0] >= args.width) + (borigin[:, 1] >= args.height)
body2d_pred_v[bout] = 0.0
body_valid[bout] = False
# store the wrist coordinate of current frame, to help verify hand bounding boxes
if frame_index > start_from:
lwrist = borigin[7]
lwrist_valid = body_valid[7]
rwrist = borigin[4]
rwrist_valid = body_valid[4]
"""
crop hands and feed into network
"""
openpose_rhand = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_rhand_score = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand_score'], 2].astype(np.float32)
openpose_rhand_valid = (openpose_rhand_score > 0.01)
openpose_lhand = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_lhand_score = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand_score'], 2].astype(np.float32)
openpose_lhand_valid = (openpose_lhand_score > 0.01)
val_dict['openpose_rhand'] = openpose_rhand
val_dict['openpose_rhand_score'] = openpose_rhand_score
val_dict['openpose_rhand_valid'] = openpose_rhand_valid
val_dict['openpose_lhand'] = openpose_lhand
val_dict['openpose_lhand_score'] = openpose_lhand_score
val_dict['openpose_lhand_valid'] = openpose_lhand_valid
lscale2d = lscale2d_ref
rscale2d = rscale2d_ref
if not TRACK_HAND or frame_index == start_from: # the first frame
if openpose_rhand_valid.any():
min_coord_rhand = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord_rhand = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rcrop_center2d = 0.5 * (min_coord_rhand + max_coord_rhand)
fit_size_rhand = np.amax(np.maximum(max_coord_rhand - rcrop_center2d, rcrop_center2d - min_coord_rhand))
crop_size_best_r = 2 * fit_size_rhand * hand_zoom
else:
rcrop_center2d = np.array([-1000., -1000.])
fit_size_rhand = 100
crop_size_best_r = 2 * fit_size_rhand * hand_zoom
if openpose_lhand_valid.any():
min_coord_lhand = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord_lhand = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lcrop_center2d = 0.5 * (min_coord_lhand + max_coord_lhand)
fit_size_lhand = np.amax(np.maximum(max_coord_lhand - lcrop_center2d, lcrop_center2d - min_coord_lhand))
crop_size_best_l = 2 * fit_size_lhand * hand_zoom
else:
lcrop_center2d = np.array([-1000., -1000.])
fit_size_lhand = 100
crop_size_best_l = 2 * fit_size_lhand * hand_zoom
if not TRACK_HAND:
rscale2d = float(s[1]) / crop_size_best_r
lscale2d = float(s[1]) / crop_size_best_l
else:
# flag, boxes = tracker.update(image_v)
gray_prev_image = cv2.cvtColor(prev_image_v, cv2.COLOR_RGB2GRAY)
gray_current_image = cv2.cvtColor(image_v, cv2.COLOR_RGB2GRAY)
l_lk_params = {'winSize': (int(2 * lhand_track_size), int(2 * lhand_track_size)), 'maxLevel': 3}
lp, lstatus, error = cv2.calcOpticalFlowPyrLK(gray_prev_image, gray_current_image, lcenter.reshape(1, 2), None, **l_lk_params)
lp_2, lstatus_2, error_2 = cv2.calcOpticalFlowPyrLK(gray_current_image, gray_prev_image, lp, None, **l_lk_params)
if nl.norm(lp_2[0] - lcenter) > BACK_TRACK_THRESH or error[0] > 15:
print ('LK left hand failed.')
lstatus[0] = 0
r_lk_params = {'winSize': (int(2 * rhand_track_size), int(2 * rhand_track_size)), 'maxLevel': 3}
rp, rstatus, error = cv2.calcOpticalFlowPyrLK(gray_prev_image, gray_current_image, rcenter.reshape(1, 2), None, **r_lk_params)
rp_2, rstatus_2, error_2 = cv2.calcOpticalFlowPyrLK(gray_current_image, gray_prev_image, rp, None, **r_lk_params)
if nl.norm(rp_2[0] - rcenter) > BACK_TRACK_THRESH or error[0] > 15:
print ('LK right hand failed.')
rstatus[0] = 0
lcrop_center2d_last = lcrop_center2d
rcrop_center2d_last = rcrop_center2d
if lstatus[0]:
lcrop_center2d = lp[0]
elif openpose_lhand_valid.any():
min_coord_lhand = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord_lhand = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lcrop_center2d = 0.5 * (min_coord_lhand + max_coord_lhand)
elif lwrist_valid and lwrist_valid_last:
lcrop_center2d = lcrop_center2d_last + lwrist - lwrist_last
if rstatus[0]:
rcrop_center2d = rp[0]
elif openpose_rhand_valid.any():
min_coord_rhand = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord_rhand = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rcrop_center2d = 0.5 * (min_coord_rhand + max_coord_rhand)
elif rwrist_valid and rwrist_valid_last:
rcrop_center2d = rcrop_center2d_last + rwrist - rwrist_last
# rcrop_center2d = rcenter + rwrist - rwrist_last
# check the distance between wrist & hand bbox, and the velocity of wrist & hand bbox
# Also, if valid keypoint is too few, then don't trust the tracking result.
if np.sum(lhand_valid) < 5 or \
lwrist_valid and nl.norm(lwrist - lcrop_center2d) / lhand_track_size > 2 or \
(lwrist_valid and lwrist_valid_last and
nl.norm(lwrist - lwrist_last - lcrop_center2d + lcrop_center2d_last) / lhand_track_size > 1):
print ('tracking left hand lost, starting from openpose')
if openpose_lhand_valid.any():
min_coord_lhand = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord_lhand = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lcrop_center2d = 0.5 * (min_coord_lhand + max_coord_lhand)
elif lwrist_valid:
lcrop_center2d = lwrist
elif lwrist_valid_last:
lcrop_center2d = lwrist_last
else:
# If Openpose not available and no wrist is available, then do not update the cropping center
lcrop_center2d = lcrop_center2d_last
if np.sum(rhand_valid) < 5 or \
rwrist_valid and nl.norm(rwrist - rcrop_center2d) / rhand_track_size > 2 or \
(rwrist_valid and rwrist_valid_last and
nl.norm(rwrist - rwrist_last - rcrop_center2d + rcrop_center2d_last) / rhand_track_size > 1):
print ('tracking right hand lost, starting from openpose')
if openpose_rhand_valid.any():
min_coord_rhand = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord_rhand = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rcrop_center2d = 0.5 * (min_coord_rhand + max_coord_rhand)
elif rwrist_valid:
rcrop_center2d = rwrist
elif rwrist_valid_last:
rcrop_center2d = rwrist_last
else:
# If Openpose not available and no wrist is available, then do not update the cropping center
rcrop_center2d = rcrop_center2d_last
rcrop_center2d_origin[i] = rcrop_center2d
val_dict['rcrop_center2d'] = rcrop_center2d
rscale2d_origin[i] = rscale2d
val_dict['rscale2d'] = rscale2d
rH = np.array([[rscale2d, 0, s[2] / 2 - rscale2d * rcrop_center2d[0]], [0, rscale2d, s[1] / 2 - rscale2d * rcrop_center2d[1]]], dtype=np.float32)
rimage_crop_v = cv2.warpAffine(image_v, rH, (s[2], s[1]), flags=cv2.INTER_LANCZOS4)
rimage_crop_v_feed = np.expand_dims((rimage_crop_v / 255 - 0.5), axis=0)
lcrop_center2d_origin[i] = lcrop_center2d
val_dict['lcrop_center2d'] = lcrop_center2d
lscale2d_origin[i] = lscale2d
val_dict['lscale2d'] = lscale2d
lH = np.array([[lscale2d, 0, s[2] / 2 - lscale2d * lcrop_center2d[0]], [0, lscale2d, s[1] / 2 - lscale2d * lcrop_center2d[1]]], dtype=np.float32)
limage_crop_v = cv2.warpAffine(image_v, lH, (s[2], s[1]), flags=cv2.INTER_LANCZOS4)
limage_crop_v_feed = np.expand_dims((limage_crop_v / 255 - 0.5), axis=0)
lheatmap_2d, lPAF, rheatmap_2d, rPAF = \
[np.squeeze(_) for _ in
sess.run([data['lheatmap_2d'], data['lPAF'], data['rheatmap_2d'], data['rPAF']],
feed_dict={data['limage_crop']: limage_crop_v_feed, data['rimage_crop']: rimage_crop_v_feed})]
val_dict['rheatmap_2d'] = rheatmap_2d
val_dict['rPAF'] = rPAF
val_dict['lheatmap_2d'] = lheatmap_2d
val_dict['lPAF'] = lPAF
lhand2d_pred_v, lscore = utils.PAF.detect_keypoints2d_PAF(val_dict['lheatmap_2d'], val_dict['lPAF'], objtype=1)
rhand2d_pred_v, rscore = utils.PAF.detect_keypoints2d_PAF(val_dict['rheatmap_2d'], val_dict['rPAF'], objtype=1)
lhand2d_pred_v = lhand2d_pred_v[:21, :]
rhand2d_pred_v = rhand2d_pred_v[:21, :]
lhand_valid = lscore > 0.20 # false means that openpose fails to give the correct bounding box for hands
rhand_valid = rscore > 0.20
lhand2d_pred_v[np.logical_not(lhand_valid)] = 0 # must do this, otherwise PAF_to_3D error
rhand2d_pred_v[np.logical_not(rhand_valid)] = 0 # must do this, otherwise PAF_to_3D error
# check whether the keypoint is out of image
lorigin = (lhand2d_pred_v - 184) / val_dict['lscale2d'] + val_dict['lcrop_center2d']
lout = (lorigin[:, 0] < 0) + (lorigin[:, 1] < 0) + (lorigin[:, 0] >= args.width) + (lorigin[:, 1] >= args.height)
lhand2d_pred_v[lout] = 0.0
lhand_valid[lout] = False
rorigin = (rhand2d_pred_v - 184) / val_dict['rscale2d'] + val_dict['rcrop_center2d']
rout = (rorigin[:, 0] < 0) + (rorigin[:, 1] < 0) + (rorigin[:, 0] >= args.width) + (rorigin[:, 1] >= args.height)
rhand2d_pred_v[rout] = 0.0
rhand_valid[rout] = False
if args.freeze:
# freeze the torso
body2d_pred_v[8:14] = 0
body_valid[8:14] = 0
body2d_pred_v[19] = 0
body_valid[19] = 0
# rescale 2D detection back to the original image
body_2d = {'uv_local': body2d_pred_v, 'scale2d': val_dict['bscale2d'], 'crop_center2d': val_dict['bcrop_center2d'], 'valid': body_valid}
lhand_2d = {'uv_local': lhand2d_pred_v, 'scale2d': val_dict['lscale2d'], 'crop_center2d': val_dict['lcrop_center2d'], 'valid': lhand_valid}
rhand_2d = {'uv_local': rhand2d_pred_v, 'scale2d': val_dict['rscale2d'], 'crop_center2d': val_dict['rcrop_center2d'], 'valid': rhand_valid}
total_keypoints_2d = utils.keypoint_conversion.assemble_total_2d(body_2d, lhand_2d, rhand_2d) # put back to original image size, and change the keypoint order
openpose_face = val_dict['openpose_face']
openpose_face[:, 0] *= (val_dict['openpose_face_score'] > 0.5) # Face must have a high threshold in case of occlusion.
openpose_face[:, 1] *= (val_dict['openpose_face_score'] > 0.5)
openpose_foot = val_dict['openpose_foot']
openpose_foot[:, 0] *= (val_dict['openpose_foot_score'] > 0.05)
openpose_foot[:, 1] *= (val_dict['openpose_foot_score'] > 0.05)
total_keypoints_2d = np.concatenate([total_keypoints_2d, openpose_face, openpose_foot], axis=0) # has dimension 20 + 21 + 21 + 70 + 6
# extract PAF vectors from network prediction
body3d_pred_v, _ = utils.PAF.PAF_to_3D(body2d_pred_v, val_dict['bPAF'], objtype=0) # vec3ds has 18 rows, excluding shoulder to ear connection, only 14 used for fitting
vec3ds = utils.PAF.collect_PAF_vec(body2d_pred_v, val_dict['bPAF'], objtype=0) # vec3ds has 18 rows, excluding shoulder to ear connection, only 14 used for fitting
lhand3d_pred_v, _ = utils.PAF.PAF_to_3D(lhand2d_pred_v, val_dict['lPAF'], objtype=1)
lvec3ds = utils.PAF.collect_PAF_vec(lhand2d_pred_v, val_dict['lPAF'], objtype=1)
rhand3d_pred_v, _ = utils.PAF.PAF_to_3D(rhand2d_pred_v, val_dict['rPAF'], objtype=1)
rvec3ds = utils.PAF.collect_PAF_vec(rhand2d_pred_v, val_dict['rPAF'], objtype=1)
body3d_pred_v[np.logical_not(body_valid)] = 0
lhand3d_pred_v[np.logical_not(lhand_valid)] = 0
rhand3d_pred_v[np.logical_not(rhand_valid)] = 0
bPAF_valid = utils.PAF.getValidPAFNumpy(body_valid, 0) # A PAF is valid only if both end points are valid.
lPAF_valid = utils.PAF.getValidPAFNumpy(lhand_valid, 1)
rPAF_valid = utils.PAF.getValidPAFNumpy(rhand_valid, 1)
vec3ds[np.logical_not(bPAF_valid[BODY_PAF_SELECT_INDEX])] = 0
lvec3ds[np.logical_not(lPAF_valid)] = 0
rvec3ds[np.logical_not(rPAF_valid)] = 0
if args.freeze:
total_keypoints_2d[-6:] = 0
vec3ds[:6] = np.array([0., 1., 0.])
vec3ds[-3:] = 0
# all limbs plus neck -> nose, neck -> headtop, 3 connections with chest, (additional 6 connection), left hand, right hand (14 + 3 + 6 + 20 + 20)
PAF_vec = np.concatenate((vec3ds[:13, :], vec3ds[-4:, :], np.zeros([6, 3]), lvec3ds, rvec3ds), axis=0)
with open(os.path.join(args.path, 'net_output', '{:012d}.txt'.format(frame_index)), 'w') as f:
f.write('2D keypoints:\n')
for kp in total_keypoints_2d:
f.write('{} {}\n'.format(kp[0], kp[1]))
f.write('PAF:\n')
for vec in PAF_vec:
f.write('{} {} {}\n'.format(vec[0], vec[1], vec[2]))
f.write('{}\n'.format(float(np.sum(lscore) > 10)))
f.write('{}\n'.format(float(np.sum(rscore) > 10)))
if (np.sum(lscore) < 10):
print('Left hand blurry.')
if (np.sum(rscore) < 10):
print('Right hand blurry.')
if lhand_valid.any():
lcenter = 0.5 * (np.amin(lorigin[lhand_valid], axis=0) + np.amax(lorigin[lhand_valid], axis=0)).astype(np.float32) # detection center
else:
lcenter = lcrop_center2d.astype(np.float32)
if rhand_valid.any():
rcenter = 0.5 * (np.amin(rorigin[rhand_valid], axis=0) + np.amax(rorigin[rhand_valid], axis=0)).astype(np.float32) # detection center
else:
rcenter = rcrop_center2d.astype(np.float32)
lhand_track_size = fit_size_lhand * lscale2d_origin[start_i] / lscale2d
rhand_track_size = fit_size_rhand * rscale2d_origin[start_i] / rscale2d
prev_image_v = image_v
if args.visualize:
nc = 3
nr = 4
fig = plt.figure(1)
ax1 = fig.add_subplot(nc, nr, 1)
plt.imshow(bimage_crop_v)
utils.general.plot2d(ax1, body2d_pred_v, valid_idx=body_valid, type_str=utils.general.type_strs[0], color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(nc, nr, 2)
ax2.imshow(limage_crop_v)
utils.general.plot2d(ax2, lhand2d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax3 = fig.add_subplot(nc, nr, 3)
ax3.imshow(rimage_crop_v)
utils.general.plot2d(ax3, rhand2d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax4 = fig.add_subplot(nc, nr, 4)
bPAF_xy, bPAF_z = utils.PAF.plot_all_PAF(val_dict['bPAF'], 3)
ax4.imshow(bPAF_xy)
ax5 = fig.add_subplot(nc, nr, 5)
ax5.imshow(bPAF_z)
ax6 = fig.add_subplot(nc, nr, 6)
plt.imshow(image_v)
utils.general.plot2d(ax6, total_keypoints_2d, type_str='total', s=5)
ax7 = fig.add_subplot(nc, nr, 7, projection='3d')
utils.general.plot3d(ax7, body3d_pred_v, valid_idx=body_valid, type_str=utils.general.type_strs[0], color=np.array([0.0, 0.0, 1.0]))
ax7.set_xlabel('X Label')
ax7.set_ylabel('Y Label')
ax7.set_zlabel('Z Label')
plt.axis('equal')
ax8 = fig.add_subplot(nc, nr, 8, projection='3d')
utils.general.plot3d(ax8, lhand3d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax8.set_xlabel('X Label')
ax8.set_ylabel('Y Label')
ax8.set_zlabel('Z Label')
plt.axis('equal')
ax9 = fig.add_subplot(nc, nr, 9, projection='3d')
utils.general.plot3d(ax9, rhand3d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax9.set_xlabel('X Label')
ax9.set_ylabel('Y Label')
ax9.set_zlabel('Z Label')
plt.axis('equal')
plt.show()
if args.save_image:
utils.general.plot2d_cv2(bimage_crop_v, body2d_pred_v, s=5, valid_idx=body_valid, use_color=False)
assert cv2.imwrite(os.path.join(args.path, 'body_2d', '{:04d}.png'.format(i)), bimage_crop_v[:, :, ::-1])
bPAF_xy, bPAF_z = utils.PAF.plot_all_PAF(val_dict['bPAF'], 3)
k = 1. / val_dict['bscale2d']
tx, ty = (val_dict['bcrop_center2d'] - 184 * k).astype(int)
M = np.array([[k, 0., tx], [0., k, ty]], dtype=np.float32)
resized_PAF_xy = cv2.warpAffine(bPAF_xy, M, (1920, 1080))[:args.height, :args.width, :]
resized_PAF_z = cv2.warpAffine(bPAF_z, M, (1920, 1080))[:args.height, :args.width, :]
assert cv2.imwrite(os.path.join(args.path, 'paf_xy_body', '{:04d}.png'.format(frame_index)), 255 - resized_PAF_xy[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_z_body', '{:04d}.png'.format(frame_index)), 255 - resized_PAF_z[:, :, ::-1])
utils.general.plot2d_cv2(limage_crop_v, lhand2d_pred_v, type_str='hand', s=5, use_color=True)
lPAF_xy, lPAF_z = utils.PAF.plot_all_PAF(val_dict['lPAF'], 3)
assert cv2.imwrite(os.path.join(args.path, 'lhand_2d', '{:04d}.png'.format(frame_index)), limage_crop_v[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_xy_lhand', '{:04d}.png'.format(frame_index)), 255 - lPAF_xy[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_z_lhand', '{:04d}.png'.format(frame_index)), 255 - lPAF_z[:, :, ::-1])
utils.general.plot2d_cv2(rimage_crop_v, rhand2d_pred_v, type_str='hand', s=5, use_color=True)
rPAF_xy, rPAF_z = utils.PAF.plot_all_PAF(val_dict['rPAF'], 3)
assert cv2.imwrite(os.path.join(args.path, 'rhand_2d', '{:04d}.png'.format(frame_index)), rimage_crop_v[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_xy_rhand', '{:04d}.png'.format(frame_index)), 255 - rPAF_xy[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_z_rhand', '{:04d}.png'.format(frame_index)), 255 - rPAF_z[:, :, ::-1])
| body2hands-main | visualization/POF/save_total_sequence.py |
import tensorflow as tf
import pickle
import os
from utils.ops import NetworkOps as ops
class handSegNet:
def __init__(self):
pass
def init_sess(self, sess):
file_name = './weights/handsegnet-rhd.pickle'
exclude_var_list = []
assert os.path.exists(file_name), "File not found."
with open(file_name, 'rb') as fi:
weight_dict = pickle.load(fi)
weight_dict = {k: v for k, v in weight_dict.items() if not any([x in k for x in exclude_var_list])}
if len(weight_dict) > 0:
init_op, init_feed = tf.contrib.framework.assign_from_values(weight_dict)
sess.run(init_op, init_feed)
print('Loaded %d variables from %s' % (len(weight_dict), file_name))
def inference_detection(self, image, train=False):
""" HandSegNet: Detects the hand in the input image by segmenting it.
Inputs:
image: [B, H, W, 3] tf.float32 tensor, Image with mean subtracted
train: bool, True in case weights should be trainable
Outputs:
scoremap_list_large: list of [B, 256, 256, 2] tf.float32 tensor, Scores for the hand segmentation classes
"""
with tf.variable_scope('HandSegNet'):
scoremap_list = list()
layers_per_block = [2, 2, 4, 4]
out_chan_list = [64, 128, 256, 512]
pool_list = [True, True, True, False]
# learn some feature representation, that describes the image content well
x = image
for block_id, (layer_num, chan_num, pool) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1):
for layer_id in range(layer_num):
x = ops.conv_relu(x, 'conv%d_%d' % (block_id, layer_id + 1), kernel_size=3, stride=1, out_chan=chan_num, trainable=train)
if pool:
x = ops.max_pool(x, 'pool%d' % block_id)
x = ops.conv_relu(x, 'conv5_1', kernel_size=3, stride=1, out_chan=512, trainable=train)
encoding = ops.conv_relu(x, 'conv5_2', kernel_size=3, stride=1, out_chan=128, trainable=train)
# use encoding to detect initial scoremap
x = ops.conv_relu(encoding, 'conv6_1', kernel_size=1, stride=1, out_chan=512, trainable=train)
scoremap = ops.conv(x, 'conv6_2', kernel_size=1, stride=1, out_chan=2, trainable=train)
scoremap_list.append(scoremap)
# upsample to full size
s = image.get_shape().as_list()
scoremap_list_large = [tf.image.resize_images(x, (s[1], s[2])) for x in scoremap_list]
return scoremap_list_large
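# Minimal usage sketch (not part of the original repo): builds the segmentation graph
# in TF1 graph mode and runs it on a dummy image. Assumes this file is run from the
# POF directory so `utils.ops` resolves; calling net.init_sess(sess) would additionally
# require ./weights/handsegnet-rhd.pickle to exist.
if __name__ == '__main__':
    import numpy as np
    net = handSegNet()
    image = tf.placeholder(tf.float32, [1, 256, 256, 3])
    scoremaps = net.inference_detection(image)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(scoremaps[-1], {image: np.zeros([1, 256, 256, 3], np.float32)})
        print(out.shape)  # (1, 256, 256, 2): per-pixel scores for background / hand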
| body2hands-main | visualization/POF/utils/handSegNet.py |
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import utils.general
import skimage.feature
import json
import os
PAF_type = 0
allPAFConnection = [[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17], [1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [1, 19], [19, 8], [19, 11]]),
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
], # PAF type 0 (Original Openpose)
[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17],
[1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [2, 4], [5, 7], [8, 4], [11, 7], [8, 10], [11, 13]]), # augmented PAF
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
]] # PAF type 1 (My augmented PAF)
PAFConnection = allPAFConnection[PAF_type]
dist_thresh = 8
if os.path.exists('utils/default_PAF_lengths.json'):
with open('utils/default_PAF_lengths.json', 'r') as f:
default_PAF_length = json.load(f)
def getValidPAF(valid, objtype, PAFdim):
# input "valid": a tensor containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
with tf.variable_scope('getValidPAF'):
assert objtype in (0, 1)
connection = tf.constant(np.repeat(PAFConnection[objtype], PAFdim, axis=0), dtype=tf.int64)
batch_size = valid.get_shape().as_list()[0]
PAF_valid = []
for ib in range(batch_size):
b_valid = valid[ib, :]
assert len(b_valid.get_shape().as_list()) == 1
indexed_valid = tf.gather(b_valid, connection, axis=0)
PAF_valid.append(tf.logical_and(indexed_valid[:, 0], indexed_valid[:, 1]))
PAF_valid = tf.stack(PAF_valid, axis=0)
return PAF_valid
def getValidPAFNumpy(valid, objtype):
# used in testing time
# input "valid": a numpy array containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
assert objtype in (0, 1)
connection = PAFConnection[objtype]
PAF_valid = []
for conn in connection:
connection_valid = valid[conn[0]] and valid[conn[1]]
PAF_valid.append(connection_valid)
PAF_valid = np.array(PAF_valid, dtype=bool)
return PAF_valid
def createPAF(keypoint2d, keypoint3d, objtype, output_size, normalize_3d=True, valid_vec=None):
# objtype: 0: body, 1: hand
# output_size: (h, w)
# keypoint2d: (x, y)
# normalize_3d: if True: set x^2 + y^2 + z^2 = 1; else set x^2 + y^2 = 1
with tf.variable_scope('createPAF'):
assert keypoint2d.get_shape().as_list()[0] == keypoint3d.get_shape().as_list()[0]
assert keypoint2d.get_shape().as_list()[1] == 2
assert keypoint3d.get_shape().as_list()[1] == 3
if valid_vec is None:
valid_vec = tf.ones([keypoint2d.get_shape()[0]], dtype=tf.bool)
h_range = tf.expand_dims(tf.range(output_size[0]), 1)
w_range = tf.expand_dims(tf.range(output_size[1]), 0)
H = tf.cast(tf.tile(h_range, [1, output_size[1]]), tf.float32)
W = tf.cast(tf.tile(w_range, [output_size[0], 1]), tf.float32)
PAFs = []
for ic, conn in enumerate(PAFConnection[objtype]):
AB = keypoint2d[conn[1]] - keypoint2d[conn[0]] # joint 0 - > joint 1
l_AB = tf.sqrt(tf.reduce_sum(tf.square(AB)))
AB = AB / l_AB
dx = W - keypoint2d[conn[0], 0]
dy = H - keypoint2d[conn[0], 1]
dist = tf.abs(dy * AB[0] - dx * AB[1]) # cross product
Xmin = tf.minimum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) - dist_thresh
Xmax = tf.maximum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) + dist_thresh
Ymin = tf.minimum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) - dist_thresh
Ymax = tf.maximum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) + dist_thresh
within_range = tf.cast(W >= Xmin, tf.float32) * tf.cast(W <= Xmax, tf.float32) * tf.cast(H >= Ymin, tf.float32) * tf.cast(H <= Ymax, tf.float32)
within_dist = tf.cast(dist < dist_thresh, tf.float32)
mask = within_range * within_dist
AB3d = (keypoint3d[conn[1]] - keypoint3d[conn[0]])
if normalize_3d:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d)))
else:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d[:2])))
AB3d /= scale
AB3d = tf.where(tf.is_nan(AB3d), tf.zeros([3], dtype=tf.float32), AB3d)
cond_valid = tf.logical_and(valid_vec[conn[0]], valid_vec[conn[1]])
connPAF = tf.cond(cond_valid, lambda: tf.tile(tf.expand_dims(mask, 2), [1, 1, 3]) * AB3d, lambda: tf.zeros((output_size[0], output_size[1], 3), dtype=tf.float32))
# create the PAF only when both joints are valid
PAFs.append(connPAF)
concat_PAFs = tf.concat(PAFs, axis=2)
return concat_PAFs
def getColorAffinity(v):
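    # Maps a scalar v in [0, 1] onto the OpenPose affinity color wheel: the six
    # constants below are segment lengths of the piecewise-linear transitions
    # red -> yellow -> green -> cyan -> blue -> magenta -> red.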
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
summed = RY + YG + GC + CB + BM + MR
v = min(max(v, 0.0), 1.0) * summed
if v < RY:
c = (255., 255. * (v / (RY)), 0.)
elif v < RY + YG:
c = (255. * (1 - ((v - RY) / (YG))), 255., 0.)
elif v < RY + YG + GC:
c = (0. * (1 - ((v - RY) / (YG))), 255., 255. * ((v - RY - YG) / (GC)))
elif v < RY + YG + GC + CB:
c = (0., 255. * (1 - ((v - RY - YG - GC) / (CB))), 255.)
elif v < summed - MR:
c = (255. * ((v - RY - YG - GC - CB) / (BM)), 0., 255.)
elif v < summed:
c = (255., 0., 255. * (1 - ((v - RY - YG - GC - CB - BM) / (MR))))
else:
c = (255., 0., 0.)
return np.array(c)
def plot_PAF(PAF_array):
# return a 3-channel uint8 np array
assert len(PAF_array.shape) == 3
assert PAF_array.shape[2] == 2 or PAF_array.shape[2] == 3
out = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3), dtype=np.uint8)
# 2D PAF: use Openpose Visualization
x = PAF_array[:, :, 0]
y = PAF_array[:, :, 1]
rad = np.sqrt(np.square(x) + np.square(y))
rad = np.minimum(rad, 1.0)
a = np.arctan2(-y, -x) / np.pi
fk = (a + 1.) / 2.
for i in range(PAF_array.shape[0]):
for j in range(PAF_array.shape[1]):
color = getColorAffinity(fk[i, j]) * rad[i, j]
out[i, j, :] = color
if PAF_array.shape[2] == 3:
# also return the average z value (for judge pointing out / in)
# total_rad = np.sqrt(np.sum(np.square(PAF_array), axis=2))
# rz = PAF_array[:, :, 2] / total_rad
# rz[np.isnan(rz)] = 0.0
# rz[total_rad < 0.5] = 0.0
# z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
# z_map[:, :, 0] = 255 * rz * (rz > 0)
# z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
rz = PAF_array[:, :, 2]
z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
z_map[:, :, 0] = 255 * rz * (rz > 0)
z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
z_map = np.maximum(np.minimum(z_map, 255), 0)
return out, z_map.astype(np.uint8)
return out
def plot_all_PAF(PAF_array, PAFdim):
assert PAFdim in (2, 3)
if PAFdim == 2:
assert PAF_array.shape[2] % 2 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::2], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::2], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y], axis=2)
return plot_PAF(total_PAF)
else:
assert PAFdim == 3 and PAF_array.shape[2] % 3 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::3], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::3], axis=2)
total_PAF_z = np.sum(PAF_array[:, :, 2::3], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y, total_PAF_z], axis=2)
return plot_PAF(total_PAF)
def PAF_to_3D(coord2d, PAF, objtype=0):
if objtype == 0:
depth_root_idx = 1 # put neck at 0-depth
else:
assert objtype == 1
depth_root_idx = 0
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
coord3d = np.zeros((coord2d.shape[0], 3), dtype=coord2d.dtype)
coord3d[:, :2] = coord2d
coord3d[depth_root_idx, 2] = 0.0
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if ic in (9, 13):
continue
elif PAF_type == 1:
if ic in (9, 13) or ic >= 20:
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
        if (A == B).all():  # A and B coincide; fall back to the default bone length.
coord3d[conn[1], 0] = A[0]
coord3d[conn[1], 1] = A[1]
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
# find the least square solution of Ax = b
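            # Least-squares unknowns x = [z of child joint, bone length]: x[1] * vec3d should
            # best match the 2D displacement b[:2], and the third row
            # (-x[0] + x[1] * vec3d[2] = -z_parent) makes x[0] the child depth assigned below;
            # x[1] < 0 means the 2D evidence opposes the PAF direction.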
A = np.zeros([3, 2])
A[2, 0] = -1.
A[:, 1] = vec3d
b = coord3d[conn[1]] - coord3d[conn[0]] # by this time the z-value of target joint should be 0
x, _, _, _ = nl.lstsq(A, b, rcond=-1)
if x[1] < 0: # the direction is reversed
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic] # assume that this connection is vertical to the screen
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = x[0]
            if nl.norm(vec3d) < 0.1 or x[1] < 0:  # almost no response, or the direction is reversed: set it to zero so that Adam does not fit this connection.
vec3d[:] = 0
vec3d_array.append(vec3d)
return coord3d, np.array(vec3d_array)
def collect_PAF_vec(coord2d, PAF, objtype=0):
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
assert len(PAF.shape) == 3 # H, W, C
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and ic in (9, 13):
continue
elif PAF_type == 1 and ic in (9, 13): # need the extra PAFs here
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
if 3 * ic < PAF.shape[2]: # to be compatible with old network with only 20 PAFs instead of 23
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
else:
vec3d = np.zeros((3,))
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
vec3d_array.append(vec3d)
return np.array(vec3d_array)
def recon_skeleton_PAF(vec3ds, objtype=0):
# reconstruct a skeleton with standard bone length from PAF only
selected_PAF_array = []
if objtype == 0:
coord3d_pred_v = np.zeros([19, 3], dtype=vec3ds.dtype)
root_idx = 1
else:
assert objtype == 1
coord3d_pred_v = np.zeros([21, 3], dtype=vec3ds.dtype)
root_idx = 0
coord3d_pred_v[root_idx] = 0.0
count_vec = 0
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and (ic in (9, 13) or ic >= 21):
continue
elif PAF_type == 1 and ic in (9, 13):
continue
vec = vec3ds[count_vec]
vlength = nl.norm(vec)
assert vlength > 0
if vlength < 0.1: # almost no response, set to 0
vec = np.zeros(3, dtype=vec3ds.dtype)
else:
vec = vec / vlength # unit vector
selected_PAF_array.append(vec)
count_vec += 1
if objtype == 0 and PAF_type == 1 and ic >= 20:
continue
coord3d_pred_v[conn[1]] = coord3d_pred_v[conn[0]] + default_PAF_length[objtype][ic] * vec
return coord3d_pred_v, np.array(selected_PAF_array)
def connection_score_2d(A, B, PAF):
AB = (B - A).astype(np.float32)
if not AB.any():
# A B coincides
return 0.1
AB /= nl.norm(AB.astype(np.float32))
s = PAF.shape
assert len(s) == 3
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int)
vec2ds = PAF[points[1], points[0], :2]
inner_product = np.dot(vec2ds, AB)
return np.mean(inner_product)
def detect_keypoints2d_PAF(scoremaps, PAF, objtype=0, weight_conn=1.0, mean_shift=False, prev_frame=None):
print('PAF_type {}'.format(PAF_type))
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
num_candidate = 5
local_maxs = []
for i in range(s[2]):
candidates = skimage.feature.peak_local_max(scoremaps[:, :, i], num_peaks=num_candidate)
if candidates.shape[0] < num_candidate:
# if less than that, replicate the first element
if candidates.shape[0] > 0:
candidates = np.concatenate([candidates[0][np.newaxis, :]] * (num_candidate - candidates.shape[0]) + [candidates], axis=0)
else:
candidates = np.zeros((5, 2), dtype=int)
local_maxs.append(candidates)
if objtype == 0:
root_idx = 1 # starting constructing the tree from root_idx
else:
assert objtype == 1
root_idx = 0
joint_idx_list = [root_idx]
candidate_idx_list = [[c] for c in range(num_candidate)]
sum_score_list = [scoremaps[local_maxs[root_idx][c, 0], local_maxs[root_idx][c, 1], root_idx] for c in range(num_candidate)]
if prev_frame is not None:
for c in range(num_candidate):
sum_score_list[c] -= 20 * nl.norm(local_maxs[root_idx][candidate_idx_list[c][0]][::-1] - prev_frame[c]) / (s[0] + s[1])
# dynamic programming
for iconn, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if iconn in (9, 13) or iconn >= 21: # unused PAF connection
continue
elif PAF_type == 1:
if iconn in (9, 13) or iconn >= 20:
continue
joint_idx_list.append(conn[1])
candidates = local_maxs[conn[1]]
new_candidate_idx_list = []
new_sum_score_list = []
for ican, candidate in enumerate(candidates):
best_sum_score = -np.inf
best_candidate_idx = None
B = candidate[::-1]
for candidate_idx, sum_score in zip(candidate_idx_list, sum_score_list):
parent_idx = conn[0]
parent_candidate_idx = candidate_idx[joint_idx_list.index(parent_idx)]
A = local_maxs[parent_idx][parent_candidate_idx][::-1]
connection_score = connection_score_2d(A, B, PAF[:, :, 3 * iconn:3 * iconn + 3])
new_sum_score = sum_score + scoremaps[candidate[0], candidate[1], conn[1]] + weight_conn * connection_score # TODO
if prev_frame is not None:
new_sum_score -= 20 * nl.norm(prev_frame[conn[1]] - B) / (s[0] + s[1])
if new_sum_score > best_sum_score:
best_sum_score = new_sum_score
best_candidate_idx = candidate_idx
assert best_candidate_idx is not None
new_sum_score_list.append(best_sum_score)
new_candidate_idx_list.append(best_candidate_idx + [ican])
sum_score_list = new_sum_score_list
candidate_idx_list = new_candidate_idx_list
best_candidate_idx = candidate_idx_list[np.argmax(sum_score_list)]
best_candidate_idx_joint_order = np.zeros_like(best_candidate_idx)
best_candidate_idx_joint_order[np.array(joint_idx_list, dtype=int)] = best_candidate_idx
best_candidate = np.array([local_maxs[i][j] for i, j in enumerate(best_candidate_idx_joint_order)])
coord2d = best_candidate[:, ::-1]
if objtype == 0:
assert coord2d.shape[0] == 19 or coord2d.shape[0] == 20
if objtype == 1:
assert coord2d.shape[0] == 21
scores = []
for i in range(coord2d.shape[0]):
scores.append(scoremaps[coord2d[i, 1], coord2d[i, 0], i])
if mean_shift:
dWidth = 3
dHeight = 3
new_coord2d = []
for i in range(coord2d.shape[0]):
x1 = max(coord2d[i, 0] - dWidth, 0)
x2 = min(coord2d[i, 0] + dWidth + 1, s[1])
y1 = max(coord2d[i, 1] - dHeight, 0)
y2 = min(coord2d[i, 1] + dHeight + 1, s[0])
Xmap = np.arange(x1, x2)
Ymap = np.arange(y1, y2)
local_scoremap = scoremaps[y1:y2, x1:x2, i]
gt0 = (local_scoremap > 0)
if gt0.any():
pos_scoremap = gt0 * local_scoremap
xAcc = np.sum(pos_scoremap * Xmap)
yAcc = np.sum(np.transpose(pos_scoremap) * Ymap)
scoreAcc = np.sum(pos_scoremap)
new_coord2d.append([xAcc / scoreAcc, yAcc / scoreAcc])
else:
new_coord2d.append([coord2d[i, 0], coord2d[i, 1]])
coord2d = np.array(new_coord2d, dtype=np.float32)
return coord2d.astype(np.float32), np.array(scores, dtype=np.float32)
"""
Tensorized get_color_affinity()
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
summed = RY + YG + GC + CB + BM + MR
v = torch.clamp(v, min=0., max=1.) * summed
# v = min(max(v, 0.0), 1.0) * summed
value = v.cpu().detach().numpy() # [O, H, W]
O, H, W = value.shape
record = np.zeros([O, H, W])
out = np.zeros([O, H, W, 3], dtype=value.dtype)
out[:, :, :, 0] = 255.
print(out.shape)
# if v < RY:
# c = (255., 255. * (v / (RY)), 0.)
idx = np.where(np.logical_and(value < RY, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255. * value[idx] / RY
# elif v < RY + YG:
# c = (255. * (1 - ((v - RY) / (YG))), 255., 0.)
idx = np.where(np.logical_and(value < RY + YG, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 255. * (1 - ((value[idx] - RY) / (YG)))
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255.
# elif v < RY + YG + GC:
# c = (0. * (1 - ((v - RY) / (YG))), 255., 255. * ((v - RY - YG) / (GC)))
idx = np.where(np.logical_and(value < RY + YG + GC, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 0.
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255
idx_ext = idx + (np.array([2] * len(idx[0])),)
out[idx_ext] = 255. * ((value[idx] - RY - YG) / (GC))
# elif v < RY + YG + GC + CB:
# c = (0., 255. * (1 - ((v - RY - YG - GC) / (CB))), 255.)
idx = np.where(np.logical_and(value < RY + YG + GC + CB, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 0.
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255. * (1 - ((value[idx] - RY - YG - GC) / (CB)))
idx_ext = idx + (np.array([2] * len(idx[0])),)
out[idx_ext] = 255.
# elif v < summed - MR:
# c = (255. * ((v - RY - YG - GC - CB) / (BM)), 0., 255.)
idx = np.where(np.logical_and(value < summed - MR, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 255.
"""
| body2hands-main | visualization/POF/utils/PAF.py |
import numpy as np
def transReProjectionLoss(t, X0, K, uv):
assert t.shape == (3,)
assert len(X0.shape) == 2 and X0.shape[1] == 3
assert K.shape == (3, 3)
assert len(uv.shape) == 2 and uv.shape[1] == 2
X = X0 + t[np.newaxis, :]
x = X.dot(K.T)
x /= x[:, 2][:, np.newaxis]
return np.sum(np.square(x[:, :2] - uv))
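# Minimal usage sketch (illustrative values, not from the original repo): with a
# perfect translation the reprojection loss is 0.
if __name__ == '__main__':
    X0 = np.array([[0., 0., 2.], [0.1, 0., 2.]])  # 3D points
    K = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])  # intrinsics
    uv = np.array([[960., 540.], [1010., 540.]])  # matching 2D detections
    print(transReProjectionLoss(np.zeros(3), X0, K, uv))  # prints 0.0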
| body2hands-main | visualization/POF/utils/optimization.py |
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
""" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
# Remove everything from the discard list
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
# rename everything according to rename_dict
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str], 1) # my modification: replace no more than once
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def load_weights_to_dict(checkpoint_path, discard_list=None, rename_dict=None):
""" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
# Remove everything from the discard list
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
# rename everything according to rename_dict
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
return var_to_shape_map
| body2hands-main | visualization/POF/utils/load_ckpt.py |
import tensorflow as tf
import json
import numpy as np
class AdamModel(object):
num_shape_coeff = 30
num_vertices = 18540
num_joints = 62
def __init__(self):
# read in model file
model_file = 'utils/adam_v1_plus2.json'
with open(model_file) as f:
model_data = json.load(f)
pca_file = 'utils/adam_blendshapes_348_delta_norm.json'
with open(pca_file) as f:
pca_data = json.load(f)
with tf.variable_scope("AdamModel"):
self.mean_shape = tf.constant(np.array(pca_data['mu']), shape=(self.num_vertices * 3,), name='mean_shape', dtype=tf.float32)
self.shape_basis = tf.constant(np.array(pca_data['Uw1']), name='shape_basis', dtype=tf.float32)
J_reg_sparse = model_data['adam_J_regressor_big']
J_reg_size = np.array(J_reg_sparse[0], dtype=np.int32)[:2]
J_reg = np.array(J_reg_sparse[1:], dtype=np.float32)
J_reg_indices = J_reg[:, :2].astype(np.int32)
J_reg_vals = J_reg[:, 2]
self.J_reg = tf.sparse_reorder(tf.SparseTensor(J_reg_indices, J_reg_vals, J_reg_size))
self.J_reg_dense = tf.sparse_tensor_to_dense(self.J_reg)
# parental relationship (for forward_kinametics)
kintree_table = np.array(model_data['kintree_table'], dtype=np.int32)
id_to_col = np.zeros((self.num_joints), dtype=np.int32)
self.m_parent = np.zeros((self.num_joints), dtype=np.int32) # !: This is numpy array.
for i in range(kintree_table.shape[1]):
id_to_col[kintree_table[1, i]] = i
for i in range(1, kintree_table.shape[1]):
self.m_parent[i] = id_to_col[kintree_table[0, i]]
def reconstruct(self, pose=None, coeff=None, trans=None):
with tf.variable_scope("AdamModel"):
if pose is None and coeff is None:
batch_size = 1
else:
if pose is not None:
batch_size = pose.get_shape().as_list()[0]
else:
batch_size = coeff.get_shape().as_list()[0]
if coeff is None:
coeff = tf.zeros((batch_size, self.num_shape_coeff), dtype=tf.float32)
assert len(coeff.get_shape().as_list()) == 2 # [batch_size, shape_coeff]
batch_size = coeff.get_shape().as_list()[0]
V = self.mean_shape + tf.matmul(coeff, self.shape_basis, transpose_b=True) # mean + shape_basis * shape_coeff
# mat_V = tf.reshape(V, [self.num_vertices, 3])
# J0 = tf.transpose(tf.sparse_tensor_dense_matmul(self.J_reg, V))
J0 = tf.matmul(V, self.J_reg_dense, transpose_b=True)
mat_J0 = tf.reshape(J0, [batch_size, -1, 3])
if pose is None:
pose = tf.zeros((batch_size, 3 * self.num_joints), dtype=tf.float32) # note different size with coeff
assert len(pose.get_shape().as_list()) == 2 # [batch_size, 3 * num_joints]
Js = []
for i in range(batch_size):
mat_J = self.forward_kinametics(mat_J0[i, :, :], pose[i, :])
if trans is not None: # [batch_size, 3]
assert len(trans.get_shape().as_list()) == 2
mat_J = mat_J + trans[i, :]
J = tf.reshape(mat_J, [-1])
Js.append(J)
Js = tf.stack(Js, axis=0)
return Js
def forward_kinametics(self, J0, pose):
with tf.variable_scope("forward_kinametics"):
Rs = [] # transformation matrix
ts = []
R0 = self.AngleAxisToRotationMatrix(pose[:3])
t0 = tf.transpose(J0[0:1, :])
Rs.append(R0)
ts.append(t0)
for idj in range(1, self.num_joints):
ipar = self.m_parent[idj]
if idj in (10, 11): # foot ends
angles = tf.zeros((3,), dtype=pose.dtype)
elif idj in (7, 8): # foot ankle
angles = tf.concat([pose[idj * 3:(idj + 1) * 3 - 1], tf.zeros([1, ], dtype=pose.dtype)], axis=0)
elif idj in (24, 26, 27, 28, 31, 32, 35, 39, 40, 44, 47, 48, 51, 52, 55, 56, 59, 60):
angles = tf.concat([tf.zeros([2, ], dtype=pose.dtype), pose[idj * 3 + 2:(idj + 1) * 3]], axis=0)
else:
angles = pose[idj * 3:(idj + 1) * 3]
R = self.EulerAngleToRotationMatrix(angles) # in ceres function, R is assumed to be row major, but in adam_reconstruct_euler, R is column major.
R = tf.matmul(Rs[ipar], R)
t = ts[ipar] + tf.matmul(Rs[ipar], tf.transpose(J0[idj:(idj + 1), :] - J0[ipar:(ipar + 1), :]))
Rs.append(R)
ts.append(t)
for idj in range(self.num_joints):
ts[idj] = ts[idj] - tf.matmul(Rs[idj], tf.transpose(J0[idj:(idj + 1), :]))
J_out = []
for idj in range(self.num_joints):
J_out.append(tf.matmul(Rs[idj], tf.transpose(J0[idj:(idj + 1), :])) + ts[idj]) # original pose -> transformed pose (world coordinate)
J_out = tf.transpose(tf.concat(J_out, axis=1))
return J_out
@staticmethod
def AngleAxisToRotationMatrix(angle_axis):
""" angle_axis is a 3d vector whose direction points to the rotation axis and whose norm is the angle (in radians) """
with tf.variable_scope("AngleAxisToRotationMatrix"):
theta = tf.norm(angle_axis)
cos = tf.cos(theta)
sin = tf.sin(theta)
xyz = tf.divide(angle_axis, theta)
x = xyz[0]
y = xyz[1]
z = xyz[2]
# when theta > 0
R00 = cos + x * x * (1. - cos)
R10 = sin * z + x * y * (1. - cos)
R20 = -sin * y + x * z * (1. - cos)
Rcol0 = tf.stack([R00, R10, R20], axis=0)
R01 = x * y * (1. - cos) - z * sin
R11 = cos + y * y * (1. - cos)
R21 = x * sin + y * z * (1. - cos)
Rcol1 = tf.stack([R01, R11, R21], axis=0)
R02 = y * sin + x * z * (1. - cos)
R12 = -x * sin + y * z * (1. - cos)
R22 = cos + z * z * (1. - cos)
Rcol2 = tf.stack([R02, R12, R22], axis=0)
R = tf.stack([Rcol0, Rcol1, Rcol2], axis=1)
# when theta == 0
R_00 = tf.ones([], dtype=angle_axis.dtype)
R_10 = angle_axis[2]
R_20 = -angle_axis[1]
R_col0 = tf.stack([R_00, R_10, R_20], axis=0)
R_01 = -angle_axis[2]
R_11 = tf.ones([], dtype=angle_axis.dtype)
R_21 = angle_axis[0]
R_col1 = tf.stack([R_01, R_11, R_21], axis=0)
R_02 = angle_axis[1]
R_12 = -angle_axis[0]
R_22 = tf.ones([], dtype=angle_axis.dtype)
R_col2 = tf.stack([R_02, R_12, R_22], axis=0)
R_ = tf.stack([R_col0, R_col1, R_col2], axis=1)
return tf.cond(tf.greater(theta, 0), lambda: R, lambda: R_)
@staticmethod
def EulerAngleToRotationMatrix(euler_angle):
""" This function computes the rotation matrix corresponding to Euler Angle (x, y, z) R_z * R_y * R_x (consistent with Ceres). (x, y, z) in degrees."""
with tf.variable_scope("EulerAngleToRotationMatrix"):
deg = euler_angle * np.pi / 180
cos = tf.cos(deg)
sin = tf.sin(deg)
c3 = cos[0]
c2 = cos[1]
c1 = cos[2]
s3 = sin[0]
s2 = sin[1]
s1 = sin[2]
R00 = c1 * c2
R10 = s1 * c2
R20 = -s2
Rcol0 = tf.stack([R00, R10, R20], axis=0)
R01 = -s1 * c3 + c1 * s2 * s3
R11 = c1 * c3 + s1 * s2 * s3
R21 = c2 * s3
Rcol1 = tf.stack([R01, R11, R21], axis=0)
R02 = s1 * s3 + c1 * s2 * c3
R12 = -c1 * s3 + s1 * s2 * c3
R22 = c2 * c3
Rcol2 = tf.stack([R02, R12, R22], axis=0)
R = tf.stack([Rcol0, Rcol1, Rcol2], axis=1)
return R
if __name__ == '__main__':
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
a = AdamModel()
pose_np = np.zeros((2, 3 * 62,), dtype=np.float32)
pose_np[0, 3 * 16 + 1] = -90.
pose = tf.Variable(pose_np)
J = a.reconstruct(pose=pose)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
JJ = sess.run(J)
JJ = JJ.reshape(2, -1, 3)
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.scatter(JJ[0, :, 0], JJ[0, :, 1], JJ[0, :, 2], color='red')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax = fig.add_subplot(122, projection='3d')
ax.scatter(JJ[1, :, 0], JJ[1, :, 1], JJ[1, :, 2], color='red')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.axis('equal')
# from meshWrapper import meshWrapper
# meshlib = meshWrapper("/home/donglaix/Documents/Experiments/hand_model/build/libPythonWrapper.so")
# meshlib.load_totalmodel()
# meshlib.reset_value()
# meshlib.cpose[:] = pose_np.tolist()
# ax = fig.add_subplot(222)
# img1 = meshlib.total_visualize(cameraMode=False, target=False, first_render=False, position=0)
# ax.imshow(img1)
# ax = fig.add_subplot(223)
# img2 = meshlib.total_visualize(cameraMode=False, target=False, first_render=False, position=1)
# ax.imshow(img2)
# ax = fig.add_subplot(224)
# img3 = meshlib.total_visualize(cameraMode=False, target=False, first_render=False, position=2)
# ax.imshow(img3)
plt.show()
| body2hands-main | visualization/POF/utils/AdamModel.py |
import tensorflow as tf
import math
import numpy as np
class NetworkOps(object):
""" Operations that are frequently used within networks. """
neg_slope_of_relu = 0.01
@classmethod
def leaky_relu(cls, tensor, name='relu'):
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu * tensor, name=name)
return out_tensor
@classmethod
def relu(cls, tensor, name='relu_pure'):
out_tensor = tf.maximum(tensor, tf.constant(0.0, dtype=tf.float32), name=name)
return out_tensor
@classmethod
def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]
# conv
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, leaky=True, trainable=True):
tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
if leaky:
out_tensor = cls.leaky_relu(tensor, name='out')
else:
out_tensor = cls.relu(tensor, name='out')
return out_tensor
@classmethod
def conv3d(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, kernel_size, in_size[4], out_chan]
# conv
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv3d(in_tensor, kernel, strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[4]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv3d_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, leaky=True, trainable=True):
tensor = cls.conv3d(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
if leaky:
out_tensor = cls.leaky_relu(tensor, name='out')
else:
            out_tensor = cls.relu(tensor, name='out')
return out_tensor
@classmethod
def max_pool(cls, bottom, name='pool'):
pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID', name=name)
return pooled
@classmethod
def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
strides = [1, stride, stride, 1]
# conv
kernel = cls.get_deconv_filter(kernel_shape, trainable)
tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
strides=strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases)
return out_tensor
@classmethod
def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@staticmethod
def get_deconv_filter(f_shape, trainable):
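        # Build a bilinear-interpolation kernel (one independent channel per feature
        # map) so the transposed convolution starts out as plain bilinear upsampling.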
width = f_shape[0]
height = f_shape[1]
f = math.ceil(width / 2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="weights", initializer=init,
shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])
@staticmethod
def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'
weights_shape = [in_size[1], out_chan]
# weight matrix
weights = tf.get_variable('weights', weights_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable)
weights = tf.check_numerics(weights, 'weights: %s' % layer_name)
# bias
biases = tf.get_variable('biases', [out_chan], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable)
biases = tf.check_numerics(biases, 'biases: %s' % layer_name)
out_tensor = tf.matmul(in_tensor, weights) + biases
return out_tensor
@classmethod
def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu * tensor, name='out')
return out_tensor
@staticmethod
def dropout(in_tensor, keep_prob, evaluation):
""" Dropout: Each neuron is dropped independently. """
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=tensor_shape))
return out_tensor
@staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
""" Spatial dropout: Not each neuron is dropped independently, but feature map wise. """
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
return out_tensor
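# Minimal usage sketch (not part of the original repo, TF1 graph mode): a 3x3
# convolution with leaky ReLU keeps the spatial size and changes the channel count.
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, [1, 32, 32, 3])
    y = NetworkOps.conv_relu(x, 'conv_demo', kernel_size=3, stride=1, out_chan=8)
    print(y.get_shape().as_list())  # [1, 32, 32, 8]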
| body2hands-main | visualization/POF/utils/ops.py |
from utils.AdamModel import AdamModel
from utils.PAF import PAFConnection
import tensorflow as tf
import numpy as np
import json
if __name__ == '__main__':
adam = AdamModel()
adam_joints = adam.reconstruct()
sess = tf.Session()
V_vec, joints_v = sess.run([adam.mean_shape, adam_joints])
sess.close()
joints_v = joints_v.reshape(adam.num_joints, 3)
V = V_vec.reshape(adam.num_vertices, 3)
coords3d = np.zeros([19, 3], dtype=np.float64)
coords3d[1] = joints_v[12]
coords3d[2] = joints_v[17]
coords3d[3] = joints_v[19]
coords3d[4] = joints_v[21]
coords3d[5] = joints_v[16]
coords3d[6] = joints_v[18]
coords3d[7] = joints_v[20]
coords3d[8] = joints_v[2]
coords3d[9] = joints_v[5]
coords3d[10] = joints_v[8]
coords3d[11] = joints_v[1]
coords3d[12] = joints_v[4]
coords3d[13] = joints_v[7]
coords3d[0] = V[8130]
coords3d[16] = V[10088]
coords3d[17] = V[6970]
coords3d[18] = V[1372]
coords3d[14] = V[9707]
coords3d[15] = V[2058]
PAF_lengths = [[], []]
for conn in PAFConnection[0]:
vector = coords3d[conn[1]] - coords3d[conn[0]]
length = np.sqrt(vector.dot(vector))
PAF_lengths[0].append(length)
coords3d_hand = np.zeros([21, 3], dtype=np.float64)
coords3d_hand[0] = joints_v[20]
coords3d_hand[1] = joints_v[25]
coords3d_hand[2] = joints_v[24]
coords3d_hand[3] = joints_v[23]
coords3d_hand[4] = joints_v[22]
coords3d_hand[5] = joints_v[29]
coords3d_hand[6] = joints_v[28]
coords3d_hand[7] = joints_v[27]
coords3d_hand[8] = joints_v[26]
coords3d_hand[9] = joints_v[33]
coords3d_hand[10] = joints_v[32]
coords3d_hand[11] = joints_v[31]
coords3d_hand[12] = joints_v[30]
coords3d_hand[13] = joints_v[37]
coords3d_hand[14] = joints_v[36]
coords3d_hand[15] = joints_v[35]
coords3d_hand[16] = joints_v[34]
coords3d_hand[17] = joints_v[41]
coords3d_hand[18] = joints_v[40]
coords3d_hand[19] = joints_v[39]
coords3d_hand[20] = joints_v[38]
for conn in PAFConnection[1]:
vector = coords3d_hand[conn[1]] - coords3d_hand[conn[0]]
length = np.sqrt(vector.dot(vector))
PAF_lengths[1].append(length)
with open('utils/default_PAF_lengths.json', 'w') as f:
json.dump(PAF_lengths, f)
| body2hands-main | visualization/POF/utils/default_PAF_length.py |
import numpy as np
def calc_auc(x, y):
""" Given x and y values it calculates the approx. integral and normalizes it: area under curve"""
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return integral / norm
class EvalUtil:
""" Util class for evaluation networks.
"""
def __init__(self, num_kp=21):
# init empty data storage
self.data = list()
self.num_kp = num_kp
for _ in range(num_kp):
self.data.append(list())
def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
""" Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. """
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert len(keypoint_gt.shape) == 2
assert len(keypoint_pred.shape) == 2
assert len(keypoint_vis.shape) == 1
# calc euclidean distance
diff = keypoint_gt - keypoint_pred
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i])
def _get_pck(self, kp_id, threshold):
""" Returns pck for one keypoint for the given threshold. """
if len(self.data[kp_id]) == 0:
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck
def _get_epe(self, kp_id):
""" Returns end point error for one keypoint. """
if len(self.data[kp_id]) == 0:
return None, None
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return epe_mean, epe_median
def get_measures(self, val_min, val_max, steps):
""" Outputs the average mean and median error as well as the pck score. """
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
# init mean measures
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
# Create one plot for each part
for part_id in range(self.num_kp):
# mean/median error
mean, median = self._get_epe(part_id)
if mean is None:
# there was no valid measurement for this keypoint
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
# pck/auc
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0) # mean only over keypoints
return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds
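# Minimal usage sketch (synthetic keypoints, not part of the original evaluation):
# every prediction is off by sqrt(0.5) ~= 0.707, so the PCK curve jumps from 0 to 1
# around that threshold.
if __name__ == '__main__':
    util = EvalUtil(num_kp=3)
    gt = np.array([[0., 0.], [1., 1.], [2., 2.]])
    pred = gt + 0.5
    util.feed(gt, np.ones(3), pred)
    epe_mean, epe_median, auc, pck_curve, thresholds = util.get_measures(0.0, 1.0, 11)
    print(epe_mean, auc)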
| body2hands-main | visualization/POF/utils/EvalUtil.py |
# Don't use anaconda for this
import ctypes
import os
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np
class wrapper_hand_model(object):
def __init__(self, lib_file='./utils/libPythonWrapper.so', model_file='./utils/hand2_l_all_uv.json'):
self.lib = ctypes.cdll.LoadLibrary(lib_file)
self.fit_hand3d = self.lib.fit_hand3d
self.fit_hand3d.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.c_char_p, ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_bool]
self.fit_hand3d.restype = None
self.Opengl_visualize = self.lib.Opengl_visualize
self.Opengl_visualize.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_bool, ctypes.c_uint, ctypes.c_int, ctypes.c_bool, ctypes.c_bool]
self.Opengl_visualize.restype = None
self.fit_hand2d = self.lib.fit_hand2d
self.fit_hand2d.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_char_p,
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_bool,
ctypes.c_double, ctypes.c_int]
self.fit_hand2d.restype = None
self.extract_fit_result = self.lib.extract_fit_result
self.extract_fit_result.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_bool]
self.extract_fit_result.restype = None
self.lib.set_calibK.argtypes = [ctypes.POINTER(ctypes.c_double)]
self.lib.set_calibK.restype = None
self.cmodel_file = ctypes.create_string_buffer(model_file.encode('ascii'))
self.ctarget_array = (ctypes.c_double * 63)()
self.ctrans = (ctypes.c_double * 3)()
self.ccoeff = (ctypes.c_double * 63)()
self.cpose = (ctypes.c_double * 63)()
self.cret_bytes = (ctypes.c_ubyte * (600 * 600 * 3))()
self.ctarget2d_array = (ctypes.c_double * 42)()
self.calibK = (ctypes.c_double * 9)()
self.cret_bytes_cam = (ctypes.c_ubyte * (1080 * 1920 * 4))()
self.PAF_array = (ctypes.c_double * (20 * 3))()
def reset_value(self):
self.ctrans[:] = [0.0, 0.0, 2.0]
self.ccoeff[:] = [1.0 for _ in range(63)]
self.cpose[:] = [0.0 for _ in range(63)]
def fit3d(self, joint3d, regressor_type=0, euler=True):
assert joint3d.shape == (21, 3)
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.fit_hand3d(self.ctarget_array, self.cmodel_file, self.cpose, self.ccoeff, self.ctrans, regressor_type, euler)
trans = np.array(self.ctrans[:])
pose = np.array(self.cpose[:]).reshape(-1, 3)
coeff = np.array(self.ccoeff[:]).reshape(-1, 3)
return trans, pose, coeff
def fit2d(self, joint2d, calibK, PAF, regressor_type=0, euler=True, prior_weight=100.0, mode=0):
assert joint2d.shape == (21, 2) and calibK.shape == (3, 3)
self.ctarget2d_array[:] = joint2d.reshape(-1).tolist()
self.calibK[:] = calibK.reshape(-1).tolist()
assert PAF.size == len(self.PAF_array[:])
self.PAF_array[:] = PAF.reshape(-1).tolist()
self.fit_hand2d(self.ctarget2d_array, self.calibK, self.PAF_array, self.cmodel_file, self.cpose, self.ccoeff, self.ctrans,
regressor_type, euler, prior_weight, mode)
trans = np.array(self.ctrans[:])
pose = np.array(self.cpose[:]).reshape(-1, 3)
coeff = np.array(self.ccoeff[:]).reshape(-1, 3)
return trans, pose, coeff
def render(self, cameraMode=False, target=True, first_render=False, position=0, regressor_type=0, stay=False, euler=True):
if cameraMode:
read_buffer = self.cret_bytes_cam
else:
read_buffer = self.cret_bytes
if target:
if first_render:
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, self.ctarget_array, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, self.ctarget_array, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
else:
if first_render:
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, None, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, None, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
img = bytes(read_buffer)
if not cameraMode:
img = Image.frombytes("RGBA", (600, 600), img)
else:
img = Image.frombytes("RGBA", (1920, 1080), img)
img = ImageOps.flip(img)
return img
def set_calibK(self, K):
self.calibK[:] = K.reshape(-1).tolist()
self.lib.set_calibK(self.calibK)
if __name__ == '__main__':
import numpy as np
wrapper = wrapper_hand_model("/home/donglaix/Documents/Experiments/hand_model/build/libPythonWrapper.so")
joint3d = np.array([-33.3889, -173.355, -36.0744, -35.0518, -173.959, -37.7108, -36.5972, -176.126, -40.7544, -37.4367, -178.032, -43.6272, -38.7743, -178.843, -45.5877, -36.4731, -180.718, -38.2183, -37.0009, -181.596, -42.4443, -37.4651, -181.437, -45.0006, -37.7732, -181.458, -47.0573, -34.2598, -180.606, -38.3926, -35.2143, -
180.671, -43.2699, -36.3031, -179.876, -45.6931, -37.1902, -179.438, -47.745, -32.0926, -179.69, -38.4972, -33.7518, -179.847, -42.8798, -34.9357, -179.212, -45.3947, -35.7699, -178.853, -47.3468, -30.3247, -178.334, -39.2571, -31.8778, -178.837, -42.4667, -33.003, -178.501, -44.2697, -33.8762, -178.325, -45.8248]).reshape(-1, 3)
joint2d = np.array([1284.646, 254.091, 1296.991, 248.479, 1319.012, 231.635, 1339.5621, 217.027, 1354.4766, 209.81, 1300.0491, 200.093, 1330.055, 192.596, 1348.5556, 192.777, 1363.3952, 191.943, 1299.7998, 202.764, 1334.6115,
200.494, 1352.7438, 204.628, 1368.139, 206.547, 1299.2785, 210.884, 1330.8779, 207.547, 1349.5700, 210.478, 1364.09, 211.918, 1303.6187, 221.421, 1326.7478, 216.127, 1340.2151, 217.196, 1351.8205, 217.42]).reshape(-1, 2)
K = np.array([[1633.34, 0, 942.256], [0, 1628.84, 557.344], [0, 0, 1]])
wrapper.fit3d(joint3d)
wrapper.fit2d(joint2d, K)
img = wrapper.render(cameraMode=False, first_render=True)
# print(trans)
# print(pose)
# print(coeff)
plt.imshow(img)
plt.show()
| body2hands-main | visualization/POF/utils/wrapper_hand_model.py |
import numpy as np
from math import factorial
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
    approaches, such as moving average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
    The Savitzky-Golay filter is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
    the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
| body2hands-main | visualization/POF/utils/smoothing.py |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider
import utils.general
class vis_heatmap3d(object):
def __init__(self, fig, ax, heatmap, keypoints=None, type_str=None):
assert len(heatmap.shape) == 4
self.fig = fig
self.idx = 0
self.threshold = 0.5
self.heatmap = heatmap
self.ax = ax
self.keypoints = keypoints
self.type_str = type_str
axcolor = 'lightgoldenrodyellow'
axx = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
self.slider_threshold = Slider(axx, 'threshold', 0.0, 1.0, valinit=0.5)
self.slider_threshold.on_changed(self.update)
def draw(self):
self.ax.clear()
if self.keypoints is not None:
utils.general.plot3d(self.ax, self.keypoints, self.type_str)
active_map = self.heatmap[:, :, :, self.idx]
Z, Y, X = np.where(active_map >= self.threshold)
colors = [(1 - s) * np.array([0., 0., 1.], dtype=float) for s in active_map[Z, Y, X]]
self.ax.scatter(X, Y, Z, color=colors)
def update(self, val):
self.threshold = self.slider_threshold.val
self.draw()
self.fig.canvas.draw_idle()
| body2hands-main | visualization/POF/utils/vis_heatmap3d.py |
import tensorflow as tf
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
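# Small self-contained sketch (not from the original file, assumes TF1's graph/session API):
# two "towers" that share one variable, each contributing its own (grad, var) list. On an
# actual multi-GPU setup each tower's loss would be built under its own tf.device('/gpu:i').
if __name__ == '__main__':
    w = tf.Variable(1.0)
    opt = tf.train.GradientDescentOptimizer(0.1)
    tower_grads = []
    for _ in range(2):
        loss = tf.square(w - 3.0)
        tower_grads.append(opt.compute_gradients(loss, var_list=[w]))
    train_op = opt.apply_gradients(average_gradients(tower_grads))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)
        print('w after one averaged step:', sess.run(w))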
| body2hands-main | visualization/POF/utils/multigpu.py |
import ctypes
from PIL import Image, ImageOps
import numpy as np
class meshWrapper(object):
def __init__(self, lib_file='./utils/libPythonWrapper.so'):
self.lib = ctypes.cdll.LoadLibrary(lib_file)
# extern "C" void load_totalmodel(char* obj_file, char* model_file, char* pca_file);
self.lib.load_totalmodel.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
self.lib.load_totalmodel.restype = None
self.obj_file = ctypes.create_string_buffer('./utils/mesh_nofeet.obj'.encode('ascii'))
self.model_file = ctypes.create_string_buffer('./utils/adam_v1_plus2.json'.encode('ascii'))
self.pca_file = ctypes.create_string_buffer('./utils/adam_blendshapes_348_delta_norm.json'.encode('ascii'))
self.correspondence_file = ctypes.create_string_buffer('./utils/correspondences_nofeet.txt'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/adam_cocoplus_regressor.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_ls.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_nonneg.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_combined_angjoo1.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_regressor2_nonneg.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_regressor2_nonneg_root.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n1.json'.encode('ascii'))
self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n1_root.json'.encode('ascii'))
# extern "C" void fit_total3d(double* targetJoint, double* pose, double* coeff, double* trans)
self.lib.fit_total3d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5
self.lib.fit_total3d.restype = None
self.lib.fit_total2d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 6
self.lib.fit_total2d.restype = None
self.lib.fit_total3d2d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 7
self.lib.fit_total3d2d.restype = None
# extern "C" void fit_PAF_vec(double* targetJoint2d, double* PAF_vec, double* calibK, double* pose, double* coeff, double* trans, double* face_coeff)
self.lib.fit_PAF_vec.argtypes = [ctypes.POINTER(ctypes.c_double)] * 8 + [ctypes.c_uint, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool]
self.lib.fit_PAF_vec.restype = None
# Eigen::Matrix<double, 62, 3, Eigen::RowMajor> m_adam_pose; //62 ==TotalModel::NUM_JOINTS
# Eigen::Matrix<double, 30, 1> m_adam_coeffs; //30 ==TotalModel::NUM_SHAPE_COEFFICIENTS
# Eigen::Vector3d m_adam_t;
self.cpose = (ctypes.c_double * (62 * 3))()
self.ccoeff = (ctypes.c_double * 30)()
self.ctrans = (ctypes.c_double * 3)()
self.cface_coeff = (ctypes.c_double * 200)()
self.ctarget_array = (ctypes.c_double * ((62 + 70 + 6) * 3))()
self.ctarget_array_2d = (ctypes.c_double * ((63 + 70 + 6) * 2))()
self.cret_bytes = (ctypes.c_ubyte * (600 * 600 * 4))()
self.cfull_bytes = (ctypes.c_ubyte * (1920 * 1080 * 4))()
self.cortho_bytes = (ctypes.c_ubyte * (1920 * 1080 * 4))()
self.PAF_array = (ctypes.c_double * (63 * 3))()
self.out_joint = (ctypes.c_double * (65 * 3))() # regressor 2: 19 (small coco regressor) + 20 (hand) + 20 (hand) + 6 (feet)
self.calibK = (ctypes.c_double * 9)()
# extern "C" void Total_visualize(GLubyte* ret_bytes, double* targetJoint, uint CameraMode, uint position, bool meshSolid, float scale, int vis_type)
self.lib.Total_visualize.argtypes = [ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double),
ctypes.c_uint, ctypes.c_uint, ctypes.c_bool, ctypes.c_float, ctypes.c_int, ctypes.c_bool]
self.lib.Total_visualize.restype = None
self.lib.VisualizeSkeleton.argtypes = [ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double), ctypes.c_uint, ctypes.c_uint, ctypes.c_float]
self.lib.VisualizeSkeleton.restype = None
self.lib.init_renderer.argtypes = []
self.lib.init_renderer.restype = None
self.lib.reconstruct_adam.argtypes = [ctypes.POINTER(ctypes.c_double)] * 4 + [ctypes.c_int]
self.lib.reconstruct_adam.restype = None
self.lib.reconstruct_adam_mesh.argtypes = [ctypes.POINTER(ctypes.c_double)] * 4 + [ctypes.c_int, ctypes.c_bool]
self.lib.reconstruct_adam_mesh.restype = None
self.lib.fit_h36m_groundtruth.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5
self.lib.fit_h36m_groundtruth.restype = None
self.lib.adam_refit.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_uint]
self.lib.adam_refit.restype = None
self.lib.adam_sequence_init.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_uint]
self.lib.adam_sequence_init.restype = None
self.lib.adam_hsiu_fit_dome.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_bool]
self.lib.adam_hsiu_fit_dome.restype = None
def reset_value(self):
self.ctrans[:] = [0.0, 0.0, 500.0]
self.ccoeff[:] = [0.0] * 30
self.cpose[:] = [0.0] * (62 * 3)
self.cface_coeff[:] = [0.0] * 200
def load_totalmodel(self):
self.lib.load_totalmodel(self.obj_file, self.model_file, self.pca_file, self.correspondence_file, self.cocoplus_regressor_file)
def fit_total3d(self, joint3d):
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist()
self.lib.fit_total3d(self.ctarget_array, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff)
def total_visualize(self, cameraMode=0, target=True, first_render=False, position=0, meshSolid=True, scale=1.0, vis_type=1, show_joint=True):
if cameraMode == 0:
read_buffer = self.cret_bytes
read_size = (600, 600)
elif cameraMode == 1:
read_buffer = self.cfull_bytes
read_size = (1920, 1080)
else:
assert cameraMode == 2
read_buffer = self.cortho_bytes
read_size = (1920, 1080)
if first_render:
self.lib.Total_visualize(read_buffer, self.ctarget_array if target else None, ctypes.c_uint(cameraMode),
ctypes.c_uint(position), ctypes.c_bool(meshSolid), ctypes.c_float(scale), ctypes.c_int(vis_type),
ctypes.c_bool(show_joint))
read_buffer[:] = [0] * len(read_buffer[:])
self.lib.Total_visualize(read_buffer, self.ctarget_array if target else None, ctypes.c_uint(cameraMode),
ctypes.c_uint(position), ctypes.c_bool(meshSolid), ctypes.c_float(scale), ctypes.c_int(vis_type),
ctypes.c_bool(show_joint))
img = bytes(read_buffer[:read_size[0] * read_size[1] * 4])
img = Image.frombytes("RGBA", read_size, img)
img = ImageOps.flip(img)
return img
def fit_total2d(self, joint2d, K):
assert joint2d.shape[1] == 2, joint2d.shape
assert K.shape == (3, 3), K
self.calibK[:] = K.reshape(-1).tolist()
self.ctarget_array_2d[:] = joint2d.reshape(-1).tolist()
self.lib.fit_total2d(self.ctarget_array_2d, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff)
def fit_total3d2d(self, joint3d, joint2d, K):
assert joint3d.shape[1] == 3, joint3d.shape
assert joint2d.shape[1] == 2, joint2d.shape
assert K.shape == (3, 3), K
self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist()
self.ctarget_array_2d[:] = joint2d.reshape(-1).tolist()
self.calibK[:] = K.reshape(-1).tolist()
self.lib.fit_total3d2d(self.ctarget_array, self.ctarget_array_2d, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff)
def visualize_skeleton(self, joint3d, cameraMode=0, first_render=False, position=0, scale=1.0):
if cameraMode == 0:
read_buffer = self.cret_bytes
read_size = (600, 600)
elif cameraMode == 1:
read_buffer = self.cfull_bytes
read_size = (1920, 1080)
else:
assert cameraMode == 2
read_buffer = self.cortho_bytes
read_size = (1920, 1080)
read_buffer[:] = [0] * len(read_buffer[:])
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist()
if first_render:
self.lib.VisualizeSkeleton(read_buffer, self.ctarget_array, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_float(scale))
self.lib.VisualizeSkeleton(read_buffer, self.ctarget_array, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_float(scale))
img = bytes(read_buffer[:read_size[0] * read_size[1] * 4])
img = Image.frombytes("RGBA", read_size, img)
img = ImageOps.flip(img)
return img
def fit_PAF_vec(self, joint2d, PAF_vec, K, joint3d=None, regressor_type=0, quan=False, fitPAFfirst=False, fit_face_exp=False):
assert joint2d.shape == (139, 2), joint2d.shape
assert K.shape == (3, 3), K
assert PAF_vec.shape[1] == 3, PAF_vec.shape
assert PAF_vec.shape[0] == 63, PAF_vec.shape
if joint3d is not None:
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.calibK[:] = K.reshape(-1).tolist()
self.ctarget_array_2d[:] = [0.0] * len(self.ctarget_array_2d[:])
self.ctarget_array_2d[:joint2d.shape[0] * 2] = joint2d.reshape(-1).tolist()
self.PAF_array[:PAF_vec.size] = PAF_vec.reshape(-1).tolist()
self.lib.fit_PAF_vec(self.ctarget_array_2d, self.PAF_array, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff,
None if joint3d is None else self.ctarget_array, ctypes.c_uint(regressor_type),
ctypes.c_bool(quan), ctypes.c_bool(fitPAFfirst), ctypes.c_bool(fit_face_exp))
def adam_refit(self, joint3d, regressor_type):
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.lib.adam_refit(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, regressor_type)
def adam_sequence_init(self, joint3d, regressor_type):
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.lib.adam_sequence_init(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, regressor_type)
def adam_hsiu_fit_dome(self, target_joint, freeze_shape=False):
assert target_joint.shape == (20, 3)
self.ctarget_array[:60] = target_joint.reshape(-1).tolist()
self.lib.adam_hsiu_fit_dome(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, freeze_shape)
def refit_eval_h36m(self, regressor_type, prior_weight=1.0):
# refit Adam using skeleton reconstructed from current params, update params with pose prior && AngleAxis
self.lib.refit_eval_h36m(self.cpose, self.ccoeff, self.ctrans, ctypes.c_uint(regressor_type), ctypes.c_double(prior_weight))
def fitSingleStage(self, joint2d, PAF_vec, K, regressor_type=0, fit_face_exp=False):
assert joint2d.shape == (139, 2), joint2d.shape
assert K.shape == (3, 3), K
assert PAF_vec.shape[1] == 3, PAF_vec.shape
assert PAF_vec.shape[0] == 63, PAF_vec.shape
self.calibK[:] = K.reshape(-1).tolist()
self.ctarget_array_2d[:] = [0.0] * len(self.ctarget_array_2d[:])
self.ctarget_array_2d[:joint2d.shape[0] * 2] = joint2d.reshape(-1).tolist()
self.PAF_array[:PAF_vec.size] = PAF_vec.reshape(-1).tolist()
self.lib.fitSingleStage(self.ctarget_array_2d, self.PAF_array, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff,
ctypes.c_uint(regressor_type), ctypes.c_bool(fit_face_exp))
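# Typical call sequence (a hedged sketch, not part of the original file; the input arrays
# below are placeholders with the shapes asserted by the methods above, and the shared
# library plus model files referenced in __init__ must be present on disk):
#   wrapper = meshWrapper()
#   wrapper.load_totalmodel()
#   wrapper.lib.init_renderer()
#   wrapper.reset_value()
#   wrapper.fit_PAF_vec(joint2d, PAF_vec, K)   # joint2d: (139, 2), PAF_vec: (63, 3), K: (3, 3)
#   img = wrapper.total_visualize(cameraMode=0, target=False, first_render=True)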
| body2hands-main | visualization/POF/utils/meshWrapper.py |
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import cv2
# in A4 order (SMC)
tbody_connMat = np.array([0, 1, 0, 3, 3, 4, 4, 5, 0, 9, 9, 10, 10, 11, 0, 2, 2, 6, 6, 7, 7, 8, 2, 12, 12, 13, 13, 14, 1, 15, 15, 16, 1, 17, 17, 18, 0, 19, 0, 20, 20, 12, 20, 6])
thand_connMat = np.array([0, 1, 1, 2, 2, 3, 3, 4, 0, 5, 5, 6, 6, 7, 7, 8, 0, 9, 9, 10, 10, 11, 11, 12, 0, 13, 13, 14, 14, 15, 15, 16, 0, 17, 17, 18, 18, 19, 19, 20])
total_connMat = np.concatenate([tbody_connMat, thand_connMat + 21, thand_connMat + 42], axis=0).reshape(-1, 2)
connMat = {
'body': np.array([[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [0, 14], [14, 16], [0, 15], [15, 17], [1, 18], [1, 19]], dtype=int),
'hand': np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9],
[0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]]),
'total': total_connMat,
'face': np.array([]),
'human3.6m': np.array([[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], [15, 16]])
}
type_strs = ['body', 'hand']
class LearningRateScheduler:
"""
    Provides a scalar learning-rate tensor for a given iteration, as needed for a multistep learning rate schedule.
"""
def __init__(self, steps, values):
self.steps = steps
self.values = values
        assert len(steps) + 1 == len(values), "There must be exactly one more element in values than in steps."
def get_lr(self, global_step):
with tf.name_scope('lr_scheduler'):
if len(self.values) == 1: # 1 value -> no step
learning_rate = tf.constant(self.values[0])
elif len(self.values) == 2: # 2 values -> one step
cond = tf.greater(global_step, self.steps[0])
learning_rate = tf.where(cond, self.values[1], self.values[0])
else: # n values -> n-1 steps
cond_first = tf.less(global_step, self.steps[0])
cond_between = list()
for ind, step in enumerate(range(0, len(self.steps) - 1)):
cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind + 1]),
tf.greater_equal(global_step, self.steps[ind])))
cond_last = tf.greater_equal(global_step, self.steps[-1])
cond_full = [cond_first]
cond_full.extend(cond_between)
cond_full.append(cond_last)
cond_vec = tf.stack(cond_full)
lr_vec = tf.stack(self.values)
learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))
learning_rate = tf.reduce_sum(learning_rate)
return learning_rate
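# Example schedule (a brief sketch, not from the original file): start at 1e-4 and drop
# to 1e-5 after 100k iterations; `global_step` is the usual TF1 step counter.
#   scheduler = LearningRateScheduler(steps=[100000], values=[1e-4, 1e-5])
#   global_step = tf.train.get_or_create_global_step()
#   lr = scheduler.get_lr(global_step)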
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
"""
    Crops an image. When scale is not given, a central crop is performed.
Inputs:
image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
crop_location: tensor, [batch, 2] which represent the height and width location of the crop
crop_size: int, describes the extension of the crop
Outputs:
image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
"""
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
        assert len(s) == 4, "Image needs to be of shape [batch, height, width, channels]"
scale = tf.reshape(scale, [-1])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = crop_size / scale
y1 = crop_location[:, 0] - crop_size_scaled // 2
y2 = y1 + crop_size_scaled
x1 = crop_location[:, 1] - crop_size_scaled // 2
x2 = x1 + crop_size_scaled
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], -1)
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c
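# Shape sketch for crop_image_from_xy (assumed values, not from the original file):
#   images: [B, H, W, 3] float tensor; centers: [B, 2] in (y, x) pixel coordinates.
#   crops = crop_image_from_xy(images, centers, crop_size=368, scale=1.0)
#   -> crops: [B, 368, 368, 3]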
def detect_keypoints2d(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
keypoint_uv = np.zeros((s[2], 2))
for i in range(s[2]):
v, u = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_uv[i, 0] = u
keypoint_uv[i, 1] = v
return keypoint_uv
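# Typical use (a short sketch; the network output name is an assumption):
#   scoremaps = sess.run(final_stage_heatmap)[0]   # [H, W, C] after squeezing the batch
#   keypoint_uv = detect_keypoints2d(scoremaps)    # [C, 2], columns are (u, v) in pixels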
def detect_keypoints3d(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 5:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 4, "This function was only designed for 3D Scoremaps."
assert (s[3] < s[2]) and (s[3] < s[1]) and (s[3] < s[0]), "Probably the input is not correct, because [D, H, W, C] is expected."
keypoint_coords = np.zeros((s[3], 3))
for i in range(s[3]):
z, y, x = np.unravel_index(np.argmax(scoremaps[:, :, :, i]), (s[0], s[1], s[2]))
keypoint_coords[i, 0] = x
keypoint_coords[i, 1] = y
keypoint_coords[i, 2] = z
return keypoint_coords
def plot2d(ax, keypoint, type_str='body', valid_idx=None, color='red', s=10):
assert len(keypoint.shape) == 2 and keypoint.shape[1] == 2
if valid_idx is not None:
plot_point = keypoint[valid_idx, :]
else:
plot_point = keypoint
ax.scatter(plot_point[:, 0], plot_point[:, 1], c=color, s=s)
for conn in connMat[type_str]:
coord1 = keypoint[conn[0]]
coord2 = keypoint[conn[1]]
if valid_idx is not None and (not valid_idx[conn[0]] or not valid_idx[conn[1]]):
continue
coords = np.vstack([coord1, coord2])
ax.plot(coords[:, 0], coords[:, 1], c=color)
def plot2d_cv2(img, keypoint, type_str='body', valid_idx=None, s=10, use_color=False):
assert len(keypoint.shape) == 2 and keypoint.shape[1] == 2
if valid_idx is not None:
plot_point = keypoint[valid_idx, :]
else:
plot_point = keypoint
for i, kp in enumerate(plot_point):
x = int(kp[0])
y = int(kp[1])
if x == 0 and y == 0:
continue
if not use_color:
cv2.circle(img, (x, y), s, (255, 0, 0), -1)
else:
if i <= 4:
color = (255, 0, 0)
elif i <= 8:
color = (0, 255, 0)
elif i <= 12:
color = (0, 0, 255)
elif i <= 16:
color = (255, 255, 0)
else:
color = (0, 255, 255)
cv2.circle(img, (x, y), s, color, -1)
for i, conn in enumerate(connMat[type_str]):
coord1 = keypoint[conn[0]]
coord2 = keypoint[conn[1]]
if valid_idx is not None and (not valid_idx[conn[0]] or not valid_idx[conn[1]]):
continue
pt1 = (int(coord1[0]), int(coord1[1]))
pt2 = (int(coord2[0]), int(coord2[1]))
if (pt1[0] == 0 and pt1[1] == 0) or (pt2[0] == 0 and pt2[1] == 0):
continue
if not use_color:
cv2.line(img, pt1, pt2, (255, 0, 0), int(s / 2))
else:
if i < 4:
color = (255, 0, 0)
elif i < 8:
color = (0, 255, 0)
elif i < 12:
color = (0, 0, 255)
elif i < 16:
color = (255, 255, 0)
else:
color = (0, 255, 255)
cv2.line(img, pt1, pt2, color, int(s / 2))
def plot3d(ax, keypoint, type_str='body', valid_idx=None, color='red'):
assert len(keypoint.shape) == 2 and keypoint.shape[1] == 3
if valid_idx is not None:
plot_point = keypoint[valid_idx, :]
else:
plot_point = keypoint
ax.scatter(plot_point[:, 0], plot_point[:, 1], plot_point[:, 2], c=color)
for conn in connMat[type_str]:
coord1 = keypoint[conn[0]]
coord2 = keypoint[conn[1]]
if valid_idx is not None and (not valid_idx[conn[0]] or not valid_idx[conn[1]]):
continue
coords = np.vstack([coord1, coord2])
ax.plot(coords[:, 0], coords[:, 1], coords[:, 2], c=color)
def h36LimbLength(keypoint):
assert keypoint.shape == (17, 3)
connections = np.array([[0, 1], [0, 4], [0, 7], [1, 2], [2, 3], [4, 5], [5, 6], [7, 8], [8, 9], [8, 11], [8, 14], [9, 10], [11, 12], [12, 13], [14, 15], [15, 16]])
Ls = []
for conn in connections:
L = nl.norm(keypoint[conn[0]] - keypoint[conn[1]])
Ls.append(L)
return np.array(Ls, dtype=np.float32)
| body2hands-main | visualization/POF/utils/general.py |
import numpy as np
import numpy.linalg as nl
from utils.general import connMat
a4_to_main = {
'body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64), # convert to order of openpose
'1_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64), # convert to order of openpose
'2_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64), # convert to order of openpose
'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'1_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'2_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'1_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'2_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'openpose_lhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'openpose_rhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'openpose_lhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'openpose_rhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}
human36m_to_main = {
'body': np.array([9, 8, 14, 15, 16, 11, 12, 13, 4, 5, 6, 1, 2, 3, 17, 17, 17, 17, 10, 17], dtype=np.int64)
}
mpi3d_to_main = {
'body': np.array([6, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 28, 28, 28, 28, 7], dtype=np.int64)
}
adam_to_main = {
'body': np.array([12, 17, 19, 21, 16, 18, 20, 2, 5, 8, 1, 4, 7], dtype=np.int64),
'select_body_main': np.arange(1, 14, dtype=np.int64)
}
COCO_to_main = {
'body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
'body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
'all_body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
'all_body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64)
}
SMPL_to_main = { # actually COCOPLUS regressor to main
'body': np.array([14, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 16, 15, 18, 17, 13], dtype=np.int64)
}
STB_to_main = {
'left_hand': np.array([0, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], dtype=np.int64)
}
MPII_to_main = {
'body': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64),
'body_valid': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64)
}
tsimon_to_main = {
'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'right_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}
GAnerated_to_main = {
'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'left_hand_3d': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'right_hand': np.arange(21, dtype=np.int64),
'right_hand_valid': np.arange(21, dtype=np.int64),
'right_hand_3d': np.arange(21, dtype=np.int64)
}
std_body_size = 267.807
std_hand_size = (82.2705 + 79.8843) / 2
def compute_size(joint3d, type_str):
""" use this to compute size for scaling: joints are in main order.
"""
length = 0.0
for ic, conn in enumerate(connMat[type_str]):
if type_str == 'body':
if ic in (2, 3, 5, 6, 8, 9, 11, 12):
length += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
else:
assert type_str == 'hand'
length += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
return length
def main_to_a4(joint):
assert joint.shape[0] == 20
output = np.zeros((21, joint.shape[1]), dtype=joint.dtype)
for io, ic in enumerate(a4_to_main['body']):
output[ic, :] = joint[io, :]
output[2, :] = (output[6, :] + output[12, :]) / 2
return output
def main_to_a4_hand(joint):
assert joint.shape[0] == 21
output = np.zeros(joint.shape, dtype=joint.dtype)
output[0] = joint[0]
for i in (1, 5, 9, 13, 17):
output[i:i + 4] = joint[i + 3:i - 1:-1]
return output
def assemble_total_3d(body, lhand, rhand):
len_b = compute_size(body, 'body')
if len_b > 0:
sbody = (std_body_size / len_b) * body
else:
sbody = body
len_l = compute_size(lhand, 'hand')
if len_l > 0:
slhand = (std_hand_size / len_l) * lhand
else:
slhand = lhand
len_r = compute_size(rhand, 'hand')
if len_r > 0:
srhand = (std_hand_size / len_r) * rhand
else:
srhand = rhand
sbody = main_to_a4(sbody)
slhand = main_to_a4_hand(slhand)
srhand = main_to_a4_hand(srhand)
slhand_invalid = (slhand[:, 0] == 0) * (slhand[:, 1] == 0) * (slhand[:, 2] == 0)
srhand_invalid = (srhand[:, 0] == 0) * (srhand[:, 1] == 0) * (srhand[:, 2] == 0)
if not slhand[0].any():
slhand_invalid[:] = True
if not srhand[0].any():
srhand_invalid[:] = True
lhand_idx_a4 = 5
rhand_idx_a4 = 11
shift_lhand = sbody[lhand_idx_a4] - slhand[0]
shift_rhand = sbody[rhand_idx_a4] - srhand[0]
slhand += shift_lhand
srhand += shift_rhand
slhand[slhand_invalid] = 0
srhand[srhand_invalid] = 0
return np.concatenate([sbody, slhand, srhand], axis=0), std_body_size / len_b
def assemble_total_2d(body_2d, lhand_2d, rhand_2d):
keypoint_list = []
for i, item in enumerate((body_2d, lhand_2d, rhand_2d)):
keypoint = item['uv_local']
keypoint = (keypoint - 184) / item['scale2d'] + item['crop_center2d']
valid = item['valid']
keypoint = keypoint * np.stack([valid, valid], axis=1) # remove those invalid values
if i == 0:
keypoint = main_to_a4(keypoint)
else:
keypoint = main_to_a4_hand(keypoint)
keypoint_list.append(keypoint)
ret = np.concatenate(keypoint_list, axis=0)
ret[np.isnan(ret)] = 0.0 # nan when the whole joint is zero
return ret
def main_to_human36m(joint):
# except 9, 10 in human36m
out = np.zeros((17, 3), dtype=joint.dtype)
for im, ih in enumerate(human36m_to_main['body']):
if ih == 17: # virtual zero joint
continue
out[ih] = np.copy(joint[im, :])
out[0] = (out[1] + out[4]) / 2 # middle hip
out[7] = (out[1] + out[4] + out[11] + out[14]) / 4 # abdomen (average of l/r hip, l/r shoulder)
return out
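# Assembly sketch (hedged; array shapes follow the asserts and index maps above):
#   body3d: (20, 3), lhand3d / rhand3d: (21, 3), all in the "main" joint order.
#   total3d, body_scale = assemble_total_3d(body3d, lhand3d, rhand3d)   # (63, 3) in A4 order
#   h36m = main_to_human36m(body3d)                                     # (17, 3)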
| body2hands-main | visualization/POF/utils/keypoint_conversion.py |
import tensorflow as tf
from utils.ops import NetworkOps
import numpy as np
ops = NetworkOps
class CPM(object):
# The original CPM: set input image to right hand, BGR channel order (OpenCV), image scale to x / 256.0 - 0.5, output channel number to 22 (the last one for background)
def __init__(self, crop_size=256, out_chan=21, withPAF=False, PAFdim=2, numPAF=19, numStage=5, input_chan=3):
self.name = 'CPM'
self.out_chan = out_chan
self.crop_size = crop_size
self.withPAF = withPAF
self.PAFdim = PAFdim
self.numPAF = numPAF
self.numStage = numStage
def init(self, weight_path, sess):
with tf.variable_scope("CPM"):
            data_dict = np.load(weight_path, encoding='latin1', allow_pickle=True).item()  # pickled dict; allow_pickle is required on NumPy >= 1.16.3
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
var = tf.get_variable(param_name)
sess.run(var.assign(data))
print('Finish loading weight from {}'.format(weight_path))
def init_pickle(self, session, weight_files=None, exclude_var_list=None):
""" Initializes weights from pickled python dictionaries.
Inputs:
session: tf.Session, Tensorflow session object containing the network graph
weight_files: list of str, Paths to the pickle files that are used to initialize network weights
exclude_var_list: list of str, Weights that should not be loaded
"""
if exclude_var_list is None:
exclude_var_list = list()
import pickle
import os
# Initialize with weights
for file_name in weight_files:
assert os.path.exists(file_name), "File not found."
with open(file_name, 'rb') as fi:
weight_dict = pickle.load(fi)
weight_dict = {k: v for k, v in weight_dict.items() if not any([x in k for x in exclude_var_list])}
if len(weight_dict) > 0:
init_op, init_feed = tf.contrib.framework.assign_from_values(weight_dict)
session.run(init_op, init_feed)
print('Loaded %d variables from %s' % (len(weight_dict), file_name))
def init_vgg(self, sess, weight_path='./weights/vgg16.npy'):
print('initialize from ImageNet pretrained VGG')
with tf.variable_scope("CPM"):
            data_dict = np.load(weight_path, encoding='latin1', allow_pickle=True).item()  # pickled dict; allow_pickle is required on NumPy >= 1.16.3
for op_name in data_dict:
if not op_name.startswith("conv") or op_name == 'conv5_3':
continue
with tf.variable_scope(op_name, reuse=True):
assert len(data_dict[op_name]) == 2
for data in data_dict[op_name]:
try:
if data.ndim == 4:
var = tf.get_variable('weights')
elif data.ndim == 1:
var = tf.get_variable('biases')
else:
raise Exception
sess.run(var.assign(data))
except Exception:
print('Fail to load {}'.format(op_name))
print('Finish loading weight from {}'.format(weight_path))
def inference(self, input_image, train=False):
with tf.variable_scope("CPM"):
s = input_image.get_shape().as_list()
assert s[1] == self.crop_size and s[2] == self.crop_size
layers_per_block = [2, 2, 4, 2]
out_chan_list = [64, 128, 256, 512]
pool_list = [True, True, True, False]
# conv1_1 ~ conv4_4
x = input_image
for block_id, (layer_num, chan_num, pool) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1):
for layer_id in range(layer_num):
x = ops.conv_relu(x, 'conv%d_%d' % (block_id, layer_id + 1), kernel_size=3, stride=1, out_chan=chan_num, leaky=False, trainable=train)
if pool:
x = ops.max_pool(x, 'pool%d' % block_id)
PAF = []
if not self.withPAF: # openpose hand net
x = ops.conv_relu(x, 'conv4_3', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv_relu(x, 'conv4_4', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv_relu(x, 'conv5_1', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv_relu(x, 'conv5_2', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
conv_feature = ops.conv_relu(x, 'conv5_3_CPM', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x = ops.conv_relu(conv_feature, 'conv6_1_CPM', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv(x, 'conv6_2_CPM', kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps = [x]
for stage_id in range(2, 7):
x = tf.concat([x, conv_feature], axis=3, name='concat_stage{}'.format(stage_id))
for layer_id in range(1, 6):
x = ops.conv_relu(x, 'Mconv{}_stage{}'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x = ops.conv_relu(x, 'Mconv6_stage{}'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
x = ops.conv(x, 'Mconv7_stage{}'.format(stage_id), kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps.append(x)
else: # with PAF (openpose body net)
x = ops.conv_relu(x, 'conv4_3_CPM', kernel_size=3, stride=1, out_chan=256, leaky=False, trainable=train)
conv_feature = ops.conv_relu(x, 'conv4_4_CPM', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(conv_feature, 'conv5_1_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'conv5_2_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'conv5_3_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'conv5_4_CPM_L1', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
x1 = ops.conv(x1, 'conv5_5_CPM_L1', kernel_size=1, stride=1, out_chan=self.PAFdim * self.numPAF, trainable=train)
x2 = ops.conv_relu(conv_feature, 'conv5_1_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'conv5_2_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'conv5_3_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'conv5_4_CPM_L2', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
x2 = ops.conv(x2, 'conv5_5_CPM_L2', kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps = [x2]
PAF.append(x1)
for stage_id in range(2, 2 + self.numStage):
x = tf.concat([x1, x2, conv_feature], axis=3, name='concat_stage{}'.format(stage_id))
x1 = ops.conv_relu(x, 'Mconv{}_stage{}_L1'.format(1, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x, 'Mconv{}_stage{}_L2'.format(1, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
for layer_id in range(2, 6):
x1 = ops.conv_relu(x1, 'Mconv{}_stage{}_L1'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'Mconv{}_stage{}_L2'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'Mconv6_stage{}_L1'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'Mconv6_stage{}_L2'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv(x1, 'Mconv7_stage{}_L1'.format(stage_id), kernel_size=1, stride=1, out_chan=self.PAFdim * self.numPAF, trainable=train)
x2 = ops.conv(x2, 'Mconv7_stage{}_L2'.format(stage_id), kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps.append(x2)
PAF.append(x1)
return scoremaps, conv_feature, PAF
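# Graph-construction sketch (a hedged example; the channel counts are the class defaults,
# not values taken from a training config in this repository):
#   net = CPM(crop_size=368, out_chan=21, withPAF=True)
#   image = tf.placeholder(tf.float32, [1, 368, 368, 3])
#   scoremaps, conv_feature, PAF = net.inference(image, train=False)
#   # scoremaps[-1] and PAF[-1] are the final-stage heatmaps and PAFs.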
| body2hands-main | visualization/POF/nets/CPM.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class TempConstReader(BaseReader):
crop_scale_noise_sigma = 0.1
crop_offset_noise_sigma = 0.1
def __init__(self, objtype=0, shuffle=False, batch_size=1, crop_noise=False):
super(TempConstReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
        assert objtype in (0, 1), "This data reader only supports a single body or a single hand"
def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
# input to this data reader should have two consecutive frames
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['1_img_dir'] = flow_dict['1_img_dirs']
data_dict['2_img_dir'] = flow_dict['2_img_dirs']
        data_dict['1_K'] = flow_dict['1_K']
        data_dict['2_K'] = flow_dict['2_K']
# rotate and project to camera frame
if self.objtype == 0:
body2d_1, body3d_1 = self.project_tf(flow_dict['1_body'], flow_dict['1_K'], flow_dict['1_R'], flow_dict['1_t'], flow_dict['1_distCoef'])
body2d_2, body3d_2 = self.project_tf(flow_dict['2_body'], flow_dict['2_K'], flow_dict['2_R'], flow_dict['2_t'], flow_dict['2_distCoef'])
body3d_1 = tf.cast(body3d_1, tf.float32)
body3d_2 = tf.cast(body3d_2, tf.float32)
body2d_1 = tf.cast(body2d_1, tf.float32)
body2d_2 = tf.cast(body2d_2, tf.float32)
data_dict['1_keypoint_xyz_origin'] = body3d_1
data_dict['2_keypoint_xyz_origin'] = body3d_2
data_dict['1_keypoint_uv_origin'] = body2d_1
data_dict['2_keypoint_uv_origin'] = body2d_2
data_dict['1_body_valid'] = flow_dict['1_body_valid']
data_dict['2_body_valid'] = flow_dict['2_body_valid']
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand3d_1 = tf.cond(cond_left, lambda: flow_dict['1_left_hand'], lambda: flow_dict['1_right_hand']) # in world coordinate
hand3d_2 = tf.cond(cond_left, lambda: flow_dict['2_left_hand'], lambda: flow_dict['2_right_hand']) # in world coordinate
hand2d_1, hand3d_1 = self.project_tf(hand3d_1, flow_dict['1_K'], flow_dict['1_R'], flow_dict['1_t'], flow_dict['1_distCoef']) # in camera coordinate
hand2d_2, hand3d_2 = self.project_tf(hand3d_2, flow_dict['2_K'], flow_dict['2_R'], flow_dict['2_t'], flow_dict['2_distCoef']) # in camera coordinate
hand3d_1 = tf.cast(hand3d_1, tf.float32)
hand3d_2 = tf.cast(hand3d_2, tf.float32)
hand2d_1 = tf.cast(hand2d_1, tf.float32)
hand2d_2 = tf.cast(hand2d_2, tf.float32)
data_dict['1_keypoint_xyz_origin'] = hand3d_1
data_dict['2_keypoint_xyz_origin'] = hand3d_2
data_dict['1_keypoint_uv_origin'] = hand2d_1
data_dict['2_keypoint_uv_origin'] = hand2d_2
data_dict['cond_left'] = cond_left
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
# read image
if read_image:
img_file_1 = tf.read_file(flow_dict['1_img_dirs'])
img_file_2 = tf.read_file(flow_dict['2_img_dirs'])
image_1 = tf.image.decode_image(img_file_1, channels=3)
image_2 = tf.image.decode_image(img_file_2, channels=3)
image_1 = tf.image.pad_to_bounding_box(image_1, 0, 0, imh, imw)
image_2 = tf.image.pad_to_bounding_box(image_2, 0, 0, imh, imw)
image_1.set_shape((imh, imw, 3))
image_2.set_shape((imh, imw, 3))
image_1 = tf.cast(image_1, tf.float32) / 255.0 - 0.5
image_2 = tf.cast(image_2, tf.float32) / 255.0 - 0.5
data_dict['1_image'] = image_1
data_dict['2_image'] = image_2
            if '1_mask_dirs' in flow_dict:
                assert '2_mask_dirs' in flow_dict
mask_file_1 = tf.read_file(flow_dict['1_mask_dirs'])
mask_file_2 = tf.read_file(flow_dict['2_mask_dirs'])
mask_1 = tf.image.decode_image(mask_file_1, channels=3)
mask_2 = tf.image.decode_image(mask_file_2, channels=3)
mask_1 = tf.image.pad_to_bounding_box(mask_1, 0, 0, imh, imw)
mask_2 = tf.image.pad_to_bounding_box(mask_2, 0, 0, imh, imw)
mask_1.set_shape((imh, imw, 3))
mask_2.set_shape((imh, imw, 3))
mask_1 = mask_1[:, :, 0]
mask_2 = mask_2[:, :, 0]
mask_1 = tf.cast(mask_1, tf.float32)
mask_2 = tf.cast(mask_2, tf.float32)
else:
mask_1 = tf.ones((imh, imw), dtype=tf.float32)
mask_2 = tf.ones((imh, imw), dtype=tf.float32)
data_dict['1_mask'] = mask_1
data_dict['2_mask'] = mask_2
# calculate crop size
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints_1 = body3d_1
keypoints_2 = body3d_2
valid_1 = flow_dict['1_body_valid']
valid_2 = flow_dict['2_body_valid']
elif self.objtype == 1:
keypoints_1 = hand3d_1
keypoints_2 = hand3d_2
valid_1 = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
valid_2 = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['1_hand_valid'] = valid_1
data_dict['2_hand_valid'] = valid_2
crop_center3d_1, scale3d_1, crop_center2d_1, scale2d_1, crop_center3d_2, scale3d_2, crop_center2d_2, scale2d_2 = \
self.calc_crop_scale_temp_const(keypoints_1, flow_dict['1_K'], flow_dict['1_distCoef'], valid_1, keypoints_2, flow_dict['2_K'], flow_dict['2_distCoef'], valid_2)
data_dict['1_crop_center2d'], data_dict['1_scale2d'] = crop_center2d_1, scale2d_1
data_dict['2_crop_center2d'], data_dict['2_scale2d'] = crop_center2d_2, scale2d_2
data_dict['1_crop_center3d'], data_dict['1_scale3d'] = crop_center3d_1, scale3d_1
data_dict['2_crop_center3d'], data_dict['2_scale3d'] = crop_center3d_2, scale3d_2
# do cropping
if self.objtype == 1:
body2d_1 = hand2d_1
body2d_2 = hand2d_2
body3d_1 = hand3d_1
body3d_2 = hand3d_2
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle_1 = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
else:
rotate_angle_1 = 0.0
rotate_angle_2 = tf.random_uniform([], minval=-np.pi * 5 / 180, maxval=np.pi * 5 / 180) + rotate_angle_1
R2_1 = tf.reshape(tf.stack([tf.cos(rotate_angle_1), -tf.sin(rotate_angle_1), tf.sin(rotate_angle_1), tf.cos(rotate_angle_1)]), [2, 2])
R2_2 = tf.reshape(tf.stack([tf.cos(rotate_angle_2), -tf.sin(rotate_angle_2), tf.sin(rotate_angle_2), tf.cos(rotate_angle_2)]), [2, 2])
R3_1 = tf.reshape(tf.stack([tf.cos(rotate_angle_1), -tf.sin(rotate_angle_1), 0, tf.sin(rotate_angle_1), tf.cos(rotate_angle_1), 0, 0, 0, 1]), [3, 3])
R3_2 = tf.reshape(tf.stack([tf.cos(rotate_angle_2), -tf.sin(rotate_angle_2), 0, tf.sin(rotate_angle_2), tf.cos(rotate_angle_2), 0, 0, 0, 1]), [3, 3])
body2d_1 = tf.matmul((body2d_1 - crop_center2d_1), R2_1) + crop_center2d_1
body2d_2 = tf.matmul((body2d_2 - crop_center2d_2), R2_2) + crop_center2d_2
body3d_1 = tf.matmul((body3d_1 - crop_center3d_1), R3_1) + crop_center3d_1
body3d_2 = tf.matmul((body3d_2 - crop_center3d_2), R3_2) + crop_center3d_2
data_dict['1_keypoint_xyz_origin'] = body3d_1 # note that the projection of 3D might not be aligned with 2D any more after rotation
data_dict['2_keypoint_xyz_origin'] = body3d_2 # note that the projection of 3D might not be aligned with 2D any more after rotation
data_dict['1_keypoint_uv_origin'] = body2d_1
data_dict['2_keypoint_uv_origin'] = body2d_2
body2d_local_1 = self.update_keypoint2d(body2d_1, crop_center2d_1, scale2d_1)
body2d_local_2 = self.update_keypoint2d(body2d_2, crop_center2d_2, scale2d_2)
data_dict['1_keypoint_uv_local'] = body2d_local_1
data_dict['2_keypoint_uv_local'] = body2d_local_2
if read_image:
image_crop_1 = self.crop_image(image_1, crop_center2d_1, scale2d_1)
image_crop_2 = self.crop_image(image_2, crop_center2d_2, scale2d_2)
data_dict['1_image_crop'] = image_crop_1
data_dict['2_image_crop'] = image_crop_2
mask_crop_1 = self.crop_image(tf.stack([mask_1] * 3, axis=2), crop_center2d_1, scale2d_1)
mask_crop_2 = self.crop_image(tf.stack([mask_2] * 3, axis=2), crop_center2d_2, scale2d_2)
data_dict['1_mask_crop'] = mask_crop_1[:, :, 0]
data_dict['2_mask_crop'] = mask_crop_2[:, :, 0]
data_dict['1_image_crop'] = tf.contrib.image.rotate(data_dict['1_image_crop'], rotate_angle_1)
data_dict['2_image_crop'] = tf.contrib.image.rotate(data_dict['2_image_crop'], rotate_angle_2)
data_dict['1_mask_crop'] = tf.contrib.image.rotate(data_dict['1_mask_crop'], rotate_angle_1)
data_dict['2_mask_crop'] = tf.contrib.image.rotate(data_dict['2_mask_crop'], rotate_angle_2)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image_1 = tf.image.resize_images(data_dict['1_image_crop'], [rescale, rescale])
resized_image_2 = tf.image.resize_images(data_dict['2_image_crop'], [rescale, rescale])
data_dict['1_image_crop'] = tf.image.resize_images(resized_image_1, [self.crop_size, self.crop_size])
data_dict['2_image_crop'] = tf.image.resize_images(resized_image_2, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d_1 = self.create_multiple_gaussian_map(body2d_local_1[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid_1, extra=True) # coord_hw, imsize_hw
scoremap2d_2 = self.create_multiple_gaussian_map(body2d_local_2[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid_2, extra=True) # coord_hw, imsize_hw
data_dict['1_scoremap2d'] = scoremap2d_1
data_dict['2_scoremap2d'] = scoremap2d_2
if withPAF:
from utils.PAF import createPAF
data_dict['1_PAF'] = createPAF(body2d_local_1, body3d_1, self.objtype, (self.crop_size, self.crop_size), True, valid_vec=valid_1)
data_dict['2_PAF'] = createPAF(body2d_local_2, body3d_2, self.objtype, (self.crop_size, self.crop_size), True, valid_vec=valid_2)
data_dict['1_PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
data_dict['2_PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
# create 3D gaussian_map
body3d_local_1 = self.update_keypoint3d(body3d_1, crop_center3d_1, scale3d_1)
body3d_local_2 = self.update_keypoint3d(body3d_2, crop_center3d_2, scale3d_2)
data_dict['1_keypoint_xyz_local'] = body3d_local_1
data_dict['2_keypoint_xyz_local'] = body3d_local_2
# scoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d, valid_vec=valid, extra=True)
# data_dict['1_scoremap3d'] = scoremap3d
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['1_image_crop'] = tf.cond(cond_left, lambda: data_dict['1_image_crop'], lambda: data_dict['1_image_crop'][:, ::-1, :])
data_dict['2_image_crop'] = tf.cond(cond_left, lambda: data_dict['2_image_crop'], lambda: data_dict['2_image_crop'][:, ::-1, :])
data_dict['1_mask_crop'] = tf.cond(cond_left, lambda: data_dict['1_mask_crop'], lambda: data_dict['1_mask_crop'][:, ::-1])
data_dict['2_mask_crop'] = tf.cond(cond_left, lambda: data_dict['2_mask_crop'], lambda: data_dict['2_mask_crop'][:, ::-1])
data_dict['1_scoremap2d'] = tf.cond(cond_left, lambda: data_dict['1_scoremap2d'], lambda: data_dict['1_scoremap2d'][:, ::-1, :])
data_dict['2_scoremap2d'] = tf.cond(cond_left, lambda: data_dict['2_scoremap2d'], lambda: data_dict['2_scoremap2d'][:, ::-1, :])
data_dict['1_keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['1_keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['1_keypoint_uv_local'])
data_dict['2_keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['2_keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['2_keypoint_uv_local'])
if withPAF:
data_dict['1_PAF'] = tf.cond(cond_left, lambda: data_dict['1_PAF'],
lambda: (data_dict['1_PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['1_PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
data_dict['2_PAF'] = tf.cond(cond_left, lambda: data_dict['2_PAF'],
lambda: (data_dict['2_PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['2_PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=20,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=20,
enqueue_many=False)
return dict(zip(names, tensors))
def calc_crop_scale_temp_const(self, keypoints_1, calibK_1, calibDC_1, valid_1, keypoints_2, calibK_2, calibDC_2, valid_2):
if self.objtype == 0:
keypoint_center_1 = (keypoints_1[8] + keypoints_1[11]) / 2
keypoint_center_2 = (keypoints_2[8] + keypoints_2[11]) / 2
center_valid_1 = tf.logical_and(valid_1[8], valid_1[11])
center_valid_2 = tf.logical_and(valid_2[8], valid_2[11])
elif self.objtype == 1:
keypoint_center_1 = keypoints_1[12]
keypoint_center_2 = keypoints_2[12]
center_valid_1 = valid_1[12]
center_valid_2 = valid_2[12]
else:
raise NotImplementedError
valid_idx_1 = tf.where(valid_1)[:, 0]
valid_idx_2 = tf.where(valid_2)[:, 0]
valid_keypoints_1 = tf.gather(keypoints_1, valid_idx_1, name='1_valid_keypoints')
valid_keypoints_2 = tf.gather(keypoints_2, valid_idx_2, name='2_valid_keypoints')
min_coord_1 = tf.reduce_min(valid_keypoints_1, 0, name='1_min_coord')
min_coord_2 = tf.reduce_min(valid_keypoints_2, 0, name='2_min_coord')
max_coord_1 = tf.reduce_max(valid_keypoints_1, 0, name='1_max_coord')
max_coord_2 = tf.reduce_max(valid_keypoints_2, 0, name='2_max_coord')
keypoint_center_1 = tf.cond(center_valid_1, lambda: keypoint_center_1, lambda: (min_coord_1 + max_coord_1) / 2)
keypoint_center_2 = tf.cond(center_valid_2, lambda: keypoint_center_2, lambda: (min_coord_2 + max_coord_2) / 2)
keypoint_center_1.set_shape((3,))
keypoint_center_2.set_shape((3,))
fit_size_1 = tf.reduce_max(tf.maximum(max_coord_1 - keypoint_center_1, keypoint_center_1 - min_coord_1))
fit_size_2 = tf.reduce_max(tf.maximum(max_coord_2 - keypoint_center_2, keypoint_center_2 - min_coord_2))
crop_scale_noise_1 = tf.cast(1.0, tf.float32)
if self.crop_noise:
crop_scale_noise_1 = tf.exp(tf.truncated_normal([], mean=0.0, stddev=self.crop_scale_noise_sigma))
crop_scale_noise_1 = tf.maximum(crop_scale_noise_1, tf.reciprocal(self.crop_size_zoom))
crop_scale_noise_2 = crop_scale_noise_1 + tf.truncated_normal([], mean=0.0, stddev=0.01)
crop_size_best_1 = tf.multiply(crop_scale_noise_1, 2 * fit_size_1 * self.crop_size_zoom, name='1_crop_size_best')
crop_size_best_2 = tf.multiply(crop_scale_noise_2, 2 * fit_size_2 * self.crop_size_zoom, name='2_crop_size_best')
crop_offset_noise_1 = tf.cast(0.0, tf.float32)
if self.crop_noise:
crop_offset_noise_1 = tf.truncated_normal([3], mean=0.0, stddev=self.crop_offset_noise_sigma) * fit_size_1 * tf.constant([1., 1., 0.], dtype=tf.float32)
crop_offset_noise_2 = tf.truncated_normal([3], mean=0.0, stddev=0.01) * fit_size_2 * tf.constant([1., 1., 0.], dtype=tf.float32) + crop_offset_noise_1
crop_offset_noise_1 = tf.maximum(crop_offset_noise_1, max_coord_1 + 1e-5 - crop_size_best_1 / 2 - keypoint_center_1)
crop_offset_noise_2 = tf.maximum(crop_offset_noise_2, max_coord_2 + 1e-5 - crop_size_best_2 / 2 - keypoint_center_2)
crop_offset_noise_1 = tf.minimum(crop_offset_noise_1, min_coord_1 - 1e-5 + crop_size_best_1 / 2 - keypoint_center_1, name='1_crop_offset_noise')
crop_offset_noise_2 = tf.minimum(crop_offset_noise_2, min_coord_2 - 1e-5 + crop_size_best_2 / 2 - keypoint_center_2, name='2_crop_offset_noise')
crop_center_1 = tf.add(keypoint_center_1, crop_offset_noise_1, name='1_crop_center')
crop_center_2 = tf.add(keypoint_center_2, crop_offset_noise_2, name='2_crop_center')
crop_box_bl_1 = tf.concat([crop_center_1[:2] - crop_size_best_1 / 2, crop_center_1[2:]], 0)
crop_box_bl_2 = tf.concat([crop_center_2[:2] - crop_size_best_2 / 2, crop_center_2[2:]], 0)
crop_box_ur_1 = tf.concat([crop_center_1[:2] + crop_size_best_1 / 2, crop_center_1[2:]], 0)
crop_box_ur_2 = tf.concat([crop_center_2[:2] + crop_size_best_2 / 2, crop_center_2[2:]], 0)
crop_box_1 = tf.stack([crop_box_bl_1, crop_box_ur_1], 0)
crop_box_2 = tf.stack([crop_box_bl_2, crop_box_ur_2], 0)
scale_1 = tf.cast(self.grid_size, tf.float32) / crop_size_best_1
scale_2 = tf.cast(self.grid_size, tf.float32) / crop_size_best_2
crop_box2d_1, _ = self.project_tf(crop_box_1, calibK_1, calibDistCoef=calibDC_1)
crop_box2d_2, _ = self.project_tf(crop_box_2, calibK_2, calibDistCoef=calibDC_2)
min_coord2d_1 = tf.reduce_min(crop_box2d_1, 0)
min_coord2d_2 = tf.reduce_min(crop_box2d_2, 0)
max_coord2d_1 = tf.reduce_max(crop_box2d_1, 0)
max_coord2d_2 = tf.reduce_max(crop_box2d_2, 0)
crop_size_best2d_1 = tf.reduce_max(max_coord2d_1 - min_coord2d_1)
crop_size_best2d_2 = tf.reduce_max(max_coord2d_2 - min_coord2d_2)
crop_center2d_1 = (min_coord2d_1 + max_coord2d_1) / 2
crop_center2d_2 = (min_coord2d_2 + max_coord2d_2) / 2
scale2d_1 = tf.cast(self.crop_size, tf.float32) / crop_size_best2d_1
scale2d_2 = tf.cast(self.crop_size, tf.float32) / crop_size_best2d_2
return crop_center_1, scale_1, crop_center2d_1, scale2d_1, crop_center_2, scale_2, crop_center2d_2, scale2d_2
@staticmethod
def convertToSingleFrameDataWithPrevGT(data_dict):
out_dict = {}
out_dict['scoremap2d'] = data_dict['2_scoremap2d']
if '2_hand_valid' in data_dict:
out_dict['hand_valid'] = data_dict['2_hand_valid']
elif '2_body_valid' in data_dict:
out_dict['body_valid'] = data_dict['2_body_valid']
out_dict['PAF'] = data_dict['2_PAF']
out_dict['PAF_type'] = data_dict['2_PAF_type']
out_dict['mask_crop'] = data_dict['2_mask_crop']
out_dict['image_crop'] = tf.concat([data_dict['2_image_crop'], data_dict['1_image_crop'], data_dict['1_scoremap2d'], data_dict['1_PAF']], axis=3)
return out_dict
@staticmethod
def convertToSingleFrameDataWithPrevOutput(data_dict):
out_dict = {}
out_dict['scoremap2d'] = data_dict['2_scoremap2d']
if '2_hand_valid' in data_dict:
out_dict['hand_valid'] = data_dict['2_hand_valid']
elif '2_body_valid' in data_dict:
out_dict['body_valid'] = data_dict['2_body_valid']
out_dict['PAF'] = data_dict['2_PAF']
out_dict['PAF_type'] = data_dict['2_PAF_type']
out_dict['mask_crop'] = data_dict['2_mask_crop']
out_dict['image_crop'] = tf.concat([data_dict['2_image_crop'], data_dict['1_image_crop']], axis=3)
out_dict['pre_input'] = data_dict['pre_input']
out_dict['temp_data'] = data_dict['temp_data']
return out_dict
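# Reader-usage sketch (hedged; the two-frame data_dict/order_dict come from the
# dataset-specific loaders, with per-frame keys prefixed by '1_' or '2_'):
#   reader = TempConstReader(objtype=1, shuffle=True, batch_size=4, crop_noise=True)
#   reader.register_tensor(data_dict, order_dict)
#   batch = reader.get(withPAF=True)
#   single_frame = TempConstReader.convertToSingleFrameDataWithPrevGT(batch)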
| body2hands-main | visualization/POF/data/TempConstReader.py |
# Run this script with OpenCV2
import cv2
import numpy as np
import os
import json
source_dir = '/media/posefs3b/Users/gines/mpii_mask'
target_dir = '/media/posefs3b/Users/donglaix/mpii_mask'
if __name__ == '__main__':
path_to_db = './MPII_collected.json'
with open(path_to_db) as f:
db_data = json.load(f)
total_num = len(db_data['img_paths'])
for i in range(total_num):
print ('processing image {} / {}'.format(i, total_num))
bbox = np.array(db_data['bbox'][i], dtype=np.float32)
bbox_other = np.array(db_data['bbox_other'][i], dtype=np.float32).reshape(-1, 4)
x = (bbox[0] + bbox[2]) / 2
y = (bbox[1] + bbox[3]) / 2
img_path = db_data['img_paths'][i]
        source_mask = os.path.join(source_dir, img_path)
mask = cv2.imread(source_mask)
mask = (mask[:, :, 0] >= 128).astype(np.uint8) # the stored data are 0 ~ 255, convert to bool
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
belong = []
for cnt in contours:
x1 = np.amin(cnt[:, 0, 0])
x2 = np.amax(cnt[:, 0, 0])
y1 = np.amin(cnt[:, 0, 1])
y2 = np.amax(cnt[:, 0, 1])
if x < x1 or x > x2 or y < y1 or y > y2:
belong.append(False)
continue
# the center is inside this contour, now check the all other bounding boxes
xo = (bbox_other[:, 0] + bbox_other[:, 2]) / 2
yo = (bbox_other[:, 1] + bbox_other[:, 3]) / 2
if ((xo >= x1) * (xo <= x2) * (yo >= y1) * (yo <= y2)).any(): # the center of any other bounding boxes fall inside
belong.append(False)
else:
belong.append(True) # the center of current bbox is in and others are not in.
assert len(belong) == len(contours)
new_mask = np.ones(mask.shape, dtype=np.uint8)
cv2.drawContours(new_mask, [cnt for TF, cnt in zip(belong, contours) if not TF], -1, 0, -1)
cv2.imwrite(os.path.join(target_dir, '{:05d}.png'.format(i)), new_mask)
| body2hands-main | visualization/POF/data/process_MPII_mask.py |
import tensorflow as tf
import numpy as np
import utils.general
class BaseReader(object):
    # BaseReader is a virtual base class to be inherited by other data readers which provide data by calling register_tensor
crop_size_zoom = 1.5
crop_size_zoom_2d = 1.8
crop_size = 368
grid_size = crop_size // 8
sigma = 7
sigma3d = 3
rotate_augmentation = False
blur_augmentation = False
crop_scale_noise_sigma = 0.1
crop_offset_noise_sigma = 0.1
crop_scale_noise_sigma_2d = 0.1
crop_offset_noise_sigma_2d = 0.1
def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
# objtype: 0 = body only, 1 = hand only, 2 = body and hands
assert objtype in (0, 1, 2)
self.objtype = objtype
self.shuffle = shuffle
self.batch_size = batch_size
self.crop_noise = crop_noise
def register_tensor(self, data_dict, order_dict):
l = [len(value) for value in data_dict.values() if len(value) > 0]
assert len(set(l)) == 1 # check the length of all data items to be consistent
print('loading dataset of size {}'.format(l[0]))
self.tensor_dict = {}
for key, value in data_dict.items():
if len(value) > 0:
value = np.array(value)
if key in order_dict:
self.tensor_dict[key] = self.switch_joint_order(value, order_dict[key])
else:
self.tensor_dict[key] = value
def get(self, withPAF=True, PAF_normalize3d=True, read_image=True, imw=1920, imh=1080, bbox2d=0):
# bbox2d: 0: computed from 3D bounding box, 1: compute from openpose
assert bbox2d in (0, 1)
assert type(withPAF) == bool
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['img_dir'] = flow_dict['img_dirs']
data_dict['K'] = flow_dict['K']
# rotate and project to camera frame
if self.objtype == 0:
body2d, body3d = self.project_tf(flow_dict['body'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
body3d = tf.cast(body3d, tf.float32)
body2d = tf.cast(body2d, tf.float32)
data_dict['keypoint_xyz_origin'] = body3d
data_dict['keypoint_uv_origin'] = body2d
data_dict['body_valid'] = flow_dict['body_valid']
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand']) # in world coordinate
hand2d, hand3d = self.project_tf(hand3d, flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef']) # in camera coordinate
hand3d = tf.cast(hand3d, tf.float32)
hand2d = tf.cast(hand2d, tf.float32)
data_dict['keypoint_xyz_origin'] = hand3d
data_dict['keypoint_uv_origin'] = hand2d
data_dict['cond_left'] = cond_left
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
elif self.objtype == 2:
body2d, body3d = self.project_tf(flow_dict['body'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
lhand2d, lhand3d = self.project_tf(flow_dict['left_hand'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
rhand2d, rhand3d = self.project_tf(flow_dict['right_hand'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
data_dict['body_xyz_origin'] = body3d
data_dict['body_uv_origin'] = body2d
data_dict['lhand_xyz_origin'] = lhand3d
data_dict['lhand_uv_origin'] = lhand2d
data_dict['rhand_xyz_origin'] = rhand3d
data_dict['rhand_uv_origin'] = rhand2d
data_dict['body_valid'] = flow_dict['body_valid']
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
# read image
if read_image:
img_file = tf.read_file(flow_dict['img_dirs'])
image = tf.image.decode_image(img_file, channels=3)
image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
image.set_shape((imh, imw, 3))
image = tf.cast(image, tf.float32) / 255.0 - 0.5
data_dict['image'] = image
if 'mask_dirs' in flow_dict:
mask_file = tf.read_file(flow_dict['mask_dirs'])
mask = tf.image.decode_image(mask_file, channels=3)
mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
mask.set_shape((imh, imw, 3))
mask = mask[:, :, 0]
mask = tf.cast(mask, tf.float32)
else:
mask = tf.ones((imh, imw), dtype=tf.float32)
data_dict['mask'] = mask
# calculate crop size
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints = body3d
valid = flow_dict['body_valid']
elif self.objtype == 1:
keypoints = hand3d
valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['hand_valid'] = valid
crop_center3d, scale3d, crop_center2d, scale2d = self.calc_crop_scale(keypoints, flow_dict['K'], flow_dict['distCoef'], valid)
data_dict['crop_center2d'], data_dict['scale2d'] = crop_center2d, scale2d
data_dict['crop_center3d'], data_dict['scale3d'] = crop_center3d, scale3d
# do cropping
if self.objtype == 1:
body2d = hand2d
body3d = hand3d
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0, tf.sin(rotate_angle), tf.cos(rotate_angle), 0, 0, 0, 1]), [3, 3])
body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
body3d = tf.matmul((body3d - crop_center3d), R3) + crop_center3d
data_dict['keypoint_xyz_origin'] = body3d # note that the projection of 3D might not be aligned with 2D any more after rotation
data_dict['keypoint_uv_origin'] = body2d
body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
data_dict['keypoint_uv_local'] = body2d_local
if read_image:
image_crop = self.crop_image(image, crop_center2d, scale2d)
data_dict['image_crop'] = image_crop
mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
data_dict['mask_crop'] = mask_crop[:, :, 0]
if self.rotate_augmentation:
data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True) # coord_hw, imsize_hw
data_dict['scoremap2d'] = scoremap2d
if withPAF:
from utils.PAF import createPAF
data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), PAF_normalize3d, valid_vec=valid)
data_dict['PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
# create 3D gaussian_map
body3d_local = self.update_keypoint3d(body3d, crop_center3d, scale3d)
data_dict['keypoint_xyz_local'] = body3d_local
# scoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d, valid_vec=valid, extra=True)
# data_dict['scoremap3d'] = scoremap3d
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
if withPAF:
data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
elif self.objtype == 2:
bcrop_center3d, bscale3d, bcrop_center2d, bscale2d = self.calc_crop_scale(body3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['body_valid'])
lcrop_center3d, lscale3d, lcrop_center2d, lscale2d = self.calc_crop_scale(lhand3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['left_hand_valid'])
rcrop_center3d, rscale3d, rcrop_center2d, rscale2d = self.calc_crop_scale(rhand3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['right_hand_valid'])
body3d_local = self.update_keypoint3d(body3d, bcrop_center3d, bscale3d)
lhand3d_local = self.update_keypoint3d(lhand3d, lcrop_center3d, lscale3d)
rhand3d_local = self.update_keypoint3d(rhand3d, rcrop_center3d, rscale3d)
bscoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d,
valid_vec=flow_dict['body_valid'], extra=True) # coord_hw, imsize_hw
lscoremap3d = self.create_multiple_gaussian_map_3d(lhand3d_local, self.grid_size, self.sigma3d,
valid_vec=flow_dict['left_hand_valid'], extra=True) # coord_hw, imsize_hw
rscoremap3d = self.create_multiple_gaussian_map_3d(rhand3d_local, self.grid_size, self.sigma3d,
valid_vec=flow_dict['right_hand_valid'], extra=True) # coord_hw, imsize_hw
data_dict['bscoremap3d'] = bscoremap3d
data_dict['lscoremap3d'] = lscoremap3d
data_dict['rscoremap3d'] = rscoremap3d
data_dict['body_xyz_local'] = body3d_local
data_dict['lhand_xyz_local'] = lhand3d_local
data_dict['rhand_xyz_local'] = rhand3d_local
# 2D keypoints and cropped images
if bbox2d == 1:
# crop the 2D bounding box from openpose data
body2d = flow_dict['openpose_body']
lhand2d = flow_dict['openpose_lhand']
rhand2d = flow_dict['openpose_rhand']
bvalid = tf.logical_and(tf.not_equal(body2d[:, 0], 0.0), tf.not_equal(body2d[:, 1], 0.0))
lvalid = tf.logical_and(tf.not_equal(lhand2d[:, 0], 0.0), tf.not_equal(lhand2d[:, 1], 0.0))
rvalid = tf.logical_and(tf.not_equal(rhand2d[:, 0], 0.0), tf.not_equal(rhand2d[:, 1], 0.0))
data_dict['body_valid'] = bvalid
data_dict['left_hand_valid'] = lvalid
data_dict['right_hand_valid'] = rvalid
if 'openpose_foot' in flow_dict:
data_dict['openpose_foot'] = flow_dict['openpose_foot']
bcrop_center2d, bscale2d = self.calc_crop_scale2d(body2d, bvalid)
lcrop_center2d, lscale2d = self.calc_crop_scale2d(lhand2d, lvalid)
rcrop_center2d, rscale2d = self.calc_crop_scale2d(rhand2d, rvalid)
body2d_local = self.update_keypoint2d(body2d, bcrop_center2d, bscale2d)
lhand2d_local = self.update_keypoint2d(lhand2d, lcrop_center2d, lscale2d)
rhand2d_local = self.update_keypoint2d(rhand2d, rcrop_center2d, rscale2d)
data_dict['body_uv_local'] = body2d_local
data_dict['lhand_uv_local'] = lhand2d_local
data_dict['rhand_uv_local'] = rhand2d_local
data_dict['bcrop_center2d'] = bcrop_center2d
data_dict['lcrop_center2d'] = lcrop_center2d
data_dict['rcrop_center2d'] = rcrop_center2d
data_dict['bscale2d'] = bscale2d
data_dict['lscale2d'] = lscale2d
data_dict['rscale2d'] = rscale2d
if read_image:
bimage_crop = self.crop_image(image, bcrop_center2d, bscale2d)
limage_crop = self.crop_image(image, lcrop_center2d, lscale2d)
rimage_crop = self.crop_image(image, rcrop_center2d, rscale2d)
data_dict['bimage_crop'] = bimage_crop
data_dict['limage_crop'] = limage_crop
data_dict['rimage_crop'] = rimage_crop
bscoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
valid_vec=flow_dict['body_valid'], extra=True) # coord_hw, imsize_hw
lscoremap2d = self.create_multiple_gaussian_map(lhand2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
valid_vec=flow_dict['left_hand_valid'], extra=True) # coord_hw, imsize_hw
rscoremap2d = self.create_multiple_gaussian_map(rhand2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
valid_vec=flow_dict['right_hand_valid'], extra=True) # coord_hw, imsize_hw
data_dict['bscoremap2d'] = bscoremap2d
data_dict['lscoremap2d'] = lscoremap2d
data_dict['rscoremap2d'] = rscoremap2d
# for openpose data
for key, val in flow_dict.items():
if 'openpose' not in key:
continue
data_dict[key] = val
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=20,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=20,
enqueue_many=False)
return dict(zip(names, tensors))
def calc_crop_scale(self, keypoints, calibK, calibDC, valid):
if self.objtype == 0:
keypoint_center = (keypoints[8] + keypoints[11]) / 2
center_valid = tf.logical_and(valid[8], valid[11])
elif self.objtype == 1:
keypoint_center = keypoints[12]
center_valid = valid[12]
else: # objtype == 2
assert self.objtype == 2 # conditioned by the shape of input
if keypoints.shape[0] == 18:
keypoint_center = (keypoints[8] + keypoints[11]) / 2
center_valid = tf.logical_and(valid[8], valid[11])
else:
keypoint_center = keypoints[12]
center_valid = valid[12]
valid_idx = tf.where(valid)[:, 0]
valid_keypoints = tf.gather(keypoints, valid_idx, name='valid_keypoints')
min_coord = tf.reduce_min(valid_keypoints, 0, name='min_coord')
max_coord = tf.reduce_max(valid_keypoints, 0, name='max_coord')
keypoint_center = tf.cond(center_valid, lambda: keypoint_center, lambda: (min_coord + max_coord) / 2)
keypoint_center.set_shape((3,))
fit_size = tf.reduce_max(tf.maximum(max_coord - keypoint_center, keypoint_center - min_coord))
crop_scale_noise = tf.cast(1.0, tf.float32)
if self.crop_noise:
crop_scale_noise = tf.exp(tf.truncated_normal([], mean=0.0, stddev=self.crop_scale_noise_sigma))
crop_scale_noise = tf.maximum(crop_scale_noise, tf.reciprocal(self.crop_size_zoom))
crop_size_best = tf.multiply(crop_scale_noise, 2 * fit_size * self.crop_size_zoom, name='crop_size_best')
crop_offset_noise = tf.cast(0.0, tf.float32)
if self.crop_noise:
crop_offset_noise = tf.truncated_normal([3], mean=0.0, stddev=self.crop_offset_noise_sigma) * fit_size * tf.constant([1., 1., 0.], dtype=tf.float32)
crop_offset_noise = tf.maximum(crop_offset_noise, max_coord + 1e-5 - crop_size_best / 2 - keypoint_center)
crop_offset_noise = tf.minimum(crop_offset_noise, min_coord - 1e-5 + crop_size_best / 2 - keypoint_center, name='crop_offset_noise')
crop_center = tf.add(keypoint_center, crop_offset_noise, name='crop_center')
crop_box_bl = tf.concat([crop_center[:2] - crop_size_best / 2, crop_center[2:]], 0)
crop_box_ur = tf.concat([crop_center[:2] + crop_size_best / 2, crop_center[2:]], 0)
crop_box = tf.stack([crop_box_bl, crop_box_ur], 0)
scale = tf.cast(self.grid_size, tf.float32) / crop_size_best
crop_box2d, _ = self.project_tf(crop_box, calibK, calibDistCoef=calibDC)
min_coord2d = tf.reduce_min(crop_box2d, 0)
max_coord2d = tf.reduce_max(crop_box2d, 0)
crop_size_best2d = tf.reduce_max(max_coord2d - min_coord2d)
crop_center2d = (min_coord2d + max_coord2d) / 2
scale2d = tf.cast(self.crop_size, tf.float32) / crop_size_best2d
return crop_center, scale, crop_center2d, scale2d
def calc_crop_scale2d(self, keypoints, valid):
# assert self.objtype == 2
if keypoints.shape[0] == 19 or keypoints.shape[0] == 20:
keypoint_center = (keypoints[8] + keypoints[11]) / 2
center_valid = tf.logical_and(valid[8], valid[11])
else:
keypoint_center = keypoints[12]
center_valid = valid[12]
valid_idx = tf.where(valid)[:, 0]
valid_keypoints = tf.gather(keypoints, valid_idx)
min_coord = tf.reduce_min(valid_keypoints, 0)
max_coord = tf.reduce_max(valid_keypoints, 0)
keypoint_center = tf.cond(center_valid, lambda: keypoint_center, lambda: (min_coord + max_coord) / 2)
keypoint_center.set_shape((2,))
fit_size = tf.reduce_max(tf.maximum(max_coord - keypoint_center, keypoint_center - min_coord))
crop_scale_noise = tf.cast(1.0, tf.float32)
if self.crop_noise:
crop_scale_noise = tf.exp(tf.truncated_normal([], mean=0.0, stddev=self.crop_scale_noise_sigma_2d))
crop_size_best = 2 * fit_size * self.crop_size_zoom_2d * crop_scale_noise
crop_offset_noise = tf.cast(0.0, tf.float32)
if self.crop_noise:
crop_offset_noise = tf.truncated_normal([2], mean=0.0, stddev=self.crop_offset_noise_sigma_2d) * fit_size
crop_offset_noise = tf.maximum(crop_offset_noise, keypoint_center - crop_size_best / 2 - min_coord + 1)
crop_offset_noise = tf.minimum(crop_offset_noise, keypoint_center + crop_size_best / 2 - max_coord - 1)
crop_center = keypoint_center + crop_offset_noise
scale2d = tf.cast(self.crop_size, tf.float32) / crop_size_best
return crop_center, scale2d
def crop_image(self, image, crop_center2d, scale2d):
image_crop = utils.general.crop_image_from_xy(tf.expand_dims(image, 0), crop_center2d[::-1], self.crop_size, scale2d) # crop_center_hw
image_crop = tf.squeeze(image_crop)
return image_crop
def update_keypoint2d(self, keypoint2d, crop_center2d, scale2d):
keypoint_x = (keypoint2d[:, 0] - crop_center2d[0]) * scale2d + self.crop_size // 2
keypoint_y = (keypoint2d[:, 1] - crop_center2d[1]) * scale2d + self.crop_size // 2
keypoint2d_local = tf.stack([keypoint_x, keypoint_y], 1)
return keypoint2d_local
def update_keypoint3d(self, keypoint3d, crop_center3d, scale3d):
keypoint_x = (keypoint3d[:, 0] - crop_center3d[0]) * scale3d + self.grid_size // 2
keypoint_y = (keypoint3d[:, 1] - crop_center3d[1]) * scale3d + self.grid_size // 2
keypoint_z = (keypoint3d[:, 2] - crop_center3d[2]) * scale3d + self.grid_size // 2
keypoint3d_local = tf.stack([keypoint_x, keypoint_y, keypoint_z], 1)
return keypoint3d_local
@staticmethod
def project_tf(joint3d, calibK, calibR=None, calibt=None, calibDistCoef=None):
""" This function projects the 3D hand to 2D using camera parameters
"""
with tf.name_scope('project_tf'):
x = joint3d
if calibR is not None:
x = tf.matmul(joint3d, calibR, transpose_b=True)
if calibt is not None:
x = x + calibt
xi = tf.divide(x[:, 0], x[:, 2])
yi = tf.divide(x[:, 1], x[:, 2])
if calibDistCoef is not None:
X2 = xi * xi
Y2 = yi * yi
XY = X2 * Y2
R2 = X2 + Y2
R4 = R2 * R2
R6 = R4 * R2
dc = calibDistCoef
radial = 1.0 + dc[0] * R2 + dc[1] * R4 + dc[4] * R6
tan_x = 2.0 * dc[2] * XY + dc[3] * (R2 + 2.0 * X2)
tan_y = 2.0 * dc[3] * XY + dc[2] * (R2 + 2.0 * Y2)
xi = radial * xi + tan_x
yi = radial * yi + tan_y
xp = tf.transpose(tf.stack([xi, yi], axis=0))
pt = tf.matmul(xp, calibK[:2, :2], transpose_b=True) + calibK[:2, 2]
return pt, x
@staticmethod
def switch_joint_order(keypoint, order):
# reorder the joints to the order used in our network
assert len(order.shape) == 1, 'order must be 1-dim'
# axis 0: sample, axis 1: keypoint order, axis 2: xyz
return keypoint[:, order, ...]
@staticmethod
def create_multiple_gaussian_map(coords_wh, output_size, sigma, valid_vec=None, extra=False):
""" Creates a map of size (output_shape[0], output_shape[1]) at (center[0], center[1])
with variance sigma for multiple coordinates."""
with tf.name_scope('create_multiple_gaussian_map'):
sigma = tf.cast(sigma, tf.float32)
assert len(output_size) == 2
s = coords_wh.get_shape().as_list()
coords_wh = tf.cast(coords_wh, tf.int32)
if valid_vec is not None:
valid_vec = tf.cast(valid_vec, tf.float32)
valid_vec = tf.squeeze(valid_vec)
cond_val = tf.greater(valid_vec, 0.5)
else:
cond_val = tf.ones_like(coords_wh[:, 0], dtype=tf.float32)
cond_val = tf.greater(cond_val, 0.5)
cond_1_in = tf.logical_and(tf.less(coords_wh[:, 0], output_size[0] - 1), tf.greater(coords_wh[:, 0], 0))
cond_2_in = tf.logical_and(tf.less(coords_wh[:, 1], output_size[1] - 1), tf.greater(coords_wh[:, 1], 0))
cond_in = tf.logical_and(cond_1_in, cond_2_in)
cond = tf.logical_and(cond_val, cond_in)
coords_wh = tf.cast(coords_wh, tf.float32)
# create meshgrid
x_range = tf.expand_dims(tf.range(output_size[0]), 1)
y_range = tf.expand_dims(tf.range(output_size[1]), 0)
X = tf.cast(tf.tile(x_range, [1, output_size[1]]), tf.float32)
Y = tf.cast(tf.tile(y_range, [output_size[0], 1]), tf.float32)
X.set_shape((output_size[0], output_size[1]))
Y.set_shape((output_size[0], output_size[1]))
X = tf.expand_dims(X, -1)
Y = tf.expand_dims(Y, -1)
X_b = tf.tile(X, [1, 1, s[0]])
Y_b = tf.tile(Y, [1, 1, s[0]])
X_b -= coords_wh[:, 0]
Y_b -= coords_wh[:, 1]
dist = tf.square(X_b) + tf.square(Y_b)
scoremap = tf.exp(-dist / (2 * tf.square(sigma))) * tf.cast(cond, tf.float32)
if extra:
negative = 1 - tf.reduce_sum(scoremap, axis=2, keep_dims=True)
negative = tf.minimum(tf.maximum(negative, 0.0), 1.0)
scoremap = tf.concat([scoremap, negative], axis=2)
return scoremap
@staticmethod
def create_multiple_gaussian_map_3d(keypoint_3d, output_size, sigma3d, valid_vec=None, extra=False):
""" Creates a 3D heatmap for the hand skeleton
"""
with tf.name_scope('create_multiple_gaussian_map_3d'):
if valid_vec is not None:
valid_vec = tf.cast(valid_vec, tf.float32)
valid_vec = tf.squeeze(valid_vec)
cond_val = tf.greater(valid_vec, 0.5)
else:
cond_val = tf.ones_like(keypoint_3d[:, 0], dtype=tf.float32)
cond_val = tf.greater(cond_val, 0.5)
sigma3d = tf.cast(sigma3d, tf.float32)
# reverse the order of axis: tensorflow uses NDHWC
reverse = keypoint_3d[:, ::-1]
z_range = tf.expand_dims(tf.expand_dims(tf.range(output_size, dtype=tf.float32), 1), 2)
y_range = tf.expand_dims(tf.expand_dims(tf.range(output_size, dtype=tf.float32), 0), 2)
x_range = tf.expand_dims(tf.expand_dims(tf.range(output_size, dtype=tf.float32), 0), 1)
Z = tf.tile(z_range, [1, output_size, output_size])
Y = tf.tile(y_range, [output_size, 1, output_size])
X = tf.tile(x_range, [output_size, output_size, 1])
Z = tf.expand_dims(Z, -1)
Y = tf.expand_dims(Y, -1)
X = tf.expand_dims(X, -1)
s = reverse.get_shape().as_list()
Z_b = tf.tile(Z, [1, 1, 1, s[0]])
Y_b = tf.tile(Y, [1, 1, 1, s[0]])
X_b = tf.tile(X, [1, 1, 1, s[0]])
Z_b -= reverse[:, 0]
Y_b -= reverse[:, 1]
X_b -= reverse[:, 2]
dist = tf.square(X_b) + tf.square(Y_b) + tf.square(Z_b)
scoremap_3d = tf.exp(-dist / (2 * tf.square(sigma3d))) * tf.cast(cond_val, tf.float32)
if extra:
negative = 1 - tf.reduce_sum(scoremap_3d, axis=3, keep_dims=True)
negative = tf.minimum(tf.maximum(negative, 0.0), 1.0)
scoremap_3d = tf.concat([scoremap_3d, negative], axis=3)
return scoremap_3d
def start_from(self, idx):
for key, value in self.tensor_dict.items():
if value.size > 0:
self.tensor_dict[key] = value[idx:]
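

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# reader): BaseReader is meant to be subclassed, so the snippet below only
# exercises the static projection helper with a made-up identity-rotation
# camera. The focal length, principal point and 3D points are placeholders,
# not real calibration data; a point on the optical axis should land on the
# principal point (cx, cy).
if __name__ == '__main__':
    K_demo = np.array([[500., 0., 320.],
                       [0., 500., 240.],
                       [0., 0., 1.]], dtype=np.float32)
    joints_demo = np.array([[0.0, 0.0, 2.0],      # on the optical axis
                            [0.1, -0.1, 2.0]], dtype=np.float32)
    uv_op, xyz_op = BaseReader.project_tf(tf.constant(joints_demo), tf.constant(K_demo))
    with tf.Session() as sess:
        uv, xyz = sess.run([uv_op, xyz_op])
    print('projected uv:', uv)        # first row should be close to (320, 240)
    print('camera-frame xyz:', xyz)   # unchanged here because R and t are omitted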
| body2hands-main | visualization/POF/data/BaseReader.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
import h5py
from utils.keypoint_conversion import human36m_to_main, mpi3d_to_main, SMPL_to_main
import pickle
import os
class HumanReader(BaseReader):
def __init__(self, name='Human3.6M', mode='training', objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(HumanReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert objtype == 0 # this dataset reader only supports human body data
assert mode in ('training', 'evaluation')
self.mode = mode
assert name in ('Human3.6M', 'MPI_INF_3DHP', 'UP', 'SURREAL')
self.name = name
if name == 'Human3.6M':
self.image_root = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/images/'
if self.mode == 'training':
image_list_file = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/train_images.txt'
path_to_db = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/train.h5'
else:
image_list_file = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/valid_images.txt'
path_to_db = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/valid.h5'
path_to_calib = '/media/posefs3b/Users/donglaix/h36m/cameras.h5'
with open(image_list_file) as f:
img_list = [_.strip() for _ in f]
fannot = h5py.File(path_to_db, 'r')
annot3d = fannot['S'][:]
annot2d = fannot['part'][:]
fannot.close()
fcalib = h5py.File(path_to_calib, 'r')
calib_data = {}
map_camera = {'54138969': 'camera1', '55011271': 'camera2', '58860488': 'camera3', '60457274': 'camera4'}
for pid in fcalib.keys():
if pid == '3dtest':
continue
person_cam_data = {}
for camera in map_camera.values():
cam_data = {_: fcalib[pid][camera][_][:] for _ in fcalib[pid][camera].keys()}
person_cam_data[camera] = cam_data
calib_data[pid] = person_cam_data
fcalib.close()
human3d = {'body': [], 'left_hand': [], 'right_hand': [], 'gt_body': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
img_dirs = []
for img_idx, img_name in enumerate(img_list):
img_dir = os.path.join(self.image_root, img_name)
body2d = annot2d[img_idx].astype(np.float32)
                if (mode == 'training' and (body2d >= 1000).any()) or (body2d <= 0).any():
continue
body3d = annot3d[img_idx].astype(np.float32)
human3d['gt_body'].append(body3d)
                body3d = np.concatenate((body3d, np.ones((1, 3), dtype=np.float32)), axis=0)  # append a dummy joint so the human36m_to_main reordering has a slot for joints H36M does not provide
person = img_name.split('_')[0].replace('S', 'subject')
camera = img_name.split('.')[1].split('_')[0]
camera_name = map_camera[camera]
cam_param = calib_data[person][camera_name]
K = np.eye(3)
K[0, 0] = cam_param['f'][0, 0]
K[1, 1] = cam_param['f'][1, 0]
K[0, 2] = cam_param['c'][0, 0]
K[1, 2] = cam_param['c'][1, 0]
dc = np.zeros((5,))
dc[:3] = cam_param['k'][:, 0]
dc[3:] = cam_param['p'][:, 0]
human3d['body'].append(body3d)
img_dirs.append(img_dir)
calib['K'].append(K.astype(np.float32))
calib['R'].append(np.eye(3, dtype=np.float32))
calib['t'].append(np.zeros((3,), dtype=np.float32))
calib['distCoef'].append(dc.astype(np.float32))
self.num_samples = len(img_dirs)
human3d.update(calib)
human3d['img_dirs'] = img_dirs
body_valid = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
human3d['body_valid'] = np.tile(body_valid, (self.num_samples, 1))
order_dict = human36m_to_main
elif name == 'MPI_INF_3DHP':
self.image_root = '/media/posefs1b/Users/donglaix/mpi_inf_3dhp/'
assert mode == 'training'
self.path_to_db = self.image_root + 'mpi3d.pickle'
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f)
img_dirs, body, K = db_data
self.num_samples = img_dirs.shape[0]
img_dirs = np.core.defchararray.add(np.array([self.image_root]), img_dirs)
body = body.astype(np.float32)
body = np.concatenate([body, np.ones((self.num_samples, 1, 3))], axis=1).astype(np.float32)
K = K.astype(np.float32)
body_valid = np.ones((self.num_samples, 19), dtype=bool)
body_valid[:, 0] = False
body_valid[:, 14:18] = False
R = np.tile(np.expand_dims(np.eye(3, dtype=np.float32), axis=0), (self.num_samples, 1, 1))
t = np.tile(np.zeros((1, 3), dtype=np.float32), (self.num_samples, 1))
dc = np.tile(np.zeros((1, 5), dtype=np.float32), (self.num_samples, 1))
human3d = {'img_dirs': img_dirs, 'body': body, 'K': K, 'body_valid': body_valid, 'R': R, 't': t, 'distCoef': dc}
order_dict = mpi3d_to_main
elif name == 'UP':
self.image_root = '/media/posefs3b/Users/donglaix/UP/'
            assert mode == 'training'
self.path_to_db = './data/UP_collected.pkl'
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f, encoding='latin')
human3d = {'body': [], 'img_dirs': [], 'body_valid': [], 'mask_dirs': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
for data in db_data:
calib['K'].append(data['K'].astype(np.float32))
calib['R'].append(data['R'].astype(np.float32))
calib['t'].append(data['t'].astype(np.float32))
calib['distCoef'].append(np.zeros([5], dtype=np.float32))
human3d['body'].append(data['J'].astype(np.float32))
body_valid = np.ones([19], dtype=bool)
# body_valid[0] = False
# body_valid[14:] = False
human3d['body_valid'].append(body_valid)
human3d['img_dirs'].append(os.path.join(self.image_root, data['img_dir']))
human3d['mask_dirs'].append(os.path.join(self.image_root, data['mask']))
human3d.update(calib)
order_dict = SMPL_to_main
self.num_samples = len(human3d['img_dirs'])
elif name == 'SURREAL':
self.image_root = '/media/posefs3b/Users/donglaix/surreal/surreal/SURREAL/'
            assert mode == 'training'
self.path_to_db = os.path.join(self.image_root, 'surreal_collected.pkl')
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f, encoding='latin')
human3d = {'body': [], 'img_dirs': [], 'body_valid': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
for data in db_data:
calib['K'].append(data['K'].astype(np.float32))
calib['R'].append(data['R'].astype(np.float32))
calib['t'].append(np.ravel(data['t']).astype(np.float32))
calib['distCoef'].append(np.zeros([5], dtype=np.float32))
human3d['body'].append(data['J'].astype(np.float32))
body_valid = np.ones([19], dtype=bool)
body_valid[0] = False
body_valid[14:] = False
human3d['body_valid'].append(body_valid)
human3d['img_dirs'].append(os.path.join(self.image_root, data['img_dir']))
human3d.update(calib)
order_dict = SMPL_to_main
self.num_samples = len(human3d['img_dirs'])
else:
raise NotImplementedError
self.register_tensor(human3d, order_dict)
def get(self):
if self.name == 'Human3.6M':
d = super(HumanReader, self).get(withPAF=True, imw=1000, imh=1002)
elif self.name == 'MPI_INF_3DHP':
d = super(HumanReader, self).get(withPAF=True, imw=2048, imh=2048)
elif self.name == 'UP':
d = super(HumanReader, self).get(withPAF=True, imw=1920, imh=1080)
elif self.name == 'SURREAL':
d = super(HumanReader, self).get(withPAF=True, imw=320, imh=240)
else:
raise NotImplementedError
return d
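

# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration): the subject / camera parsing done in
# __init__ for Human3.6M, factored out so the string slicing is easier to
# follow. The example file name below is an assumption about the annotation
# naming convention, not taken from the dataset itself.
def _parse_h36m_image_name(img_name):
    """Return (person, camera_id) as used to index the calibration dict."""
    person = img_name.split('_')[0].replace('S', 'subject')   # e.g. 'S1' -> 'subject1'
    camera_id = img_name.split('.')[1].split('_')[0]          # e.g. '54138969'
    return person, camera_id
# _parse_h36m_image_name('S1_Directions_1.54138969_000001.jpg') -> ('subject1', '54138969')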
if __name__ == '__main__':
d = HumanReader(mode='evaluation', name='Human3.6M', shuffle=False, objtype=0, crop_noise=False)
d.start_from(77095)
# d.crop_size_zoom = 1.5
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_all_PAF
# from utils.vis_heatmap3d import vis_heatmap3d
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
bimage_crop, img_dir, body2d, body_valid, body2d_heatmap, body3d, PAF, mask_crop = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['body_valid'], data_dict['scoremap2d'],
data_dict['keypoint_xyz_local'], data_dict['PAF'], data_dict['mask_crop']])
image_name = img_dir[0].decode()
print(image_name)
image_v = ((bimage_crop[0] + 0.5) * 255).astype(np.uint8)
body2d = np.squeeze(body2d)
body_valid = np.squeeze(body_valid)
body2d_heatmap = np.squeeze(body2d_heatmap)
body3d = np.squeeze(body3d)
mask_crop = np.squeeze(mask_crop).astype(bool)
PAF = np.squeeze(PAF)
body2d_detected = utils.general.detect_keypoints2d(body2d_heatmap)[:19, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(161)
plt.imshow(image_v)
utils.general.plot2d(ax1, body2d, valid_idx=body_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, body2d_detected, valid_idx=body_valid, color=np.array([0.0, 0.0, 1.0]))
        for j in range(19):
            plt.text(int(body2d[j, 0]), int(body2d[j, 1]), str(j))
ax2 = fig.add_subplot(162, projection='3d')
utils.general.plot3d(ax2, body3d, valid_idx=body_valid, color=np.array([1.0, 0.0, 0.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
ax2.set_xlim(0, 47)
ax2.set_ylim(0, 47)
ax2.set_zlim(0, 47)
ax3 = fig.add_subplot(163)
plt.imshow(mask_crop)
ax4 = fig.add_subplot(164)
mask_3c = np.stack([mask_crop] * 3, axis=2)
masked = mask_3c * image_v
plt.imshow(masked)
xy, z = plot_all_PAF(PAF, 3)
ax5 = fig.add_subplot(165)
ax5.imshow(xy)
ax6 = fig.add_subplot(166)
ax6.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/HumanReader.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class Base2DReader(BaseReader):
    # inherits from BaseReader, but computes the crop directly from the 2D keypoints instead of projecting a 3D bounding box
def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(Base2DReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
assert type(withPAF) == bool
assert self.objtype in (0, 1)
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['img_dir'] = flow_dict['img_dirs']
PAF_given = False
if self.objtype == 0:
body2d = flow_dict['body']
data_dict['body_valid'] = flow_dict['body_valid']
data_dict['keypoint_uv_origin'] = body2d
if 'body_3d' in flow_dict:
data_dict['keypoint_xyz_origin'] = flow_dict['body_3d']
data_dict['keypoint_xyz_local'] = flow_dict['body_3d']
PAF_given = True
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand2d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand']) # in world coordinate
hand2d = tf.cast(hand2d, tf.float32)
data_dict['keypoint_uv_origin'] = hand2d
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
if 'left_hand_3d' in flow_dict and 'right_hand_3d' in flow_dict:
hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand_3d'], lambda: flow_dict['right_hand_3d'])
data_dict['keypoint_xyz_origin'] = hand3d
data_dict['keypoint_xyz_local'] = hand3d
PAF_given = True
# read image
if read_image:
img_file = tf.read_file(flow_dict['img_dirs'])
image = tf.image.decode_image(img_file, channels=3)
image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
image.set_shape((imh, imw, 3))
image = tf.cast(image, tf.float32) / 255.0 - 0.5
data_dict['image'] = image
if 'mask_dirs' in flow_dict:
mask_file = tf.read_file(flow_dict['mask_dirs'])
mask = tf.image.decode_image(mask_file, channels=3)
mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
mask.set_shape((imh, imw, 3))
mask = mask[:, :, 0]
mask = tf.cast(mask, tf.float32)
else:
mask = tf.ones((imh, imw), dtype=tf.float32)
if 'other_bbox' in flow_dict:
ob = flow_dict['other_bbox']
Xindmap = tf.tile(tf.expand_dims(tf.range(imw, dtype=tf.int32), 0), [imh, 1])
Xindmap = tf.tile(tf.expand_dims(Xindmap, 2), [1, 1, 20])
Yindmap = tf.tile(tf.expand_dims(tf.range(imh, dtype=tf.int32), 1), [1, imw])
Yindmap = tf.tile(tf.expand_dims(Yindmap, 2), [1, 1, 20])
x_out = tf.logical_or(tf.less(Xindmap, ob[:, 0]), tf.greater_equal(Xindmap, ob[:, 2]))
y_out = tf.logical_or(tf.less(Yindmap, ob[:, 1]), tf.greater_equal(Yindmap, ob[:, 3]))
out = tf.cast(tf.logical_or(x_out, y_out), tf.float32)
out = tf.reduce_min(out, axis=2)
mask = tf.minimum(mask, out)
data_dict['mask'] = mask
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints = body2d
valid = flow_dict['body_valid']
elif self.objtype == 1:
keypoints = hand2d
body2d = hand2d
valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['hand_valid'] = valid
if PAF_given:
body3d = hand3d
crop_center2d, scale2d = self.calc_crop_scale2d(keypoints, valid)
data_dict['crop_center2d'] = crop_center2d
data_dict['scale2d'] = scale2d
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
data_dict['keypoint_uv_origin'] = body2d
if PAF_given:
R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0., tf.sin(rotate_angle), tf.cos(rotate_angle), 0., 0., 0., 1.]), [3, 3])
body3d = tf.matmul(body3d, R3)
data_dict['keypoint_xyz_origin'] = body3d
data_dict['keypoint_xyz_local'] = body3d
body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
data_dict['keypoint_uv_local'] = body2d_local
if read_image:
image_crop = self.crop_image(image, crop_center2d, scale2d)
data_dict['image_crop'] = image_crop
mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
data_dict['mask_crop'] = mask_crop[:, :, 0]
if self.rotate_augmentation:
data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True) # coord_hw, imsize_hw
data_dict['scoremap2d'] = scoremap2d
if withPAF:
from utils.PAF import createPAF
num_keypoint = body2d_local.get_shape().as_list()[0]
zeros = tf.zeros([num_keypoint, 1], dtype=tf.float32)
if PAF_given:
data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), normalize_3d=True, valid_vec=valid)
data_dict['PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
else:
data_dict['PAF'] = createPAF(body2d_local, tf.concat([body2d, zeros], axis=1), self.objtype, (self.crop_size, self.crop_size), normalize_3d=False, valid_vec=valid)
data_dict['PAF_type'] = tf.zeros([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
if withPAF:
data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=50,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
enqueue_many=False)
return dict(zip(names, tensors))
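

# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration): a NumPy rendering of the
# 'other_bbox' masking in get() above, to make the index-map logic easier to
# follow. Boxes are rows of (x0, y0, x1, y1); a pixel keeps its mask value
# only if it lies outside every box, which is what the reduce_min over the
# box axis computes in the TensorFlow version.
def _mask_out_boxes_np(mask, boxes):
    imh, imw = mask.shape
    X, Y = np.meshgrid(np.arange(imw), np.arange(imh))   # per-pixel column / row indices
    keep = np.ones_like(mask)
    for x0, y0, x1, y1 in boxes:
        inside = (X >= x0) & (X < x1) & (Y >= y0) & (Y < y1)
        keep = np.minimum(keep, (~inside).astype(mask.dtype))
    return np.minimum(mask, keep)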
| body2hands-main | visualization/POF/data/Base2DReader.py |
import tensorflow as tf
from data.TempConstReader import TempConstReader
import numpy as np
import numpy.linalg as nl
import pickle
from utils.keypoint_conversion import a4_to_main as order_dict
import json
import os
class DomeReaderTempConst(TempConstReader):
def __init__(self, mode='training', objtype=0, shuffle=False, batch_size=1, crop_noise=False, full_only=True, head_top=True):
super(DomeReaderTempConst, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
assert objtype in (0, 1)
self.image_root = '/media/posefs0c/panopticdb/'
# read data from a4plus with consecutive frames
path_to_db = './data/a4plus_collected.pkl'
path_to_calib = './data/camera_data_a4.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
with open('./data/a4_hands_annotated.txt') as f:
hand_annots = {}
for line in f:
strs = line.split()
hand_annots[tuple(strs[:3])] = eval(strs[3])
if mode == 'training':
mode_data = db_data['training_data']
else:
mode_data = db_data['testing_data']
with open(path_to_calib, 'rb') as f:
calib_data = pickle.load(f)
human3d = {'1_body': [], '1_left_hand': [], '1_right_hand': [], '1_body_valid': [], 'left_hand_valid': [], 'right_hand_valid': [],
'2_body': [], '2_left_hand': [], '2_right_hand': [], '2_body_valid': []}
calib = {'1_K': [], '1_R': [], '1_t': [], '1_distCoef': [],
'2_K': [], '2_R': [], '2_t': [], '2_distCoef': []}
img_dirs_1 = []
img_dirs_2 = []
map_next = {}
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
frame = int(frame_str)
            if frame % 5:  # a4plus is sampled 1 frame in every 5; frames ending in 0 or 5 are the first of a pair, those ending in 1 or 6 are the second
continue
map_next[(seqName, frame_str)] = None
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
frame = int(frame_str)
if frame % 5 != 1:
continue
prev_key = (seqName, '{:08d}'.format(frame - 1))
if prev_key not in map_next:
continue
map_next[prev_key] = data3d
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
frame = int(frame_str)
if frame % 5:
continue
# check for manual annotation, remove the annotation if the hand is annotated as incorrect.
if 'left_hand' in data3d and not hand_annots[(seqName, frame_str, 'left')]:
del data3d['left_hand']
if 'right_hand' in data3d and not hand_annots[(seqName, frame_str, 'righ')]:
del data3d['right_hand']
next_data = map_next[(seqName, frame_str)]
if next_data is None:
continue
if objtype == 0:
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
head_top_kp = body3d[0] + 1.5 * d * n
if head_top:
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d_1 = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
body3d = np.array(next_data['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
head_top_kp = body3d[0] + 1.5 * d * n
if head_top:
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d_2 = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
elif objtype == 1:
# left hand or right hand must be valid
if 'left_hand' in data3d:
left_hand3d_1 = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'left_hand' in next_data:
left_hand3d_2 = np.array(next_data['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in data3d:
right_hand3d_1 = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in next_data:
right_hand3d_2 = np.array(next_data['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if ('left_hand' not in data3d or 'left_hand' not in next_data) and ('right_hand' not in data3d or 'right_hand' not in next_data):
# one hand must be valid for both frames
continue
if objtype == 0:
for camIdx, camDict in data3d['body']['2D'].items():
if camIdx not in next_data['body']['2D']: # no data from this camera in the next frame
continue
if full_only:
cond_inside_1 = all(camDict['insideImg'])
                        cond_inside_2 = all(next_data['body']['2D'][camIdx]['insideImg'])
                    else:  # if not full_only, keep the view as long as more than 10% of the keypoints lie inside the image
inside_ratio_1 = np.float(np.sum(camDict['insideImg'])) / len(camDict['insideImg'])
inside_ratio_2 = np.float(np.sum(next_data['body']['2D'][camIdx]['insideImg'])) / len(next_data['body']['2D'][camIdx]['insideImg'])
cond_inside_1 = (inside_ratio_1 > 0.1)
cond_inside_2 = (inside_ratio_2 > 0.1)
if any(camDict['occluded']) or any(next_data['body']['2D'][camIdx]['occluded']) or not cond_inside_1 or not cond_inside_2:
continue
human3d['1_body'].append(body3d_1)
human3d['2_body'].append(body3d_2)
human3d['1_body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
human3d['2_body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
calib['1_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['2_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['1_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['2_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['1_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['2_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['1_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
calib['2_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs_1.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
img_dirs_2.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, next_data['frame_str'], camIdx, next_data['frame_str']))
elif objtype == 1:
if 'left_hand' in data3d and 'left_hand' in next_data:
for camIdx, camDict in data3d['left_hand']['2D'].items():
if camIdx not in next_data['left_hand']['2D']:
continue
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']) or data3d['left_hand']['2D'][camIdx]['overlap']:
continue
if any(next_data['left_hand']['2D'][camIdx]['occluded']) or not all(next_data['left_hand']['2D'][camIdx]['insideImg']) or next_data['left_hand']['2D'][camIdx]['overlap']:
continue
human3d['1_left_hand'].append(left_hand3d_1)
human3d['2_left_hand'].append(left_hand3d_2)
human3d['1_right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['2_right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
calib['1_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['2_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['1_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['2_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['1_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['2_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['1_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
calib['2_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs_1.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
img_dirs_2.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, next_data['frame_str'], camIdx, next_data['frame_str']))
if 'right_hand' in data3d and 'right_hand' in next_data:
for camIdx, camDict in data3d['right_hand']['2D'].items():
if camIdx not in next_data['right_hand']['2D']:
continue
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']) or data3d['right_hand']['2D'][camIdx]['overlap']:
continue
if any(next_data['right_hand']['2D'][camIdx]['occluded']) or not all(next_data['right_hand']['2D'][camIdx]['insideImg']) or next_data['right_hand']['2D'][camIdx]['overlap']:
continue
human3d['1_right_hand'].append(right_hand3d_1)
human3d['2_right_hand'].append(right_hand3d_2)
human3d['1_left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['2_left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['1_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['2_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['1_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['2_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['1_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['2_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['1_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
calib['2_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs_1.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
img_dirs_2.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, next_data['frame_str'], camIdx, next_data['frame_str']))
human3d.update(calib)
human3d['1_img_dirs'] = img_dirs_1
human3d['2_img_dirs'] = img_dirs_2
self.register_tensor(human3d, order_dict)
self.num_samples = len(self.tensor_dict['1_img_dirs'])
def get(self, withPAF=True):
d = super(DomeReaderTempConst, self).get(withPAF=withPAF)
return d
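

# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration): the head-top construction used in
# __init__, written as a standalone helper. It takes the normal of the plane
# spanned by the nose-to-ear vectors, measures how far the nose sits from the
# neck along that normal, and extrapolates 1.5x that distance from the neck.
# The joint indices (0 = neck, 1 = nose, 16 = left ear, 18 = right ear) are
# assumed to follow the dome body ordering used by the reader above.
def _estimate_head_top(body3d):
    nose_lear = body3d[16] - body3d[1]
    nose_rear = body3d[18] - body3d[1]
    neck_nose = body3d[1] - body3d[0]
    n = np.cross(nose_lear, nose_rear)
    n = n / nl.norm(n)
    d = np.dot(neck_nose, n)            # signed distance of the nose from the neck along n
    return body3d[0] + 1.5 * d * n      # extrapolated head-top keypoint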
if __name__ == '__main__':
# d = DomeReaderTempConst(mode='training', shuffle=True, objtype=0, crop_noise=True, full_only=False)
# data_dict = d.get()
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# sess.run(tf.global_variables_initializer())
# tf.train.start_queue_runners(sess=sess)
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# import utils.general
# from utils.vis_heatmap3d import vis_heatmap3d
# from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
# validation_images = []
# for i in range(d.num_samples):
# print('{}/{}'.format(i + 1, d.num_samples))
# values = \
# sess.run([data_dict['1_image_crop'], data_dict['1_img_dir'], data_dict['1_keypoint_uv_local'], data_dict['1_body_valid'], data_dict['1_scoremap2d'],
# data_dict['1_PAF'], data_dict['1_mask_crop'], data_dict['1_keypoint_xyz_local'],
# data_dict['2_image_crop'], data_dict['2_img_dir'], data_dict['2_keypoint_uv_local'], data_dict['2_body_valid'], data_dict['2_scoremap2d'],
# data_dict['2_PAF'], data_dict['2_mask_crop'], data_dict['2_keypoint_xyz_local']
# ])
# image_crop_1, img_dir_1, body2d_1, body_valid_1, body2d_heatmap_1, PAF_1, mask_crop_1, body3d_1, \
# image_crop_2, img_dir_2, body2d_2, body_valid_2, body2d_heatmap_2, PAF_2, mask_crop_2, body3d_2 = [np.squeeze(_) for _ in values]
# image_name_1 = img_dir_1.item().decode()
# image_name_2 = img_dir_2.item().decode()
# image_v_1 = ((image_crop_1 + 0.5) * 255).astype(np.uint8)
# image_v_2 = ((image_crop_2 + 0.5) * 255).astype(np.uint8)
# body2d_detected_1, bscore_1 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_1, PAF_1)
# body2d_detected_2, bscore_2 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_2, PAF_2)
# # body2d_detected = utils.general.detect_keypoints2d(body2d_heatmap)[:20, :]
# body3d_detected_1, _ = PAF_to_3D(body2d_detected_1, PAF_1, objtype=0)
# body3d_detected_2, _ = PAF_to_3D(body2d_detected_2, PAF_2, objtype=0)
# # body3d_detected = body3d_detected[:21, :]
# fig = plt.figure(1)
# ax1 = fig.add_subplot(241)
# plt.imshow(image_v_1)
# utils.general.plot2d(ax1, body2d_1, type_str='body', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
# utils.general.plot2d(ax1, body2d_detected_1, type_str='body', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
# ax2 = fig.add_subplot(242)
# plt.imshow(image_v_2)
# utils.general.plot2d(ax2, body2d_2, type_str='body', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
# utils.general.plot2d(ax2, body2d_detected_2, type_str='body', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
# ax3 = fig.add_subplot(243, projection='3d')
# utils.general.plot3d(ax3, body3d_detected_1, type_str='body', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
# utils.general.plot3d(ax3, body3d_detected_2, type_str='body', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
# ax3.set_xlabel('X Label')
# ax3.set_ylabel('Y Label')
# ax3.set_zlabel('Z Label')
# plt.axis('equal')
# ax4 = fig.add_subplot(244, projection='3d')
# utils.general.plot3d(ax4, body3d_1, type_str='body', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
# utils.general.plot3d(ax4, body3d_2, type_str='body', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
# ax4.set_xlabel('X Label')
# ax4.set_ylabel('Y Label')
# ax4.set_zlabel('Z Label')
# plt.axis('equal')
# xy, z = plot_all_PAF(PAF_1, 3)
# ax5 = fig.add_subplot(245)
# ax5.imshow(xy)
# ax6 = fig.add_subplot(246)
# ax6.imshow(z)
# xy, z = plot_all_PAF(PAF_2, 3)
# ax7 = fig.add_subplot(247)
# ax7.imshow(xy)
# ax8 = fig.add_subplot(248)
# ax8.imshow(z)
# plt.show()
d = DomeReaderTempConst(mode='training', shuffle=True, objtype=1, crop_noise=True, full_only=False)
d.crop_scale_noise_sigma = 0.4
d.crop_offset_noise_sigma = 0.2
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['1_image_crop'], data_dict['1_img_dir'], data_dict['1_keypoint_uv_local'], data_dict['1_hand_valid'], data_dict['1_scoremap2d'],
data_dict['1_PAF'], data_dict['1_mask_crop'], data_dict['1_keypoint_xyz_local'],
data_dict['2_image_crop'], data_dict['2_img_dir'], data_dict['2_keypoint_uv_local'], data_dict['2_hand_valid'], data_dict['2_scoremap2d'],
data_dict['2_PAF'], data_dict['2_mask_crop'], data_dict['2_keypoint_xyz_local']
])
image_crop_1, img_dir_1, body2d_1, body_valid_1, body2d_heatmap_1, PAF_1, mask_crop_1, body3d_1, \
image_crop_2, img_dir_2, body2d_2, body_valid_2, body2d_heatmap_2, PAF_2, mask_crop_2, body3d_2 = [np.squeeze(_) for _ in values]
image_name_1 = img_dir_1.item().decode()
image_name_2 = img_dir_2.item().decode()
image_v_1 = ((image_crop_1 + 0.5) * 255).astype(np.uint8)
image_v_2 = ((image_crop_2 + 0.5) * 255).astype(np.uint8)
body2d_detected_1, bscore_1 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_1, PAF_1, objtype=1)
body2d_detected_2, bscore_2 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_2, PAF_2, objtype=1)
# body2d_detected = utils.general.detect_keypoints2d(body2d_heatmap)[:20, :]
body3d_detected_1, _ = PAF_to_3D(body2d_detected_1, PAF_1, objtype=1)
body3d_detected_2, _ = PAF_to_3D(body2d_detected_2, PAF_2, objtype=1)
body3d_detected_1 = body3d_detected_1[:21, :]
body3d_detected_2 = body3d_detected_2[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(241)
plt.imshow(image_v_1)
utils.general.plot2d(ax1, body2d_1, type_str='hand', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, body2d_detected_1, type_str='hand', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(242)
plt.imshow(image_v_2)
utils.general.plot2d(ax2, body2d_2, type_str='hand', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax2, body2d_detected_2, type_str='hand', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
ax3 = fig.add_subplot(243, projection='3d')
utils.general.plot3d(ax3, body3d_detected_1, type_str='hand', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
utils.general.plot3d(ax3, body3d_detected_2, type_str='hand', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
plt.axis('equal')
ax4 = fig.add_subplot(244, projection='3d')
utils.general.plot3d(ax4, body3d_1, type_str='hand', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot3d(ax4, body3d_2, type_str='hand', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
ax4.set_xlabel('X Label')
ax4.set_ylabel('Y Label')
ax4.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF_1, 3)
ax5 = fig.add_subplot(245)
ax5.imshow(xy)
ax6 = fig.add_subplot(246)
ax6.imshow(z)
xy, z = plot_all_PAF(PAF_2, 3)
ax7 = fig.add_subplot(247)
ax7.imshow(xy)
ax8 = fig.add_subplot(248)
ax8.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/DomeReaderTempConst.py |
import os
import numpy as np
import numpy.linalg as nl
import json
import pickle
map_body25_to_body19 = list(range(8)) + list(range(9, 25))  # drop BODY_25 joint 8 (mid-hip); 24 indices in total
seqName = 'Dexter_Grasp2'
# root = '/home/donglaix/Documents/Experiments/{}'.format(seqName)
root = '/media/posefs1b/Users/donglaix/siggasia018/{}/'.format(seqName)
calib_file = os.path.join(root, 'calib.json')
with open(calib_file) as f:
calib_data = json.load(f)
start = 0
end = 648
frameRange = range(start, end)
person_idx = -1
# -1 for most obvious person, -2 for second obvious person
bs = []
ls = []
rs = []
fs = []
img_dirs = []
for i in frameRange:
# img_file = os.path.join('openpose_image', '{}_{:012d}.jpg'.format(seqName, i)) if os.path.exists(os.path.join(root, 'openpose_image', '{}_{:012d}.jpg'.format(seqName, i))) \
# else os.path.join('openpose_image', '{}_{:012d}.png'.format(seqName, i)) # Openpose run on images
img_file = os.path.join('openpose_image', '{}_{:012d}_rendered.png'.format(seqName, i)) # Openpose run on video
assert os.path.exists(os.path.join(root, img_file))
annot_2d = os.path.join(root, 'openpose_result', '{}_{:012d}_keypoints.json'.format(seqName, i))
assert os.path.exists(annot_2d)
with open(annot_2d) as f:
data = json.load(f)
scores = []
areas = []
for ip in range(len(data["people"])):
joint2d = np.array(data["people"][ip]["pose_keypoints_2d"]).reshape(-1, 3)
left_hand2d = np.array(data["people"][ip]["hand_left_keypoints_2d"]).reshape(-1, 3)
right_hand2d = np.array(data["people"][ip]["hand_right_keypoints_2d"]).reshape(-1, 3)
face2d = np.array(data["people"][ip]["face_keypoints_2d"]).reshape(-1, 3)
score = np.sum(joint2d[:, 2]) + np.sum(left_hand2d[:, 2]) + np.sum(right_hand2d[:, 2]) + np.sum(face2d[:, 2])
scores.append(score)
joint_valid = (joint2d[:, 0] > 0.0) * (joint2d[:, 1] > 0.0)
joint_nonzero = joint2d[joint_valid, :][:, :2]
mx, my = joint_nonzero.min(axis=0)
Mx, My = joint_nonzero.max(axis=0)
areas.append((Mx - mx) * (My - my))
scores = np.array(scores)
areas = np.array(areas)
idx = np.argsort(scores)
# idx = np.argsort(areas)
ip = idx[person_idx]
joint2d = np.array(data["people"][ip]["pose_keypoints_2d"]).reshape(-1, 3)
left_hand2d = np.array(data["people"][ip]["hand_left_keypoints_2d"]).reshape(-1, 3)
right_hand2d = np.array(data["people"][ip]["hand_right_keypoints_2d"]).reshape(-1, 3)
face2d = np.array(data["people"][ip]["face_keypoints_2d"]).reshape(-1, 3)
final_body = joint2d[map_body25_to_body19]
final_left = left_hand2d
final_right = right_hand2d
final_face = face2d
bs.append(final_body)
fs.append(final_face)
ls.append(final_left)
rs.append(final_right)
img_dirs.append(img_file)
img_dirs = np.array(img_dirs)
bs = np.array(bs)
ls = np.array(ls)
rs = np.array(rs)
fs = np.array(fs)
print((len(ls), len(rs), len(fs), len(bs), len(img_dirs)))
with open('{}.pkl'.format(seqName), 'wb') as f:
pickle.dump((bs, ls, rs, fs, img_dirs, calib_data), f)
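# Hedged read-back sketch (added for illustration): the tuple order mirrors the
# dump above, so downstream code can unpack it the same way.
#   with open('{}.pkl'.format(seqName), 'rb') as f:
#       bs, ls, rs, fs, img_dirs, calib_data = pickle.load(f)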
| body2hands-main | visualization/POF/data/collect_openpose.py |
import tensorflow as tf
class MultiDataset(object):
    # combines the input pipelines of multiple datasets into a single batch
def __init__(self, db_list):
assert type(db_list) == list and len(db_list) >= 1
self.db_list = db_list
def get(self, name_wanted):
data_list = []
for i, db in enumerate(self.db_list):
data = db.get()
data_list.append(data)
ret_data = {}
for name in name_wanted:
ret_data[name] = tf.concat([d[name] for d in data_list], axis=0)
return ret_data
def combineMultiDataset(data_list, name_wanted):
# data_list is a list of data_dict
ret_data = {}
for name in name_wanted:
ret_data[name] = tf.concat([d[name] for d in data_list], axis=0)
return ret_data
if __name__ == '__main__':
pass
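

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration): combining two body readers
# into one batch. Whether every requested key is produced by both readers
# depends on their configuration, so treat the key list as an example only.
#   from data.HumanReader import HumanReader
#   db = MultiDataset([HumanReader(name='Human3.6M', mode='training', objtype=0),
#                      HumanReader(name='MPI_INF_3DHP', mode='training', objtype=0)])
#   data = db.get(['image_crop', 'scoremap2d', 'PAF', 'body_valid'])
#   # each tensor in 'data' concatenates one batch from every reader along axis 0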
| body2hands-main | visualization/POF/data/MultiDataset.py |
import tensorflow as tf
import os
import numpy as np
from data.BaseReader import BaseReader
import pickle
from data.collect_stb import PATH_TO_DATASET, K, Rl, Rr, tl, tr, TRAIN_SEQS, TEST_SEQS
from utils.keypoint_conversion import STB_to_main
class STBReader(BaseReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
assert objtype == 1
super(STBReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
self.name = 'STB'
self.image_root = PATH_TO_DATASET
path_to_db = './data/stb_collected.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
if mode == 'training':
mode_data = db_data[0]
SEQS = TRAIN_SEQS
else:
mode_data = db_data[1]
SEQS = TEST_SEQS
assert mode_data.shape[0] == len(SEQS) * 1500
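# the same 3D labels are shared by the left and right stereo views, hence the tiling
# by 2 below; STB annotates the palm center as the root joint, so it is extrapolated
# through joint 9 (2 * palm - joint 9) to approximate the wrist, a common convention
# for this dataset.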
hand3d = np.tile(mode_data, [2, 1, 1]).astype(np.float32)
hand3d[:, 0] = 2 * hand3d[:, 0] - hand3d[:, 9]
self.num_samples = hand3d.shape[0]
Ks = np.array([K] * self.num_samples, dtype=np.float32)
Rs = np.array([Rl] * mode_data.shape[0] + [Rr] * mode_data.shape[0], dtype=np.float32)
ts = np.array([tl] * mode_data.shape[0] + [tr] * mode_data.shape[0], dtype=np.float32)
distCoef = np.zeros([self.num_samples, 5], dtype=np.float32)
left_hand_valid = np.ones([self.num_samples, 21], dtype=bool)
img_dirs = [os.path.join(self.image_root, seq, 'BB_left_{}.png'.format(i)) for seq in SEQS for i in range(1500)] + \
[os.path.join(self.image_root, seq, 'BB_right_{}.png'.format(i)) for seq in SEQS for i in range(1500)]
human3d = {'K': Ks, 'R': Rs, 't': ts, 'distCoef': distCoef,
'left_hand': hand3d, 'left_hand_valid': left_hand_valid, 'img_dirs': img_dirs,
'right_hand': np.zeros([self.num_samples, 21, 3], dtype=np.float32), 'right_hand_valid': np.zeros([self.num_samples, 21], dtype=bool)}
self.register_tensor(human3d, STB_to_main)
def get(self):
d = super(STBReader, self).get(imw=640, imh=480)
return d
if __name__ == '__main__':
d = STBReader(mode='evaluation', shuffle=True, objtype=1, crop_noise=True)
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['left_hand_valid'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_local'], data_dict['keypoint_uv_origin'], data_dict['image']])
image_crop, img_dir, hand2d, hand_valid, hand2d_heatmap, PAF, mask_crop, hand3d, hand2d_origin, image_full = [np.squeeze(_) for _ in values]
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
image_full_v = ((image_full + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
for j in range(21):
plt.text(hand2d[j, 0], hand2d[j, 1], str(j))
ax2 = fig.add_subplot(232, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
ax3 = fig.add_subplot(233, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(234)
ax4.imshow(xy)
ax5 = fig.add_subplot(235)
ax5.imshow(z)
ax6 = fig.add_subplot(236)
ax6.imshow(image_full_v)
utils.general.plot2d(ax6, hand2d_origin, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
plt.show()
| body2hands-main | visualization/POF/data/STBReader.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
import numpy.linalg as nl
import pickle
from utils.keypoint_conversion import a4_to_main as order_dict
import json
import os
class DomeReader(BaseReader):
def __init__(self, mode='training', objtype=0, shuffle=False, batch_size=1, crop_noise=False, full_only=True, head_top=True):
super(DomeReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
self.image_root = '/media/posefs0c/panopticdb/'
# read data from a4
path_to_db = './data/a4_collected.pkl'
path_to_calib = './data/camera_data_a4.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
with open('./data/a4_hands_annotated.txt') as f:
hand_annots = {}
for line in f:
strs = line.split()
hand_annots[tuple(strs[:3])] = eval(strs[3])
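# each line appears to be '<seqName> <frame_str> <left|righ> <True|False>' (the
# right-hand label is stored as the 4-character token 'righ'), so the key is the first
# three tokens and the value the evaluated boolean.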
if mode == 'training':
mode_data = db_data['training_data']
else:
mode_data = db_data['testing_data']
with open(path_to_calib, 'rb') as f:
calib_data = pickle.load(f)
human3d = {'body': [], 'left_hand': [], 'right_hand': [], 'body_valid': [], 'left_hand_valid': [], 'right_hand_valid': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
img_dirs = []
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
# check for manual annotation, remove the annotation if the hand is annotated as incorrect.
if 'left_hand' in data3d and not hand_annots[(seqName, frame_str, 'left')]:
del data3d['left_hand']
if 'right_hand' in data3d and not hand_annots[(seqName, frame_str, 'righ')]:
del data3d['right_hand']
if objtype == 0:
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
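# synthesize extra keypoints (assuming the Panoptic 19-joint order: 0=neck, 1=nose,
# 6/12=left/right hip, 16/18=left/right ear): n is the unit normal of the plane spanned
# by the nose-to-ear vectors, d the height of the nose above the neck along n, and the
# head top is placed 1.5*d above the neck; a chest point is also added as a weighted
# mean of the neck and the two hips.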
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
head_top_kp = body3d[0] + 1.5 * d * n
if head_top:
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
elif objtype == 1:
# left hand or right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if ('left_hand' not in data3d) and ('right_hand' not in data3d):
continue
else:
assert objtype == 2
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
# both left and right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
# discard the sample if hand is wanted but there is no left hand.
else:
continue
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
else:
continue
if objtype == 0:
for camIdx, camDict in data3d['body']['2D'].items():
if full_only:
cond_inside = all(camDict['insideImg'])
else: # if not full_only, use the image if at least half keypoints are visible
inside_ratio = np.float(np.sum(camDict['insideImg'])) / len(camDict['insideImg'])
cond_inside = (inside_ratio > 0.5)
if any(camDict['occluded']) or not cond_inside:
continue
human3d['body'].append(body3d)
human3d['body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
elif objtype == 1:
if 'left_hand' in data3d:
for camIdx, camDict in data3d['left_hand']['2D'].items():
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']) or data3d['left_hand']['2D'][camIdx]['overlap']:
continue
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
if 'right_hand' in data3d:
for camIdx, camDict in data3d['right_hand']['2D'].items():
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']) or data3d['right_hand']['2D'][camIdx]['overlap']:
continue
human3d['right_hand'].append(right_hand3d)
human3d['left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
else:
assert objtype == 2
for camIdx, camDict in data3d['body']['2D'].items():
if any(camDict['occluded']) or not all(camDict['insideImg']):
continue
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']):
continue
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']):
continue
# If this line is reached, the sample and cam view is valid.
human3d['body'].append(body3d)
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(right_hand3d)
human3d['body_valid'].append(np.ones((18,), dtype=bool))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
if mode == 'evaluation':
if objtype == 2:
openpose_output_file = '/home/donglaix/Documents/Experiments/dome_valid/a4_openpose.json'
assert os.path.exists(openpose_output_file)
with open(openpose_output_file) as f:
openpose_data = json.load(f)
openpose_data = np.array(openpose_data, dtype=np.float32).reshape(-1, 70, 3)
openpose_valid = (openpose_data[:, :, 2] >= 0.5)
openpose_data[:, :, 0] *= openpose_valid
openpose_data[:, :, 1] *= openpose_valid
openpose_face = openpose_data[:, :, :2]
human3d['openpose_face'] = openpose_face
# read data from a5
path_to_db = './data/a5_collected.pkl'
path_to_calib = './data/camera_data_a5.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
if mode == 'training':
mode_data = db_data['training_data']
else:
mode_data = db_data['testing_data']
with open(path_to_calib, 'rb') as f:
calib_data = pickle.load(f)
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
if objtype == 0:
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
if head_top:
head_top_kp = body3d[0] + 1.5 * d * n
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
elif objtype == 1:
# left hand or right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if ('left_hand' not in data3d) and ('right_hand' not in data3d):
continue
else:
assert objtype == 2
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
# both left and right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
# discard the sample if hand is wanted but there is no left hand.
else:
continue
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
else:
continue
if objtype == 0:
for camIdx, camDict in data3d['body']['2D'].items():
if full_only:
cond_inside = all(camDict['insideImg'])
else: # if not full_only, use the image if at least half keypoints are visible
inside_ratio = np.float(np.sum(camDict['insideImg'])) / len(camDict['insideImg'])
cond_inside = (inside_ratio > 0.5)
if any(camDict['occluded']) or not cond_inside:
continue
human3d['body'].append(body3d)
human3d['body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
elif objtype == 1:
if 'left_hand' in data3d:
for camIdx, camDict in data3d['left_hand']['2D'].items():
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']) or data3d['left_hand']['2D'][camIdx]['overlap']:
continue
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
if 'right_hand' in data3d:
for camIdx, camDict in data3d['right_hand']['2D'].items():
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']) or data3d['right_hand']['2D'][camIdx]['overlap']:
continue
human3d['right_hand'].append(right_hand3d)
human3d['left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
else:
assert objtype == 2
for camIdx, camDict in data3d['body']['2D'].items():
if any(camDict['occluded']) or not all(camDict['insideImg']):
continue
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']):
continue
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']):
continue
# If this line is reached, the sample and cam view is valid.
human3d['body'].append(body3d)
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(right_hand3d)
human3d['body_valid'].append(np.ones((18,), dtype=bool))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
human3d.update(calib)
human3d['img_dirs'] = img_dirs
# import cv2
# for img_dir in img_dirs:
# if cv2.imread(img_dir) is None:
# print(img_dir)
self.register_tensor(human3d, order_dict)
self.num_samples = len(self.tensor_dict['img_dirs'])
def get(self, withPAF=True):
d = super(DomeReader, self).get(withPAF=withPAF)
return d
if __name__ == '__main__':
d = DomeReader(mode='training', shuffle=True, objtype=1, crop_noise=True, full_only=False)
# d.rotate_augmentation = True
# d.blur_augmentation = True
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['hand_valid'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_local']])
image_crop, img_dir, hand2d, hand_valid, hand2d_heatmap, PAF, mask_crop, hand3d = [np.squeeze(_) for _ in values]
image_name = img_dir.item().decode()
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
hand2d_detected, bscore = utils.PAF.detect_keypoints2d_PAF(hand2d_heatmap, PAF, objtype=1)
# hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:20, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(232, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
ax3 = fig.add_subplot(233, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(234)
ax4.imshow(xy)
ax5 = fig.add_subplot(235)
ax5.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/DomeReader.py |
import tensorflow as tf
import os
import numpy as np
import json
from data.Base2DReader import Base2DReader
from utils.keypoint_conversion import COCO_to_main, MPII_to_main
class COCOReader(Base2DReader):
def __init__(self, name='COCO', mode='training', objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(COCOReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
self.name = name
assert name in ('COCO', 'MPII')
assert mode in ('training', 'evaluation')
if name == 'COCO':
self.image_root = '/media/posefs3b/Users/gines/openpose_train/dataset/COCO/cocoapi/images/train2017/'
self.mask_root = '/media/posefs3b/Users/gines/openpose_train/dataset/COCO/cocoapi/images/mask2017/train2017/'
assert mode == 'training'
path_to_db = '/media/posefs3b/Users/gines/openpose_train/dataset/COCO/json/COCO.json'
with open(path_to_db) as f:
db_data = json.load(f)
img_dirs = []
mask_dirs = []
human = {'body': [], 'body_valid': [], 'other_bbox': []}
for i, image_data in enumerate(db_data['root']):
# bounding box test
# discard the image if this bounding box overlaps with any other bounding box
bbox = np.array(image_data['bbox'], dtype=np.float32)
bbox[2:] += bbox[:2]
if type(image_data['bbox_other']) != dict and len(image_data['bbox_other']) > 0:
bbox_other = np.array(image_data['bbox_other'], dtype=np.float32).reshape(-1, 4)
bbox_other[:, 2:] += bbox_other[:, :2]
# xmin = np.maximum(bbox_other[:, 0], bbox[0])
# ymin = np.maximum(bbox_other[:, 1], bbox[1])
# xmax = np.minimum(bbox_other[:, 2], bbox[2])
# ymax = np.minimum(bbox_other[:, 3], bbox[3])
# overlap_cond = np.logical_and(xmin < xmax, ymin < ymax).any()
# if overlap_cond:
# continue
zero_left = np.zeros([20 - bbox_other.shape[0], 4])
bbox_other = np.concatenate([bbox_other, zero_left], axis=0).astype(np.int32)
else:
bbox_other = np.zeros([20, 4], dtype=np.int32)
body = np.array(image_data['joint_self'], dtype=int)
if np.sum(body[:, 2] == 1) <= 3:
continue
img_dirs.append(os.path.join(self.image_root, image_data['img_paths']))
mask_dirs.append(os.path.join(self.mask_root, image_data['img_paths'][:-3] + 'png'))
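# COCO does not label a neck, head top or chest: synthesize the neck as the midpoint
# of the shoulders (joints 5 and 6), keep the head top as a zero placeholder marked
# invalid, and take the chest as the mean of both shoulders and both hips (5, 6, 11, 12).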
neck = (body[5:6, :2] + body[6:7, :2]) / 2
headtop = np.zeros((1, 2), dtype=int)
chest = 0.25 * (body[5:6, :2] + body[6:7, :2] + body[11:12, :2] + body[12:13, :2])
neck_valid = np.logical_and(body[5:6, 2] == 1, body[6:7, 2] == 1)
headtop_valid = np.zeros((1,), dtype=bool)
chest_valid = np.logical_and(body[5:6, 2] == 1, body[6:7, 2] == 1) * np.logical_and(body[11:12, 2] == 1, body[12:13, 2] == 1)
body2d = np.concatenate([body[:, :2], neck, headtop, chest], axis=0)
valid = np.concatenate([body[:, 2] == 1, neck_valid, headtop_valid, chest_valid])
human['body'].append(body2d.astype(np.float32))
human['body_valid'].append(valid.astype(bool))
human['other_bbox'].append(bbox_other)
human['img_dirs'] = img_dirs
human['mask_dirs'] = mask_dirs
order_dict = COCO_to_main
elif name == 'MPII':
self.image_root = '/media/posefs3b/Datasets/MPI/images/'
self.mask_root = '/media/posefs3b/Users/donglaix/mpii_mask/'
path_to_db = 'data/MPII_collected.json'
with open(path_to_db) as f:
db_data = json.load(f)
total_num = len(db_data['img_paths'])
human = {'body': [], 'body_valid': [], 'other_bbox': []}
img_dirs = []
mask_dirs = []
for i in range(total_num):
if (mode == 'training' and not db_data['is_train'][i]) or (mode == 'evaluation' and db_data['is_train'][i]):  # skip samples belonging to the other split
continue
body = np.array(db_data['joint_self'][i], dtype=int)
if np.sum(body[:, 2] == 1) <= 3:
continue
img_dirs.append(os.path.join(self.image_root, db_data['img_paths'][i]))
mask_dirs.append(os.path.join(self.mask_root, '{:05d}.png'.format(i)))
body = np.concatenate([body, np.zeros([1, 3], dtype=int)], axis=0)
human['body'].append(body[:, :2].astype(np.float32))
human['body_valid'].append(body[:, 2].astype(bool))
human['img_dirs'] = img_dirs
human['mask_dirs'] = mask_dirs
order_dict = MPII_to_main
else:
raise NotImplementedError
self.register_tensor(human, order_dict)
self.num_samples = len(self.tensor_dict['img_dirs'])
def get(self):
if self.name == 'COCO':
d = super(COCOReader, self).get(withPAF=True, read_image=True, imw=640, imh=640)
elif self.name == 'MPII':
d = super(COCOReader, self).get(withPAF=True, read_image=True, imw=1920, imh=1080)
else:
raise NotImplementedError
return d
if __name__ == '__main__':
dataset = COCOReader(name='COCO', mode='training', shuffle=False, objtype=0, crop_noise=False)
data_dict = dataset.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
import utils.general
from utils.PAF import plot_all_PAF, plot_PAF
for i in range(dataset.num_samples):
image_crop, image, body2d, body_valid, img_dir, mask, mask_crop, PAF, PAF_type = \
sess.run([data_dict['image_crop'], data_dict['image'], data_dict['keypoint_uv_local'], data_dict['body_valid'], data_dict['img_dir'], data_dict['mask'],
data_dict['mask_crop'], data_dict['PAF'], data_dict['PAF_type']])
print ('{}: {}'.format(i, img_dir[0].decode()))
body2d = np.squeeze(body2d)
body_valid = np.squeeze(body_valid)
image_crop = np.squeeze((image_crop + 0.5) * 255).astype(np.uint8)
image = np.squeeze((image + 0.5) * 255).astype(np.uint8)
mask = np.squeeze(mask)
mask_crop = np.squeeze(mask_crop)
PAF = np.squeeze(PAF)
mask_image = np.stack([mask] * 3, axis=2)
mask_crop_image = np.stack([mask_crop] * 3, axis=2)
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_crop)
utils.general.plot2d(ax1, body2d, valid_idx=body_valid)
ax2 = fig.add_subplot(232)
plt.imshow(image)
ax3 = fig.add_subplot(233)
plt.gray()
plt.imshow(mask_image)
ax4 = fig.add_subplot(234)
plt.gray()
plt.imshow(mask_crop_image)
ax5 = fig.add_subplot(235)
PAF_img, img_z = plot_all_PAF(PAF, 3)
ax5.imshow(PAF_img)
ax6 = fig.add_subplot(236)
ax6.imshow(img_z)
plt.show()
| body2hands-main | visualization/POF/data/COCOReader.py |
import pickle
from scipy.io import loadmat
import os
import numpy as np
PATH_TO_DATASET = '/media/posefs0c/Users/donglaix/Experiments/StereoHandTracking/'
TEST_SEQS = ['B1Counting', 'B1Random']
TRAIN_SEQS = ['B2Counting', 'B2Random', 'B3Counting', 'B3Random', 'B4Counting', 'B4Random', 'B5Counting', 'B5Random', 'B6Counting', 'B6Random']
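# shared intrinsics and stereo extrinsics of the two 'BB' views: the left camera is the
# reference frame (identity rotation, zero translation) and the right camera is offset
# along the negative x-axis by the stereo baseline `base` (assumed to be in millimetres,
# matching the 3D labels).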
K = np.diag([822.79041, 822.79041, 1.0]).astype(np.float32)
K[0, 2] = 318.47345
K[1, 2] = 250.31296
base = 120.054
Rl = np.eye(3, dtype=np.float32)
Rr = np.eye(3, dtype=np.float32)
tl = np.zeros((3,), dtype=np.float32)
tr = np.array([-base, 0, 0], dtype=np.float32)
if __name__ == '__main__':
assert os.path.isdir(PATH_TO_DATASET)
# collect the testing sequences
all_test_data = np.zeros((0, 21, 3), dtype=np.float32)
for test_seq in TEST_SEQS:
mat_path = os.path.join(PATH_TO_DATASET, 'labels', test_seq + '_BB.mat')
mat_data = loadmat(mat_path)
mat_data = np.transpose(mat_data['handPara'], (2, 1, 0))
all_test_data = np.concatenate((all_test_data, mat_data), axis=0)
all_train_data = np.zeros((0, 21, 3), dtype=np.float32)
for train_seq in TRAIN_SEQS:
mat_path = os.path.join(PATH_TO_DATASET, 'labels', train_seq + '_BB.mat')
mat_data = loadmat(mat_path)
mat_data = np.transpose(mat_data['handPara'], (2, 1, 0))
all_train_data = np.concatenate((all_train_data, mat_data), axis=0)
with open('stb_collected.pkl', 'wb') as f:
pickle.dump((all_train_data, all_test_data), f)
| body2hands-main | visualization/POF/data/collect_stb.py |
import tensorflow as tf
from data.Base2DReader import Base2DReader
import os
import pickle
import numpy as np
from utils.keypoint_conversion import GAnerated_to_main as order_dict
class GAneratedReader(Base2DReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
super(GAneratedReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode == 'training'
assert objtype == 1
self.name = 'GAnerated'
self.image_root = '/media/posefs1b/Users/donglaix/hand_datasets/GANeratedHands_Release/data/' # GANerated
self.path_to_db = '/media/posefs1b/Users/donglaix/hand_datasets/GANeratedHands_Release/data/collected_data.pkl'
human2d = {'left_hand': [], 'right_hand': [], 'left_hand_valid': [], 'right_hand_valid': []}
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f)
# load a tuple of 3 elements: list of img dirs, array of 2D joint, array of 3D joint
img_dirs = [os.path.join(self.image_root, _) for _ in db_data[0]]
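# GANerated provides a single (left) hand per image, so the right-hand slots are
# zero-filled and marked invalid.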
human2d['right_hand'] = np.zeros((len(img_dirs), 21, 2), dtype=np.float32)
human2d['right_hand_valid'] = np.zeros((len(img_dirs), 21), dtype=bool)
human2d['right_hand_3d'] = np.zeros((len(img_dirs), 21, 3), dtype=np.float32)
human2d['left_hand'] = db_data[1].astype(np.float32)
human2d['left_hand_valid'] = np.ones((len(img_dirs), 21), dtype=bool)
human2d['left_hand_3d'] = db_data[2].astype(np.float32)
human2d['img_dirs'] = img_dirs
self.num_samples = len(img_dirs)
self.register_tensor(human2d, order_dict)
def get(self):
d = super(GAneratedReader, self).get(imw=256, imh=256)
return d
if __name__ == '__main__':
d = GAneratedReader()
d.rotate_augmentation = True
d.blur_augmentation = True
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['hand_valid'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_origin']])
image_crop, img_dir, hand2d, hand_valid, hand2d_heatmap, PAF, mask_crop, hand3d = [np.squeeze(_) for _ in values]
image_name = img_dir.item().decode()
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(232, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
max_range = 0.5 * (np.amax(hand3d_detected, axis=0) - np.amin(hand3d_detected, axis=0)).max()
center = 0.5 * (np.amax(hand3d_detected, axis=0) + np.amin(hand3d_detected, axis=0))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
ax2.set_xlim(center[0] - max_range, center[0] + max_range)
ax2.set_ylim(center[1] - max_range, center[1] + max_range)
ax2.set_zlim(center[2] - max_range, center[2] + max_range)
ax3 = fig.add_subplot(233, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
max_range = 0.5 * (np.amax(hand3d, axis=0) - np.amin(hand3d, axis=0)).max()
center = 0.5 * (np.amax(hand3d, axis=0) + np.amin(hand3d, axis=0))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
ax3.set_xlim(center[0] - max_range, center[0] + max_range)
ax3.set_ylim(center[1] - max_range, center[1] + max_range)
ax3.set_zlim(center[2] - max_range, center[2] + max_range)
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(234)
ax4.imshow(xy)
ax5 = fig.add_subplot(235)
ax5.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/GAneratedReader.py |
import pickle
import os
import numpy as np
from utils.general import plot2d_cv2
import cv2
map_index = np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=int)
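# index map for the 21 hand keypoints: keeps the wrist (index 0) first and reverses the
# joint order within each finger.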
def project(joints, K, R=None, t=None, distCoef=None):
""" Perform Projection.
joints: N * 3
"""
x = joints.T
if R is not None:
x = np.dot(R, x)
if t is not None:
x = x + t.reshape(3, 1)
xp = x[:2, :] / x[2, :]
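# apply lens distortion in normalized coordinates: radial terms (dc[0], dc[1], dc[4])
# and tangential terms (dc[2], dc[3]), i.e. the Brown-Conrady model with the usual
# [k1, k2, p1, p2, k3] coefficient ordering.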
if distCoef is not None:
X2 = xp[0, :] * xp[0, :]
Y2 = xp[1, :] * xp[1, :]
XY = xp[0, :] * xp[1, :]
R2 = X2 + Y2
R4 = R2 * R2
R6 = R4 * R2
dc = distCoef
radial = 1.0 + dc[0] * R2 + dc[1] * R4 + dc[4] * R6
tan_x = 2.0 * dc[2] * XY + dc[3] * (R2 + 2.0 * X2)
tan_y = 2.0 * dc[3] * XY + dc[2] * (R2 + 2.0 * Y2)
xp[0, :] = radial * xp[0, :] + tan_x
xp[1, :] = radial * xp[1, :] + tan_y
pt = np.dot(K[:2, :2], xp) + K[:2, 2].reshape((2, 1))
return pt.T, x.T
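# A quick sanity check of project() (a sketch, not part of the pipeline): with no
# extrinsics and no distortion, a point (X, Y, Z) should land at (fx*X/Z + cx, fy*Y/Z + cy):
#   K_toy = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
#   pt2d, _ = project(np.array([[0.1, -0.2, 2.0]]), K_toy)
#   # pt2d -> [[360., 160.]]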
if __name__ == '__main__':
image_root = '/media/posefs0c/panopticdb/'
save_root = '/media/posefs1b/Users/donglaix/clean_a4_hand/crop_hand_new/'
with open('./data/a4_collected.pkl', 'rb') as f:
data = pickle.load(f)
with open('./data/camera_data_a4.pkl', 'rb') as f:
cam_data = pickle.load(f)
for set_name, set_data in data.items():
for i, sample_data in enumerate(set_data):
print ('processing {} {} / {}'.format(set_name, i, len(set_data)))
seqName = sample_data['seqName']
frame_str = sample_data['frame_str']
if 'left_hand' in sample_data:
joints = np.array(sample_data['left_hand']['landmarks']).reshape(-1, 3)
joints = joints[map_index]
count_img = 0
for c in np.random.permutation(31):
if count_img == 3: # enough
break
if c not in sample_data['left_hand']['2D']:
continue
if sum(sample_data['left_hand']['2D'][c]['insideImg']) < 15 or \
sum(sample_data['left_hand']['2D'][c]['occluded']) > 5 or (sample_data['left_hand']['2D'][c]['occluded'] == 1):
continue
count_img += 1
joint2d, _ = project(joints, cam_data[seqName][c]['K'], cam_data[seqName][c]['R'], cam_data[seqName][c]['t'], cam_data[seqName][c]['distCoef'])
img_name = '{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(image_root, 'a4', seqName, frame_str, c, frame_str)
img = cv2.imread(img_name)
assert img is not None
x1 = np.amin(joint2d[:, 0])
x2 = np.amax(joint2d[:, 0])
y1 = np.amin(joint2d[:, 1])
y2 = np.amax(joint2d[:, 1])
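# build a similarity transform that maps a square region, centered on the projected
# keypoints and padded to 1.5x their extent, onto a 200x200 crop; the keypoints are
# mapped with the same scale and offset before being drawn for visual inspection.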
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
size = max(x2 - x1, y2 - y1)
scale = 200 / (1.5 * size)
M = np.array([[scale, 0, (100 - scale * cx)],
[0, scale, (100 - scale * cy)]], dtype=float)
target_img = cv2.warpAffine(img, M, (200, 200))
tjoint2d = (joint2d - np.array([cx, cy])) * scale + 100
plot2d_cv2(target_img, tjoint2d, 'hand', s=3, use_color=True)
filename = '{}#{}#left#{:02d}.png'.format(seqName, frame_str, c)
cv2.imwrite(os.path.join(save_root, filename), target_img)
if 'right_hand' in sample_data:
joints = np.array(sample_data['right_hand']['landmarks']).reshape(-1, 3)
joints = joints[map_index]
count_img = 0
for c in np.random.permutation(31):
if count_img == 3: # enough
break
if c not in sample_data['right_hand']['2D']:
continue
if sum(sample_data['right_hand']['2D'][c]['insideImg']) < 15 or \
sum(sample_data['right_hand']['2D'][c]['occluded']) > 5 or (sample_data['right_hand']['2D'][c]['occluded'] == 1):
continue
count_img += 1
joint2d, _ = project(joints, cam_data[seqName][c]['K'], cam_data[seqName][c]['R'], cam_data[seqName][c]['t'], cam_data[seqName][c]['distCoef'])
img_name = '{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(image_root, 'a4', seqName, frame_str, c, frame_str)
img = cv2.imread(img_name)
assert img is not None
x1 = np.amin(joint2d[:, 0])
x2 = np.amax(joint2d[:, 0])
y1 = np.amin(joint2d[:, 1])
y2 = np.amax(joint2d[:, 1])
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
size = max(x2 - x1, y2 - y1)
scale = 200 / (1.5 * size)
M = np.array([[scale, 0, (100 - scale * cx)],
[0, scale, (100 - scale * cy)]], dtype=float)
target_img = cv2.warpAffine(img, M, (200, 200))
tjoint2d = (joint2d - np.array([cx, cy])) * scale + 100
plot2d_cv2(target_img, tjoint2d, 'hand', s=3, use_color=True)
filename = '{}#{}#righ#{:02d}.png'.format(seqName, frame_str, c)
cv2.imwrite(os.path.join(save_root, filename), target_img)
| body2hands-main | visualization/POF/data/collect_crop_hand.py |
import tensorflow as tf
import numpy as np
import json
from data.Base2DReader import Base2DReader
import os
from utils.keypoint_conversion import tsimon_to_main as order_dict
class TsimonDBReader(Base2DReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
super(TsimonDBReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode == 'training'
assert objtype == 1
self.name = 'Tsimon'
self.image_root = '/media/posefs0c/Users/donglaix/tsimon/'
self.path_to_db = ['/media/posefs0c/Users/donglaix/tsimon/hands_v12.json', '/media/posefs0c/Users/donglaix/tsimon/hands_v13.json', '/media/posefs0c/Users/donglaix/tsimon/hands_v143.json']
human2d = {'left_hand': [], 'right_hand': [], 'left_hand_valid': [], 'right_hand_valid': []}
img_dirs = []
for filename in self.path_to_db:
with open(filename) as f:
filedata = json.load(f)
for ihand, hand_data in enumerate(filedata['root']):
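# every annotated hand in this set is treated as a right hand; the left-hand slots are
# zero-filled and marked invalid.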
joint2d = np.array(hand_data['joint_self'])
human2d['right_hand'].append(joint2d[:, :2].astype(np.float32))
human2d['right_hand_valid'].append(joint2d[:, 2].astype(bool))
human2d['left_hand'].append(np.zeros((21, 2), dtype=np.float32))
human2d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
img_dir = os.path.join(self.image_root, '/'.join(hand_data['img_paths'].split('/')[5:]))
img_dirs.append(img_dir)
human2d['img_dirs'] = img_dirs
self.num_samples = len(img_dirs)
self.register_tensor(human2d, order_dict)
def get(self):
d = super(TsimonDBReader, self).get(imw=1920, imh=1080)
return d
if __name__ == '__main__':
dataset = TsimonDBReader(mode='training', shuffle=True, objtype=1, crop_noise=True)
data_dict = dataset.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
for i in range(dataset.num_samples):
print('{}/{}'.format(i + 1, dataset.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_uv_origin'], data_dict['image'],
data_dict['left_hand_valid'], data_dict['right_hand_valid']])
image_crop, img_dir, hand2d, hand2d_heatmap, PAF, mask_crop, hand2d_origin, image_full, left_hand_valid, right_hand_valid \
= [np.squeeze(_) for _ in values]
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
image_full_v = ((image_full + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand_valid = right_hand_valid
fig = plt.figure(1)
ax1 = fig.add_subplot(241)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
for j in range(21):
plt.text(hand2d[j, 0], hand2d[j, 1], str(j))
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(244)
ax4.imshow(xy)
ax6 = fig.add_subplot(246)
ax6.imshow(image_full_v)
utils.general.plot2d(ax6, hand2d_origin, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax7 = fig.add_subplot(247)
mask_3c = np.stack([mask_crop] * 3, axis=2)
ax7.imshow(mask_3c)
ax8 = fig.add_subplot(248)
ax8.imshow((mask_3c * image_v).astype(np.uint8))
plt.show()
| body2hands-main | visualization/POF/data/TsimonDBReader.py |
import tensorflow as tf
import pickle
from data.BaseReader import BaseReader
import os
import numpy as np
class RHDReader(BaseReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
assert objtype == 1
super(RHDReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
self.name = 'RHD'
self.image_root = '/media/posefs0c/Users/donglaix/Experiments/RHD_published_v2/{}/'.format(mode)
path_to_db = os.path.join(self.image_root, 'anno_{}.pickle'.format(mode))
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
human3d = {'K': [], 'R': [], 't': [], 'distCoef': [], 'left_hand': [], 'left_hand_valid': [], 'right_hand': [], 'right_hand_valid': []}
img_dirs = []
mask_dirs = []
for i, data in db_data.items():
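# RHD stores 42 keypoints per frame (first 21 = left hand, last 21 = right hand); one
# sample is emitted per hand, and only when every keypoint of that hand is visible.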
img_dir = os.path.join(self.image_root, 'color', '{:05d}.png'.format(i))
if data['uv_vis'][:21, 2].all():
# add the left hand
img_dirs.append(img_dir)
human3d['R'].append(np.eye(3, dtype=np.float32))
human3d['t'].append(np.zeros((3,), dtype=np.float32))
human3d['distCoef'].append(np.zeros((5,), dtype=np.float32))
human3d['K'].append(data['K'].astype(np.float32))
human3d['left_hand'].append(data['xyz'][:21, :].astype(np.float32))
human3d['right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
mask_dir = os.path.join(self.image_root, 'mask_sep', 'left_{:05d}.png'.format(i))
mask_dirs.append(mask_dir)
if data['uv_vis'][21:, 2].all():
# add the right hand
img_dirs.append(img_dir)
human3d['R'].append(np.eye(3, dtype=np.float32))
human3d['t'].append(np.zeros((3,), dtype=np.float32))
human3d['distCoef'].append(np.zeros((5,), dtype=np.float32))
human3d['K'].append(data['K'].astype(np.float32))
human3d['right_hand'].append(data['xyz'][21:, :].astype(np.float32))
human3d['left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
mask_dir = os.path.join(self.image_root, 'mask_sep', 'right_{:05d}.png'.format(i))
mask_dirs.append(mask_dir)
human3d['img_dirs'] = img_dirs
# human3d['mask_dirs'] = mask_dirs
self.register_tensor(human3d, {}) # pass in an empty dict because no order needs to be changed
self.num_samples = len(img_dirs)
def get(self):
d = super(RHDReader, self).get(imw=320, imh=320)
return d
if __name__ == '__main__':
d = RHDReader(mode='training', shuffle=True, objtype=1, crop_noise=True)
d.rotate_augmentation = True
d.blur_augmentation = True
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.05)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_local'], data_dict['keypoint_uv_origin'], data_dict['image']])
image_crop, img_dir, hand2d, hand2d_heatmap, PAF, mask_crop, hand3d, hand2d_origin, image_full = [np.squeeze(_) for _ in values]
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
image_full_v = ((image_full + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
hand_valid = np.ones((21,), dtype=bool)
fig = plt.figure(1)
ax1 = fig.add_subplot(241)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
for j in range(21):
plt.text(hand2d[j, 0], hand2d[j, 1], str(j))
ax2 = fig.add_subplot(242, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
ax3 = fig.add_subplot(243, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(244)
ax4.imshow(xy)
ax5 = fig.add_subplot(245)
ax5.imshow(z)
ax6 = fig.add_subplot(246)
ax6.imshow(image_full_v)
utils.general.plot2d(ax6, hand2d_origin, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax7 = fig.add_subplot(247)
mask_3c = np.stack([mask_crop] * 3, axis=2)
ax7.imshow(mask_3c)
ax8 = fig.add_subplot(248)
ax8.imshow((mask_3c * image_v).astype(np.uint8))
plt.show()
| body2hands-main | visualization/POF/data/RHDReader.py |
import os
import pickle
import json
import numpy as np
def load_calib_file(calib_file):
assert os.path.isfile(calib_file)
with open(calib_file) as f:
calib = json.load(f)
for key in calib:
if type(calib[key]) == list:
calib[key] = np.array(calib[key])
return calib
"""
#################################################################
Panoptic A4
#################################################################
"""
# run in Python 3
root = '/media/posefs0c/panopticdb/a4/'
sample_list = os.path.join(root, 'sample_list.pkl')
with open(sample_list, 'rb') as f:
df = pickle.load(f)
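# each collected entry is a dict with 'seqName', 'frame_str', 'id', a 'body' record
# (3D landmarks plus per-camera 2D visibility/occlusion flags) and, when the frame lists
# the subject as having a valid hand, 'left_hand' / 'right_hand' records of the same form.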
# collect hand data
if os.path.isfile('./a4_collected.pkl'):
print('A4 collection file exists.')
else:
training_data = []
testing_data = []
for seqName, seq_samples in df.items():
i = 0
for hvframe, frame_dict in seq_samples.items():
i += 1
hv, frame_str = hvframe
print('collecting data: {} {}/{}'.format(seqName, i, len(seq_samples)))
person3df = os.path.join(root, 'annot_{}_3d'.format(hv), seqName, 'Recon3D_{0}{1}.json'.format(hv, frame_str))
with open(person3df) as f:
print(person3df)
person3d = json.load(f)
map_id = {}
for person_data in person3d:
pid = person_data['id']
if pid == -1:
continue
person_dict = {'seqName': seqName, 'frame_str': frame_str, 'id': pid}
body_dict = {'landmarks': person_data['body']['landmarks'], '2D': {}}
person_dict['body'] = body_dict
if 'subjectsWithValidLHand' in frame_dict and pid in frame_dict['subjectsWithValidLHand']:
left_hand_dict = {'landmarks': person_data['left_hand']['landmarks'], '2D': {}}
person_dict['left_hand'] = left_hand_dict
if 'subjectsWithValidRHand' in frame_dict and pid in frame_dict['subjectsWithValidRHand']:
right_hand_dict = {'landmarks': person_data['right_hand']['landmarks'], '2D': {}}
person_dict['right_hand'] = right_hand_dict
map_id[pid] = person_dict
for panelIdx, camIdx in frame_dict['camIdxArray']:
person2df = os.path.join(root, 'annot_{}_2d'.format(hv), seqName, frame_str, 'Recon2D_00_{0:02d}_{1}.json'.format(camIdx, frame_str))
with open(person2df) as f:
person2d = json.load(f)
for person_data in person2d:
pid = person_data['id']
if pid == -1:
continue
person_dict = map_id[pid]
person_dict['body']['2D'][camIdx] = {'insideImg': person_data['body']['insideImg'], 'occluded': person_data['body']['occluded']}
if 'left_hand' in person_dict:
person_dict['left_hand']['2D'][camIdx] = {'insideImg': person_data['left_hand']['insideImg'], 'occluded': person_data['left_hand']['self_occluded'],
'overlap': person_data['left_hand']['overlap']}
if 'right_hand' in person_dict:
person_dict['right_hand']['2D'][camIdx] = {'insideImg': person_data['right_hand']['insideImg'], 'occluded': person_data['right_hand']['self_occluded'],
'overlap': person_data['right_hand']['overlap']}
for _, value in map_id.items():
if seqName == '171204_pose5' or seqName == '171204_pose6':
testing_data.append(value)
else:
training_data.append(value)
with open('./a4_collected.pkl', 'wb') as f:
pickle.dump({'training_data': training_data, 'testing_data': testing_data}, f)
# collect camera calibration data
if os.path.isfile('./camera_data_a4.pkl'):
print('Camera data file exists.')
else:
seqs = df.keys()
calib_dict = {}
for seqName in seqs:
cam_dict = {}
for camIdx in range(31):
annot_dir = os.path.join(root, 'annot_calib', seqName)
calib_file = os.path.join(annot_dir, 'calib_00_{:02d}.json'.format(camIdx))
calib = load_calib_file(calib_file)
cam_dict[camIdx] = calib
calib_dict[seqName] = cam_dict
with open('./camera_data_a4.pkl', 'wb') as f:
pickle.dump(calib_dict, f)
"""
#################################################################
Panoptic A5
#################################################################
"""
# run in Python 3
root = '/media/posefs0c/panopticdb/a5/'
sample_list = os.path.join(root, 'sample_list.pkl')
with open(sample_list, 'rb') as f:
df = pickle.load(f)
# collect hand data
if os.path.isfile('./a5_collected.pkl'):
print('A5 collection file exists.')
else:
training_data = []
testing_data = []
for seqName, seq_samples in df.items():
i = 0
for hvframe, frame_dict in seq_samples.items():
i += 1
hv, frame_str = hvframe
print('collecting data: {} {}/{}'.format(seqName, i, len(seq_samples)))
person3df = os.path.join(root, 'annot_{}_3d'.format(hv), seqName, 'Recon3D_{0}{1}.json'.format(hv, frame_str))
with open(person3df) as f:
print(person3df)
person3d = json.load(f)
map_id = {}
for person_data in person3d:
pid = person_data['id']
if pid == -1:
continue
person_dict = {'seqName': seqName, 'frame_str': frame_str, 'id': pid}
body_dict = {'landmarks': person_data['body']['landmarks'], '2D': {}}
person_dict['body'] = body_dict
if 'subjectsWithValidLHand' in frame_dict and pid in frame_dict['subjectsWithValidLHand']:
left_hand_dict = {'landmarks': person_data['left_hand']['landmarks'], '2D': {}}
person_dict['left_hand'] = left_hand_dict
if 'subjectsWithValidRHand' in frame_dict and pid in frame_dict['subjectsWithValidRHand']:
right_hand_dict = {'landmarks': person_data['right_hand']['landmarks'], '2D': {}}
person_dict['right_hand'] = right_hand_dict
map_id[pid] = person_dict
for panelIdx, camIdx in frame_dict['camIdxArray']:
person2df = os.path.join(root, 'annot_{}_2d'.format(hv), seqName, frame_str, 'Recon2D_00_{0:02d}_{1}.json'.format(camIdx, frame_str))
with open(person2df) as f:
person2d = json.load(f)
for person_data in person2d:
pid = person_data['id']
if pid == -1:
continue
person_dict = map_id[pid]
person_dict['body']['2D'][camIdx] = {'insideImg': person_data['body']['insideImg'], 'occluded': person_data['body']['occluded']}
if 'left_hand' in person_dict:
person_dict['left_hand']['2D'][camIdx] = {'insideImg': person_data['left_hand']['insideImg'], 'occluded': person_data['left_hand']['self_occluded'],
'overlap': person_data['left_hand']['overlap']}
if 'right_hand' in person_dict:
person_dict['right_hand']['2D'][camIdx] = {'insideImg': person_data['right_hand']['insideImg'], 'occluded': person_data['right_hand']['self_occluded'],
'overlap': person_data['right_hand']['overlap']}
for _, value in map_id.items():
training_data.append(value)
with open('./a5_collected.pkl', 'wb') as f:
pickle.dump({'training_data': training_data, 'testing_data': testing_data}, f)
# collect camera calibration data
if os.path.isfile('./camera_data_a5.pkl'):
print('Camera data file exists.')
else:
seqs = df.keys()
calib_dict = {}
for seqName in seqs:
cam_dict = {}
for camIdx in range(31):
annot_dir = os.path.join(root, 'annot_calib', seqName)
calib_file = os.path.join(annot_dir, 'calib_00_{:02d}.json'.format(camIdx))
calib = load_calib_file(calib_file)
cam_dict[camIdx] = calib
calib_dict[seqName] = cam_dict
with open('./camera_data_a5.pkl', 'wb') as f:
pickle.dump(calib_dict, f)
"""
#################################################################
Panoptic A4Plus
#################################################################
"""
# run in Python 3
root = '/media/posefs0c/panopticdb/a4/'
sample_list = os.path.join(root, 'sample_list.pkl')
with open(sample_list, 'rb') as f:
df = pickle.load(f)
# collect hand data
if os.path.isfile('./a4plus_collected.pkl'):
print('A4Plus collection file exists.')
else:
training_data = []
testing_data = []
for seqName, seq_samples in df.items():
i = 0
for hvframe, frame_dict in seq_samples.items():
i += 1
hv, frame_str = hvframe
print('collecting data: {} {}/{}'.format(seqName, i, len(seq_samples)))
person3df = os.path.join(root, 'annot_{}_3d'.format(hv), seqName, 'Recon3D_{0}{1}.json'.format(hv, frame_str))
with open(person3df) as f:
print(person3df)
person3d = json.load(f)
map_id = {}
for person_data in person3d:
pid = person_data['id']
if pid == -1:
continue
person_dict = {'seqName': seqName, 'frame_str': frame_str, 'id': pid}
body_dict = {'landmarks': person_data['body']['landmarks'], '2D': {}}
person_dict['body'] = body_dict
if 'subjectsWithValidLHand' in frame_dict and pid in frame_dict['subjectsWithValidLHand']:
left_hand_dict = {'landmarks': person_data['left_hand']['landmarks'], '2D': {}}
person_dict['left_hand'] = left_hand_dict
if 'subjectsWithValidRHand' in frame_dict and pid in frame_dict['subjectsWithValidRHand']:
right_hand_dict = {'landmarks': person_data['right_hand']['landmarks'], '2D': {}}
person_dict['right_hand'] = right_hand_dict
map_id[pid] = person_dict
for panelIdx, camIdx in frame_dict['camIdxArray']:
person2df = os.path.join(root, 'annot_{}_2d'.format(hv), seqName, frame_str, 'Recon2D_00_{0:02d}_{1}.json'.format(camIdx, frame_str))
with open(person2df) as f:
person2d = json.load(f)
for person_data in person2d:
pid = person_data['id']
if pid == -1:
continue
person_dict = map_id[pid]
person_dict['body']['2D'][camIdx] = {'insideImg': person_data['body']['insideImg'], 'occluded': person_data['body']['occluded']}
if 'left_hand' in person_dict:
person_dict['left_hand']['2D'][camIdx] = {'insideImg': person_data['left_hand']['insideImg'], 'occluded': person_data['left_hand']['self_occluded'],
'overlap': person_data['left_hand']['overlap']}
if 'right_hand' in person_dict:
person_dict['right_hand']['2D'][camIdx] = {'insideImg': person_data['right_hand']['insideImg'], 'occluded': person_data['right_hand']['self_occluded'],
'overlap': person_data['right_hand']['overlap']}
for _, value in map_id.items():
if seqName == '171204_pose5' or seqName == '171204_pose6':
testing_data.append(value)
else:
training_data.append(value)
with open('./a4plus_collected.pkl', 'wb') as f:
pickle.dump({'training_data': training_data, 'testing_data': testing_data}, f)
| body2hands-main | visualization/POF/data/collect_a4.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
import pickle
from utils.keypoint_conversion import a4_to_main as order_dict
import json
import os
class OpenposeReader(BaseReader):
def __init__(self, seqName, mode='evaluation', objtype=0, shuffle=False, batch_size=1, crop_noise=False):
super(OpenposeReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode == 'evaluation'
assert objtype == 2
self.image_root = '/media/posefs1b/Users/donglaix/siggasia018/{}/'.format(seqName)
assert os.path.isdir(self.image_root)
path_to_db = './data/{}.pkl'.format(seqName)
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
human3d = {}
num_samples = len(db_data[0])
K = np.array(db_data[5]['K'], dtype=np.float32)
K = np.expand_dims(K, axis=0)
K = np.tile(K, (num_samples, 1, 1))
human3d['K'] = K
human3d['openpose_body'] = db_data[0].astype(np.float32)[:, :18, :]
# duplicate the neck for head top and chest
human3d['openpose_body'] = np.concatenate((human3d['openpose_body'], human3d['openpose_body'][:, 1:2, :], human3d['openpose_body'][:, 1:2, :]), axis=1)
human3d['openpose_body_score'] = db_data[0][:, :18, 2].astype(np.float32)
# duplicate the neck for head top and chest
human3d['openpose_body_score'] = np.concatenate((human3d['openpose_body_score'], human3d['openpose_body_score'][:, 1:2], human3d['openpose_body_score'][:, 1:2]), axis=1)
human3d['openpose_lhand'] = db_data[1].astype(np.float32)
human3d['openpose_lhand_score'] = db_data[1][:, :, 2].astype(np.float32)
human3d['openpose_rhand'] = db_data[2].astype(np.float32)
human3d['openpose_rhand_score'] = db_data[2][:, :, 2].astype(np.float32)
human3d['openpose_face'] = db_data[3].astype(np.float32)
human3d['openpose_face_score'] = db_data[3][:, :, 2].astype(np.float32)
human3d['openpose_foot'] = db_data[0].astype(np.float32)[:, 18:, :]
human3d['openpose_foot_score'] = db_data[0].astype(np.float32)[:, 18:, 2]
human3d['img_dirs'] = np.core.defchararray.add(np.array([self.image_root]), db_data[4])
human3d['body_valid'] = np.ones((num_samples, 20), dtype=bool)
human3d['left_hand_valid'] = np.ones((num_samples, 21), dtype=bool)
human3d['right_hand_valid'] = np.ones((num_samples, 21), dtype=bool)
# dummy values
R = np.eye(3, dtype=np.float32)
R = np.expand_dims(R, axis=0)
R = np.tile(R, (num_samples, 1, 1))
human3d['R'] = R
t = np.ones((3,), dtype=np.float32)
t = np.expand_dims(t, axis=0)
t = np.tile(t, (num_samples, 1))
human3d['t'] = t
dc = np.zeros((5,), dtype=np.float32)
dc = np.expand_dims(dc, axis=0)
dc = np.tile(dc, (num_samples, 1))
human3d['distCoef'] = dc
human3d['body'] = np.zeros((num_samples, 21, 3), dtype=np.float32)
human3d['left_hand'] = np.zeros((num_samples, 21, 3), dtype=np.float32)
human3d['right_hand'] = np.zeros((num_samples, 21, 3), dtype=np.float32)
for key, val in human3d.items():
if 'openpose' in key and 'score' not in key:
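# zero out the coordinates of keypoints that OpenPose did not detect (confidence exactly
# 0) and drop the confidence channel, keeping only (x, y); a 0.05 threshold is left
# commented out below as an alternative.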
# valid = val[:, :, 2] > 0.05
valid = val[:, :, 2] > 0.0
val[:, :, 0] *= valid
val[:, :, 1] *= valid
human3d[key] = val[:, :, :2]
self.register_tensor(human3d, order_dict)
self.num_samples = len(self.tensor_dict['img_dirs'])
def get(self, imw=1920, imh=1080):
d = super(OpenposeReader, self).get(withPAF=False, bbox2d=1, imw=imw, imh=imh)
return d
if __name__ == '__main__':
d = OpenposeReader(mode='evaluation', seqName='test3', shuffle=False, objtype=2, crop_noise=False)
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
bimage_crop, image, body2d, body2d_local, foot2d = sess.run([data_dict['bimage_crop'], data_dict['image'], data_dict['openpose_body'], data_dict['body_uv_local'], data_dict['openpose_foot']])
foot2d = np.squeeze(foot2d)
image_v = ((image[0] + 0.5) * 255).astype(np.uint8)
image_crop_v = ((bimage_crop[0] + 0.5) * 255).astype(np.uint8)
fig = plt.figure()
ax1 = fig.add_subplot(121)
plt.imshow(image_v)
plt.scatter(foot2d[:, 0], foot2d[:, 1])
        # use a separate loop variable so the outer frame index `i` is not shadowed
        for j in range(4):
            plt.text(int(foot2d[j, 0]), int(foot2d[j, 1]), str(j))
utils.general.plot2d(ax1, body2d[0])
ax2 = fig.add_subplot(122)
plt.imshow(image_crop_v)
utils.general.plot2d(ax2, body2d_local[0])
plt.show()
| body2hands-main | visualization/POF/data/OpenposeReader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
import numpy as np
import scipy.io as io
rng = np.random.RandomState(23456)
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import os
from PIL import Image,ImageDraw
class regressor_fcn_bn_32(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_image=False, default_size=256):
self.require_image = require_image
self.default_size = default_size
self.use_resnet = True
embed_size = default_size
if self.require_image:
embed_size += default_size
if self.use_resnet:
self.image_resnet_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(512*2, default_size),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(default_size, momentum=0.01),
)
self.image_reduce = nn.Sequential(
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,256,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(256),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv8 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv9 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv10 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip1 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip2 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create image embedding
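	## each frame carries a 2x512-dim ResNet feature; it is projected down to default_size,
	## then max-pooled along time by a factor of 2 to match the encoder's temporal downsampling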
def process_image(self, image_):
B, T, _ = image_.shape
image_ = image_.view(-1, 512*2)
feat = self.image_resnet_postprocess(image_)
feat = feat.view(B, T, self.default_size)
feat = feat.permute(0, 2, 1).contiguous()
feat = self.image_reduce(feat)
return feat
## utility upsampling function
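	## (repeats each timestep twice along the temporal axis, then crops to the target length)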
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, image_=None):
B, T = input_.shape[0], input_.shape[2]
fourth_block = self.encoder(input_)
if self.require_image:
feat = self.process_image(image_)
fourth_block = torch.cat((fourth_block, feat), dim=1)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
eighth_block = self.conv8(seventh_block)
ninth_block = self.conv9(eighth_block)
tenth_block = self.conv10(ninth_block)
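		# decoder-style skip connections: each deeper feature map is added back into the
		# preceding shallower one and refined (skip1/skip2/skip4/skip5); conv7 halved the
		# temporal resolution, so its output is upsampled before being added to the conv6 features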
ninth_block = tenth_block + ninth_block
ninth_block = self.skip1(ninth_block)
eighth_block = ninth_block + eighth_block
eighth_block = self.skip2(eighth_block)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip4(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip5(fifth_block)
output = self.decoder(fifth_block)
return output
class regressor_fcn_bn_discriminator(nn.Module):
def __init__(self):
super(regressor_fcn_bn_discriminator, self).__init__()
def build_net(self, feature_in_dim):
self.convs = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,64,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(64),
## 64
nn.Dropout(0.5),
nn.Conv1d(64,64,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(64),
## 32
nn.Dropout(0.5),
nn.Conv1d(64,32,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(32),
## 16
nn.Dropout(0.5),
nn.Conv1d(32,32,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(32),
## 8
nn.Dropout(0.5),
nn.Conv1d(32,16,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(16),
## 4
nn.Dropout(0.5),
nn.Conv1d(16,16,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(16),
## 2
nn.Dropout(0.5),
nn.Conv1d(16,8,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(8),
## 1
nn.Dropout(0.5),
nn.Conv1d(8,1,3,padding=1),
)
def forward(self, input_):
outputs = self.convs(input_)
return outputs
| body2hands-main | utils/modelZoo.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import os, sys
import scipy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from scipy.spatial.transform import Rotation as R
from shutil import copyfile
from PIL import Image,ImageDraw
from torchvision import transforms
import torch
FEATURE_MAP = {
'arm2wh':((6*6), 42*6),
}
ARMS_ONLY = [12,13,14,15,16,17] #arms for mtc
EPSILON = 1e-10
## helper for calculating mean and standard dev
def mean_std(feat, data, rot_idx):
if feat == 'wh':
mean = data.mean(axis=2).mean(axis=0)[np.newaxis,:, np.newaxis]
std = data.std(axis=2).std(axis=0)[np.newaxis,:, np.newaxis]
std += EPSILON
else:
mean = data.mean(axis=2).mean(axis=0)[np.newaxis,:, np.newaxis]
std = np.array([[[data.std()]]]).repeat(data.shape[1], axis=1)
return mean, std
## helper for calculating standardization stats
def calc_standard(train_X, train_Y, pipeline):
rot_idx = -6
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
body_mean_X, body_std_X = mean_std(in_feat, train_X, rot_idx)
if in_feat == out_feat:
body_mean_Y = body_mean_X
body_std_Y = body_std_X
else:
body_mean_Y, body_std_Y = mean_std(out_feat, train_Y, rot_idx)
return body_mean_X, body_std_X, body_mean_Y, body_std_Y
## utility check if object is float
def is_float(n):
try:
float(n)
return True
except:
return False
## utility function to convert from r6d space to axis angle
def rot6d_to_aa(r6ds):
res = np.zeros((r6ds.shape[0], 3))
for i,row in enumerate(r6ds):
np_r6d = np.expand_dims(row, axis=0)
np_mat = np.reshape(np_rot6d_to_mat(np_r6d)[0], (3,3))
np_mat = R.from_matrix(np_mat)
aa = np_mat.as_rotvec()
res[i,:] = aa
return res
def np_mat_to_rot6d(np_mat):
""" Get 6D rotation representation for rotation matrix.
	Implementation based on
https://arxiv.org/abs/1812.07035
[Inputs]
flattened rotation matrix (last dimension is 9)
[Returns]
6D rotation representation (last dimension is 6)
"""
shape = np_mat.shape
if not ((shape[-1] == 3 and shape[-2] == 3) or (shape[-1] == 9)):
raise AttributeError("The inputs in tf_matrix_to_rotation6d should be [...,9] or [...,3,3], \
but found tensor with shape {}".format(shape[-1]))
np_mat = np.reshape(np_mat, [-1, 3, 3])
np_r6d = np.concatenate([np_mat[...,0], np_mat[...,1]], axis=-1)
if len(shape) == 1:
np_r6d = np.reshape(np_r6d, [6])
return np_r6d
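# Sanity-check example (not part of the original code): the 6D representation is the
# concatenation of the first two columns of the rotation matrix, so the identity rotation
# maps to np_mat_to_rot6d(np.eye(3).reshape(9)) == [1, 0, 0, 0, 1, 0].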
## utility function to convert from axis angle to r6d space
def aa_to_rot6d(vecs):
res = np.zeros((vecs.shape[0], 6))
for i,row in enumerate(vecs):
np_mat = R.from_rotvec(row)
np_mat = np_mat.as_dcm()
np_mat = np.expand_dims(np_mat, axis=0) #e.g. batch 1
np_r6d = np_mat_to_rot6d(np_mat)[0]
res[i,:] = np_r6d
return res
## utility function to convert from r6d space to rotation matrix
def np_rot6d_to_mat(np_r6d):
shape = np_r6d.shape
np_r6d = np.reshape(np_r6d, [-1,6])
x_raw = np_r6d[:,0:3]
y_raw = np_r6d[:,3:6]
x = x_raw / np.linalg.norm(x_raw, ord=2, axis=-1)
z = np.cross(x, y_raw)
z = z / np.linalg.norm(z, ord=2, axis=-1)
y = np.cross(z, x)
x = np.reshape(x, [-1,3,1])
y = np.reshape(y, [-1,3,1])
z = np.reshape(z, [-1,3,1])
np_matrix = np.concatenate([x,y,z], axis=-1)
if len(shape) == 1:
np_matrix = np.reshape(np_matrix, [9])
else:
output_shape = shape[:-1] + (9,)
np_matrix = np.reshape(np_matrix, output_shape)
return np_matrix
## utility to load windows from outside files
def load_windows(data_dir, pipeline, num_samples=None, use_euler=False, require_image=False, require_audio=False, hand3d_image=False, use_lazy=False, test_smpl=False, temporal=False):
preload_path = os.path.join(data_dir, 'filepaths.npy')
if os.path.exists(preload_path):
filepaths = np.load(preload_path, allow_pickle=True)
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
p0_size, p1_size = FEATURE_MAP[pipeline]
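	## p0 = conditioning body windows (input features), p1 = hand windows (prediction target);
	## when the input feature is 'arm', the per-joint r6d body rotations are sliced down to ARMS_ONLY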
if os.path.exists(os.path.join(data_dir, 'full_bodies2.npy')):
print('using super quick load', data_dir)
p1_windows = np.load(os.path.join(data_dir, 'full_hands2.npy'), allow_pickle=True)
p0_windows = np.load(os.path.join(data_dir, 'full_bodies2.npy'), allow_pickle=True)
B,T = p0_windows.shape[0], p0_windows.shape[1]
if in_feat == 'arm':
p0_windows = np.reshape(p0_windows, (B,T,-1,6))
p0_windows = p0_windows[:,:,ARMS_ONLY,:]
p0_windows = np.reshape(p0_windows, (B,T,-1))
if require_image:
image_windows = np.load(os.path.join(data_dir, 'full_resnet.npy'), allow_pickle=True)
if require_image:
p0_windows = (p0_windows, image_windows)
return p0_windows, p1_windows, filepaths, None
## utility to save results
def save_results(paths, output, pipeline, base_path, tag=''):
feats = pipeline.split('2')
out_feat = feats[1]
paths = np.array(paths)
for i in range(paths.shape[0]):
print('working on', paths[i,0,0])
for j in range(paths.shape[1]):
vid_path, pnum, frame_idx = paths[i][j]
vid_path = os.path.join(base_path, vid_path)
if not os.path.exists(os.path.join(vid_path, 'results/')):
os.makedirs(os.path.join(vid_path, 'results/'))
if out_feat == 'wh':
pred_dir = os.path.join(vid_path, 'results/{}predicted_body_3d_frontal/'.format(tag))
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
pred_path = os.path.join(pred_dir, '{:04d}.txt'.format(int(frame_idx)))
## set the ground truth estimated full body pose parameters for viewing
gt_path = os.path.join(vid_path, 'body_3d_frontal/{:04d}.txt'.format(int(frame_idx)))
with open(gt_path) as f:
lines = f.readlines()
cam = lines[0]
cam = [float(n) for n in cam.split(' ') if is_float(n)]
pose = lines[1]
pose = [float(n) for n in pose.split(' ') if is_float(n)]
shape = lines[2]
shape = [float(n) for n in shape.split(' ') if is_float(n)]
idk = lines[3]
idk = [float(n) for n in idk.split(' ') if is_float(n)]
## DONE set the ground truth estimated full body pose parameters for viewing
## fill in the predicted hands to the full body pose
pose = np.reshape(pose, (62,3))
if out_feat == 'wh':
hands_r6d = np.reshape(output[i][j],(42,6))
hands = rot6d_to_aa(hands_r6d)
pose[-42:,:] = hands
pose = np.reshape(pose, (-1))
## DONE fill in the predicted hands to the full body pose
			## writing prediction to file
with open(pred_path, 'w') as f:
for item in cam:
f.write("%s "%item)
f.write("\n")
for item in pose:
f.write("%s "%item)
f.write("\n")
for item in shape:
f.write("%s "%item)
f.write("\n")
for item in idk:
f.write("%s "%item)
			## DONE writing prediction to file
| body2hands-main | utils/load_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import json
import numpy as np
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
import pickle
import utils.modelZoo as modelZoo
from utils.load_utils import *
ARMS_ONLY = [13,14,16,17,18,19] #arms for smpl
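# number of consecutive frankmocap frames to load and predict on in this demo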
N = 4
## main function demo script to run body2hands on frankmocap (smplx) predictions
def main(args):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
rng = np.random.RandomState(23456)
torch.manual_seed(23456)
torch.cuda.manual_seed(23456)
print('> checkpoint', args.checkpoint)
pipeline = args.pipeline
feature_in_dim, feature_out_dim = FEATURE_MAP[pipeline]
pretrain_model = args.checkpoint
tag = args.tag
######################################
# Setup model
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
checkpoint_dir = os.path.split(args.checkpoint)[0]
model_tag = os.path.basename(args.checkpoint).split(args.pipeline)[0]
preprocess = np.load(os.path.join(checkpoint_dir,'{}{}_preprocess_core.npz'.format(model_tag, args.pipeline)))
args.model = 'regressor_fcn_bn_32'
model = getattr(modelZoo,args.model)()
model.build_net(feature_in_dim, feature_out_dim)
model.cuda()
# Create model
loaded_state = torch.load(pretrain_model, map_location=lambda storage, loc: storage)
model.load_state_dict(loaded_state['state_dict'], strict=False)
model.eval()
test_X, total_body, total_cam = load_smplx(args.data_dir)
###### swap axis ######
print("seq len", test_X.shape)
test_X = np.swapaxes(test_X, 1, 2).astype(np.float32)
###### standardize ######
body_mean_X = preprocess['body_mean_X']
body_std_X = preprocess['body_std_X']
body_mean_Y = preprocess['body_mean_Y']
body_std_Y = preprocess['body_std_Y']
test_X = (test_X - body_mean_X) / body_std_X
##### convert to tensor ######
inputData = Variable(torch.from_numpy(test_X)).cuda()
# ===================forward=====================
output = model(inputData)
# De-standardaize
output_np = output.data.cpu().numpy()
output_np = output_np * body_std_Y + body_mean_Y
output_np = np.swapaxes(output_np, 1, 2).astype(np.float32)
### saving as output in MTC format
save_output(output_np, total_body, total_cam, 'models/', args.pipeline, tag=args.tag)
## process to save smplx based prediction to mtc format
def save_output(output, total_body, total_cam, model_path, pipeline, tag):
feats = pipeline.split('2')
out_feat = feats[1]
start = 0
for j in range(N):
frame_idx = start+j
save_dir = os.path.join(args.data_dir, 'results/{}predicted_body_3d_frontal'.format(tag))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, '{:04d}.txt'.format(int(frame_idx)))
## note camera differences for visualization between MTC and frankmocap,
## so we just use a frontal default camera.
cam = [-12.9248, 51.8431, 209.5]
shape = np.zeros(30)
idk = np.zeros(200)
## load output from smpl body pose
pose = np.zeros((62,3))
pose[:20,:] = np.reshape(total_body[0][j], (-1,3))[:20,:]
## load predicted hands (convert from 6d to 3d)
hands_r6d = np.reshape(output[0][j],(42,6))
hands = rot6d_to_aa(hands_r6d)
pose[-42:,:] = hands
pose = np.reshape(pose, (-1))
## save in MTC format
with open(save_path, 'w') as f:
for item in cam:
f.write("%s "%item)
f.write("\n")
for item in pose:
f.write("%s "%item)
f.write("\n")
for item in shape:
f.write("%s "%item)
f.write("\n")
for item in idk:
f.write("%s "%item)
## function to load smplx data from frankmocap plugin
def load_smplx(data_dir):
result = np.zeros((N,36))
body_result = np.zeros((N,72))
cam_result = np.zeros((N,3))
start = 0
## body_result contains original full body smpl (in original aa)
## result contains arms only smpl (in r6d)
for i in range(N):
file_path = os.path.join(args.data_dir, '{:05d}_prediction_result.pkl'.format(i+start))
with open(file_path, 'rb') as f:
data = pickle.load(f)
cam = data['pred_output_list'][0]['pred_camera']
cam_result[i,:] = cam
body = data['pred_output_list'][0]['pred_body_pose']
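		# negate the axis-angle pose (i.e. invert each joint rotation), presumably to account for a
		# convention difference between the frankmocap output and the MTC-style format used downstream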
body *= -1
body_result[i,:] = body
# convert aa to r6d
body = np.reshape(body, (-1, 3))
body = aa_to_rot6d(body)
body = np.reshape(body[ARMS_ONLY,:], (-1))
result[i,:] = body
## apply additional smoothing to original smpl for nice visualization
body_result = body_result[np.newaxis,:,:]
outputs_smoothed = np.copy(body_result)
cam_result = cam_result[np.newaxis,:,:]
cam_smoothed = np.copy(cam_result)
for i in range(2, body_result.shape[1]-2):
outputs_smoothed[:,i,:] = body_result[:,i-2,:]*0.1 + body_result[:,i-1,:]*0.2 + body_result[:,i,:]*0.4 + body_result[:,i+1,:]*0.2 + body_result[:,i+2,:]*0.1
cam_smoothed[:,i,:] = cam_result[:,i-2,:]*0.1 + cam_result[:,i-1,:]*0.2 + cam_result[:,i,:]*0.4 + cam_result[:,i+1,:]*0.2 + cam_result[:,i+2,:]*0.1
return result[np.newaxis,:,:], outputs_smoothed, cam_smoothed
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, required=True, help='path to pretrained model')
parser.add_argument('--data_dir', type=str, required=True, help='input data directory with frankmocap output')
parser.add_argument('--pipeline', type=str, default='arm2wh', help='pipeline to run')
parser.add_argument('--tag', type=str, default='mocap_')
args = parser.parse_args()
print(args)
main(args)
| body2hands-main | smplx_plugin/demo.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import json
import time
try:
from eval_server_common import connect_to_redis
except ImportError:
print("HINT: copy example.eval_server_common.py to eval_server_common.py")
raise
import codraw_data
import episode
#%%
FAREWELL_MSG = "that's it, thanks!"
class Bot():
model_name = "model_generic"
agent_type = None
fns = None
# TODO(nikita): peek action for bot drawers is not supported
def __init__(self, id):
self.id = id
self.episode = episode.Episode()
self.role = "question" if self.agent_type == codraw_data.Agent.DRAWER else "answer"
self.handlers = {
'paired': self.on_paired,
'receive message': self.on_receive_message,
'server error': self.on_server_error, #TODO(nikita): Not emitted after I modified the server code
'disconnected partner': self.on_disconnected_partner,
}
self.disconnected = False
self.num_messages_sent = 0
def disconnect(self):
if self.id in type(self).active_bots:
assert type(self).active_bots[self.id] == self
del type(self).active_bots[self.id]
if not self.disconnected:
self.disconnected = True
self.emit('disconnect')
def emit(self, event, **msg):
obj = {
'botId': self.id,
'event': event,
'msg': msg,
}
self.redis.publish('visdial_server', json.dumps(obj))
def send_msg(self, msg):
self.num_messages_sent += 1
self.emit('chat message', msg=msg, role=self.role, seqId=self.num_messages_sent)
print("Sent chat message:", msg)
def send_scene_log(self, scene):
self.emit('scene log', scene=scene.stringify(), role=self.role, seqId=self.num_messages_sent)
# TODO(nikita): implement drawer bots, including "send_scene_log" which is sent by drawer
# socket.emit('scene log', {scene: Abs.resultAMT(), hitId: hitId, assignmentId: assignmentId, workerId: workerId, role: workerRole, seqId: noOfMsg});
def run_model_actions(self, must_trigger=True):
old_len = len(self.episode)
terminated = self._run_model_actions()
if terminated:
print("No action taking. Disconnecting")
if INTERACTIVE:
display(self.episode.get_true_scene())
self.send_msg(FAREWELL_MSG)
self.disconnect()
return
if must_trigger:
if len(self.episode) == old_len:
self.disconnect()
assert False, f"No response for event: {type(self.episode[-1]).__name__}"
msg_to_send = None
do_send_scene_log = False
for event in self.episode[old_len:]:
# TODO(nikita): log latent actions, such as SelectClipart
if isinstance(event, codraw_data.TellGroup):
assert msg_to_send is None, "Multiple TellGroup events added in a single round!"
msg_to_send = event.msg
elif isinstance(event, codraw_data.ReplyGroup):
assert msg_to_send is None, "Multiple ReplyGroup events added in a single round!"
msg_to_send = event.msg
elif isinstance(event, (codraw_data.DrawClipart, codraw_data.DrawGroup)):
do_send_scene_log = True
if do_send_scene_log:
assert self.agent_type == codraw_data.Agent.DRAWER
self.send_scene_log(self.episode.reconstruct())
if self.agent_type == codraw_data.Agent.TELLER:
assert msg_to_send is not None, "No message to send"
# Empty message is a signal for the drawer to begin the conversation
if msg_to_send == "" and len([x for x in self.episode if isinstance(x, codraw_data.TellGroup)]) == 1:
msg_to_send = None
print("Model expects the human drawer to start the conversation.")
else:
assert msg_to_send is not None or isinstance(self.episode[-1], codraw_data.ObserveTruth), "No message to send, and not the start"
if msg_to_send is not None:
self.send_msg(msg_to_send)
def _run_model_actions(self):
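        # Repeatedly apply the first registered fn whose trigger matches the latest event.
        # Returns True if a triggered fn adds no new events (the model has terminated),
        # and False once no fn triggers (i.e. the bot is waiting on the human partner).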
while True:
for fn in self.fns:
if type(self.episode[-1]) in fn._trigger_types:
old_len = len(self.episode)
fn(self.episode)
if len(self.episode) == old_len:
return True # terminated
break
else:
# print('no trigger for', type(self.episode[-1]))
return False
def on_paired(self, partnerId=None, key=None, image_url=None, role=None, caption=None):
if self.disconnected:
print("[ERROR] Disconnected bot was paired!")
return
print("Paired wih human partner!")
print("image_url:", image_url)
print("partner role:", role) # Yes, the role sent in the message is for the partner
assigned_role = "question" if role == "answer" else "answer"
assert assigned_role == self.role, "Wrong role assigned to bot!"
true_scene = codraw_data.AbstractScene(image_url)
self.episode.append(codraw_data.ObserveTruth(true_scene))
self.run_model_actions(must_trigger=False)
def on_receive_message(self, message=None, noOfMsg=None):
if self.disconnected:
print("[ERROR] Disconnected bot received a message!")
return
print(f"Got human message {noOfMsg}: {message}")
assert message is not None
if self.agent_type == codraw_data.Agent.TELLER:
self.episode.append(codraw_data.ReplyGroup(message))
else:
self.episode.append(codraw_data.TellGroup(message))
self.run_model_actions()
def on_disconnected_partner(self, disable='_unused'):
print("Partner disconnected from bot! Cleanining up the bot")
self.disconnect()
def on_server_error(self, errorMsg='[no errorMsg specified]'):
print("Error from server:", errorMsg)
self.disconnect()
# %%
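# Event loop bridging the CoDraw web server and local models over redis pub/sub:
# each bot class subscribes to its own 'visdial_models.<model_name>' channel, and
# incoming events ('paired', 'receive message', ...) are dispatched to the handler
# methods of the matching active Bot instance.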
def run_loop(classes):
active_bots = {}
channel_to_cls = {}
for cls in classes:
assert cls.agent_type in (codraw_data.Agent.TELLER, codraw_data.Agent.DRAWER), "Invalid agent_type for bot!"
channel = f'visdial_models.{cls.model_name}'.encode('utf-8')
assert channel not in channel_to_cls, f"Duplicate model name {cls.model_name}"
channel_to_cls[channel] = cls
if not hasattr(cls, 'redis'):
cls.redis = connect_to_redis()
if not hasattr(cls, 'active_bots'):
cls.active_bots = active_bots
p = cls.redis.pubsub()
for channel in channel_to_cls:
p.subscribe(channel)
for redis_msg in p.listen():
print("Got redis msg", redis_msg)
if redis_msg['type'] != 'message':
continue
if redis_msg['channel'] not in channel_to_cls:
print(f"WARNING: unrecognized channel {redis_msg['channel']}")
continue
data = json.loads(redis_msg['data'])
id = data['botId']
event = data['event']
msg = data['msg']
if event == 'paired':
active_bots[id] = channel_to_cls[redis_msg['channel']](id)
if id in active_bots:
handler = active_bots[id].handlers.get(event, None)
if handler is None:
print(f"No handler for event '{event}'")
else:
active_bots[id].handlers[event](**msg)
# %%
def make_script_teller_class():
import model
class ScriptTellerBot(Bot):
model_name = 'teller_script'
agent_type = codraw_data.Agent.TELLER
fns = [model.scripted_tell_before_peek]
scene_to_script = {}
def _run_model_actions(self):
if not hasattr(self.episode, 'script'):
script = self.scene_to_script[self.episode.get_last(codraw_data.ObserveTruth).scene.stringify()]
self.episode.script = script
self.episode.script_index = 0
return super()._run_model_actions()
for scene, script in codraw_data.get_scenes_and_scripts('all'):
ScriptTellerBot.scene_to_script[scene.stringify()] = script
return ScriptTellerBot
# %%
def model_to_bot_class(model_name, model, model_agent_type=codraw_data.Agent.TELLER):
model_name_ = model_name
class TheBot(Bot):
model_name = model_name_
agent_type = model_agent_type
fns = model.get_action_fns()
TheBot.__name__ = type(model).__name__ + 'Bot'
TheBot.__qualname__ = TheBot.__qualname__.replace('TheBot', TheBot.__name__)
return TheBot
# %%
def run_model_pairs(tellers, drawers=[], include_script_teller=True):
classes = []
if include_script_teller:
classes.append(make_script_teller_class())
for teller_name, (a, b) in tellers:
if a is not None:
classes.append(model_to_bot_class(teller_name + '_a', a, codraw_data.Agent.TELLER))
if b is not None:
classes.append(model_to_bot_class(teller_name + '_b', b, codraw_data.Agent.TELLER))
for drawer_name, (a, b) in drawers:
if a is not None:
classes.append(model_to_bot_class(drawer_name + '_a', a, codraw_data.Agent.DRAWER))
if b is not None:
classes.append(model_to_bot_class(drawer_name + '_b', b, codraw_data.Agent.DRAWER))
run_loop(classes)
#%%
if __name__ == '__main__':
from saved_models import load_models, make_pairs
models = load_models()
models['teller_scene2seq_a'].max_rounds = 20
models['teller_scene2seq_aux2_a'].max_rounds = 20
models['teller_rl_a'].max_rounds = 20
# TODO(nikita): change max_rounds for partition-b tellers, too
tellers = make_pairs(models,
'teller_nn',
'teller_pragmaticnn',
'teller_scene2seq',
'teller_scene2seq_aux2',
'teller_rl',
)
drawers = make_pairs(models,
'drawer_nn',
'drawer_bowcanvas2bce',
'drawer_lstmaddonly',
)
run_model_pairs(tellers, drawers)
| codraw-models-master | eval_run_bots.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
from attention import AttentionSeqToMasked
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
from model import make_fns, eval_fns
from model import Model
from baseline3_models import SceneToSeqTeller
# %%
def process_episode(episode,
brw_rewards, brw_discounted_rewards,
utterance_penalty,
gamma,
uninformative_penalty,
):
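    # Re-plays a recorded episode, turning drawer actions into SetDrawing events and building
    # word-level reward targets for policy-gradient training: each teller utterance receives the
    # change in scene similarity it produced (or -uninformative_penalty if nothing improved),
    # minus a fixed per-utterance cost, placed at the index of its final word; the resulting
    # reward sequence is then discounted per word with gamma.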
scene_sims = None
for event in episode:
if isinstance(event, codraw_data.ObserveTruth):
drawn_scene = []
true_scene = event.scene
scene_sims = []
reward_idxs = []
yield event
elif isinstance(event, codraw_data.TellGroup):
if reward_idxs:
base_idx = reward_idxs[-1] + 1
else:
base_idx = 0
offset = len(event.msg.split())
if offset >= 50:
offset = 50 - 1
reward_idxs.append(base_idx + offset)
yield event
elif isinstance(event, (codraw_data.ObserveCanvas, codraw_data.ReplyGroup)):
yield event
elif isinstance(event, codraw_data.DrawGroup):
assert drawn_scene is not None
drawn_scene = [c for c in drawn_scene if c.idx not in [c2.idx for c2 in event.cliparts]]
drawn_scene.extend(event.cliparts)
scene_sims.append(scene_similarity(drawn_scene, true_scene))
yield codraw_data.SetDrawing(drawn_scene)
elif isinstance(event, codraw_data.SetDrawing):
scene_sims.append(scene_similarity(event.scene, true_scene))
yield event
if scene_sims is not None:
rewards = np.array(scene_sims) - np.array([0] + scene_sims[:-1])
rewards = np.where(rewards > 0, rewards, -uninformative_penalty)
if len(rewards) >= 50:
rewards = np.array(list(rewards - utterance_penalty))
else:
rewards = np.array(list(rewards - utterance_penalty) + [0])
if reward_idxs:
reward_idxs.append(reward_idxs[-1] + 1)
else:
reward_idxs.append(0)
new_brw_rewards = np.zeros(reward_idxs[-1] + 1)
new_brw_rewards[np.array(reward_idxs)] = rewards
brw_rewards.extend(list(new_brw_rewards))
brw_discounted_rewards.extend(list(discount_rewards(new_brw_rewards, gamma)))
def discount_rewards(r, gamma=0.99):
""" take 1D float array of rewards and compute discounted reward """
# https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
r = np.asarray(r)
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def examples_from_episodes(episodes, dg, utterance_penalty, gamma, uninformative_penalty):
brw_rewards = []
brw_discounted_rewards = []
episodes = [list(process_episode(episode,
brw_rewards, brw_discounted_rewards,
utterance_penalty,
gamma,
uninformative_penalty,
))
for episode in episodes]
example_batch = dg.tensors_from_episodes(episodes + [[codraw_data.ObserveTruth([])]])
example_batch['brw_rewards'] = torch.tensor(brw_rewards, dtype=torch.float, device=cuda_if_available)
example_batch['brw_discounted_rewards'] = torch.tensor(brw_discounted_rewards, dtype=torch.float, device=cuda_if_available)
return example_batch
# %%
def collect_episodes(fns,
dg,
scenes=codraw_data.get_scenes('dev'),
batch_size=16,
utterance_penalty=0.25,
gamma=0.99,
uninformative_penalty=0.3
):
with torch.no_grad():
episodes = []
for scene in np.random.choice(scenes, batch_size):
ep = Episode.run(scene, fns)
episodes.append(ep)
example_batch = examples_from_episodes(
episodes,
dg=dg,
utterance_penalty=utterance_penalty,
gamma=gamma,
uninformative_penalty=uninformative_penalty,
)
return episodes, example_batch
# %%
class RLSceneToSeqTeller(SceneToSeqTeller):
def disable_dropout(self):
for module in self.modules():
if isinstance(module, nn.Dropout):
module.p = 0
def calc_rl_loss(self, example_batch):
dg = self.datagen
b_clipart_tags = self.tag_embs(example_batch['b_scene_tags']).view(-1, dg.NUM_INDEX, self.d_clipart_tags)
packer = example_batch['packer']
ob_clipart_tags = packer.ob_from_b(b_clipart_tags)
ob_clipart_tags = self.pre_attn_tag_dropout(ob_clipart_tags)
ob_scene_mask = packer.ob_from_b(example_batch['b_scene_mask'])
brw_teller_tokens_in = example_batch['brw_teller_tokens_in']
brw_embs = self.pre_lstm_emb_dropout(self.word_embs(brw_teller_tokens_in))
orwb_embs = packer.orwb_from_brw_pack(brw_embs)
orwb_attended_values_prelstm = self.attn_prelstm(orwb_embs, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
orwb_lstm_in = nn.utils.rnn.PackedSequence(torch.cat([
orwb_embs.data,
orwb_attended_values_prelstm.data,
], -1), orwb_embs.batch_sizes)
orwb_lstm_out, _ = self.lstm(orwb_lstm_in)
orwb_lstm_out = nn.utils.rnn.PackedSequence(self.post_lstm_dropout(orwb_lstm_out.data), orwb_lstm_out.batch_sizes)
orwb_attended_values = self.attn(orwb_lstm_out, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
brw_pre_project = torch.cat([
packer.brw_from_orwb_unpack(orwb_lstm_out),
packer.brw_from_orwb_unpack(orwb_attended_values),
], -1)
brw_word_logits = self.word_project(brw_pre_project)
brw_word_losses = F.cross_entropy(brw_word_logits, example_batch['brw_teller_tokens_out'], reduce=False)
b_word_losses = nn.utils.rnn.pad_packed_sequence(packer.orwb_from_brw_pack(brw_word_losses))[0].sum(0)
print('mean nll', float(b_word_losses.mean()))
# Discounting occurs at every word
# brw_discounted_rewards = example_batch['brw_discounted_rewards'][:brw_word_losses.shape[0]]
# XXX(nikita): clipping here seems wrong. Make sure there are no more crashes!
brw_discounted_rewards = example_batch['brw_discounted_rewards']
# TODO(nikita): what is the right baseline?
baseline = 0.8
brw_discounted_rewards = brw_discounted_rewards - baseline
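        # REINFORCE surrogate: the per-word negative log-likelihood of the tokens produced during
        # rollout, weighted by the baseline-subtracted discounted reward (the advantage).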
brw_rl_losses = brw_word_losses * brw_discounted_rewards
rl_loss = brw_rl_losses.mean()
return rl_loss
# %%
def load_baseline4():
models = {}
rl_spec_a = torch_load('models/rl_nodict_aug2.pt')
models['teller_rl_a'] = RLSceneToSeqTeller(spec=rl_spec_a)
models['teller_rl_b'] = None
models['teller_rl_a'].eval()
return models
| codraw-models-master | baseline4_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
An event-based view of the CoDraw dataset
"""
#%%
import numpy as np
from pathlib import Path
import json
from enum import Enum
from collections import namedtuple
import inspect
import abs_util_orig
import abs_render
#%%
if INTERACTIVE:
DATASET_PATH = Path('../CoDraw/dataset/CoDraw_1_0.json')
else:
DATASET_PATH = Path(__file__).parent / '../CoDraw/dataset/CoDraw_1_0.json'
assert DATASET_PATH.exists()
#%% clipart wrappers, with better docs than abs_util_orig.py
ClipartBase = namedtuple('Clipart',
['idx', 'subtype', 'depth', 'flip', 'x', 'y'])
# idx: integer [0-57]
# subtype: integer [0-34]
# depth: integer [0-2]
# flip: integer [0-1]
# x: float [1-500]
# y: float [1-400]
class Clipart(ClipartBase):
__slots__ = ()
NUM_IDX = 58
NUM_SUBTYPE = 35
NUM_DEPTH = 3
NUM_FLIP = 2
CANVAS_WIDTH = 500.0
CANVAS_HEIGHT = 400.0
NUM_EXPRESSION = 5
NUM_POSE = 7
assert NUM_SUBTYPE == (NUM_EXPRESSION * NUM_POSE)
HUMAN_IDXS = (18, 19)
def __new__(cls, idx, subtype, depth, flip, x=None, y=None, normed_x=None, normed_y=None):
if normed_x is not None:
if x is not None:
raise ValueError("The arguments x and normed_x are mutually exclusive")
x = normed_x * cls.CANVAS_WIDTH
elif x is None:
raise ValueError("Either x or normed_x is required")
if normed_y is not None:
if y is not None:
raise ValueError("The arguments y and normed_y are mutually exclusive")
y = normed_y * cls.CANVAS_HEIGHT
elif y is None:
raise ValueError("Either y or normed_y is required")
return ClipartBase.__new__(cls, idx, subtype, depth, flip, x, y)
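    # Example: Clipart(3, 0, 0, 0, normed_x=0.5, normed_y=0.25) is equivalent to
    # Clipart(3, 0, 0, 0, x=250.0, y=100.0) on the 500x400 canvas.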
@property
def normed_x(self):
return self.x / self.CANVAS_WIDTH
@property
def normed_y(self):
return self.y / self.CANVAS_HEIGHT
@property
def expression(self):
"""
Facial expression
"""
return self.subtype % self.NUM_EXPRESSION
@property
def pose(self):
"""
Body pose
"""
return self.subtype // self.NUM_EXPRESSION
@property
def human_idx(self):
if self.idx not in self.HUMAN_IDXS:
raise ValueError("Cannot get human_idx of non-human clipart")
return self.idx - self.HUMAN_IDXS[0]
@property
def render_order_key(self):
"""
Key that can be used to sort cliparts by the order in which they are
rendered.
"""
# Sun (idx=3) is always in the back; this is also in Abs.js
# All sky objects (idx < 8) are behind any non-sky objects
# Past that, objects are sorted by depth and then by index
return (self.idx != 3, self.idx >= 8, -self.depth, self.idx)
def _repr_svg_(self):
return abs_render.svg_from_cliparts([self])
class AbstractScene(list):
"""
Abstract scene representation that only encodes objects which are present,
and never a library of available objects that are not in the scene
"""
def __init__(self, string_or_iterable):
if isinstance(string_or_iterable, str):
abs = abs_util_orig.AbsUtil(string_or_iterable)
if abs.obj is None:
super().__init__()
else:
super().__init__(Clipart(*c) for c in abs.obj)
else:
super().__init__(string_or_iterable)
def __repr__(self):
return "<AbstractScene " + super().__repr__() + ">"
def __str__(self):
return super().__repr__()
def _repr_svg_(self):
return abs_render.svg_from_cliparts(self)
def stringify(self):
scene_str = ""
scene_str += f"{len(self)},"
for i, clipart in enumerate(self):
img_name = abs_render.get_image_name(clipart)
prefix, num = img_name[:-5].split('_')
prefix = ['s', 'p', 'hb0', 'hb1', 'a', 'c', 'e', 't'].index(prefix)
num = int(num)
scene_str += f"{img_name},"
scene_str += f"{i},"
scene_str += f"{num},"
scene_str += f"{prefix},"
scene_str += f"{clipart.x},"
scene_str += f"{clipart.y},"
scene_str += f"{clipart.depth},"
scene_str += f"{clipart.flip},"
return scene_str
#%% Data loading helper for a particular split
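# Split names: 'train_a'/'a' and 'train_b'/'b' are the two halves of the training keys,
# 'train_full' is both halves combined, 'dev' maps to the val* keys, plus 'test' and 'all'.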
def data_for_splits(split_or_splits):
if isinstance(split_or_splits, str):
splits = [split_or_splits]
else:
splits = split_or_splits
data_all = json.loads(DATASET_PATH.read_text())['data']
keys_train = sorted([k for k in data_all.keys() if k.startswith('train')])
keys_dev = sorted([k for k in data_all.keys() if k.startswith('val')])
keys_test = sorted([k for k in data_all.keys() if k.startswith('test')])
keys_all = sorted(data_all.keys())
half_train_len = len(keys_train) // 2
keys_from_split = {
'train_a': keys_train[:half_train_len],
'a': keys_train[:half_train_len],
'train_b': keys_train[half_train_len:],
'b': keys_train[half_train_len:],
'train_full': keys_train,
'dev': keys_dev,
'test': keys_test,
'all': keys_all,
}
res = []
for split in splits:
data_split = {k: data_all[k] for k in keys_from_split[split]}
res.append(data_split)
return res
def cached_split_wrapper(fn):
"""
Modifies the function to accept a split or list of splits instead of a
a raw data dictionary for a single split, and caches results so they don't
have to be recalculated.
"""
fn.split_to_results = {}
def deco(split_or_splits):
if isinstance(split_or_splits, str):
splits = [split_or_splits]
else:
splits = split_or_splits
uncached_splits = [split for split in splits if split not in fn.split_to_results]
uncached_splits_data = data_for_splits(uncached_splits)
for split, data in zip(uncached_splits, uncached_splits_data):
result = fn(data)
if inspect.isgenerator(result):
result = list(result)
fn.split_to_results[split] = result
if isinstance(split_or_splits, str):
return fn.split_to_results[split_or_splits]
else:
return [fn.split_to_results[split] for split in split_or_splits]
return deco
#%% An event-based view of the CoDraw dataset
# TODO(nikita): Agent class and actor/observer are currently doing nothing.
# Is there a need for them?
class Agent(Enum):
TELLER = 0
DRAWER = 1
class Event:
def __init__(self, actor=None, observer=None):
self.actor = actor
self.observer = observer
class ObserveTruth(Event):
def __init__(self, scene):
super().__init__(observer=Agent.TELLER)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}()"
class SelectClipart(Event):
def __init__(self, clipart):
super().__init__(actor=Agent.TELLER, observer=None)
self.clipart = clipart
def __repr__(self):
return f"{type(self).__name__}(clipart={self.clipart})"
class TellerIntention(Event):
def __init__(self, drawn=None, undrawn=None, draw_next=None):
super().__init__(actor=Agent.TELLER, observer=None)
self.drawn = drawn
self.undrawn = undrawn
self.draw_next = draw_next
def __repr__(self):
return f"{type(self).__name__}(drawn={self.drawn}, undrawn={self.undrawn}, draw_next={self.draw_next})"
class TellGroup(Event):
# group because each word is an action
def __init__(self, msg):
super().__init__(actor=Agent.TELLER, observer=Agent.DRAWER)
self.msg = msg
def __repr__(self):
return f"{type(self).__name__}(msg={repr(self.msg)})"
class Peek(Event):
def __init__(self):
super().__init__(actor=Agent.TELLER, observer=None)
def __repr__(self):
return f"{type(self).__name__}()"
class TellerObserveCanvas(Event):
def __init__(self, scene):
super().__init__(observer=Agent.TELLER)
if not isinstance(scene, AbstractScene):
scene = AbstractScene(scene)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}({self.scene})"
class ObserveCanvas(Event):
def __init__(self, scene):
super().__init__(observer=Agent.DRAWER)
if not isinstance(scene, AbstractScene):
scene = AbstractScene(scene)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}({self.scene})"
class DrawClipart(Event):
# Draws or moves a clipart
# Since multiple copies of the same clipart are not allowed, duplicate draw
# events with the same id will result in the removal of the older instance
# of the clipart to make way for the new one.
def __init__(self, clipart):
super().__init__(actor=Agent.DRAWER, observer=None)
self.clipart = clipart
def __repr__(self):
return f"{type(self).__name__}(clipart={self.clipart})"
class DrawGroup(Event):
# Draws or moves multiple (or no) cliparts at the same time
# Since multiple copies of the same clipart are not allowed, duplicate draw
# events with the same id will result in the removal of the older instance
# of the clipart to make way for the new one.
def __init__(self, cliparts):
super().__init__(actor=Agent.DRAWER, observer=None)
self.cliparts = cliparts
def __repr__(self):
return f"{type(self).__name__}(cliparts={self.cliparts})"
class SetDrawing(Event):
    # Updates the drawer canvas to exactly match the scene argument
# This was added for transcripts of humans performing the task because
# neither DrawClipart nor DrawGroup have support for removing clipart.
def __init__(self, scene):
super().__init__(actor=Agent.DRAWER, observer=None)
self.scene = scene
def __repr__(self):
return f"{type(self).__name__}({self.scene})"
class ReplyGroup(Event):
# group because each word is an action
def __init__(self, msg):
super().__init__(actor=Agent.DRAWER, observer=Agent.TELLER)
self.msg = msg
def __repr__(self):
return f"{type(self).__name__}(msg={repr(self.msg)})"
#%%
def events_from_datum_place_one(datum):
# TODO(nikita): this filtering keeps just over 25% of conversational rounds
# What do I need to do to match the 37.6% number in the arxiv paper?
# perhaps I should include the cases where a clipart is updated? But that
# only seems to bring me up to around 31%
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
strictly_additive = len(set(abs_b) - set(abs_d)) == 0
added_cliparts = set(abs_d) - set(abs_b)
if strictly_additive and len(added_cliparts) == 1 and entry['msg_t']:
added_clipart = list(added_cliparts)[0]
buffer.append(SelectClipart(added_clipart))
buffer.append(TellGroup(entry['msg_t']))
buffer.append(DrawClipart(added_clipart))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_place_one(data):
for datum in data.values():
yield from events_from_datum_place_one(datum)
#%%
def events_from_datum_place_many(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
added_cliparts = set(abs_d) - set(abs_b)
added_cliparts = sorted(added_cliparts, key=lambda c: c.render_order_key)
buffer.append(TellGroup(entry['msg_t']))
buffer.append(DrawGroup(added_cliparts))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_place_many(data):
for datum in data.values():
yield from events_from_datum_place_many(datum)
#%%
def events_from_datum_contextual_place_many(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
added_cliparts = set(abs_d) - set(abs_b)
added_cliparts = sorted(added_cliparts, key=lambda c: c.render_order_key)
buffer.append(TellGroup(entry['msg_t']))
buffer.append(ObserveCanvas(abs_b))
buffer.append(DrawGroup(added_cliparts))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_contextual_place_many(data):
for datum in data.values():
yield from events_from_datum_contextual_place_many(datum)
# %%
def events_from_datum_set_clipart(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
buffer.append(TellGroup(entry['msg_t']))
buffer.append(ObserveCanvas(abs_b))
buffer.append(SetDrawing(abs_d))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_set_clipart(data):
for datum in data.values():
yield from events_from_datum_set_clipart(datum)
# %%
def events_from_datum_set_clipart_pre_peek(datum):
buffer = []
buffer.append(ObserveTruth(AbstractScene(datum['abs_t'])))
for entry in datum['dialog']:
if entry.get('peeked', False):
# Note that Peek happens before TellGroup
break
abs_b = AbstractScene(entry['abs_b'])
abs_d = AbstractScene(entry['abs_d'])
buffer.append(TellGroup(entry['msg_t']))
buffer.append(ObserveCanvas(abs_b))
buffer.append(SetDrawing(abs_d))
buffer.append(ReplyGroup(entry['msg_d']))
if isinstance(buffer[-1], ObserveTruth):
return []
return buffer
@cached_split_wrapper
def get_set_clipart_pre_peek(data):
for datum in data.values():
yield from events_from_datum_set_clipart_pre_peek(datum)
# %%
@cached_split_wrapper
def get_scenes(data):
for datum in data.values():
yield AbstractScene(datum['abs_t'])
# %%
@cached_split_wrapper
def get_scenes_and_scripts(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
script = []
for entry in datum['dialog']:
if entry.get('peeked', False):
script.append(Peek())
script.append(TellerObserveCanvas(AbstractScene(entry['abs_b'])))
if entry['msg_t']:
script.append(TellGroup(entry['msg_t']))
yield (scene, script)
# %%
@cached_split_wrapper
def get_scenes_and_scripts_with_peek(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
script = []
have_peeked = False
for entry in datum['dialog']:
if entry.get('peeked', False):
script.append(Peek())
script.append(TellerObserveCanvas(AbstractScene(entry['abs_b'])))
have_peeked = True
if entry['msg_t']:
script.append(TellGroup(entry['msg_t']))
# Exclude events with no Peek action, or no messages sent afterwards
if have_peeked and not isinstance(script[-1], TellerObserveCanvas):
yield (scene, script)
# %%
@cached_split_wrapper
def get_truth_and_human_scenes(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
scene_after = None
for entry in datum['dialog']:
scene_after = entry['abs_d']
assert scene_after is not None
scene_after = AbstractScene(scene_after)
yield (scene, scene_after)
@cached_split_wrapper
def get_truth_and_human_scenes_pre_peek(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
scene_after = None
for entry in datum['dialog']:
if entry.get('peeked', False):
break
scene_after = entry['abs_d']
assert scene_after is not None
scene_after = AbstractScene(scene_after)
yield (scene, scene_after)
@cached_split_wrapper
def get_truth_and_human_scenes_with_js_scores(data):
for datum in data.values():
scene = AbstractScene(datum['abs_t'])
scene_after = None
score_after = None
for entry in datum['dialog']:
if entry.get('score', None) is not None:
score_after = entry['score']
scene_after = entry['abs_d']
assert scene_after is not None
assert score_after is not None
scene_after = AbstractScene(scene_after)
yield (scene, scene_after, score_after)
| codraw-models-master | codraw_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Multi-headed attention implementation
"""
#%%
import numpy as np
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
#%%
class AttentionSeqToMasked(nn.Module):
def __init__(self,
d_pre_q, d_pre_k, d_pre_v,
d_qk, d_v, num_heads,
attn_dropout):
super().__init__()
self.d_qk = d_qk
self.d_v = d_v
self.num_heads = num_heads
self.q_proj = nn.Linear(d_pre_q, self.num_heads * self.d_qk)
self.k_proj = nn.Linear(d_pre_k, self.num_heads * self.d_qk)
self.v_proj = nn.Linear(d_pre_v, self.num_heads * self.d_v)
self.attn_dropout = nn.Dropout(attn_dropout)
self.d_out = self.num_heads * self.d_v
def split_heads(self, tensor):
"""
[...dims, a, num_heads x b] -> [...dims, num_heads, a, b]
"""
return tensor.view(*tensor.shape[:-1], self.num_heads, -1).transpose(-3, -2)
def join_heads(self, tensor):
"""
[...dims, num_heads, a, b] -> [...dims, a, num_heads x b]
"""
res = tensor.transpose(-3, -2).contiguous()
return res.view(*res.shape[:-2], -1)
def precompute_kv(self, pre_ks, pre_vs):
assert not self.training
ks = self.split_heads(self.k_proj(pre_ks))
vs = self.split_heads(self.v_proj(pre_vs))
return ks, vs
def forward(self, pre_qs=None, pre_ks=None, pre_vs=None, ks=None, vs=None, k_mask=None):
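        # pre_qs may be a PackedSequence of variable-length query sequences; it is padded here,
        # attention is computed over the (optionally masked) keys/values, and the result is
        # re-packed before returning. ks/vs may also be passed in precomputed (see precompute_kv).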
if isinstance(pre_qs, nn.utils.rnn.PackedSequence):
pre_qs, lengths = nn.utils.rnn.pad_packed_sequence(pre_qs, batch_first=True)
else:
lengths = None
qs = self.split_heads(self.q_proj(pre_qs))
if ks is None:
ks = self.split_heads(self.k_proj(pre_ks))
if vs is None:
vs = self.split_heads(self.v_proj(pre_vs))
attn_logits = torch.matmul(qs, ks.transpose(-2, -1)) / np.sqrt(self.d_qk)
if k_mask is not None:
# k_mask is [batch, pre_ks.shape[1]] mask signalling which values
# are valid attention targets
attn_logits = torch.where(
k_mask[:, None, None, :],
attn_logits,
torch.full_like(attn_logits, float('-inf'))
)
attn_probs = F.softmax(attn_logits, dim=-1)
attn_probs = self.attn_dropout(attn_probs)
res = self.join_heads(torch.matmul(attn_probs, vs))
if lengths is not None:
res = nn.utils.rnn.pack_padded_sequence(res, lengths, batch_first=True)
return res
| codraw-models-master | attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import heapq
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import BOWAddUpdateData, NearestNeighborData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns
from model import scripted_tell, scripted_tell_before_peek, scripted_tell_after_peek
# %%
class BaseAddOnlyDrawer(Model, torch.nn.Module):
datagen_cls = BOWAddUpdateData
def init_full(self, d_hidden):
# Helps overcome class imbalance (most cliparts are not drawn most of
# the time)
self.positive_scaling_coeff = 3.
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
dg = self.datagen
self.canvas_binary_to_hidden = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(dg.NUM_BINARY, d_hidden, bias=False),
)
self.canvas_numerical_to_hidden = nn.Sequential(
nn.Linear(dg.NUM_INDEX * dg.NUM_NUMERICAL, d_hidden, bias=False),
)
d_out = dg.NUM_INDEX * (dg.NUM_ALL + 1)
self.hidden_to_clipart = nn.Sequential(
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
def lang_to_hidden(self, msg_idxs, offsets=None):
# Offsets is None only when batch_size is 1
raise NotImplementedError("Subclasses should override this")
def forward(self, example_batch):
dg = self.datagen
hidden_feats = (
self.lang_to_hidden(example_batch['msg_idxs'], example_batch['offsets'])
+ self.canvas_binary_to_hidden(example_batch['canvas_binary'].float())
+ self.canvas_numerical_to_hidden(example_batch['canvas_numerical'])
)
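        # clipart_scores[:, i, 0] is the logit that clipart i was added this round; the remaining
        # NUM_ALL entries per clipart are split into subtype/depth/flip logits and the (x, y)
        # regression values.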
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, dg.NUM_ALL + 1)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
correct_mask = example_batch['clipart_added_mask']
clipart_idx_scores = clipart_scores[:,:,0]
idx_losses = F.binary_cross_entropy_with_logits(clipart_idx_scores, correct_mask.to(torch.float), reduce=False)
idx_losses = torch.where(correct_mask, self.positive_scaling_coeff * idx_losses, idx_losses)
per_example_idx_loss = idx_losses.sum(1)
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
subtype_losses = F.cross_entropy(logits_subtype, correct_categorical[:,:,0].view((-1,)), reduce=False).view_as(correct_categorical[:,:,0])
depth_losses = F.cross_entropy(logits_depth, correct_categorical[:,:,1].view((-1,)), reduce=False).view_as(correct_categorical[:,:,1])
flip_losses = F.cross_entropy(logits_flip, correct_categorical[:,:,2].view((-1,)), reduce=False).view_as(correct_categorical[:,:,2])
vals_losses = F.mse_loss(vals_numerical, correct_numerical.view((-1, dg.NUM_NUMERICAL)), reduce=False).view_as(correct_numerical).sum(-1)
all_losses = torch.stack([subtype_losses, depth_losses, flip_losses, vals_losses], -1).sum(-1)
per_example_loss = torch.where(correct_mask, all_losses, all_losses.new_zeros(1)).sum(-1)
loss = per_example_idx_loss.mean() + per_example_loss.mean()
return loss
@respond_to(codraw_data.ObserveCanvas)
def draw(self, episode):
dg = self.datagen
msg = episode.get_last(codraw_data.TellGroup).msg
# assert msg != ""
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
canvas_context = episode.get_last(codraw_data.ObserveCanvas).scene
canvas_binary = np.zeros((dg.NUM_INDEX, 1 + dg.NUM_DEPTH + dg.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, dg.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((dg.NUM_INDEX, dg.NUM_NUMERICAL))
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + dg.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)[None,:].to(cuda_if_available)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)[None,:].to(cuda_if_available)
hidden_feats = (
self.lang_to_hidden(msg_idxs[None,:], None)
+ self.canvas_binary_to_hidden(canvas_binary.float())
+ self.canvas_numerical_to_hidden(canvas_numerical)
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, (dg.NUM_ALL + 1))
cliparts = []
prior_idxs = set([c.idx for c in canvas_context])
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
vals_numerical = vals_numerical.cpu().detach().numpy()
clipart_idx_scores = clipart_scores[0,:,0].cpu().detach().numpy()
for idx in np.where(clipart_idx_scores > 0)[0]:
if idx in prior_idxs:
continue
nx, ny = vals_numerical[idx,:]
clipart = Clipart(idx, int(logits_subtype[idx,:].argmax()), int(logits_depth[idx,:].argmax()), int(logits_flip[idx,:].argmax()), normed_x=nx, normed_y=ny)
cliparts.append(clipart)
episode.append(codraw_data.DrawGroup(cliparts))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [drawer_observe_canvas, self.draw]
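# Inference sketch for BaseAddOnlyDrawer.draw() above: channel 0 of
# clipart_scores acts as a per-clipart add/skip score. Each index with a
# positive score that is not already on the canvas (prior_idxs) is decoded by
# taking the argmax of its subtype/depth/flip logits and the sigmoid-squashed
# x/y values, and all newly added cliparts are emitted as one DrawGroup.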
# %%
class BOWAddOnlyDrawer(BaseAddOnlyDrawer):
def init_full(self, d_embeddings=512, d_hidden=512):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
)
super().init_full(d_hidden)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
self.lang_to_hidden_module = nn.Linear(d_embeddings, d_hidden)
self.to(cuda_if_available)
def lang_to_hidden(self, msg_idxs, offsets=None):
bow_feats = self.word_embs(msg_idxs, offsets)
return self.lang_to_hidden_module(bow_feats)
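# BOWAddOnlyDrawer.lang_to_hidden() is a bag-of-words encoder: EmbeddingBag
# (mean pooling by default) collapses the message tokens into a single
# d_embeddings vector, which a linear layer projects into the d_hidden space
# that gets summed with the canvas features in BaseAddOnlyDrawer.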
# %%
class LSTMAddOnlyDrawer(BaseAddOnlyDrawer):
def init_full(self, d_embeddings=256, d_hidden=512, d_lstm=256, num_lstm_layers=1, pre_lstm_dropout=0.4, lstm_dropout=0.0):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
            d_lstm=d_lstm,
num_lstm_layers=num_lstm_layers,
pre_lstm_dropout=pre_lstm_dropout,
lstm_dropout=lstm_dropout,
)
super().init_full(d_hidden)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.Embedding(len(self.datagen.vocabulary_dict), d_embeddings)
self.pre_lstm_dropout = nn.Dropout(pre_lstm_dropout)
self.lstm = nn.LSTM(d_embeddings, d_lstm, bidirectional=True, num_layers=num_lstm_layers, dropout=lstm_dropout)
# self.post_lstm_project = nn.Linear(d_lstm * 2 * num_lstm_layers, d_hidden)
# self.post_lstm_project = lambda x: x #nn.Linear(d_lstm * 2 * num_lstm_layers, d_hidden)
self.post_lstm_project = lambda x: x[:,:d_hidden]
self.to(cuda_if_available)
def lang_to_hidden(self, msg_idxs, offsets=None):
# global dump
# dump = msg_idxs, offsets
# assert False
# bow_feats = self.word_embs(msg_idxs, offsets)
# return self.lang_to_hidden_module(bow_feats)
if offsets is not None:
start = offsets.cpu()
end = torch.cat([start[1:], torch.tensor([msg_idxs.shape[-1]])])
undo_sorting = np.zeros(start.shape[0], dtype=int)
undo_sorting[(start - end).numpy().argsort()] = np.arange(start.shape[0], dtype=int)
words_packed = nn.utils.rnn.pack_sequence(sorted([msg_idxs[i:j] for i, j in list(zip(start.numpy(), end.numpy()))], key=lambda x: -x.shape[0]))
else:
words_packed = nn.utils.rnn.pack_sequence([msg_idxs[0,:]])
undo_sorting = np.array([0], dtype=int)
word_vecs = embedded = nn.utils.rnn.PackedSequence(
self.pre_lstm_dropout(self.word_embs(words_packed.data)),
words_packed.batch_sizes)
_, (h_final, c_final) = self.lstm(word_vecs)
# sentence_reps = h_final[-2:,:,:].permute(1, 2, 0).contiguous().view(undo_sorting.size, -1)
sentence_reps = c_final[-2:,:,:].permute(1, 2, 0).contiguous().view(undo_sorting.size, -1)
sentence_reps = self.post_lstm_project(sentence_reps)
if offsets is not None:
sentence_reps = sentence_reps[undo_sorting]
return sentence_reps
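# Encoding sketch for LSTMAddOnlyDrawer.lang_to_hidden(): when offsets are
# given, each message is sliced out of the flat msg_idxs tensor, the slices are
# sorted by decreasing length so they can be packed for the bidirectional LSTM,
# and undo_sorting restores the original batch order afterwards. The sentence
# representation is taken from the final cell state of the last forward and
# backward LSTM directions, concatenated, and truncated to d_hidden by
# post_lstm_project.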
# %%
class PragmaticNearestNeighborTeller(Model):
datagen_cls = NearestNeighborData
def init_full(self, drawer_model=None, num_candidates=10):
self.drawer_model = drawer_model
self.num_candidates = num_candidates
def set_drawer_model(self, drawer_model):
self.drawer_model = drawer_model
def get_spec(self):
return dict(num_candidates=self.num_candidates)
@respond_to(codraw_data.SelectClipart)
def tell(self, episode):
clipart = episode.get_last(codraw_data.SelectClipart).clipart
candidate_cliparts = heapq.nlargest(self.num_candidates, self.datagen.clipart_to_msg, key=lambda cand_clipart: clipart_similarity(cand_clipart, clipart))
# global dump
# dump = candidate_cliparts, episode
# assert False
candidate_msgs = [self.datagen.clipart_to_msg[cand_clipart] for cand_clipart in candidate_cliparts]
expected_context = [event.clipart for event in episode if isinstance(event, codraw_data.SelectClipart)][:-1]
candidate_responses = [self.drawer_model.just_draw(msg, expected_context) for msg in candidate_msgs]
best_idx = np.argmax([scene_similarity(response_scene, [clipart]) for response_scene in candidate_responses])
best_msg = candidate_msgs[best_idx]
episode.append(codraw_data.TellGroup(best_msg))
def get_action_fns(self):
return [select_clipart_to_tell, self.tell]
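# Pragmatic reranking sketch for the teller above: tell() retrieves the
# num_candidates training cliparts most similar to the target clipart, looks up
# the human messages that originally described them, simulates the paired
# drawer model on each message (just_draw, with the previously selected
# cliparts as context), and sends the message whose simulated drawing is
# closest to the target under scene_similarity.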
# %%
def load_baseline2():
baseline2_specs = torch_load(Path('models/lstmaddonly_may31.pt'))
models = {}
for k, spec in baseline2_specs.items():
print(k)
models[k] = globals()[spec['class']](spec=spec)
# TODO(nikita): serialize these models to disk
data_nn_a = NearestNeighborData('a')
data_nn_b = NearestNeighborData('b')
print('teller_pragmaticnn_a')
models['teller_pragmaticnn_a'] = PragmaticNearestNeighborTeller(data_nn_a, drawer_model=models['drawer_lstmaddonly_a'])
print('teller_pragmaticnn_b')
models['teller_pragmaticnn_b'] = PragmaticNearestNeighborTeller(data_nn_b, drawer_model=models['drawer_lstmaddonly_b'])
return models
| codraw-models-master | baseline2_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
from saved_models import load_models, make_pairs
from eval_automatic import print_eval
# %%
models = load_models(1, 2, 3, 4)
# HACK while the model is still training
models['teller_rl_b'] = models['teller_scene2seq_aux2_b']
# %%
tellers = make_pairs(models,
# 'teller_nn',
# 'teller_pragmaticnn',
# 'teller_scene2seq',
# 'teller_scene2seq_aux',
# 'teller_scene2seq_aux2',
'teller_rl',
)
drawers = make_pairs(models,
# 'drawer_nn',
# 'drawer_sim',
# 'drawer_bow2c',
# 'drawer_bow2bce',
# 'drawer_bowcanvas2bce',
'drawer_lstmaddonly',
)
# %%
print()
print_eval(do_human=True)
# %%
print()
print()
print_eval(tellers, drawers, limit=None, do_pairwise=True)
# %%
print()
print()
print_eval(tellers, drawers, limit=None, do_script=True, do_components_pairwise=True, do_components_script=True)
| codraw-models-master | baseline4_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import NearestNeighborData, MessageSimilarityData, BOWtoClipartData, ClipartToSeqData, BOWplusCanvasToMultiData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns, scripted_tell
from baseline1_models import NearestNeighborTeller, CharNeighborDrawer
from baseline1_models import BOWNeighborDrawer, BOWtoClipartDrawer, ClipartToSeqTeller
from baseline1_models import BOWtoMultiBCEDrawer, BOWplusCanvasDrawer
#%%
data_nn_a = NearestNeighborData('a')
data_nn_b = NearestNeighborData('b')
teller_nn_a = NearestNeighborTeller(data_nn_a)
teller_nn_b = NearestNeighborTeller(data_nn_b)
drawer_nn_a = CharNeighborDrawer(data_nn_a)
drawer_nn_b = CharNeighborDrawer(data_nn_b)
#%%
data_sim_a = MessageSimilarityData('a')
data_sim_b = MessageSimilarityData('b')
drawer_sim_a = BOWNeighborDrawer(data_sim_a)
drawer_sim_b = BOWNeighborDrawer(data_sim_b)
optimizer_sim_a = torch.optim.Adam(drawer_sim_a.parameters())
optimizer_sim_b = torch.optim.Adam(drawer_sim_b.parameters())
#%%
for epoch in range(500):
drawer_sim_a.train()
for num, ex in enumerate(drawer_sim_a.datagen.get_examples_batch()):
optimizer_sim_a.zero_grad()
loss = drawer_sim_a.forward(ex)
loss.backward()
optimizer_sim_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 25 == 0:
drawer_sim_a.prepare_for_inference()
for splits in ('aa', 'ba'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_sim_a, drawer_sim_b)), limit=100)
print(splits, sims.mean())
drawer_sim_a.prepare_for_inference()
# %%
for epoch in range(500):
drawer_sim_b.train()
for num, ex in enumerate(drawer_sim_b.datagen.get_examples_batch()):
optimizer_sim_b.zero_grad()
loss = drawer_sim_b.forward(ex)
loss.backward()
optimizer_sim_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 25 == 0:
drawer_sim_b.prepare_for_inference()
for splits in ('ab', 'bb'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_sim_a, drawer_sim_b)), limit=100)
print(splits, sims.mean())
drawer_sim_b.prepare_for_inference()
#%%
data_bow2c_a = BOWtoClipartData('a')
data_bow2c_b = BOWtoClipartData('b')
drawer_bow2c_a = BOWtoClipartDrawer(data_bow2c_a)
drawer_bow2c_b = BOWtoClipartDrawer(data_bow2c_b)
optimizer_bow2c_a = torch.optim.Adam(drawer_bow2c_a.parameters())
optimizer_bow2c_b = torch.optim.Adam(drawer_bow2c_b.parameters())
# %%
for epoch in range(20):
drawer_bow2c_a.train()
for num, ex in enumerate(drawer_bow2c_a.datagen.get_examples_batch()):
optimizer_bow2c_a.zero_grad()
loss = drawer_bow2c_a.forward(ex)
loss.backward()
optimizer_bow2c_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('aa', 'ba'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
#%%
for epoch in range(20):
drawer_bow2c_b.train()
for num, ex in enumerate(drawer_bow2c_b.datagen.get_examples_batch()):
optimizer_bow2c_b.zero_grad()
loss = drawer_bow2c_b.forward(ex)
loss.backward()
optimizer_bow2c_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('ab', 'bb'):
sims = eval_fns(make_fns(splits, (teller_nn_a, teller_nn_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
#%%
data_c2seq_a = ClipartToSeqData('a')
data_c2seq_b = ClipartToSeqData('b')
teller_c2seq_a = ClipartToSeqTeller(data_c2seq_a)
teller_c2seq_b = ClipartToSeqTeller(data_c2seq_b)
optimizer_c2seq_a = torch.optim.Adam(teller_c2seq_a.parameters())
optimizer_c2seq_b = torch.optim.Adam(teller_c2seq_b.parameters())
#%%
for epoch in range(80):
teller_c2seq_a.train()
for num, ex in enumerate(teller_c2seq_a.datagen.get_examples_batch()):
optimizer_c2seq_a.zero_grad()
loss = teller_c2seq_a(ex)
loss.backward()
optimizer_c2seq_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('aa', 'ab'):
sims = eval_fns(make_fns(splits, (teller_c2seq_a, teller_c2seq_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
if epoch % 50 == 49:
optimizer_c2seq_a.param_groups[0]['lr'] *= 0.5
print("Learning rate reduced to", optimizer_c2seq_a.param_groups[0]['lr'])
#%%
for epoch in range(80):
teller_c2seq_b.train()
for num, ex in enumerate(teller_c2seq_b.datagen.get_examples_batch()):
optimizer_c2seq_b.zero_grad()
loss = teller_c2seq_b(ex)
loss.backward()
optimizer_c2seq_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
for splits in ('ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_c2seq_a, teller_c2seq_b), (drawer_bow2c_a, drawer_bow2c_b)), limit=100)
print(splits, sims.mean())
if epoch % 50 == 49:
optimizer_c2seq_b.param_groups[0]['lr'] *= 0.5
print("Learning rate reduced to", optimizer_c2seq_b.param_groups[0]['lr'])
#%%
data_bowcanvas_a = BOWplusCanvasToMultiData('a')
data_bowcanvas_b = BOWplusCanvasToMultiData('b')
drawer_bow2bce_a = BOWtoMultiBCEDrawer(data_bowcanvas_a)
drawer_bow2bce_b = BOWtoMultiBCEDrawer(data_bowcanvas_b)
optimizer_bow2bce_a = torch.optim.Adam(drawer_bow2bce_a.parameters())
optimizer_bow2bce_b = torch.optim.Adam(drawer_bow2bce_b.parameters())
#%%
for epoch in range(5):
drawer_bow2bce_a.train()
for num, ex in enumerate(drawer_bow2bce_a.datagen.get_examples_batch()):
optimizer_bow2bce_a.zero_grad()
loss = drawer_bow2bce_a.forward(ex)
loss.backward()
optimizer_bow2bce_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('a',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bow2bce_a, drawer_bow2bce_b)), limit=100)
print(split, sims.mean())
#%%
for epoch in range(5):
drawer_bow2bce_b.train()
for num, ex in enumerate(drawer_bow2bce_b.datagen.get_examples_batch()):
optimizer_bow2bce_b.zero_grad()
loss = drawer_bow2bce_b.forward(ex)
loss.backward()
optimizer_bow2bce_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('b',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bow2bce_a, drawer_bow2bce_b)), limit=100)
print(split, sims.mean())
#%%
drawer_bowcanvas2bce_a = BOWplusCanvasDrawer(data_bowcanvas_a)
drawer_bowcanvas2bce_b = BOWplusCanvasDrawer(data_bowcanvas_b)
optimizer_bowcanvas2bce_a = torch.optim.Adam(drawer_bowcanvas2bce_a.parameters())
optimizer_bowcanvas2bce_b = torch.optim.Adam(drawer_bowcanvas2bce_b.parameters())
#%%
for epoch in range(15):
drawer_bowcanvas2bce_a.train()
for num, ex in enumerate(drawer_bowcanvas2bce_a.datagen.get_examples_batch()):
optimizer_bowcanvas2bce_a.zero_grad()
loss = drawer_bowcanvas2bce_a.forward(ex)
loss.backward()
optimizer_bowcanvas2bce_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('a',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bowcanvas2bce_a, drawer_bowcanvas2bce_b)), limit=100)
print(split, sims.mean())
#%%
for epoch in range(15):
drawer_bowcanvas2bce_b.train()
for num, ex in enumerate(drawer_bowcanvas2bce_b.datagen.get_examples_batch()):
optimizer_bowcanvas2bce_b.zero_grad()
loss = drawer_bowcanvas2bce_b.forward(ex)
loss.backward()
optimizer_bowcanvas2bce_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('b',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_bowcanvas2bce_a, drawer_bowcanvas2bce_b)), limit=100)
print(split, sims.mean())
#%%
baseline1_specs = dict(
teller_nn_a = teller_nn_a.spec,
teller_nn_b = teller_nn_b.spec,
drawer_nn_a = drawer_nn_a.spec,
drawer_nn_b = drawer_nn_b.spec,
drawer_sim_a = drawer_sim_a.spec,
drawer_sim_b = drawer_sim_b.spec,
drawer_bow2c_a = drawer_bow2c_a.spec,
drawer_bow2c_b = drawer_bow2c_b.spec,
teller_c2seq_a = teller_c2seq_a.spec,
teller_c2seq_b = teller_c2seq_b.spec,
drawer_bow2bce_a = drawer_bow2bce_a.spec,
drawer_bow2bce_b = drawer_bow2bce_b.spec,
drawer_bowcanvas2bce_a = drawer_bowcanvas2bce_a.spec,
drawer_bowcanvas2bce_b = drawer_bowcanvas2bce_b.spec,
)
#%%
torch.save(baseline1_specs, Path('models/baseline1.pt'))
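# The specs saved here are restored elsewhere by instantiating spec['class']
# with spec=spec (see load_baseline2 in baseline2_models.py and load_baseline3
# in baseline3_models.py).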
| codraw-models-master | baseline1_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def scene_similarity_orig(pred, target):
"""
DEPRECATED: use scene_similarity instead!
This is a re-implementation of the original CoDraw similarity metric, as per
https://arxiv.org/abs/1712.05558v1
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(7)
denom = np.zeros(7)
num[0] = 1
for c1, c2 in zip(match1, match2):
if c1.idx not in c1.HUMAN_IDXS:
num[1] += int(c1.flip != c2.flip)
denom[1] += 1
else:
num[2] += int(c1.subtype != c2.subtype or c1.flip != c2.flip)
denom[2] += 1
num[3] += int(c1.depth != c2.depth)
num[4] += np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2)
denom[3] += 1
denom[4] += 1
for idx_i in range(len(match1)):
for idx_j in range(len(match1)):
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# NOTE(nikita): the metric, as originally defined, pairs up objects
# with themselves, and also yields misleadingly high results for
# models that place multiple clipart at the exact same location
# (e.g. a model that places all clipart in the center of the canvas
# will receive zero relative-position penalty)
num[5] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) < 0)
num[6] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) < 0)
denom[5] += 1
denom[6] += 1
denom = np.maximum(denom, 1)
score_components = iou * (num / denom)
score_weights = np.array([5,-1,-1,-1,-1,-0.5,-0.5])
return score_components @ score_weights
def scene_similarity_v1(pred, target):
"""
DEPRECATED: use scene_similarity instead!
The similarity metric used for initial experiments prior to June 8, 2018.
Both this metric and scene_similarity_orig have corner cases where adding a
new, correct clipart to the scene can actually cause the similarity score
to decrease.
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(7)
denom = np.zeros(7)
num[0] = 1
for c1, c2 in zip(match1, match2):
if c1.idx not in c1.HUMAN_IDXS:
num[1] += int(c1.flip != c2.flip)
denom[1] += 1
else:
num[2] += int(c1.subtype != c2.subtype or c1.flip != c2.flip)
denom[2] += 1
num[3] += int(c1.depth != c2.depth)
num[4] += np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2)
denom[3] += 1
denom[4] += 1
for idx_i in range(len(match1)):
for idx_j in range(idx_i, len(match1)):
if idx_i == idx_j:
continue
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# TODO(nikita): this doesn't correctly handle the case if two
# cliparts have *exactly* the same x/y coordinates in the target
num[5] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) <= 0)
num[6] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) <= 0)
denom[5] += 1
denom[6] += 1
denom = np.maximum(denom, 1)
score_components = iou * (num / denom)
score_weights = np.array([5,-1,-1,-1,-1,-0.5,-0.5])
return score_components @ score_weights
def scene_similarity_v2(pred, target):
"""
DEPRECATED: use scene_similarity instead!
This version of the scene similarity metric should be monotonic, in the
sense that adding correct clipart should always increase the score, adding
incorrect clipart should decrease it, and removing incorrect clipart should
increase it.
    This version jointly scores subtype/flip/depth for humans, which was later
    replaced with more fine-grained scoring.
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
intersection_size = len(idx1 & idx2)
union_size = len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(7)
denom = np.zeros(7)
num[0] = intersection_size
for c1, c2 in zip(match1, match2):
if c1.idx not in c1.HUMAN_IDXS:
num[1] += int(c1.flip != c2.flip)
else:
num[2] += int(c1.subtype != c2.subtype or c1.flip != c2.flip)
num[3] += int(c1.depth != c2.depth)
num[4] += np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2)
denom[:5] = union_size
for idx_i in range(len(match1)):
for idx_j in range(idx_i, len(match1)):
if idx_i == idx_j:
continue
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# TODO(nikita): this doesn't correctly handle the case if two
# cliparts have *exactly* the same x/y coordinates in the target
num[5] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) <= 0)
num[6] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) <= 0)
denom[5:] = union_size * (intersection_size - 1)
denom = np.maximum(denom, 1)
score_components = num / denom
score_weights = np.array([5,-1,-1,-1,-1,-1,-1])
return score_components @ score_weights
def scene_similarity(pred, target):
"""
This version of the scene similarity metric should be monotonic, in the
sense that adding correct clipart should always increase the score, adding
incorrect clipart should decrease it, and removing incorrect clipart should
    increase it. It also breaks out the different components of Mike/Jenny
    (flip, expression, and pose) and caps the distance error at 1.
"""
idx1 = set(x.idx for x in target)
idx2 = set(x.idx for x in pred)
iou = len(idx1 & idx2) / len(idx1 | idx2)
intersection_size = len(idx1 & idx2)
union_size = len(idx1 | idx2)
common_idxs = list(idx1 & idx2)
match1 = [[x for x in target if x.idx == idx][0] for idx in common_idxs]
match2 = [[x for x in pred if x.idx == idx][0] for idx in common_idxs]
num = np.zeros(8)
denom = np.zeros(8)
num[0] = intersection_size
for c1, c2 in zip(match1, match2):
num[1] += int(c1.flip != c2.flip)
if c1.idx in c1.HUMAN_IDXS:
num[2] += int(c1.expression != c2.expression)
num[3] += int(c1.pose != c2.pose)
num[4] += int(c1.depth != c2.depth)
num[5] += min(1.0, np.sqrt((c1.normed_x - c2.normed_x) ** 2 + (c1.normed_y - c2.normed_y) ** 2))
denom[:6] = union_size
for idx_i in range(len(match1)):
for idx_j in range(idx_i, len(match1)):
if idx_i == idx_j:
continue
c1i, c1j = match1[idx_i], match1[idx_j]
c2i, c2j = match2[idx_i], match2[idx_j]
# TODO(nikita): this doesn't correctly handle the case if two
# cliparts have *exactly* the same x/y coordinates in the target
num[6] += int((c1i.x - c1j.x) * (c2i.x - c2j.x) <= 0)
num[7] += int((c1i.y - c1j.y) * (c2i.y - c2j.y) <= 0)
denom[6:] = union_size * (intersection_size - 1)
denom = np.maximum(denom, 1)
score_components = num / denom
score_weights = np.array([5,-1,-0.5,-0.5,-1,-1,-1,-1])
return score_components @ score_weights
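# Usage sketch, assuming the Episode helpers defined in episode.py:
#   sim = scene_similarity(episode.reconstruct(), episode.get_true_scene())
# An exact reconstruction scores 5.0 (intersection == union and every error
# term is zero); missing, extra, or misplaced cliparts lower the score through
# the negatively weighted components.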
def clipart_similarity_v1(a, b):
"""
DEPRECATED: use clipart_similarity instead!
The original clipart similarity metric, before subtype was split into
pose/expression
"""
if a.idx != b.idx:
return 0
score = 5
score -= int(a.subtype != b.subtype or a.flip != b.flip)
score -= int(a.depth != b.depth)
score -= np.sqrt((a.normed_x - b.normed_x) ** 2 + (a.normed_y - b.normed_y) ** 2)
return score
def clipart_similarity(a, b):
"""
This version of the metric splits out subtype into pose/expression, and caps
distance error at 1.
"""
if a.idx != b.idx:
return 0
score = 5
score -= int(a.flip != b.flip)
score -= 0.5 * int(a.expression != b.expression)
score -= 0.5 * int(a.pose != b.pose)
score -= int(a.depth != b.depth)
score -= min(1.0, np.sqrt((a.normed_x - b.normed_x) ** 2 + (a.normed_y - b.normed_y) ** 2))
return score
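# For clipart_similarity, a mismatched idx always scores 0; when the idx
# matches, the score starts at 5 and the flip/expression/pose/depth/position
# penalties can remove at most 1 + 0.5 + 0.5 + 1 + 1 = 4 points, so matching
# cliparts always land in [1, 5].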
| codraw-models-master | abs_metric.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE
try:
    from IPython.display import display
except ImportError:
    assert not INTERACTIVE
def display(*args, **kwargs):
pass
import functools
from pathlib import Path
import datetime
import abs_render
import codraw_data
from abs_metric import scene_similarity
class Episode(list):
def get_last(self, event_type):
for event in reversed(self):
if isinstance(event, event_type):
return event
return None
def reconstruct(self):
reconstructed_scene = []
for event in self:
if isinstance(event, codraw_data.DrawClipart):
reconstructed_scene = [c for c in reconstructed_scene if c.idx != event.clipart.idx]
reconstructed_scene.append(event.clipart)
elif isinstance(event, codraw_data.DrawGroup):
reconstructed_scene = [c for c in reconstructed_scene if c.idx not in [c2.idx for c2 in event.cliparts]]
reconstructed_scene.extend(event.cliparts)
return codraw_data.AbstractScene(reconstructed_scene)
def display(self):
scene = None
for event in self:
if isinstance(event, codraw_data.ObserveTruth):
assert scene is None, "Multiple ObserveTruth events not allowed in an episode"
scene = event.scene
elif isinstance(event, codraw_data.SelectClipart):
display(event.clipart)
elif isinstance(event, codraw_data.DrawClipart):
abs_render.display_cliparts([event.clipart], color='red', scale=0.75)
elif isinstance(event, codraw_data.DrawGroup):
abs_render.display_cliparts(event.cliparts, color='red', scale=0.75)
elif isinstance(event, codraw_data.TellGroup):
print("TELLER:", event.msg)
elif isinstance(event, codraw_data.ReplyGroup):
print("DRAWER:", event.msg)
elif isinstance(event, codraw_data.TellerIntention):
if event.drawn is not None:
abs_render.display_cliparts(event.drawn, color='purple', label='drawn', scale=0.33)
if event.draw_next is not None:
abs_render.display_cliparts(event.draw_next, color='yellow', label='draw next', scale=0.33)
if event.undrawn is not None:
abs_render.display_cliparts(event.undrawn, color='cyan', label='undrawn', scale=0.33)
print('===')
reconstructed_scene = self.reconstruct()
abs_render.display_cliparts(scene, label='ground truth', scale=0.75)
abs_render.display_cliparts(reconstructed_scene, color='red', label='reconstructed', scale=0.75)
print('Similarity =', scene_similarity(reconstructed_scene, scene))
def to_html(self):
res = ""
scene = None
delayed_selected_clipart = ""
for event in self:
if isinstance(event, codraw_data.ObserveTruth):
assert scene is None, "Multiple ObserveTruth events not allowed in an episode"
scene = event.scene
elif isinstance(event, codraw_data.SelectClipart):
delayed_selected_clipart += abs_render.svg_from_cliparts([event.clipart], inline_images=False)
elif isinstance(event, codraw_data.DrawClipart):
res += delayed_selected_clipart
delayed_selected_clipart = ""
res += abs_render.svg_from_cliparts([event.clipart], color='red', inline_images=False)
elif isinstance(event, codraw_data.DrawGroup):
res += delayed_selected_clipart
delayed_selected_clipart = ""
res += abs_render.svg_from_cliparts(event.cliparts, color='red', inline_images=False)
elif isinstance(event, codraw_data.TellGroup):
res += f"<p>TELLER: {event.msg}</p>"
elif isinstance(event, codraw_data.ReplyGroup):
res += f"<p>DRAWER: {event.msg}</p>"
elif isinstance(event, codraw_data.TellerIntention):
if event.drawn is not None:
res += abs_render.svg_from_cliparts(event.drawn, color='purple', label='drawn', scale=0.33)
if event.draw_next is not None:
res += abs_render.svg_from_cliparts(event.draw_next, color='yellow', label='draw next', scale=0.33)
if event.undrawn is not None:
res += abs_render.svg_from_cliparts(event.undrawn, color='cyan', label='undrawn', scale=0.33)
res += f"<p>===</p>"
reconstructed_scene = self.reconstruct()
res += abs_render.svg_from_cliparts(scene, label='ground truth', inline_images=False)
res += abs_render.svg_from_cliparts(reconstructed_scene, color='red', label='reconstructed', inline_images=False)
res += f"<p>Similarity = {scene_similarity(reconstructed_scene, scene)}</p>"
return res
def write_html(self, name_or_path):
if isinstance(name_or_path, Path):
path = name_or_path
else:
path = Path(f"./renders/{name_or_path}.html").resolve()
assert not path.exists(), "File already exists!"
assert path.parent.exists(), "Parent directory does not exist"
path.write_text(self.to_html())
def get_true_scene(self):
scene = None
for event in self:
if isinstance(event, codraw_data.ObserveTruth):
assert scene is None, "Multiple ObserveTruth events not allowed in an episode"
scene = event.scene
assert scene is not None, "Episode has no ObserveTruth events"
return scene
def scene_similarity(self):
return scene_similarity(self.reconstruct(), self.get_true_scene())
@classmethod
def run(cls, scene, fns):
episode = cls([codraw_data.ObserveTruth(scene)])
while True:
for fn in fns:
if type(episode[-1]) in fn._trigger_types:
old_len = len(episode)
fn(episode)
if len(episode) == old_len:
return episode
break
else:
assert False, f"No response for event: {type(episode[-1]).__name__}"
@classmethod
def run_script(cls, scene_and_script, fns):
scene, script = scene_and_script
episode = cls([codraw_data.ObserveTruth(scene)])
episode.script = script
episode.script_index = 0
while True:
for fn in fns:
if type(episode[-1]) in fn._trigger_types:
old_len = len(episode)
fn(episode)
if len(episode) == old_len:
return episode
break
else:
assert False, f"No response for event: {type(episode[-1]).__name__}"
def respond_to(*event_types):
types = set([(x if issubclass(x, codraw_data.Event) else None) for x in event_types])
assert None not in types, "Invalid event type in decorator"
def deco(fn):
if hasattr(fn, '_trigger_types'):
fn._trigger_types |= types
else:
fn._trigger_types = types
return fn
return deco
def response_partial(fn, *args, **kwargs):
res = functools.partial(fn, *args, **kwargs)
res._trigger_types = fn._trigger_types
return res
class Transcriber:
def __init__(self, filename, scenes=None, scenes_description="", scenes_and_scripts=None):
self.filename = filename
if scenes is not None:
self.scene_data = scenes
self.use_script = False
else:
self.scene_data = scenes_and_scripts
self.use_script = True
self.scenes_description = scenes_description
def __call__(self, name_or_path, description="", **partition_to_fns):
if isinstance(name_or_path, Path):
path = name_or_path
else:
path = Path(f"./renders/{name_or_path}.html").resolve()
assert not path.exists(), "File already exists!"
assert path.parent.exists(), "Parent directory does not exist"
assert isinstance(description, str)
res = ""
res += f"<p>Filename: {self.filename}</p>"
res += f"<p>Scenes: {self.scenes_description}</p>"
res += f"<p>Started: {datetime.datetime.now()}</p>"
res += f"<p>Description: {description}</p>"
for partition, fns in partition_to_fns.items():
res += f"<p></p>"
res += f"<h2>Partition {partition}</h2>"
for i, scene_datum in enumerate(self.scene_data):
res += f'<h3 id="{partition}_{i}">Scene {i} <a href="#{partition}_{i}">[here]</a></h3>'
if not self.use_script:
res += Episode.run(scene_datum, fns).to_html()
else:
res += Episode.run_script(scene_datum, fns).to_html()
path.write_text(res)
| codraw-models-master | episode.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import redis
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_PASSWORD = 'YOUR PASSWORD HERE'
REDIS_CONNECTION = None
def connect_to_redis():
global REDIS_CONNECTION
if REDIS_CONNECTION is None:
REDIS_CONNECTION = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db=0)
return REDIS_CONNECTION
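# Minimal usage sketch (assumes a reachable redis server with the credentials
# configured above):
#   r = connect_to_redis()
#   r.ping()
# The module-level REDIS_CONNECTION caches the client, so repeated calls reuse
# the same connection.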
| codraw-models-master | example.eval_server_common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['cpu', 'cuda_if_available', 'logsumexp', 'torch_load']
import torch
# %%
cpu = torch.device('cpu')
if torch.cuda.is_available():
cuda_if_available = torch.device('cuda')
else:
cuda_if_available = cpu
# %%
# https://github.com/pytorch/pytorch/issues/2591
def logsumexp(x, dim=None, keepdim=False):
if dim is None:
x, dim = x.view(-1), 0
xm, _ = torch.max(x, dim, keepdim=True)
x = torch.where(
(xm == float('inf')) | (xm == float('-inf')),
xm,
xm + torch.log(torch.sum(torch.exp(x - xm), dim, keepdim=True)))
return x if keepdim else x.squeeze(dim)
# %%
def torch_load(*args, **kwargs):
if cuda_if_available == cpu:
return torch.load(*args, map_location=lambda storage, loc: storage, **kwargs)
else:
return torch.load(*args, **kwargs)
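# %%
# Sanity-check sketch for logsumexp (runs only when this module is executed
# directly): for n equal inputs with value v, the result should be v + log(n).
if __name__ == '__main__':
    import math
    assert abs(float(logsumexp(torch.zeros(4))) - math.log(4)) < 1e-6
    assert logsumexp(torch.zeros(2, 3), dim=1).shape == (2,)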
| codraw-models-master | nkfb_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
from attention import AttentionSeqToMasked
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
from datagen import SceneToSeqData
from model import make_fns, eval_fns
from model import Model
# %%
class SceneToSeqTeller(Model, torch.nn.Module):
datagen_cls = SceneToSeqData
def init_full(self,
d_word_emb=256,
d_tag_emb=128, num_heads=4, d_qkv=128,
pre_attn_tag_dropout=0.2, attn_dropout=0.1,
d_lstm=1024, num_lstm_layers=1,
pre_lstm_emb_dropout=0.5,
pre_lstm_scene_dropout=0.15,
lstm_dropout=0.0,
post_lstm_dropout=0.3,
label_smoothing=0.05,
prediction_loss_scale=5.,
d_clipart_state_hidden=1024,
predict_for_full_library=True,
):
self._args = dict(
d_word_emb=d_word_emb,
d_tag_emb=d_tag_emb, num_heads=num_heads, d_qkv=d_qkv,
pre_attn_tag_dropout=pre_attn_tag_dropout,
attn_dropout=attn_dropout,
d_lstm=d_lstm, num_lstm_layers=num_lstm_layers, pre_lstm_emb_dropout=pre_lstm_emb_dropout,
pre_lstm_scene_dropout=pre_lstm_scene_dropout,
lstm_dropout=lstm_dropout,
post_lstm_dropout=post_lstm_dropout,
label_smoothing=label_smoothing,
prediction_loss_scale=prediction_loss_scale,
d_clipart_state_hidden=d_clipart_state_hidden,
predict_for_full_library=predict_for_full_library,
)
dg = self.datagen
self.tag_embs = nn.Embedding(dg.NUM_TAGS, d_tag_emb)
self.d_clipart_tags = d_tag_emb * dg.NUM_TAGS_PER_INDEX
self.pre_attn_tag_dropout = nn.Dropout(pre_attn_tag_dropout)
self.attn_prelstm = AttentionSeqToMasked(
d_pre_q=d_word_emb,
d_pre_k=self.d_clipart_tags,
d_pre_v=self.d_clipart_tags,
d_qk=d_qkv, d_v=d_qkv,
num_heads=num_heads,
attn_dropout=attn_dropout)
self.attn = AttentionSeqToMasked(
d_pre_q=d_lstm,
d_pre_k=self.d_clipart_tags,
d_pre_v=self.d_clipart_tags,
d_qk=d_qkv, d_v=d_qkv,
num_heads=num_heads,
attn_dropout=attn_dropout)
self.word_embs = nn.Embedding(len(self.datagen.vocabulary_dict), d_word_emb)
self.pre_lstm_emb_dropout = nn.Dropout(pre_lstm_emb_dropout)
self.pre_lstm_scene_dropout = nn.Dropout(pre_lstm_scene_dropout)
self.lstm = nn.LSTM(d_word_emb + self.attn_prelstm.d_out, d_lstm, num_layers=num_lstm_layers, dropout=lstm_dropout)
self.post_lstm_dropout = nn.Dropout(post_lstm_dropout)
self.word_project = nn.Linear(d_lstm + self.attn.d_out, len(self.datagen.vocabulary_dict))
self.label_smoothing = label_smoothing
# Possible auxiliary loss for predicting clipart state
self.prediction_loss_scale = prediction_loss_scale
self.predict_for_full_library = predict_for_full_library
if prediction_loss_scale > 0:
if predict_for_full_library:
d_clipart_state_in = d_lstm + dg.NUM_INDEX
else:
d_clipart_state_in = d_lstm
self.clipart_state_predictor = nn.Sequential(
nn.Linear(d_clipart_state_in, d_clipart_state_hidden),
nn.ReLU(),
nn.Linear(d_clipart_state_hidden, dg.NUM_INDEX * dg.NUM_CLIPART_STATES),
)
else:
self.clipart_state_predictor = None
self.to(cuda_if_available)
self.inference_method = 'greedy'
self.sampling_temperature = 1.0
self.max_rounds = 50 # This is only changed for human eval
def get_spec(self):
return self._args
def print_hparams(self):
print("Hyperparameters:")
for k, v in self._args.items():
print(k, '=', v)
print()
def forward(self, example_batch, return_loss=True, return_nll_count=False):
dg = self.datagen
b_clipart_tags = self.tag_embs(example_batch['b_scene_tags']).view(-1, dg.NUM_INDEX, self.d_clipart_tags)
if not (return_loss or return_nll_count):
ks_prelstm, vs_prelstm = self.attn_prelstm.precompute_kv(b_clipart_tags, b_clipart_tags)
ks, vs = self.attn.precompute_kv(b_clipart_tags, b_clipart_tags)
return example_batch['b_scene_mask'], ks_prelstm, vs_prelstm, ks, vs
packer = example_batch['packer']
ob_clipart_tags = packer.ob_from_b(b_clipart_tags)
ob_clipart_tags = self.pre_attn_tag_dropout(ob_clipart_tags)
ob_scene_mask = packer.ob_from_b(example_batch['b_scene_mask'])
brw_teller_tokens_in = example_batch['brw_teller_tokens_in']
if self.training:
word_dropout_probs = 1. / (1. + example_batch['brw_teller_counts_in'])
brw_word_dropout_mask = torch.rand_like(word_dropout_probs) < word_dropout_probs
brw_teller_tokens_in = torch.where(brw_word_dropout_mask, torch.full_like(brw_teller_tokens_in, dg.unk_index), brw_teller_tokens_in)
brw_embs = self.pre_lstm_emb_dropout(self.word_embs(brw_teller_tokens_in))
orwb_embs = packer.orwb_from_brw_pack(brw_embs)
orwb_attended_values_prelstm = self.attn_prelstm(orwb_embs, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
orwb_lstm_in = nn.utils.rnn.PackedSequence(torch.cat([
orwb_embs.data,
orwb_attended_values_prelstm.data,
], -1), orwb_embs.batch_sizes)
orwb_lstm_out, _ = self.lstm(orwb_lstm_in)
orwb_lstm_out = nn.utils.rnn.PackedSequence(self.post_lstm_dropout(orwb_lstm_out.data), orwb_lstm_out.batch_sizes)
orwb_attended_values = self.attn(orwb_lstm_out, ob_clipart_tags, ob_clipart_tags, k_mask=ob_scene_mask)
brw_pre_project = torch.cat([
packer.brw_from_orwb_unpack(orwb_lstm_out),
packer.brw_from_orwb_unpack(orwb_attended_values),
], -1)
brw_word_logits = self.word_project(brw_pre_project)
brw_word_losses = F.cross_entropy(brw_word_logits, example_batch['brw_teller_tokens_out'], reduce=False)
if self.prediction_loss_scale > 0:
brw_starts_round = (example_batch['brw_teller_tokens_in'] == dg.vocabulary_dict['<S>'])
if self.predict_for_full_library:
br_clipart_state_predictor_in = torch.cat([
packer.brw_from_orwb_unpack(orwb_lstm_out)[brw_starts_round],
packer.br_from_b_expand(example_batch['b_scene_mask']).to(torch.float),
], -1)
else:
br_clipart_state_predictor_in = packer.brw_from_orwb_unpack(orwb_lstm_out)[brw_starts_round]
bri_clipart_state_logits = self.clipart_state_predictor(br_clipart_state_predictor_in).view(-1, dg.NUM_CLIPART_STATES)
bri_clipart_state_losses = F.cross_entropy(bri_clipart_state_logits, example_batch['br_drawer_clipart_state'].view(-1), reduce=False)
if self.predict_for_full_library:
br_clipart_state_losses = bri_clipart_state_losses.view(-1, dg.NUM_INDEX).sum(-1)
else:
br_clipart_state_losses = torch.where(
packer.br_from_b_expand(example_batch['b_scene_mask']),
bri_clipart_state_losses.view(-1, dg.NUM_INDEX),
torch.zeros_like(bri_clipart_state_losses.view(-1, dg.NUM_INDEX))).sum(-1)
if return_loss:
# Label smoothing
eps = (self.label_smoothing / brw_word_logits.shape[-1])
brw_word_losses = (1. - self.label_smoothing) * brw_word_losses + eps * (-F.log_softmax(brw_word_logits, dim=-1).sum(dim=-1))
# TODO(nikita): Packer should implement some reduction operations
per_example_word_losses = nn.utils.rnn.pad_packed_sequence(packer.orwb_from_brw_pack(brw_word_losses))[0].sum(0)
word_loss = per_example_word_losses.mean()
if self.prediction_loss_scale > 0:
per_example_prediction_losses = nn.utils.rnn.pad_packed_sequence(packer.srb_from_br_pack(br_clipart_state_losses))[0].sum(0)
prediction_loss = per_example_prediction_losses.mean()
return self.prediction_loss_scale * prediction_loss + word_loss
else:
return word_loss
if return_nll_count:
            # TODO(nikita): the model uses multiple tokens to signal the end of
            # the last utterance, followed by the end of the conversation. These
            # extra actions mean the perplexity here is not directly comparable
            # to that of models that handle stop tokens differently.
brw_non_unk_mask = example_batch['brw_teller_tokens_out'] != dg.unk_index
brw_nll = torch.where(brw_non_unk_mask, brw_word_losses, torch.zeros_like(brw_word_losses))
nll = float(brw_nll.sum())
count = int(brw_non_unk_mask.long().sum())
return nll, count
assert False, "unreachable"
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def tell(self, episode):
if not hasattr(episode, 'to_tell'):
self.prepare(episode)
if episode.to_tell:
events = episode.to_tell.pop(0)
episode.extend(events)
def prepare(self, episode):
true_scene = episode.get_last(codraw_data.ObserveTruth).scene
example_batch = self.datagen.tensors_from_episode(episode)
b_scene_mask, ks_prelstm, vs_prelstm, ks, vs = self.forward(example_batch, return_loss=False)
to_tell = []
lstm_state = None # carried across conversation rounds!
for round in range(self.max_rounds):
tokens = [self.datagen.vocabulary_dict['<S>']]
events_this_round = []
# Longest utterance in all of CoDraw is 39 words
# Humans have a 140-char limit, but this is not easy to enforce with
# word-level tokenization
for wordnum in range(50):
token_emb = self.word_embs(torch.tensor(tokens[-1], dtype=torch.long).to(cuda_if_available))[None,None,:]
attended_values_prelstm = self.attn_prelstm(token_emb, ks=ks_prelstm, vs=vs_prelstm, k_mask=b_scene_mask)
lstm_in = torch.cat([token_emb, attended_values_prelstm], -1)
lstm_out, lstm_state = self.lstm(lstm_in, lstm_state)
attended_values = self.attn(lstm_out, ks=ks, vs=vs, k_mask=b_scene_mask)
pre_project = torch.cat([lstm_out, attended_values], -1)
if tokens[-1] == self.datagen.vocabulary_dict['<S>'] and self.prediction_loss_scale > 0:
assert not events_this_round
if self.predict_for_full_library:
clipart_state_predictor_in = torch.cat([
lstm_out,
b_scene_mask.to(torch.float)[None,:,:],
], -1)
else:
clipart_state_predictor_in = lstm_out
clipart_state_logits = self.clipart_state_predictor(clipart_state_predictor_in).view(self.datagen.NUM_INDEX, self.datagen.NUM_CLIPART_STATES)
clipart_state_selected = clipart_state_logits.argmax(dim=-1)
undrawn = AbstractScene([c for c in true_scene if clipart_state_selected[c.idx] == self.datagen.CLIPART_STATE_UNDRAWN])
intention = codraw_data.TellerIntention(drawn=None, undrawn=undrawn, draw_next=None)
events_this_round.append(intention)
word_logits = self.word_project(pre_project[0,0,:])
word_logits[self.datagen.vocabulary_dict['<S>']] = -float('inf')
if round == 0 and wordnum == 0:
word_logits[self.datagen.vocabulary_dict['</TELL>']] = -float('inf')
if self.inference_method == 'greedy':
next_token = int(word_logits.argmax())
elif self.inference_method == 'sample':
next_token = int(torch.multinomial(F.softmax(word_logits / self.sampling_temperature, dim=-1)[None, :], 1).item())
else:
raise ValueError(f"Invalid inference_method: {self.inference_method}")
assert next_token != self.datagen.vocabulary_dict['<S>']
tokens.append(next_token)
if next_token == self.datagen.vocabulary_dict['</S>']:
break
elif next_token == self.datagen.vocabulary_dict['</TELL>']:
break
if tokens[-1] == self.datagen.vocabulary_dict['</TELL>']:
break
msg = " ".join([self.datagen.vocabulary[i] for i in tokens[1:-1]])
events_this_round.append(codraw_data.TellGroup(msg))
to_tell.append(events_this_round)
episode.to_tell = to_tell
def get_action_fns(self):
return [self.tell]
def calc_split_loss(self, split='dev'):
"""
Calculates teller loss on a full split
"""
datagen_spec = {**self.datagen.spec}
datagen_spec['split'] = split
datagen_dev = self.datagen_cls(spec=datagen_spec)
assert datagen_dev.vocabulary == self.datagen.vocabulary
losses = []
count = 0
with torch.no_grad():
self.eval()
for ex in datagen_dev.get_examples_unshuffled_batch(batch_size=128):
batch_size = ex['b_scene_mask'].shape[0]
loss = self.forward(ex)
loss = float(loss) * batch_size
losses.append(loss)
count += batch_size
return np.array(losses).sum() / count
# %%
def load_baseline3():
baseline3_specs = torch_load(Path('models/scene2seq_july11.pt'))
models = {}
for k, spec in baseline3_specs.items():
print(k)
models[k] = globals()[spec['class']](spec=spec)
models[k].eval()
return models
| codraw-models-master | baseline3_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pathlib import Path
import editdistance
from collections import Counter
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
from packer import Packer
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
#%%
class Datagen:
# the spec contains summaries (like a vocab list), but the events are stored
# as a pointer and not as the actual events dictionary. The events get
    # restored only if needed (which shouldn't really be the case, because saved
    # models won't need to be trained further).
def __init__(self, split=None, spec=None, **kwargs):
self._examples_cache = None
if spec is not None:
self.split = spec['split']
self.init_from_spec(**{k: v for (k,v) in spec.items() if k != 'split'})
else:
self.split = split
self.init_full(**kwargs)
def init_full(self):
raise NotImplementedError("Subclasses should override this")
def init_from_spec(self):
raise NotImplementedError("Subclasses should override this")
def calc_derived(self):
pass
def get_spec(self):
return {}
@property
def spec(self):
spec = self.get_spec()
if 'split' not in spec:
spec['split'] = self.split
return spec
def get_examples(self):
raise NotImplementedError("Subclasses should override this")
def collate(self, batch):
raise NotImplementedError("Subclasses should override this")
def get_examples_batch(self, batch_size=16):
if self._examples_cache is None:
self._examples_cache = list(self.get_examples())
batch = []
epoch_examples = self._examples_cache[:]
np.random.shuffle(epoch_examples)
for ex in epoch_examples:
batch.append(ex)
if len(batch) == batch_size:
yield self.collate(batch)
batch = []
def get_examples_unshuffled_batch(self, batch_size=16):
"""
        Does not shuffle, and the last batch may contain fewer elements.
Originally added for perplexity evaluation.
"""
if self._examples_cache is None:
self._examples_cache = list(self.get_examples())
batch = []
epoch_examples = self._examples_cache[:]
for ex in epoch_examples:
batch.append(ex)
if len(batch) == batch_size:
yield self.collate(batch)
batch = []
if batch:
yield self.collate(batch)
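# Subclasses implement get_examples() (yield one example dict at a time) and
# collate() (merge a list of example dicts into batched tensors). Note that
# get_examples_batch() shuffles a cached copy of the examples each epoch and
# drops the final partial batch, while get_examples_unshuffled_batch() keeps
# the original ordering and yields the trailing partial batch as well.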
#%%
class NearestNeighborData(Datagen):
def init_full(self):
self.build_dicts()
def init_from_spec(self):
self.build_dicts()
def build_dicts(self):
# calculate events
events = codraw_data.get_place_one(self.split)
self.msg_to_clipart = {}
self.clipart_to_msg = {}
it = iter(events)
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
self.msg_to_clipart[msg] = clipart
self.clipart_to_msg[clipart] = msg
#%%
class MessageSimilarityData(Datagen):
def init_full(self):
self.build_dicts()
vocabulary = set()
for msg in self.msg_to_clipart:
vocabulary |= set(msg.split())
self.vocabulary = sorted(vocabulary)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.build_dicts()
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def build_dicts(self):
events = codraw_data.get_place_one(self.split)
self.msg_to_clipart = {}
it = iter(events)
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
assert msg != ""
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
self.msg_to_clipart[msg] = clipart
def calc_derived(self):
self.all_msgs = list(self.msg_to_clipart.keys())
assert "" not in self.all_msgs
all_cliparts = [self.msg_to_clipart[msg] for msg in self.all_msgs]
self.similarity_matrix = np.zeros((len(all_cliparts), len(all_cliparts)))
for i in range(self.similarity_matrix.shape[0]):
for j in range(i, self.similarity_matrix.shape[1]):
self.similarity_matrix[i, j] = clipart_similarity(all_cliparts[i], all_cliparts[j])
for i in range(self.similarity_matrix.shape[0]):
for j in range(i):
self.similarity_matrix[i, j] = self.similarity_matrix[j, i]
# Never suggest the same sentence as both the input and a candidate
for i in range(self.similarity_matrix.shape[0]):
self.similarity_matrix[i, i] = -1
matrix_good = self.similarity_matrix > 4.5
matrix_bad = (self.similarity_matrix < 3.5) & (self.similarity_matrix >= 0)
for i in range(matrix_good.shape[0]):
if not matrix_good[i].any():
matrix_good[i, self.similarity_matrix[i].argmax()] = True
self.cands_good = np.zeros_like(self.similarity_matrix, dtype=int)
self.cands_good_lens = np.zeros(self.cands_good.shape[0], dtype=int)
self.cands_bad = np.zeros_like(self.similarity_matrix, dtype=int)
self.cands_bad_lens = np.zeros(self.cands_bad.shape[0], dtype=int)
where_good_i, where_good_j = np.where(matrix_good)
for i in range(matrix_good.shape[0]):
cands_good = where_good_j[where_good_i == i]
self.cands_good_lens[i] = len(cands_good)
self.cands_good[i,:len(cands_good)] = cands_good
where_bad_i, where_bad_j = np.where(matrix_bad)
unique_vals, unique_indices = np.unique(where_bad_i, return_index=True)
assert (unique_vals == np.arange(self.cands_bad.shape[0])).all()
for i in range(matrix_bad.shape[0]):
start = unique_indices[i]
if i == matrix_bad.shape[0] - 1:
assert (where_bad_i[start:] == i).all()
cands_bad = where_bad_j[start:]
else:
end = unique_indices[i+1]
assert (where_bad_i[start:end] == i).all()
cands_bad = where_bad_j[start:end]
self.cands_bad_lens[i] = len(cands_bad)
self.cands_bad[i,:len(cands_bad)] = cands_bad
def get_candidates_for(self, i):
good = np.random.choice(self.cands_good[i][:self.cands_good_lens[i]])
bad = np.random.choice(self.cands_bad[i][:self.cands_bad_lens[i]], size=19)
return (good, *bad)
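    # Candidate construction sketch: calc_derived() marks message pairs with
    # clipart similarity > 4.5 as positives (falling back to the single most
    # similar message when none qualify) and pairs with similarity in [0, 3.5)
    # as negatives; get_candidates_for() then samples 1 positive and 19
    # negatives per anchor message.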
def get_examples(self):
for i in np.random.permutation(self.cands_good.shape[0]):
cands = self.get_candidates_for(i)
idxs = (i, *cands)
words = []
offsets = []
next_offset = 0
for idx in idxs:
offsets.append(next_offset)
toks = [self.vocabulary_dict.get(tok, None) for tok in self.all_msgs[idx].split()]
toks = [tok for tok in toks if tok is not None]
words.extend(toks)
next_offset += len(toks)
yield {
'words': torch.LongTensor(words),
'offsets': torch.LongTensor(offsets)
}
def get_examples_batch(self, batch_size=16):
batch = []
for ex in self.get_examples():
batch.append(ex)
if len(batch) == batch_size:
yield self.collate(batch)
batch = []
def collate(self, batch):
offsets = [x['offsets'] for x in batch]
extra = 0
for i in range(len(offsets)):
offsets[i] += extra
extra += len(batch[i]['words'])
return {
'words': torch.cat([x['words'] for x in batch]).to(cuda_if_available),
'offsets': torch.cat(offsets).to(cuda_if_available),
}
#%%
def vocabulary_for_split(split, event_getter=codraw_data.get_place_one):
vocabulary = set()
it = iter(event_getter(split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
msg = event.msg
vocabulary |= set(msg.split())
return sorted(vocabulary)
def vocabulary_counter_for_split(split, event_getter=codraw_data.get_place_one):
vocabulary = Counter()
it = iter(event_getter(split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
msg = event.msg
vocabulary.update(msg.split())
return vocabulary
#%%
class BOWtoClipartData(Datagen):
def init_full(self):
self.vocabulary = vocabulary_for_split(self.split)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_place_one(self.split))
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
clipart_index = torch.LongTensor(np.array(clipart.idx, dtype=int))
clipart_categorical = torch.LongTensor([
clipart.subtype, clipart.depth, clipart.flip])
clipart_numerical = torch.tensor([clipart.normed_x, clipart.normed_y], dtype=torch.float)
msg_idxs = [self.vocabulary_dict.get(word, None) for word in msg.split()]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_index': clipart_index,
'clipart_categorical': clipart_categorical,
'clipart_numerical': clipart_numerical,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
offsets = np.cumsum([0] + [len(x['msg_idxs']) for x in batch])[:-1]
return {
'clipart_index': torch.stack([x['clipart_index'] for x in batch]).to(cuda_if_available),
'clipart_categorical': torch.stack([x['clipart_categorical'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'msg_idxs': torch.cat([x['msg_idxs'] for x in batch]).to(cuda_if_available),
'offsets': torch.tensor(offsets).to(cuda_if_available),
}
#%%
class ClipartToSeqData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_BINARY = NUM_INDEX + NUM_SUBTYPES + NUM_DEPTH + NUM_FLIP
BINARY_OFFSETS = np.cumsum([0, NUM_INDEX, NUM_SUBTYPES, NUM_DEPTH])
NUM_NUMERICAL = 2 # x, y
def init_full(self):
self.vocabulary = ['<S>', '</S>'] + vocabulary_for_split(self.split)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_place_one(self.split))
for event in it:
if isinstance(event, codraw_data.SelectClipart):
clipart = event.clipart
event = next(it)
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.DrawClipart)
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
x = clipart.normed_x
y = clipart.normed_y
clipart_numerical = torch.tensor([x, y], dtype=torch.float)
clipart_binary = torch.zeros(self.NUM_BINARY)
for val, offset in zip([clipart.idx, clipart.subtype, clipart.depth, clipart.flip], self.BINARY_OFFSETS):
clipart_binary[val + offset] = 1.
msg_idxs = [self.vocabulary_dict['<S>']] + [self.vocabulary_dict.get(word, None) for word in msg.split()] + [self.vocabulary_dict['</S>']]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_binary': clipart_binary,
'clipart_numerical': clipart_numerical,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
batch = sorted(batch, key=lambda x: -len(x['msg_idxs']))
msg_lens = torch.tensor([len(x['msg_idxs']) - 1 for x in batch], dtype=torch.long)
max_len = int(msg_lens.max())
msg_idxs_input = torch.stack([F.pad(torch.tensor(x['msg_idxs'][:-1]), (0, max_len + 1 - len(x['msg_idxs']))) for x in batch])
msg_idxs_output = torch.stack([F.pad(torch.tensor(x['msg_idxs'][1:]), (0, max_len + 1 - len(x['msg_idxs']))) for x in batch])
return {
'clipart_binary': torch.stack([x['clipart_binary'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'msg_in': nn.utils.rnn.pack_padded_sequence(msg_idxs_input.to(cuda_if_available), msg_lens.to(cuda_if_available), batch_first=True),
'msg_out': nn.utils.rnn.pack_padded_sequence(msg_idxs_output.to(cuda_if_available), msg_lens.to(cuda_if_available), batch_first=True),
}
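# collate() sorts the batch by decreasing message length (as required by
# pack_padded_sequence), then builds teacher-forcing pairs: msg_in holds tokens
# [:-1] and msg_out holds tokens [1:], both packed with the same per-example
# lengths.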
#%%
class BOWplusCanvasToMultiData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_CATEGORICAL = NUM_SUBTYPES + NUM_DEPTH + NUM_FLIP
NUM_NUMERICAL = 2 # x, y
NUM_ALL = NUM_CATEGORICAL + NUM_NUMERICAL
NUM_BINARY = (NUM_INDEX * (1 + NUM_DEPTH + NUM_FLIP)) + 2 * NUM_SUBTYPES
def init_full(self):
self.vocabulary = vocabulary_for_split(self.split, codraw_data.get_contextual_place_many)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_contextual_place_many(self.split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.ObserveCanvas)
canvas_context = event.scene
event = next(it)
assert isinstance(event, codraw_data.DrawGroup)
cliparts = event.cliparts
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
if not msg:
continue
clipart_chosen_mask = np.zeros(self.NUM_INDEX , dtype=bool)
clipart_categorical = np.zeros((self.NUM_INDEX, 3))
clipart_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
for clipart in cliparts:
clipart_chosen_mask[clipart.idx] = True
clipart_categorical[clipart.idx, :] = [clipart.subtype, clipart.depth, clipart.flip]
clipart_numerical[clipart.idx, :] = [clipart.normed_x, clipart.normed_y]
clipart_chosen_mask = torch.tensor(clipart_chosen_mask.astype(np.uint8), dtype=torch.uint8)
clipart_categorical = torch.tensor(clipart_categorical, dtype=torch.long)
clipart_numerical = torch.tensor(clipart_numerical, dtype=torch.float)
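                # Canvas-context features: for each clipart slot a row of [is-present,
                # depth one-hot, flip one-hot], plus subtype (pose/expression) one-hots for the
                # two human figures; everything is flattened below into one NUM_BINARY vector.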
canvas_binary = np.zeros((self.NUM_INDEX, 1 + self.NUM_DEPTH + self.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, self.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + self.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)
msg_idxs = [self.vocabulary_dict.get(word, None) for word in msg.split()]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_chosen_mask': clipart_chosen_mask,
'clipart_categorical': clipart_categorical,
'clipart_numerical': clipart_numerical,
'canvas_binary': canvas_binary,
'canvas_numerical': canvas_numerical,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
offsets = np.cumsum([0] + [len(x['msg_idxs']) for x in batch])[:-1]
return {
'clipart_chosen_mask': torch.stack([x['clipart_chosen_mask'] for x in batch]).to(cuda_if_available),
'clipart_categorical': torch.stack([x['clipart_categorical'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'canvas_binary': torch.stack([x['canvas_binary'] for x in batch]).to(cuda_if_available),
'canvas_numerical': torch.stack([x['canvas_numerical'] for x in batch]).to(cuda_if_available),
'msg_idxs': torch.cat([x['msg_idxs'] for x in batch]).to(cuda_if_available),
'offsets': torch.tensor(offsets).to(cuda_if_available),
}
#%%
class BOWAddUpdateData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_CATEGORICAL = NUM_SUBTYPES + NUM_DEPTH + NUM_FLIP
NUM_NUMERICAL = 2 # x, y
NUM_ALL = NUM_CATEGORICAL + NUM_NUMERICAL
NUM_BINARY = (NUM_INDEX * (1 + NUM_DEPTH + NUM_FLIP)) + 2 * NUM_SUBTYPES
NUM_X_TICKS = 3
NUM_Y_TICKS = 2
NUM_TAGS = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + NUM_X_TICKS + NUM_Y_TICKS + 1
NUM_TAGS_PER_INDEX = 6 # index, subtype, depth, flip, x, y
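    # Tag vocabulary layout: id 0 is the default for empty slots, ids 1.. encode (in order)
    # clipart index, subtype, depth, flip, and coarse x/y position, and the final id
    # (NUM_TAGS - 1) marks a completely empty canvas (see canvas_tags construction below).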
def init_full(self):
self.vocabulary = vocabulary_for_split(self.split, codraw_data.get_contextual_place_many)
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.calc_derived()
def init_from_spec(self, vocabulary):
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
def get_spec(self):
return dict(vocabulary=self.vocabulary)
def get_examples(self):
it = iter(codraw_data.get_contextual_place_many(self.split))
for event in it:
if isinstance(event, codraw_data.TellGroup):
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.ObserveCanvas)
canvas_context = event.scene
event = next(it)
assert isinstance(event, codraw_data.DrawGroup)
cliparts = event.cliparts
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
if not msg:
continue
context_idxs = set([c.idx for c in canvas_context])
clipart_added_mask = np.zeros(self.NUM_INDEX , dtype=bool)
clipart_updated_mask = np.zeros(self.NUM_INDEX , dtype=bool)
clipart_categorical = np.zeros((self.NUM_INDEX, 3))
clipart_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
for clipart in cliparts:
if clipart.idx in context_idxs:
clipart_updated_mask[clipart.idx] = True
else:
clipart_added_mask[clipart.idx] = True
clipart_categorical[clipart.idx, :] = [clipart.subtype, clipart.depth, clipart.flip]
clipart_numerical[clipart.idx, :] = [clipart.normed_x, clipart.normed_y]
clipart_added_mask = torch.tensor(clipart_added_mask.astype(np.uint8), dtype=torch.uint8)
clipart_updated_mask = torch.tensor(clipart_updated_mask.astype(np.uint8), dtype=torch.uint8)
clipart_categorical = torch.tensor(clipart_categorical, dtype=torch.long)
clipart_numerical = torch.tensor(clipart_numerical, dtype=torch.float)
canvas_binary = np.zeros((self.NUM_INDEX, 1 + self.NUM_DEPTH + self.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, self.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((self.NUM_INDEX, self.NUM_NUMERICAL))
canvas_tags = np.zeros((self.NUM_INDEX + 1, self.NUM_TAGS_PER_INDEX), dtype=int)
canvas_mask = np.zeros(self.NUM_INDEX + 1, dtype=bool)
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + self.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
x_tick = int(np.floor(clipart.normed_x * self.NUM_X_TICKS))
if x_tick < 0:
x_tick = 0
elif x_tick >= self.NUM_X_TICKS:
x_tick = self.NUM_X_TICKS - 1
y_tick = int(np.floor(clipart.normed_y * self.NUM_Y_TICKS))
if y_tick < 0:
y_tick = 0
elif y_tick >= self.NUM_Y_TICKS:
y_tick = self.NUM_Y_TICKS - 1
# Tag features (for attention)
canvas_tags[clipart.idx, 0] = 1 + clipart.idx
canvas_tags[clipart.idx, 1] = 1 + Clipart.NUM_IDX + clipart.subtype
canvas_tags[clipart.idx, 2] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + clipart.depth
canvas_tags[clipart.idx, 3] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + int(clipart.flip)
canvas_tags[clipart.idx, 4] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + x_tick
canvas_tags[clipart.idx, 5] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + self.NUM_X_TICKS + y_tick
canvas_mask[clipart.idx] = True
if not canvas_context:
canvas_tags[-1, 0] = self.NUM_TAGS - 1
canvas_mask[-1] = True
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)
canvas_tags = torch.tensor(canvas_tags, dtype=torch.long)
canvas_mask = torch.tensor(canvas_mask.astype(np.uint8), dtype=torch.uint8)
msg_idxs = [self.vocabulary_dict.get(word, None) for word in msg.split()]
msg_idxs = [idx for idx in msg_idxs if idx is not None]
msg_idxs = torch.LongTensor(msg_idxs)
example = {
'clipart_added_mask': clipart_added_mask,
'clipart_updated_mask': clipart_updated_mask,
'clipart_categorical': clipart_categorical,
'clipart_numerical': clipart_numerical,
'canvas_binary': canvas_binary,
'canvas_numerical': canvas_numerical,
'canvas_tags': canvas_tags,
'canvas_mask': canvas_mask,
'msg_idxs': msg_idxs,
}
yield example
def collate(self, batch):
offsets = np.cumsum([0] + [len(x['msg_idxs']) for x in batch])[:-1]
return {
'clipart_added_mask': torch.stack([x['clipart_added_mask'] for x in batch]).to(cuda_if_available),
'clipart_updated_mask': torch.stack([x['clipart_updated_mask'] for x in batch]).to(cuda_if_available),
'clipart_categorical': torch.stack([x['clipart_categorical'] for x in batch]).to(cuda_if_available),
'clipart_numerical': torch.stack([x['clipart_numerical'] for x in batch]).to(cuda_if_available),
'canvas_binary': torch.stack([x['canvas_binary'] for x in batch]).to(cuda_if_available),
'canvas_numerical': torch.stack([x['canvas_numerical'] for x in batch]).to(cuda_if_available),
'canvas_tags': torch.stack([x['canvas_tags'] for x in batch]).to(cuda_if_available),
'canvas_mask': torch.stack([x['canvas_mask'] for x in batch]).to(cuda_if_available),
'msg_idxs': torch.cat([x['msg_idxs'] for x in batch]).to(cuda_if_available),
'offsets': torch.tensor(offsets).to(cuda_if_available),
}
#%%
class SceneToSeqData(Datagen):
NUM_INDEX = Clipart.NUM_IDX
NUM_SUBTYPES = Clipart.NUM_SUBTYPE
NUM_DEPTH = Clipart.NUM_DEPTH
NUM_FLIP = Clipart.NUM_FLIP
NUM_X_TICKS = 3
NUM_Y_TICKS = 2
NUM_BINARY = (NUM_INDEX * (1 + NUM_DEPTH + NUM_FLIP + NUM_X_TICKS + NUM_Y_TICKS)) + 2 * NUM_SUBTYPES
NUM_TAGS = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + NUM_X_TICKS + NUM_Y_TICKS
NUM_TAGS_PER_INDEX = 6 # index, subtype, depth, flip, x, y
CLIPART_STATE_NOT_UNDRAWN = 0
CLIPART_STATE_UNDRAWN = 1
NUM_CLIPART_STATES = 2
def init_full(self):
self.vocabulary_counts = vocabulary_counter_for_split(self.split, codraw_data.get_set_clipart_pre_peek)
self.vocabulary = ['</TELL>', '<S>', '</S>', '<UNK>'] + sorted(self.vocabulary_counts.keys())
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.unk_index = self.vocabulary_dict['<UNK>']
self.calc_derived()
def init_from_spec(self, vocabulary, vocabulary_counts):
self.vocabulary_counts = vocabulary_counts
self.vocabulary = vocabulary
self.vocabulary_dict = {item: num for num, item in enumerate(self.vocabulary)}
self.unk_index = self.vocabulary_dict['<UNK>']
def get_spec(self):
return dict(vocabulary=self.vocabulary, vocabulary_counts=self.vocabulary_counts)
def tensors_from_episode(self, episode, is_train=False):
examples = list(self.get_examples(episode, is_train=is_train))
if not examples:
print(episode)
assert len(examples) > 0, "Episode did not produce any examples"
assert len(examples) == 1, "Episode should not produce multiple examples"
return self.collate(examples, is_train=is_train)
def tensors_from_episodes(self, episodes, is_train=True):
events = []
for episode in episodes:
events.extend(episode)
examples = list(self.get_examples(events, is_train=is_train))
if not examples:
print(episode)
assert len(examples) > 0, "Episode did not produce any examples"
return self.collate(examples, is_train=is_train)
def get_examples(self, events=None, is_train=True):
example = None
scene_present_idxs = None
prev_drawn_idxs = None
num_unfilled_past = None
if events is None:
events = codraw_data.get_set_clipart_pre_peek(self.split)
it = iter(events)
for event in it:
if isinstance(event, codraw_data.ObserveTruth):
if example is not None:
# When doing RL, it's important that the batched data
# matches the decisions taken in step-by-step mode
# If an episode was cut off, don't include a </TELL> token
# All human conversations have less than 50 rounds
if len(example['teller_tokens_in']) < 50:
teller_tokens_stop = [self.vocabulary_dict[x] for x in ('<S>', '</TELL>')]
teller_tokens_stop = torch.tensor(teller_tokens_stop, dtype=torch.long)
example['teller_tokens_in'].append(teller_tokens_stop[:-1])
example['teller_tokens_out'].append(teller_tokens_stop[1:])
example['teller_counts_in'].append(torch.tensor([np.inf], dtype=torch.float))
else:
example['drawer_clipart_state'].pop()
yield example
scene = event.scene
scene_present_idxs = set([c.idx for c in scene])
scene_tags = np.zeros((self.NUM_INDEX, self.NUM_TAGS_PER_INDEX), dtype=int)
scene_mask = np.zeros(self.NUM_INDEX, dtype=bool)
for clipart in scene:
x_tick = int(np.floor(clipart.normed_x * self.NUM_X_TICKS))
if x_tick < 0:
x_tick = 0
elif x_tick >= self.NUM_X_TICKS:
x_tick = self.NUM_X_TICKS - 1
y_tick = int(np.floor(clipart.normed_y * self.NUM_Y_TICKS))
if y_tick < 0:
y_tick = 0
elif y_tick >= self.NUM_Y_TICKS:
y_tick = self.NUM_Y_TICKS - 1
# Tag features (for attention)
scene_tags[clipart.idx, 0] = 1 + clipart.idx
scene_tags[clipart.idx, 1] = 1 + Clipart.NUM_IDX + clipart.subtype
scene_tags[clipart.idx, 2] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + clipart.depth
scene_tags[clipart.idx, 3] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + int(clipart.flip)
scene_tags[clipart.idx, 4] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + x_tick
scene_tags[clipart.idx, 5] = 1 + Clipart.NUM_IDX + Clipart.NUM_SUBTYPE + Clipart.NUM_DEPTH + Clipart.NUM_FLIP + self.NUM_X_TICKS + y_tick
scene_mask[clipart.idx] = True
scene_tags = torch.tensor(scene_tags, dtype=torch.long)
scene_mask = torch.tensor(scene_mask.astype(np.uint8), dtype=torch.uint8)
if is_train:
assert scene_present_idxs is not None
drawer_clipart_state = np.zeros(self.NUM_INDEX, dtype=int)
for idx in range(self.NUM_INDEX):
if idx not in scene_present_idxs:
# drawer_clipart_state[idx] = self.CLIPART_STATE_NOT_IN_SCENE
drawer_clipart_state[idx] = self.CLIPART_STATE_NOT_UNDRAWN
else:
drawer_clipart_state[idx] = self.CLIPART_STATE_UNDRAWN
drawer_clipart_state = torch.tensor(drawer_clipart_state, dtype=torch.long)
prev_drawn_idxs = set()
num_unfilled_past = 1
example = {
'scene_tags': scene_tags,
'scene_mask': scene_mask,
'teller_tokens_in': [],
'teller_counts_in': [],
'teller_tokens_out': [],
'drawer_clipart_state': [drawer_clipart_state],
}
else:
yield {
'scene_tags': scene_tags,
'scene_mask': scene_mask,
}
# At test time, there shouldn't be anything after the
# ObserveTruth event
continue
if isinstance(event, codraw_data.TellGroup):
assert isinstance(event, codraw_data.TellGroup)
msg = event.msg
event = next(it)
assert isinstance(event, codraw_data.ObserveCanvas)
canvas_context = event.scene
event = next(it)
assert isinstance(event, codraw_data.SetDrawing)
drawn_scene = event.scene
event = next(it)
assert isinstance(event, codraw_data.ReplyGroup)
teller_tokens = [self.vocabulary_dict.get(word, self.unk_index) for word in msg.split()]
teller_counts = [self.vocabulary_counts[word] for word in msg.split()]
teller_tokens = [self.vocabulary_dict['<S>']] + teller_tokens + [self.vocabulary_dict['</S>']]
teller_counts = [np.inf] + teller_counts + [np.inf]
# Needed for RL. All human utterances have less than 50 words
# due to a character limit imposed during data collection
if len(teller_tokens) > 51:
teller_tokens = teller_tokens[:51]
teller_counts = teller_counts[:51]
teller_tokens = torch.tensor(teller_tokens, dtype=torch.long)
teller_counts = torch.tensor(teller_counts, dtype=torch.float)
example['teller_tokens_in'].append(teller_tokens[:-1])
example['teller_tokens_out'].append(teller_tokens[1:])
example['teller_counts_in'].append(teller_counts[:-1])
assert scene_present_idxs is not None
drawn_idxs = set([c.idx for c in drawn_scene])
drawer_clipart_state = np.zeros(self.NUM_INDEX, dtype=int)
for idx in range(self.NUM_INDEX):
if idx not in scene_present_idxs or idx in drawn_idxs:
drawer_clipart_state[idx] = self.CLIPART_STATE_NOT_UNDRAWN
else:
drawer_clipart_state[idx] = self.CLIPART_STATE_UNDRAWN
drawer_clipart_state = torch.tensor(drawer_clipart_state, dtype=torch.long)
example['drawer_clipart_state'].append(drawer_clipart_state)
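    # Collated batch keys are prefixed with their packing level, following the Packer naming
    # convention: b_* has one entry per scene, br_* one per conversation round, and brw_* one
    # per token.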
def collate(self, batch, is_train=True):
if is_train:
packer = Packer([x['teller_tokens_in'] for x in batch])
return {
'packer': packer,
'brw_teller_tokens_in': packer.brw_from_list([x['teller_tokens_in'] for x in batch]).to(cuda_if_available),
'brw_teller_counts_in': packer.brw_from_list([x['teller_counts_in'] for x in batch]).to(cuda_if_available),
'brw_teller_tokens_out': packer.brw_from_list([x['teller_tokens_out'] for x in batch]).to(cuda_if_available),
'b_scene_tags': torch.stack([x['scene_tags'] for x in batch]).to(cuda_if_available),
'b_scene_mask': torch.stack([x['scene_mask'] for x in batch]).to(cuda_if_available),
'br_drawer_clipart_state': packer.br_from_list([x['drawer_clipart_state'] for x in batch]).to(cuda_if_available),
}
else:
return {
'b_scene_tags': torch.stack([x['scene_tags'] for x in batch]).to(cuda_if_available),
'b_scene_mask': torch.stack([x['scene_mask'] for x in batch]).to(cuda_if_available),
}
| codraw-models-master | datagen.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
def load_models(*partitions):
if not partitions:
partitions = (1, 2, 3, 4)
models = {}
if 1 in partitions:
from baseline1_models import load_baseline1
models.update(load_baseline1())
if 2 in partitions:
from baseline2_models import load_baseline2
models.update(load_baseline2())
if 3 in partitions:
from baseline3_models import load_baseline3
models.update(load_baseline3())
if 4 in partitions:
from baseline4_models import load_baseline4
models.update(load_baseline4())
return models
#%%
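# Saved models come in '_a'/'_b' pairs (each trained on one of the two data splits, 'a' and 'b');
# make_pairs bundles them so evaluation code can pick one member per split letter
# (see model.make_fns).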
def make_pairs(models, *names):
if models is None:
models = load_models()
res = []
for name in names:
res.append((name, (models[name + '_a'], models[name + '_b'])))
return res
| codraw-models-master | saved_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides the Packer class, which is useful for managing a hierarchy where each
batch element has a variable number of conversation rounds, and each round may
consist of a variable number of messages.
"""
#%%
import numpy as np
import torch
from torch.nn.utils.rnn import PackedSequence
# %%
class Packer:
def __init__(self, list_brw):
coords = []
b_lens = []
br_lens = []
coords_flat = []
b_lens_flat = []
for b, list_rw in enumerate(list_brw):
b_lens.append(len(list_rw))
len_flat = 0
for r, list_w in enumerate(list_rw):
br_lens.append(len(list_w))
for w, _ in enumerate(list_w):
coords.append([b, r, w])
coords_flat.append([b, len_flat + w])
len_flat += len(list_w)
b_lens_flat.append(len_flat)
self.coords_brw = np.array(coords, dtype=int)
self.b_lens = np.array(b_lens, dtype=int)
self.br_lens = np.array(br_lens, dtype=int)
self.coords_flat = np.array(coords_flat, dtype=int)
self.b_lens_flat = np.array(b_lens_flat, dtype=int)
self.coords_br, self.indices_br2brw = np.unique(self.coords_brw[:,:-1], axis=0, return_inverse=True)
_, self.indices_b2br = np.unique(self.coords_br[:,:-1], axis=0, return_inverse=True)
self.indices_b2brw = self.indices_b2br[self.indices_br2brw]
self.dense_shape = np.max(self.coords_brw, 0) + 1
# Must use stable sorts here, which is why kind='mergesort'
self.indices_b2sb = np.argsort(-self.b_lens, kind='mergesort')
sort_by_num_rounds = np.argsort(-self.b_lens[self.indices_b2br], kind='mergesort')
sort_by_round = np.argsort(self.coords_br[sort_by_num_rounds][:,-1], kind='mergesort')
self.indices_br2srb = sort_by_num_rounds[sort_by_round]
self.indices_br2sx = np.argsort(-self.br_lens, kind='mergesort')
sort_by_num_words = np.argsort(-self.br_lens[self.indices_br2brw], kind='mergesort')
sort_by_word_idx = np.argsort(self.coords_brw[sort_by_num_words][:,-1], kind='mergesort')
self.indices_brw2swx = sort_by_num_words[sort_by_word_idx]
_, batch_sizes_srb = np.unique(self.coords_br[self.indices_br2srb][:,-1], return_counts=True)
_, batch_sizes_swx = np.unique(self.coords_brw[self.indices_brw2swx][:,-1], return_counts=True)
self.batch_sizes_srb = torch.tensor(batch_sizes_srb, dtype=torch.long)
self.batch_sizes_swx = torch.tensor(batch_sizes_swx, dtype=torch.long)
self.indices_srb2br = np.argsort(self.indices_br2srb, kind='mergesort')
self.indices_swx2brw = np.argsort(self.indices_brw2swx, kind='mergesort')
self.indices_sb2b = np.argsort(self.indices_b2sb, kind='mergesort')
self.indices_sx2br = np.argsort(self.indices_br2sx, kind='mergesort')
# For flat
self.indices_b2ob = np.argsort(-self.b_lens_flat, kind='mergesort')
sort_by_flat_words = np.argsort(-self.b_lens_flat[self.indices_b2brw], kind='mergesort')
sort_by_flat_word_idx = np.argsort(self.coords_flat[sort_by_flat_words][:,-1], kind='mergesort')
self.indices_brw2orwb = sort_by_flat_words[sort_by_flat_word_idx]
_, batch_sizes_orwb = np.unique(self.coords_flat[self.indices_brw2orwb][:,-1], return_counts=True)
self.batch_sizes_orwb = torch.tensor(batch_sizes_orwb, dtype=torch.long)
self.indices_ob2b = np.argsort(self.indices_b2ob, kind='mergesort')
self.indices_orwb2brw = np.argsort(self.indices_brw2orwb, kind='mergesort')
def brw_from_list(self, list_brw):
vals = []
for list_rw in list_brw:
for list_w in list_rw:
vals.extend(list_w)
assert len(vals) == self.coords_brw.shape[0]
if torch.is_tensor(vals[0]):
return torch.stack(vals)
else:
return torch.tensor(vals)
def br_from_list(self, list_br):
vals = []
for list_r in list_br:
vals.extend(list_r)
assert len(vals) == self.coords_br.shape[0]
if torch.is_tensor(vals[0]):
return torch.stack(vals)
else:
return torch.tensor(vals)
def br_from_b_expand(self, b_in):
return b_in[self.indices_b2br]
def brw_from_br_expand(self, br_in):
return br_in[self.indices_br2brw]
def brw_from_b_expand(self, b_in):
return b_in[self.indices_b2brw]
def srb_from_br_pack(self, br_in):
return PackedSequence(
br_in[self.indices_br2srb],
self.batch_sizes_srb
)
def swx_from_brw_pack(self, brw_in):
return PackedSequence(
brw_in[self.indices_brw2swx],
self.batch_sizes_swx
)
def br_from_srb_unpack(self, srb_in):
return srb_in.data[self.indices_srb2br]
def brw_from_swx_unpack(self, swx_in):
return swx_in.data[self.indices_swx2brw]
def br_from_sx(self, sx_in):
return sx_in[self.indices_sx2br]
def b_from_sb(self, sb_in):
return sb_in[self.indices_sb2b]
def sx_from_br(self, br_in):
return br_in[self.indices_br2sx]
def sb_from_b(self, b_in):
return b_in[self.indices_b2sb]
# For flat
def orwb_from_brw_pack(self, brw_in):
return PackedSequence(
brw_in[self.indices_brw2orwb],
self.batch_sizes_orwb
)
def brw_from_orwb_unpack(self, orwb_in):
return orwb_in.data[self.indices_orwb2brw]
def b_from_ob(self, ob_in):
return ob_in[self.indices_ob2b]
def ob_from_b(self, b_in):
return b_in[self.indices_b2ob]
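# Minimal usage sketch (added for illustration; not part of the original module): build a
# Packer from a toy nested list and check that packing word-level values into a
# PackedSequence and unpacking them again is a round trip.
if __name__ == '__main__':
    toy_brw = [[[1, 2, 3], [4, 5]], [[6]]]  # 2 batch elements with 2 and 1 rounds
    packer = Packer(toy_brw)
    brw_vals = packer.brw_from_list(toy_brw)  # tensor([1, 2, 3, 4, 5, 6])
    packed = packer.swx_from_brw_pack(brw_vals)
    assert torch.equal(packer.brw_from_swx_unpack(packed), brw_vals)
    print('Packer round trip OK:', brw_vals.tolist())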
| codraw-models-master | packer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
#%%
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def select_clipart_to_tell(episode):
cliparts = set(episode.get_last(codraw_data.ObserveTruth).scene)
cliparts -= set([e.clipart for e in episode if isinstance(e, codraw_data.SelectClipart)])
if cliparts:
cliparts = list(sorted(cliparts))
clipart = cliparts[0]
# For now, don't randomize the clipart selection order.
#cliparts[np.random.choice(len(cliparts))]
episode.append(codraw_data.SelectClipart(clipart))
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def scripted_tell(episode):
if episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
if isinstance(event, codraw_data.Peek):
# Skip to the next non-peek event
assert isinstance(episode.script[episode.script_index + 1], codraw_data.TellerObserveCanvas)
episode.script_index += 2
return scripted_tell(episode)
episode.script_index += 1
episode.append(event)
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def scripted_tell_before_peek(episode):
if episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
if isinstance(event, codraw_data.Peek):
return
episode.script_index += 1
episode.append(event)
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def scripted_tell_after_peek(episode):
if episode.script_index == 0:
while episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
episode.script_index += 1
if not isinstance(event, codraw_data.Peek):
continue
event = episode.script[episode.script_index]
assert isinstance(event, codraw_data.TellerObserveCanvas)
start_scene = event.scene
episode.script_index += 1
break
else:
assert False, "Could not find Peek event in the script!"
episode.append(codraw_data.DrawGroup(start_scene))
assert episode.script_index < len(episode.script)
if episode.script_index < len(episode.script):
event = episode.script[episode.script_index]
episode.script_index += 1
episode.append(event)
@respond_to(codraw_data.TellGroup)
def draw_nothing(episode):
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
@respond_to(codraw_data.TellGroup)
def drawer_observe_canvas(episode):
# TODO(nikita): can cache for higher efficiency
scene = episode.reconstruct()
event = codraw_data.ObserveCanvas(scene)
episode.append(event)
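# make_fns assembles the action functions that drive an episode. Each argument is either a
# single model/callable or an (a, b) pair of models; for pairs, successive letters of `splits`
# (e.g. 'ab') pick which member to use. nn.Modules are switched to eval mode, and objects
# exposing get_action_fns() contribute all of their action functions.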
def make_fns(splits, *objs_or_pairs):
split_to_use = 0
res = []
for obj_or_pair in objs_or_pairs:
if isinstance(obj_or_pair, tuple):
assert len(obj_or_pair) == 2
if splits[split_to_use] == 'a':
obj = obj_or_pair[0]
elif splits[split_to_use] == 'b':
obj = obj_or_pair[1]
else:
raise ValueError(f"Invalid split: {splits[split_to_use]}")
split_to_use += 1
else:
obj = obj_or_pair
if isinstance(obj, nn.Module):
# Switch pytorch modules to evaluation mode
obj.eval()
if hasattr(obj, 'get_action_fns'):
res.extend(obj.get_action_fns())
else:
res.append(obj)
assert split_to_use == len(splits), "Too many splits specified"
return res
def episodes_from_fns(fns, limit=None, split='dev'):
use_scripts = (scripted_tell in fns) or (scripted_tell_before_peek in fns)
if scripted_tell_after_peek in fns:
use_scripts = True
run_from = codraw_data.get_scenes_and_scripts_with_peek(split)
elif use_scripts:
run_from = codraw_data.get_scenes_and_scripts(split)
else:
run_from = codraw_data.get_scenes(split)
if limit is not None:
run_from = run_from[:limit]
sims = []
with torch.no_grad():
for run_from_single in run_from:
if use_scripts:
episode = Episode.run_script(run_from_single, fns)
else:
episode = Episode.run(run_from_single, fns)
yield episode
def eval_fns(fns, limit=None, split='dev'):
sims = [episode.scene_similarity() for episode in episodes_from_fns(fns, limit=limit, split=split)]
return np.array(sims)
#%%
def calc_perplexity(teller, split='dev'):
"""
Calculates teller perplexity. Does not work with all teller classes, e.g.
perplexity has not been defined for the nearest-neighbor tellers.
"""
datagen_spec = {**teller.datagen.spec}
datagen_spec['split'] = split
datagen_dev = teller.datagen_cls(spec=datagen_spec)
assert datagen_dev.vocabulary == teller.datagen.vocabulary
nlls = []
counts = []
with torch.no_grad():
teller.eval()
for ex in datagen_dev.get_examples_unshuffled_batch(batch_size=128):
nll, count = teller(ex, return_loss=False, return_nll_count=True)
nlls.append(nll)
counts.append(count)
nll_per_word = np.array(nlls).sum() / np.array(counts).sum()
return np.exp(nll_per_word)
#%%
class ComponentEvaluator:
NUM_FEATURES = 7
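    # The seven per-clipart error components (see eval_scene): flip/direction, expression,
    # pose, depth, squared (x, y) displacement, |x| error, and |y| error. Expression and pose
    # only count for the two human figures; if the prediction omits a clipart, a baseline
    # exemplar (modal attributes, mean position) stands in for it.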
_instance_cache = {}
@classmethod
def get(cls, split_for_baseline='train_full'):
if split_for_baseline not in cls._instance_cache:
cls._instance_cache[split_for_baseline] = cls(split_for_baseline)
return cls._instance_cache[split_for_baseline]
def __init__(self, split_for_baseline='train_full'):
        cliparts_by_idx = {idx: [] for idx in range(Clipart.NUM_IDX)}
for scene in codraw_data.get_scenes(split_for_baseline):
for clipart in scene:
cliparts_by_idx[clipart.idx].append(clipart)
self.idx_to_exemplar = {}
for idx in cliparts_by_idx:
if idx in Clipart.HUMAN_IDXS:
expression, _ = torch.mode(torch.tensor([c.expression for c in cliparts_by_idx[idx]]))
pose, _ = torch.mode(torch.tensor([c.pose for c in cliparts_by_idx[idx]]))
subtype = pose * Clipart.NUM_EXPRESSION + expression
else:
subtype = 0
depth, _ = torch.mode(torch.tensor([c.depth for c in cliparts_by_idx[idx]]))
flip, _ = torch.mode(torch.tensor([c.flip for c in cliparts_by_idx[idx]]))
x = np.mean([c.x for c in cliparts_by_idx[idx]])
y = np.mean([c.y for c in cliparts_by_idx[idx]])
self.idx_to_exemplar[idx] = Clipart(idx, int(subtype), int(depth), int(flip), x, y)
# Calculate prior baseline, and human performance
human_numer = np.zeros(self.NUM_FEATURES)
human_denom = np.zeros(self.NUM_FEATURES)
baseline_numer = np.zeros(self.NUM_FEATURES)
baseline_denom = np.zeros(self.NUM_FEATURES)
for scene_true, scene_human in codraw_data.get_truth_and_human_scenes('dev'):
ep_numer, ep_denom = self.eval_scene(scene_human, scene_true)
human_numer += ep_numer
human_denom += ep_denom
ep_numer, ep_denom = self.eval_scene([], scene_true)
baseline_numer += ep_numer
baseline_denom += ep_denom
self.human_scores = human_numer / human_denom
self.baseline_scores = baseline_numer / baseline_denom
def eval_scene(self, pred, target):
res_numer = np.zeros(self.NUM_FEATURES)
res_denom = np.zeros(self.NUM_FEATURES)
for truth_clipart in target:
other_cliparts = [c for c in pred if c.idx == truth_clipart.idx]
if other_cliparts:
other_clipart = other_cliparts[0]
else:
other_clipart = self.idx_to_exemplar[truth_clipart.idx]
feats_numer = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
feats_denom = [1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
feats_numer[0] = float(truth_clipart.flip != other_clipart.flip)
if truth_clipart.idx in Clipart.HUMAN_IDXS:
feats_numer[1] = float(truth_clipart.expression != other_clipart.expression)
feats_numer[2] = float(truth_clipart.pose != other_clipart.pose)
feats_denom[1] = 1.0
feats_denom[2] = 1.0
feats_numer[3] = float(truth_clipart.depth != other_clipart.depth)
displacements = np.array([truth_clipart.normed_x - other_clipart.normed_x, truth_clipart.normed_y - other_clipart.normed_y])
feats_numer[4] = np.sum(displacements ** 2)
feats_numer[5], feats_numer[6] = np.abs(displacements)
res_numer += feats_numer
res_denom += feats_denom
return res_numer, res_denom
def eval_episode(self, episode):
return self.eval_scene(episode.reconstruct(), episode.get_true_scene())
def eval_fns(self, fns, limit=None, split='dev', unscaled=False):
numer = np.zeros(self.NUM_FEATURES)
denom = np.zeros(self.NUM_FEATURES)
for episode in episodes_from_fns(fns, limit=limit, split=split):
ep_numer, ep_denom = self.eval_episode(episode)
numer += ep_numer
denom += ep_denom
res = numer / denom
if not unscaled:
res = (res - self.human_scores) / (self.baseline_scores - self.human_scores)
res = 1.0 - res
return res
#%%
class Model(object):
datagen_cls = None
def __init__(self, datagen=None, spec=None, **kwargs):
super().__init__()
if spec is not None:
assert self.datagen_cls is not None
assert self.datagen_cls.__name__ == spec['datagen_class']
self.datagen = self.datagen_cls(spec=spec['datagen_spec'])
self.init_from_spec(**{k: v for (k,v) in spec.items() if k not in ['class', 'datagen_spec', 'datagen_class', 'state_dict']})
if 'state_dict' in spec:
self.load_state_dict(spec['state_dict'])
self.to(cuda_if_available)
self.post_init_from_spec()
else:
assert isinstance(datagen, self.datagen_cls)
self.datagen = datagen
self.init_full(**kwargs)
if hasattr(self, 'state_dict'):
self.to(cuda_if_available)
def init_full(self):
pass
def init_from_spec(self, **kwargs):
self.init_full(**kwargs)
def post_init_from_spec(self):
pass
def get_action_fns(self):
raise NotImplementedError("Subclasses should override this")
def get_spec(self):
return {}
@property
def spec(self):
res = {
'class': type(self).__name__,
'datagen_class': type(self.datagen).__name__,
'datagen_spec': self.datagen.spec,
**self.get_spec(),
}
if hasattr(self, 'state_dict'):
res['state_dict'] = self.state_dict()
return res
# This method doesn't work because models are defined in other files, so
# globals() fails to register them. TODO(nikita): better deserialization
# helper?
# @staticmethod
# def new_from_spec(spec):
# model_class = globals()[spec['class']]
# return model_class(spec=spec)
def just_tell(self, clipart, *args, **kwargs):
assert hasattr(self, 'tell'), "Model is not a teller"
if isinstance(self, nn.Module):
self.eval()
episode = Episode([codraw_data.SelectClipart(clipart)])
self.tell(episode, *args, **kwargs)
return episode.get_last(codraw_data.TellGroup).msg
    def just_draw(self, msg, scene=None, *args, **kwargs):
        assert hasattr(self, 'draw'), "Model is not a drawer"
        if scene is None:
            scene = []  # default to an empty canvas (avoids a mutable default argument)
        episode = Episode([codraw_data.TellGroup(msg), codraw_data.ObserveCanvas(scene)])
if isinstance(self, nn.Module):
self.eval()
self.draw(episode, *args, **kwargs)
event_multi = episode.get_last(codraw_data.DrawGroup)
if event_multi is not None:
return codraw_data.AbstractScene(event_multi.cliparts)
event_single = episode.get_last(codraw_data.DrawClipart)
return event_single.clipart
| codraw-models-master | model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import json
import numpy as np
import codraw_data
import model
from abs_metric import scene_similarity
from pathlib import Path
#%%
TRANSCRIPTS_PATH = Path('transcripts-eval-v1.json')
TRANSCRIPTS_SPLIT = 'test'
#%%
transcripts = json.loads(TRANSCRIPTS_PATH.read_text())
#%%
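# Each transcript datum pairs a model name with the ground-truth scene and the drawer's final
# canvas, taken from the last dialog entry's 'abs_d' field.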
def get_transcript_results(transcripts):
data = transcripts['data']
for datum in data.values():
model_name = datum['model_name']
scene = codraw_data.AbstractScene(datum['abs_t'])
scene_after = None
for entry in datum['dialog']:
scene_after = entry['abs_d']
assert scene_after is not None
scene_after = codraw_data.AbstractScene(scene_after)
yield (model_name, scene, scene_after)
#%%
component_evaluator = model.ComponentEvaluator.get()
#%%
true_to_human = {}
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes(TRANSCRIPTS_SPLIT):
true_to_human[tuple(true_scene)] = human_scene
# %%
model_to_sims = {}
model_to_numer = {}
model_to_denom = {}
true_scenes_set = set()
for model_name, true_scene, reconstructed_scene in get_transcript_results(transcripts):
if model_name not in model_to_sims:
model_to_sims[model_name] = []
if model_name not in model_to_numer:
assert model_name not in model_to_denom
model_to_numer[model_name] = []
model_to_denom[model_name] = []
model_to_sims[model_name].append(scene_similarity(reconstructed_scene, true_scene))
    numer, denom = component_evaluator.eval_scene(reconstructed_scene, true_scene)
model_to_numer[model_name].append(numer)
model_to_denom[model_name].append(denom)
true_scenes_set.add(tuple(true_scene))
#%%
print("Model \t Scene similarity")
for model_name, sims in model_to_sims.items():
print(f"{model_name:17s}\t {np.array(sims).mean():.2f}")
sims = np.array([scene_similarity(true_to_human[scene], scene) for scene in true_scenes_set])
print(f"{'human':17s}\t {np.array(sims).mean():.2f}")
#%%
print()
print()
#%%
print("Model \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for model_name in model_to_numer:
numer = model_to_numer[model_name]
denom = model_to_denom[model_name]
components = np.array(numer).sum(0) / np.array(denom).sum(0)
    components = 1.0 - (components - component_evaluator.human_scores) / (component_evaluator.baseline_scores - component_evaluator.human_scores)
print(f"{model_name:17s}\t", "\t".join(f"{num: .6f}" for num in components))
human_numer_denom = [component_evaluator.eval_scene(true_to_human[scene], scene) for scene in true_scenes_set]
components = np.array([x[0] for x in human_numer_denom]).sum(0) / np.array([x[1] for x in human_numer_denom]).sum(0)
components = 1.0 - (components - component_evaluator.human_scores) / (component_evaluator.baseline_scores - component_evaluator.human_scores)
print(f"{'human':17s}\t", "\t".join(f"{num: .6f}" for num in components))
#%%
| codraw-models-master | eval_transcripts.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
from baseline1_models import load_baseline1
from baseline2_models import load_baseline2
import model
from model import make_fns, eval_fns
# %%
component_evaluator = model.ComponentEvaluator.get()
# %%
models_baseline1 = load_baseline1()
models_baseline2 = load_baseline2()
# %%
tellers = [
('teller_nn', (models_baseline1['teller_nn_a'], models_baseline1['teller_nn_b'])),
# ('teller_c2seq', (models_baseline1['teller_c2seq_a'], models_baseline1['teller_c2seq_b'])),
('teller_pragmaticnn', (models_baseline2['teller_pragmaticnn_a'], models_baseline2['teller_pragmaticnn_b'])),
]
drawers = [
# ('drawer_nn', (models_baseline1['drawer_nn_a'], models_baseline1['drawer_nn_b'])),
# ('drawer_sim', (models_baseline1['drawer_sim_a'], models_baseline1['drawer_sim_b'])),
# ('drawer_bow2c', (models_baseline1['drawer_bow2c_a'], models_baseline1['drawer_bow2c_b'])),
('drawer_bow2bce', (models_baseline1['drawer_bow2bce_a'], models_baseline1['drawer_bow2bce_b'])),
('drawer_bowcanvas2bce', (models_baseline1['drawer_bowcanvas2bce_a'], models_baseline1['drawer_bowcanvas2bce_b'])),
('drawer_lstmaddonly', (models_baseline2['drawer_lstmaddonly_a'], models_baseline2['drawer_lstmaddonly_b'])),
]
# %%
print()
human_sims = np.array([
scene_similarity(human_scene, true_scene)
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes('dev')
])
print(f"Human scene similarity: mean={human_sims.mean():.6f} std={human_sims.std():.6f} median={np.median(human_sims):.6f}")
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Scene similarity")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", sims.mean())
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Scene similarity")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", sims.mean())
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
                components = component_evaluator.eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
        components = component_evaluator.eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
# %%
# %%
# %%
# %%
# %%
# %%
| codraw-models-master | baseline2_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from IPython.display import SVG, display
from PIL import Image
from binascii import b2a_base64
PNGS_PATH = (Path(__file__).parent / '../CoDraw/Pngs').resolve()
EMBED_PNGS_PATH = '../../CoDraw/Pngs'
DEPTH_SCALE = [1.0, 0.7, 0.49]
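# Per-depth rendering scale: depth 0 is drawn at full size, deeper levels progressively smaller.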
IMAGE_NAMES = [
's_0s.png',
's_1s.png',
's_2s.png',
's_3s.png',
's_4s.png',
's_5s.png',
's_6s.png',
's_7s.png',
'p_0s.png',
'p_1s.png',
'p_2s.png',
'p_3s.png',
'p_4s.png',
'p_5s.png',
'p_6s.png',
'p_7s.png',
'p_8s.png',
'p_9s.png',
'hb0_0s.png',
'hb0_1s.png',
'hb0_2s.png',
'hb0_3s.png',
'hb0_4s.png',
'hb0_5s.png',
'hb0_6s.png',
'hb0_7s.png',
'hb0_8s.png',
'hb0_9s.png',
'hb0_10s.png',
'hb0_11s.png',
'hb0_12s.png',
'hb0_13s.png',
'hb0_14s.png',
'hb0_15s.png',
'hb0_16s.png',
'hb0_17s.png',
'hb0_18s.png',
'hb0_19s.png',
'hb0_20s.png',
'hb0_21s.png',
'hb0_22s.png',
'hb0_23s.png',
'hb0_24s.png',
'hb0_25s.png',
'hb0_26s.png',
'hb0_27s.png',
'hb0_28s.png',
'hb0_29s.png',
'hb0_30s.png',
'hb0_31s.png',
'hb0_32s.png',
'hb0_33s.png',
'hb0_34s.png',
'hb1_0s.png',
'hb1_1s.png',
'hb1_2s.png',
'hb1_3s.png',
'hb1_4s.png',
'hb1_5s.png',
'hb1_6s.png',
'hb1_7s.png',
'hb1_8s.png',
'hb1_9s.png',
'hb1_10s.png',
'hb1_11s.png',
'hb1_12s.png',
'hb1_13s.png',
'hb1_14s.png',
'hb1_15s.png',
'hb1_16s.png',
'hb1_17s.png',
'hb1_18s.png',
'hb1_19s.png',
'hb1_20s.png',
'hb1_21s.png',
'hb1_22s.png',
'hb1_23s.png',
'hb1_24s.png',
'hb1_25s.png',
'hb1_26s.png',
'hb1_27s.png',
'hb1_28s.png',
'hb1_29s.png',
'hb1_30s.png',
'hb1_31s.png',
'hb1_32s.png',
'hb1_33s.png',
'hb1_34s.png',
'a_0s.png',
'a_1s.png',
'a_2s.png',
'a_3s.png',
'a_4s.png',
'a_5s.png',
'c_0s.png',
'c_1s.png',
'c_2s.png',
'c_3s.png',
'c_4s.png',
'c_5s.png',
'c_6s.png',
'c_7s.png',
'c_8s.png',
'c_9s.png',
'e_0s.png',
'e_1s.png',
'e_2s.png',
'e_3s.png',
'e_4s.png',
'e_5s.png',
'e_6s.png',
't_0s.png',
't_1s.png',
't_2s.png',
't_3s.png',
't_4s.png',
't_5s.png',
't_6s.png',
't_7s.png',
't_8s.png',
't_9s.png',
't_10s.png',
't_11s.png',
't_12s.png',
't_13s.png',
't_14s.png',
]
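# Flat index layout used by get_image_name: entries 0-17 are objects with a single subtype
# (s_* and p_* files), entries 18-87 are the two human figures with 35 pose/expression
# variants each (hb0_*, hb1_*), and the remaining entries (a_*, c_*, e_*, t_*) again have a
# single subtype.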
def get_image_name(clipart):
if clipart.idx < 18:
return IMAGE_NAMES[clipart.idx]
elif clipart.idx < 18 + 2:
return IMAGE_NAMES[18 + (clipart.idx - 18) * 35 + clipart.subtype]
else:
return IMAGE_NAMES[clipart.idx + 34*2]
def snippet_from_clipart(clipart, inline_images=True):
img_name = get_image_name(clipart)
img_path = PNGS_PATH / img_name
img_pil = Image.open(img_path)
width, height = img_pil.width, img_pil.height
if inline_images:
data = b2a_base64(img_path.read_bytes()).decode('ascii')
scale = DEPTH_SCALE[clipart.depth]
width = width * scale
height = height * scale
flip = -1 if bool(clipart.flip) else 1
x = clipart.x - width / 2.0
y = clipart.y - height / 2.0
flipped_sub_x = (-width) if clipart.flip else 0
if inline_images:
href = f"data:image/png;base64,{data}"
else:
href = f"{EMBED_PNGS_PATH}/{img_name}"
return f"""
<g transform="translate({x}, {y})">
<image href="{href}" x="{flipped_sub_x}" y="0" width="{width}" height="{height}"
transform="scale({flip}, 1)"/>
</g>
"""
def svg_from_cliparts(cliparts, color=None, label=None, inline_images=True, scale=1.0):
img_path = PNGS_PATH / 'background.png'
if inline_images:
data = b2a_base64(img_path.read_bytes()).decode('ascii')
href = f"data:image/png;base64,{data}"
else:
href = f"{EMBED_PNGS_PATH}/background.png"
svg = f"""
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="{int(500*scale)}px" height="{int(400*scale)}px" viewBox="0 0 500 400">
<image href="{href}" x="0" y="0" width="100%" height="100%"/>
"""
if color:
svg += f"""
<rect fill="{color}" opacity="0.2" x="0" y="0" width="100%" height="100%"/>
"""
# Sun (idx=3) is always in the back; this hack is also in Abs.js
# All sky objects (idx < 8) are behind any non-sky objects
# Past that, objects are sorted by depth and then by index
for clipart in sorted(cliparts, key=lambda c: c.render_order_key):
svg += snippet_from_clipart(clipart, inline_images=inline_images)
if label:
svg += f"""<text x="95%" y="8%" style="text-anchor: end">{label}</text>"""
svg += "</svg>"
return svg
def display_cliparts(cliparts, color=None, label=None, scale=1.0):
display(SVG(svg_from_cliparts(cliparts, color, label, scale=scale)))
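# Minimal usage sketch (added for illustration; not part of the original module). With
# inline_images=False no PNG assets are read, so this runs even without the CoDraw Pngs folder.
if __name__ == '__main__':
    svg = svg_from_cliparts([], label='empty scene', inline_images=False)
    print(svg[:80] + '...')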
| codraw-models-master | abs_render.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import BOWAddUpdateData
from baseline2_models import BOWAddOnlyDrawer, LSTMAddOnlyDrawer
import model
from model import make_fns, eval_fns
from model import scripted_tell, scripted_tell_before_peek, scripted_tell_after_peek
# %%
data_bowaddupdate_a = BOWAddUpdateData('a')
data_bowaddupdate_b = BOWAddUpdateData('b')
# %%
# drawer_bowaddonly_a = BOWAddOnlyDrawer(data_bowaddupdate_a)
# drawer_bowaddonly_b = BOWAddOnlyDrawer(data_bowaddupdate_b)
#
# optimizer_bowaddonly_a = torch.optim.Adam(drawer_bowaddonly_a.parameters())
# optimizer_bowaddonly_b = torch.optim.Adam(drawer_bowaddonly_b.parameters())
#%%
# for epoch in range(15):
# drawer_bowaddonly_a.train()
# for num, ex in enumerate(drawer_bowaddonly_a.datagen.get_examples_batch()):
# optimizer_bowaddonly_a.zero_grad()
# loss = drawer_bowaddonly_a.forward(ex)
# loss.backward()
# optimizer_bowaddonly_a.step()
#
# print(f'Done epoch {epoch} loss {float(loss)}')
# if epoch % 1 == 0:
# for split in ('a',):
# sims = eval_fns(make_fns(split, scripted_tell, (drawer_bowaddonly_a, drawer_bowaddonly_b)), limit=100)
# print(split, sims.mean())
#
# sims = eval_fns(make_fns(split, scripted_tell_before_peek, (drawer_bowaddonly_a, drawer_bowaddonly_b)), limit=100)
# print(split, 'before', sims.mean())
#
# sims = eval_fns(make_fns(split, scripted_tell_after_peek, (drawer_bowaddonly_a, drawer_bowaddonly_b)), limit=100)
# print(split, 'after', sims.mean())
# %%
drawer_lstmaddonly_a = LSTMAddOnlyDrawer(data_bowaddupdate_a)
drawer_lstmaddonly_b = LSTMAddOnlyDrawer(data_bowaddupdate_b)
optimizer_lstmaddonly_a = torch.optim.Adam(drawer_lstmaddonly_a.parameters())
optimizer_lstmaddonly_b = torch.optim.Adam(drawer_lstmaddonly_b.parameters())
#%%
for epoch in range(15):
drawer_lstmaddonly_a.train()
for num, ex in enumerate(drawer_lstmaddonly_a.datagen.get_examples_batch()):
optimizer_lstmaddonly_a.zero_grad()
loss = drawer_lstmaddonly_a.forward(ex)
loss.backward()
optimizer_lstmaddonly_a.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('a',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_before_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'before', sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_after_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'after', sims.mean())
#%%
for epoch in range(15):
drawer_lstmaddonly_b.train()
for num, ex in enumerate(drawer_lstmaddonly_b.datagen.get_examples_batch()):
optimizer_lstmaddonly_b.zero_grad()
loss = drawer_lstmaddonly_b.forward(ex)
loss.backward()
optimizer_lstmaddonly_b.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 1 == 0:
for split in ('b',):
sims = eval_fns(make_fns(split, scripted_tell, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_before_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'before', sims.mean())
sims = eval_fns(make_fns(split, scripted_tell_after_peek, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=100)
print(split, 'after', sims.mean())
# %%
lstmaddonly_specs = dict(
drawer_lstmaddonly_a = drawer_lstmaddonly_a.spec,
drawer_lstmaddonly_b = drawer_lstmaddonly_b.spec,
)
#%%
torch.save(lstmaddonly_specs, Path('models/lstmaddonly.pt'))
| codraw-models-master | baseline2_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
import model
from model import make_fns, eval_fns
from model import Model
from baseline2_models import load_baseline2
from datagen import SceneToSeqData
from baseline3_models import SceneToSeqTeller
# %%
# scenes_and_scripts_dev = codraw_data.get_scenes_and_scripts('dev')
# transcribe = Transcriber(
# 'baseline3_train.py' if INTERACTIVE else __file__,
# scenes_and_scripts=scenes_and_scripts_dev[::110],
# scenes_description="scenes_and_scripts_dev[::110]")
# %%
models_baseline2 = load_baseline2()
# %%
drawer_lstmaddonly_a = models_baseline2['drawer_lstmaddonly_a']
drawer_lstmaddonly_b = models_baseline2['drawer_lstmaddonly_b']
# %%
data_scene2seq_a = SceneToSeqData('a')
data_scene2seq_b = SceneToSeqData('b')
# %%
def train_teller(split, teller_pair, num_epochs=50, limit=100):
splits_pair = split + 'a', split + 'b'
if split == 'a':
teller = teller_pair[0]
elif split == 'b':
teller = teller_pair[1]
else:
assert False
optimizer = torch.optim.Adam(teller.parameters())
print('perplexity-dev', model.calc_perplexity(teller))
print('perplexity-a', model.calc_perplexity(teller, 'a'))
print('avg-loss-dev', teller.calc_split_loss())
print('avg-loss-a', teller.calc_split_loss('a'))
for epoch in range(num_epochs):
teller.train()
for num, ex in enumerate(teller.datagen.get_examples_batch()):
optimizer.zero_grad()
loss = teller(ex)
loss.backward()
optimizer.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
del ex, loss # clean up memory
print('perplexity-dev', model.calc_perplexity(teller))
print('perplexity-a', model.calc_perplexity(teller, 'a'))
print('avg-loss-dev', teller.calc_split_loss())
print('avg-loss-a', teller.calc_split_loss('a'))
for splits in splits_pair:
sims = eval_fns(make_fns(splits, teller_pair, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=limit)
print(splits, sims.mean())
# %%
teller_scene2seq_a = SceneToSeqTeller(data_scene2seq_a, prediction_loss_scale=0)
teller_scene2seq_b = SceneToSeqTeller(data_scene2seq_b, prediction_loss_scale=0)
train_teller('a', (teller_scene2seq_a, teller_scene2seq_b))
train_teller('b', (teller_scene2seq_a, teller_scene2seq_b))
# %% scene2seq with intermediate supervision for all clipart ids
teller_scene2seq_aux_a = SceneToSeqTeller(data_scene2seq_a)
teller_scene2seq_aux_b = SceneToSeqTeller(data_scene2seq_b)
train_teller('a', (teller_scene2seq_aux_a, teller_scene2seq_aux_b))
train_teller('b', (teller_scene2seq_aux_a, teller_scene2seq_aux_b))
# %% scene2seq with intermediate supervision only for present cliparts
teller_scene2seq_aux2_a = SceneToSeqTeller(data_scene2seq_a, predict_for_full_library=False, prediction_loss_scale=6.)
teller_scene2seq_aux2_b = SceneToSeqTeller(data_scene2seq_b, predict_for_full_library=False, prediction_loss_scale=6.)
train_teller('a', (teller_scene2seq_aux2_a, teller_scene2seq_aux2_b), num_epochs=40)
train_teller('b', (teller_scene2seq_aux2_a, teller_scene2seq_aux2_b), num_epochs=40)
# %%
scene2seq_specs = dict(
teller_scene2seq_a = teller_scene2seq_a.spec,
teller_scene2seq_b = teller_scene2seq_b.spec,
teller_scene2seq_aux_a = teller_scene2seq_aux_a.spec,
teller_scene2seq_aux_b = teller_scene2seq_aux_b.spec,
teller_scene2seq_aux2_a = teller_scene2seq_aux2_a.spec,
teller_scene2seq_aux2_b = teller_scene2seq_aux2_b.spec,
)
# %%
print()
print()
print("Saving models")
torch.save(scene2seq_specs, Path('models/scene2seq.pt'))
# %%
print()
print("Final evaluation on full dev set (scene2seq)")
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scene2seq_a, teller_scene2seq_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
print("Final evaluation on full dev set (scene2seq_aux)")
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scene2seq_aux_a, teller_scene2seq_aux_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
print("Final evaluation on full dev set (scene2seq_aux2)")
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scene2seq_aux2_a, teller_scene2seq_aux2_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
| codraw-models-master | baseline3_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
import model
from model import make_fns, eval_fns
from saved_models import load_models, make_pairs
# %%
def print_human(limit=None, split='dev'):
human_sims = np.array([
scene_similarity(human_scene, true_scene)
        for true_scene, human_scene in codraw_data.get_truth_and_human_scenes(split)[:limit]
])
print(f"Human scene similarity [{split}]: mean={human_sims.mean():.2f} std={human_sims.std():.2f} median={np.median(human_sims):.2f}")
# %%
def print_pairwise(tellers, drawers, teller_splits='ab', drawer_splits='ab', limit=None, split='dev'):
print(f"Teller \t Drawer \t Scene similarity [{split}]")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
if splits[0] not in teller_splits or splits[1] not in drawer_splits:
continue
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit, split=split)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t {sims.mean():.2f}")
print()
# %%
def print_script(drawers, drawer_splits='ab', limit=None, split='dev'):
print("Drawer evaluations against script")
print(f"Drawer \t Scene similarity [{split}]")
for drawer_name, drawer_pair in drawers:
for drawer_split in drawer_splits:
sims = eval_fns(make_fns(drawer_split, model.scripted_tell, drawer_pair), limit=limit, split=split)
drawer_caption = f"{drawer_name}_{drawer_split}"
print(f"{drawer_caption:17s}\t {sims.mean():.2f}")
# %%
component_evaluator = model.ComponentEvaluator.get()
# %%
def print_components_pairwise(tellers, drawers, teller_splits='ab', drawer_splits='ab', limit=None, split='dev'):
print(f"Component evaluations [{split}]")
print("Teller \t Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
if splits[0] not in teller_splits or splits[1] not in drawer_splits:
continue
components = component_evaluator.eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit, split=split)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
print()
def print_components_script(drawers, drawer_splits='ab', limit=None, split='dev'):
print(f"Drawer evaluations against script [{split}]")
print("Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for drawer_name, drawer_pair in drawers:
for drawer_split in drawer_splits:
components = component_evaluator.eval_fns(make_fns(drawer_split, model.scripted_tell, drawer_pair), limit=limit, split=split)
drawer_caption = f"{drawer_name}_{drawer_split}"
print(f"{drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
# %%
def print_eval(
tellers=None, drawers=None,
teller_splits='ab', drawer_splits='ab',
limit=None,
split='dev',
do_all=False,
do_human=False,
do_pairwise=False,
do_script=False,
do_components_pairwise=False,
do_components_script=False,
):
if do_all:
do_human = True
do_pairwise = True
do_script = True
do_components_pairwise = True
do_components_script = True
print()
if do_human:
print_human(limit=limit, split=split)
print()
print()
if do_pairwise:
print_pairwise(tellers, drawers, teller_splits=teller_splits, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
if do_script:
print_script(drawers, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
if do_components_pairwise:
print_components_pairwise(tellers, drawers, teller_splits=teller_splits, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
if do_components_script:
print_components_script(drawers, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
# %%
if __name__ == '__main__':
models = load_models()
# %%
if __name__ == '__main__':
tellers = make_pairs(models,
'teller_nn',
# 'teller_pragmaticnn',
'teller_scene2seq',
'teller_scene2seq_aux2',
'teller_rl',
)
drawers_for_script = make_pairs(models,
'drawer_nn',
# 'drawer_bowcanvas2bce',
'drawer_lstmaddonly',
)
drawers_for_pairwise = make_pairs(models,
'drawer_lstmaddonly',
)
limit=None
split='test'
print_eval(limit=limit, split=split, do_human=True)
print_eval(tellers, drawers_for_pairwise, teller_splits='a', drawer_splits='b', limit=limit, split=split, do_pairwise=True)
print_eval(tellers, drawers_for_script, teller_splits='a', drawer_splits='b', limit=limit, split=split, do_script=True)
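    # The component-level breakdowns (print_components_pairwise / print_components_script)
    # can be requested the same way, e.g. (illustrative):
    # print_eval(tellers, drawers_for_pairwise, teller_splits='a', drawer_splits='b',
    #     limit=limit, split=split, do_components_pairwise=True)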
# %%
# %%
# %%
# %%
# %%
# %%
| codraw-models-master | eval_automatic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
from baseline1_models import load_baseline1
from baseline2_models import load_baseline2
from baseline3_models import load_baseline3
import model
from model import make_fns, eval_fns
# %%
component_evaluator = model.ComponentEvaluator.get()
# %%
models_baseline1 = load_baseline1()
models_baseline2 = load_baseline2()
models_baseline3 = load_baseline3()
# %%
tellers = [
# ('teller_nn', (models_baseline1['teller_nn_a'], models_baseline1['teller_nn_b'])),
# ('teller_c2seq', (models_baseline1['teller_c2seq_a'], models_baseline1['teller_c2seq_b'])),
# ('teller_pragmaticnn', (models_baseline2['teller_pragmaticnn_a'], models_baseline2['teller_pragmaticnn_b'])),
('teller_scene2seq', (models_baseline3['teller_scene2seq_a'], models_baseline3['teller_scene2seq_b'])),
('teller_scene2seq_aux', (models_baseline3['teller_scene2seq_aux_a'], models_baseline3['teller_scene2seq_aux_b'])),
('teller_scene2seq_aux2', (models_baseline3['teller_scene2seq_aux2_a'], models_baseline3['teller_scene2seq_aux2_b'])),
]
drawers = [
# ('drawer_nn', (models_baseline1['drawer_nn_a'], models_baseline1['drawer_nn_b'])),
# ('drawer_sim', (models_baseline1['drawer_sim_a'], models_baseline1['drawer_sim_b'])),
# ('drawer_bow2c', (models_baseline1['drawer_bow2c_a'], models_baseline1['drawer_bow2c_b'])),
# ('drawer_bow2bce', (models_baseline1['drawer_bow2bce_a'], models_baseline1['drawer_bow2bce_b'])),
# ('drawer_bowcanvas2bce', (models_baseline1['drawer_bowcanvas2bce_a'], models_baseline1['drawer_bowcanvas2bce_b'])),
('drawer_lstmaddonly', (models_baseline2['drawer_lstmaddonly_a'], models_baseline2['drawer_lstmaddonly_b'])),
]
# %%
print()
human_sims = np.array([
scene_similarity(human_scene, true_scene)
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes('dev')
])
print(f"Human scene similarity: mean={human_sims.mean():.6f} std={human_sims.std():.6f} median={np.median(human_sims):.6f}")
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Scene similarity")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", sims.mean())
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Scene similarity")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", sims.mean())
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
                components = component_evaluator.eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
        components = component_evaluator.eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
| codraw-models-master | baseline3_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Abstract Scene (abs) utilities copied from the original CoDraw codebase
"""
import math
import torch
from torch.autograd import Variable
class AbsUtil:
"""AbsUtil ported from AbsUtil.js"""
    # Various variables setting up the appearance of the interface
CANVAS_WIDTH = 500
CANVAS_HEIGHT = 400
NOT_USED = -10000
numClipArts = 58
numTypes = 8
numProps = 6
numClasses = [58,35,3,2,1,1]
Null = 0
def __init__(self, str):
# Each object type has its own prefix, the ordering of the object types affects the
# order in which they are rendered. That is the "t" type (toys) will be rendered on top
# of the "hb0" (boy) category assuming they have the same depth.
self.prefix = ['s','p','hb0','hb1','a','c','e','t']
# Total number of clipart for each type
self.typeTotalCt = [8,10,35,35,6,10,7,15]
# Total number of clipart to be randomly selected for each type
# The sum should equal numClipart
self.typeCt = [3,4,5,5,2,3,2,4]
self.str = str
self.obj = self.preprocess(str)
    # Preprocess given CSV into the 7Val format (clipartType is dropped as redundant, leaving 6 values per clipart):
# 1. clipartIdx integer [0-57]
# ~~2. clipartType integer [0-7]~~
# 3. clipartSubType integer [0-34]
# 4. depth integer [0-2]
# 5. flip integer [0-1]
# 6. x-position float [1-500]
# 7. y-position float [1-400]
def preprocess(self, str, verbose=False):
        idx = 1
        val = []
if not str or len(str) < 1:
return None
results = str.split(',')
numClipArts = int(results[0])
for i in range(numClipArts):
v = list()
idx = idx + 1 # png filename
idx = idx + 1 # clip art local index
_clipArtObjectIdx = int(results[idx]); idx = idx + 1
_clipArtTypeIdx = int(results[idx]); idx = idx + 1
# This code was originally used to read the dataset from Python
_clipArtX = int(round(float(results[idx]))); idx = idx + 1
_clipArtY = int(round(float(results[idx]))); idx = idx + 1
# The javascript code, however, used parseInt instead. This has
# slightly different rounding behavior, which can be recreated by
# using the following Python code instead:
# _clipArtX = float(results[idx]); idx = idx + 1
# _clipArtY = float(results[idx]); idx = idx + 1
# _clipArtX = int(math.floor(_clipArtX)) if _clipArtX >= 0 else -int(math.floor(-_clipArtX))
# _clipArtY = int(math.floor(_clipArtY)) if _clipArtY >= 0 else -int(math.floor(-_clipArtY))
_clipArtZ = int(results[idx]); idx = idx + 1
_clipArtFlip = int(results[idx]); idx = idx + 1
if not verbose and (_clipArtX==AbsUtil.NOT_USED or _clipArtY==AbsUtil.NOT_USED):
continue
v.append(self.getClipArtIdx(_clipArtObjectIdx, _clipArtTypeIdx))
# v.append(_clipArtTypeIdx); # remove this redundant feature
v.append(_clipArtObjectIdx if (_clipArtTypeIdx==2 or _clipArtTypeIdx==3) else 0)
v.append(_clipArtZ)
v.append(_clipArtFlip)
v.append(_clipArtX)
v.append(_clipArtY)
val.append(v)
return val
def asTensor(self):
        if self.obj is None:
return None
# notice that position (x & y) is rounded as LongTensor
t = torch.LongTensor(AbsUtil.numClipArts, 6).fill_(AbsUtil.Null)
# clipartIdx & clipartSubType are starting with 1
t[:,:2].add_(-1)
for v in self.obj:
clipartIdx = v[0]
t[clipartIdx].copy_(torch.LongTensor(v))
t[:,:2].add_(1)
return t
def __repr__(self):
return self.obj.__repr__()
def getClipArtIdx(self, clipArtObjectIdx, clipArtTypeIdx):
typeTotalPos = [0,8,18,19,20,26,36,43]
offset = 0 if (clipArtTypeIdx==2 or clipArtTypeIdx==3) else clipArtObjectIdx
return typeTotalPos[clipArtTypeIdx] + offset
# Static methods #############################################################
# Sample clipart from idx(abs_d - abs_b)>0
# @param abs_b Tensor(bx58x6)
# @param abs_d Tensor(bx58x6)
# @output Tensor(bx6)
# @output Tensor(bx58)
@staticmethod
def sample_abs_c(abs_b, abs_d):
# using Tensors directly
abs_b = abs_b.data
abs_d = abs_d.data
# bx58
abs_c_mask = (abs_d - abs_b).abs().sum(2)!=0 # updated cliparts
# bx58x6
mask = abs_c_mask.unsqueeze(2).expand_as(abs_d)
# collapsed x 6
abs_c = abs_d[mask.byte()].view(-1, abs_d.size(-1))
return abs_c, abs_c_mask
# Get abs_c mask, if `r_mask` is given, masked over it.
# @param abs_b (long, bx58x6): latest drawn scene before prev teller's message
# @param abs_d (long, bx58x6): latest drawn scene before next teller's message
# @param r_mask (byte, optional, b)
    # @output c_mask (byte, b): batch mask whether drawn scene is changed or not
@staticmethod
def get_c_mask(abs_b, abs_d, r_mask=None):
if Variable==type(r_mask):
r_mask = r_mask.data
_, abs_c_mask = AbsUtil.sample_abs_c(abs_b, abs_d) # _, bx58
c_mask = abs_c_mask.sum(1).byte()>0
if r_mask is not None:
c_mask = c_mask.mul(r_mask)
return c_mask
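# Usage sketch (illustrative) #################################################
# The scene string below is synthetic (not taken from the CoDraw dataset) and
# merely follows the field order preprocess() expects: a clipart count, then
# per clipart: png filename, local index, objectIdx, typeIdx, x, y, depth, flip.
if __name__ == "__main__":
    demo_scene = "1,s_0s.png,0,0,0,200.0,150.0,0,0"  # hypothetical scene CSV
    util = AbsUtil(demo_scene)
    print(util.obj)               # [[0, 0, 0, 0, 200, 150]]
    print(util.asTensor().shape)  # torch.Size([58, 6])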
| codraw-models-master | abs_util_orig.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import NearestNeighborData, MessageSimilarityData, BOWtoClipartData, ClipartToSeqData, BOWplusCanvasToMultiData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns
from model import scripted_tell, scripted_tell_before_peek, scripted_tell_after_peek, draw_nothing
from baseline1_models import load_baseline1
# %%
models = load_baseline1()
# %%
tellers = [
('teller_nn', (models['teller_nn_a'], models['teller_nn_b'])),
('teller_c2seq', (models['teller_c2seq_a'], models['teller_c2seq_b'])),
]
drawers = [
('drawer_nn', (models['drawer_nn_a'], models['drawer_nn_b'])),
('drawer_sim', (models['drawer_sim_a'], models['drawer_sim_b'])),
('drawer_bow2c', (models['drawer_bow2c_a'], models['drawer_bow2c_b'])),
('drawer_bow2bce', (models['drawer_bow2bce_a'], models['drawer_bow2bce_b'])),
('drawer_bowcanvas2bce', (models['drawer_bowcanvas2bce_a'], models['drawer_bowcanvas2bce_b'])),
]
# %%
limit = None
print("Drawer evaluations against script")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, scripted_tell, drawer_pair), limit=limit)
print(f"{drawer_name}_{split}", sims.mean())
# %%
limit = None
print("Drawer evaluations against script before peek")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, scripted_tell_before_peek, drawer_pair), limit=limit)
print(f"{drawer_name}_{split}", sims.mean())
# %%
limit = None
print("Drawer evaluations against script after peek")
sims = eval_fns(make_fns('', scripted_tell_after_peek, draw_nothing), limit=limit)
print("draw_nothing", sims.mean())
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, scripted_tell_after_peek, drawer_pair), limit=limit)
print(f"{drawer_name}_{split}", sims.mean())
# %%
limit = None
print("Teller/Drawer pair evaluations")
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
print(f"{teller_name}_{splits[0]} {drawer_name}_{splits[1]}", sims.mean())
| codraw-models-master | baseline1_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
try:
get_ipython()
INTERACTIVE=True
except:
INTERACTIVE=False
def try_magic(*args, **kwargs):
if not INTERACTIVE:
return
return get_ipython().magic(*args, **kwargs)
def try_cd(loc):
if not INTERACTIVE:
return
return get_ipython().magic(f'%cd {loc}')
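# Example (illustrative): try_cd('~/dev/project') invokes the IPython `%cd`
# magic with that path when run inside IPython/Jupyter, and is a silent no-op
# when the code runs as a plain script.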
| codraw-models-master | interactivity.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Scene-level nearest-neighbor teller
"""
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
import model
from model import make_fns, eval_fns
from model import Model
from baseline2_models import load_baseline2
# %%
scenes_and_scripts_dev = codraw_data.get_scenes_and_scripts('dev')
transcribe = Transcriber(
'exp28_scenenn.py' if INTERACTIVE else __file__,
scenes_and_scripts=scenes_and_scripts_dev[::110],
scenes_description="scenes_and_scripts_dev[::110]")
# %%
models_baseline2 = load_baseline2()
# %%
drawer_lstmaddonly_a = models_baseline2['drawer_lstmaddonly_a']
drawer_lstmaddonly_b = models_baseline2['drawer_lstmaddonly_b']
# %%
from datagen import Datagen
class SceneNearestNeighborData(Datagen):
def init_full(self):
self.build_dicts()
def init_from_spec(self):
self.build_dicts()
def build_dicts(self):
self.scene_to_msgs = {}
# calculate events
events = codraw_data.get_contextual_place_many(self.split)
scene = None
msgs = None
it = iter(events)
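        # Walk the recorded events: each ObserveTruth starts a new scene, and
        # the TellGroup messages that follow it are stored as that scene's
        # message sequence.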
for event in it:
if isinstance(event, codraw_data.ObserveTruth):
if scene is not None and msgs is not None:
self.scene_to_msgs[tuple(scene)] = msgs
scene = event.scene
msgs = []
elif isinstance(event, codraw_data.TellGroup):
msgs.append(event.msg)
if scene is not None and msgs is not None:
self.scene_to_msgs[tuple(scene)] = msgs
# %%
class SceneNearestNeighborTeller(Model):
datagen_cls = SceneNearestNeighborData
def prepare(self, episode):
scene = episode.get_last(codraw_data.ObserveTruth).scene
best_similarity = -1
best_msgs = []
best_scene_tuple = None
for cand_scene_tuple in self.datagen.scene_to_msgs:
cand_sim = scene_similarity(cand_scene_tuple, scene)
if cand_sim > best_similarity:
best_similarity = cand_sim
best_msgs = self.datagen.scene_to_msgs[cand_scene_tuple]
best_scene_tuple = cand_scene_tuple
# display(AbstractScene(scene))
# display(AbstractScene(best_scene_tuple))
# display(best_similarity)
episode.to_tell = best_msgs[::] # make a copy!
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def tell(self, episode):
if not hasattr(episode, 'to_tell'):
self.prepare(episode)
if episode.to_tell:
msg = episode.to_tell.pop(0)
episode.append(codraw_data.TellGroup(msg))
def get_action_fns(self):
return [self.tell]
# %%
data_scenenn_a = SceneNearestNeighborData('a')
data_scenenn_b = SceneNearestNeighborData('b')
# %%
teller_scenenn_a = SceneNearestNeighborTeller(data_scenenn_a)
teller_scenenn_b = SceneNearestNeighborTeller(data_scenenn_b)
# %%
# Episode.run(codraw_data.get_scenes('dev')[0], make_fns('aa', (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b))).display()
# %%
# %%
# %%
print()
print()
print("Final evaluation on full dev set")
# %%
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
# aa 1.3095491909624886
# ab 1.3115692170881366
# nohier aa 2.229799264350204
# nohier ab 2.255167911899865
# %%
for splits in ('ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
# %%
transcribe("exp28_scenenn",
aa=make_fns('aa', (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)),
ab=make_fns('ab', (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)),
)
# %%
# hieraddonlyseq = dict(
# drawer_hieraddonlyseq_a = drawer_hieraddonlyseq_a.spec,
# drawer_hieraddonlyseq_b = drawer_hieraddonlyseq_b.spec,
# )
#%%
# torch.save(hieraddonlyseq, Path('models/hieraddonlyseq.pt'))
# %%
# %%
# %%
# %%
| codraw-models-master | exp28_scenenn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import NearestNeighborData, MessageSimilarityData, BOWtoClipartData, ClipartToSeqData, BOWplusCanvasToMultiData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns, scripted_tell
# %%
class NearestNeighborTeller(Model):
datagen_cls = NearestNeighborData
@respond_to(codraw_data.SelectClipart)
def tell(self, episode):
clipart = episode.get_last(codraw_data.SelectClipart).clipart
best_similarity = -1
best_msg = ""
for cand_clipart in self.datagen.clipart_to_msg:
cand_sim = clipart_similarity(cand_clipart, clipart)
if cand_sim > best_similarity:
best_similarity = cand_sim
best_msg = self.datagen.clipart_to_msg[cand_clipart]
episode.append(codraw_data.TellGroup(best_msg))
def get_action_fns(self):
return [select_clipart_to_tell, self.tell]
#%%
class CharNeighborDrawer(Model):
datagen_cls = NearestNeighborData
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
msg = episode.get_last(codraw_data.TellGroup).msg
best_distance = float('inf')
best_clipart = None
for cand_msg in self.datagen.msg_to_clipart:
cand_dist = editdistance.eval(cand_msg, msg)
if cand_dist < best_distance:
best_distance = cand_dist
best_clipart = self.datagen.msg_to_clipart[cand_msg]
episode.append(codraw_data.DrawClipart(best_clipart))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
#%%
class BOWNeighborDrawer(Model, torch.nn.Module):
datagen_cls = MessageSimilarityData
def init_full(self, d_embeddings=512):
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
self.msg_vecs = []
self.msg_vecs_cliparts = []
self.null_clipart = None
def post_init_from_spec(self):
self.prepare_for_inference()
def get_spec(self):
return dict(d_embeddings=self.d_embeddings)
def forward(self, example_batch):
bow_feats = self.word_embs(example_batch['words'], example_batch['offsets']).reshape(-1,21,self.d_embeddings)
# assert np.isfinite(bow_feats.data.numpy()).all()
bow_feats_src = bow_feats[:,0,:]
bow_feats_tgt = bow_feats[:,1:,:]
similarity_scores = torch.bmm(bow_feats_tgt, bow_feats_src[:,:,None])[:,:,0]
loss = F.cross_entropy(similarity_scores, torch.zeros(similarity_scores.shape[0], dtype=torch.long, device=cuda_if_available))
return loss
def vec_for_msg(self, msg):
if msg == "":
return None
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
return None
return self.word_embs(torch.tensor([words], dtype=torch.long, device=self.word_embs.weight.device))[0,:].cpu().detach().numpy()
def prepare_for_inference(self):
self.msg_vecs = []
self.msg_vecs_cliparts = []
# sorting is important for deterministic serialization
for msg in sorted(self.datagen.msg_to_clipart.keys()):
clipart = self.datagen.msg_to_clipart[msg]
vec = self.vec_for_msg(msg)
if vec is not None:
self.msg_vecs.append(vec)
self.msg_vecs_cliparts.append(clipart)
else:
self.null_clipart = clipart
if self.null_clipart is None:
self.null_clipart = self.msg_vecs_cliparts[0]
self.msg_vecs = np.array(self.msg_vecs).T
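        # msg_vecs now has shape (d_embeddings, num_messages); draw() scores
        # every stored message with a single `vec @ msg_vecs` and picks the
        # clipart associated with the argmax.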
self.eval()
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
msg = episode.get_last(codraw_data.TellGroup).msg
vec = self.vec_for_msg(msg)
if vec is not None:
best_clipart = self.msg_vecs_cliparts[np.argmax(vec @ self.msg_vecs)]
else:
best_clipart = self.null_clipart
episode.append(codraw_data.DrawClipart(best_clipart))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
#%%
class BOWtoClipartDrawer(Model, torch.nn.Module):
datagen_cls = BOWtoClipartData
NUM_INDEX = 58
NUM_SUBTYPES = 35
NUM_DEPTH = 3
NUM_FLIP = 2
NUM_CATEGORICAL = 35 + 3 + 2
NUM_NUMERICAL = 2 # x, y
NUM_ALL = NUM_CATEGORICAL + NUM_NUMERICAL
def init_full(self, d_embeddings=512, d_hidden=1024):
self.d_embeddings = d_embeddings
self.d_hidden = d_hidden
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
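        # Together these give vals = 2 * sigmoid(raw / 5), so predicted
        # normalized x/y coordinates are squashed into (0, 2).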
d_out = self.NUM_INDEX * (self.NUM_ALL + 1)
self.lang_to_clipart = nn.Sequential(
nn.Linear(d_embeddings, d_hidden),
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
self.to(cuda_if_available)
def get_spec(self):
return dict(d_embeddings=self.d_embeddings, d_hidden=self.d_hidden)
def forward(self, example_batch):
bow_feats = self.word_embs(example_batch['msg_idxs'], example_batch['offsets'])
clipart_scores = self.lang_to_clipart(bow_feats).reshape(-1, self.NUM_INDEX, (self.NUM_ALL + 1))
correct_index = example_batch['clipart_index']
logits_index = clipart_scores[:,:,0]
correct_scores = clipart_scores[torch.arange(correct_index.shape[0], dtype=torch.long, device=cuda_if_available), correct_index][:,1:]
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(correct_scores, [self.NUM_SUBTYPES, self.NUM_DEPTH, self.NUM_FLIP, self.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
loss = ( F.cross_entropy(logits_index, correct_index)
+ F.cross_entropy(logits_subtype, correct_categorical[:,0])
+ F.cross_entropy(logits_depth, correct_categorical[:,1])
+ F.cross_entropy(logits_flip, correct_categorical[:,2])
+ F.mse_loss(vals_numerical, correct_numerical)
)
return loss
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
msg = episode.get_last(codraw_data.TellGroup).msg
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
# XXX(nikita): this is using DrawGroup, while normally DrawClipart is used
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
bow_feats = self.word_embs(msg_idxs[None,:])
clipart_scores = self.lang_to_clipart(bow_feats).reshape(-1, self.NUM_INDEX, (self.NUM_ALL + 1))[0,:,:]
best_idx = int(clipart_scores[:,0].argmax())
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(clipart_scores[best_idx,1:], [self.NUM_SUBTYPES, self.NUM_DEPTH, self.NUM_FLIP, self.NUM_NUMERICAL])
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
nx, ny = vals_numerical.cpu().detach().numpy()
clipart = Clipart(best_idx, int(logits_subtype.argmax()), int(logits_depth.argmax()), int(logits_flip.argmax()), normed_x=nx, normed_y=ny)
episode.append(codraw_data.DrawClipart(clipart))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
#%%
class ClipartToSeqTeller(Model, torch.nn.Module):
datagen_cls = ClipartToSeqData
def init_full(self, d_word_emb=256, d_clipart_binary=256, d_clipart_numerical=256, d_clipart_hidden=1024, d_hidden=1024):
self._args = dict(
d_word_emb=d_word_emb,
d_clipart_binary=d_clipart_binary,
d_clipart_numerical=d_clipart_numerical,
d_clipart_hidden=d_clipart_hidden,
d_hidden=d_hidden)
self.word_embs = nn.Embedding(len(self.datagen.vocabulary_dict), d_word_emb)
self.binary_feature_embs = nn.Linear(self.datagen.NUM_BINARY, d_clipart_binary, bias=False)
self.numerical_transform = nn.Sequential(
nn.Linear(self.datagen.NUM_NUMERICAL, d_clipart_numerical),
nn.ReLU(),
)
self.clipart_transform = nn.Sequential(
nn.Linear(d_clipart_numerical + d_clipart_binary, d_clipart_hidden),
nn.ReLU(),
nn.Linear(d_clipart_hidden, d_hidden),
)
self.lstm = nn.LSTM(d_word_emb, d_hidden, num_layers=2)
self.word_project = nn.Linear(d_hidden, len(self.datagen.vocabulary_dict))
self.to(cuda_if_available)
def get_spec(self):
return self._args
def forward(self, example_batch):
binary_feats = self.binary_feature_embs(example_batch['clipart_binary'])
numerical_feats = self.numerical_transform(example_batch['clipart_numerical'])
clipart_feats = self.clipart_transform(torch.cat([binary_feats, numerical_feats], -1))
msg_embedded = nn.utils.rnn.PackedSequence(self.word_embs(example_batch['msg_in'].data), example_batch['msg_in'].batch_sizes)
initial_state = torch.stack([clipart_feats] * self.lstm.num_layers)
lstm_out, _ = self.lstm(msg_embedded, (initial_state, initial_state))
word_logits = self.word_project(lstm_out.data)
per_word_losses = nn.utils.rnn.PackedSequence(F.cross_entropy(word_logits, example_batch['msg_out'].data, reduce=False), example_batch['msg_out'].batch_sizes)
per_example_losses = nn.utils.rnn.pad_packed_sequence(per_word_losses)[0].sum(-1)
loss = per_example_losses.mean()
return loss
@respond_to(codraw_data.SelectClipart)
def tell(self, episode):
clipart = episode.get_last(codraw_data.SelectClipart).clipart
x = clipart.normed_x
y = clipart.normed_y
clipart_numerical = torch.tensor([x, y], dtype=torch.float)
clipart_binary = torch.zeros(self.datagen.NUM_BINARY)
for val, offset in zip([clipart.idx, clipart.subtype, clipart.depth, clipart.flip], self.datagen.BINARY_OFFSETS):
clipart_binary[val + offset] = 1.
binary_feats = self.binary_feature_embs(clipart_binary[None,:].to(cuda_if_available))
numerical_feats = self.numerical_transform(clipart_numerical[None,:].to(cuda_if_available))
clipart_feats = self.clipart_transform(torch.cat([binary_feats, numerical_feats], -1))
token_idxs = [self.datagen.vocabulary_dict['<S>']]
# lstm_state = (F.tanh(clipart_feats[None,:,:]), clipart_feats[None,:,:])
lstm_state = torch.stack([clipart_feats] * self.lstm.num_layers)
lstm_state = (lstm_state, lstm_state)
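        # Greedy decoding: repeatedly feed back the argmax token until </S> is
        # produced or 200 tokens have been generated.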
for _ in range(200):
token_emb = self.word_embs(torch.tensor(token_idxs[-1], dtype=torch.long).to(cuda_if_available))[None,None,:]
lstm_out, lstm_state = self.lstm(token_emb, lstm_state)
next_token = int(self.word_project(lstm_out[0,0,:]).argmax())
token_idxs.append(next_token)
if next_token == self.datagen.vocabulary_dict['</S>']:
break
msg = " ".join([self.datagen.vocabulary[i] for i in token_idxs[1:-1]])
episode.append(codraw_data.TellGroup(msg))
def get_action_fns(self):
return [select_clipart_to_tell, self.tell]
#%%
class BOWtoMultiBCEDrawer(Model, torch.nn.Module):
datagen_cls = BOWplusCanvasToMultiData
def init_full(self, d_embeddings=512, d_hidden=1024):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
dg = self.datagen
d_out = dg.NUM_INDEX * (dg.NUM_ALL + 1)
self.lang_to_clipart = nn.Sequential(
nn.Linear(d_embeddings, d_hidden),
# nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
self.to(cuda_if_available)
def get_spec(self):
return self._args
def forward(self, example_batch):
dg = self.datagen
bow_feats = self.word_embs(example_batch['msg_idxs'], example_batch['offsets'])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
clipart_scores = self.lang_to_clipart(bow_feats).view(-1, dg.NUM_INDEX, dg.NUM_ALL + 1)
clipart_idx_scores = clipart_scores[:,:,0]
idx_losses = F.binary_cross_entropy_with_logits(clipart_idx_scores, example_batch['clipart_chosen_mask'].to(torch.float), reduce=False)
# idx_losses = torch.where(example_batch['clipart_chosen_mask'], 3. * idx_losses, idx_losses)
per_example_idx_loss = idx_losses.sum(1)
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
subtype_losses = F.cross_entropy(logits_subtype, correct_categorical[:,:,0].view((-1,)), reduce=False).view_as(correct_categorical[:,:,0])
depth_losses = F.cross_entropy(logits_depth, correct_categorical[:,:,1].view((-1,)), reduce=False).view_as(correct_categorical[:,:,1])
flip_losses = F.cross_entropy(logits_flip, correct_categorical[:,:,2].view((-1,)), reduce=False).view_as(correct_categorical[:,:,2])
vals_losses = F.mse_loss(vals_numerical, correct_numerical.view((-1, dg.NUM_NUMERICAL)), reduce=False).view_as(correct_numerical).sum(-1)
all_losses = torch.stack([subtype_losses, depth_losses, flip_losses, vals_losses], -1).sum(-1)
per_example_loss = torch.where(example_batch['clipart_chosen_mask'], all_losses, all_losses.new_zeros(1)).sum(-1)
loss = per_example_idx_loss.mean() + per_example_loss.mean()
return loss
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
dg = self.datagen
msg = episode.get_last(codraw_data.TellGroup).msg
# assert msg != ""
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
bow_feats = self.word_embs(msg_idxs[None,:])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
clipart_scores = self.lang_to_clipart(bow_feats).view(-1, dg.NUM_INDEX, (dg.NUM_ALL + 1))
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
vals_numerical = vals_numerical.cpu().detach().numpy()
clipart_idx_scores = clipart_scores[0,:,0].cpu().detach().numpy()
cliparts = []
for idx in np.where(clipart_idx_scores > 0)[0]:
nx, ny = vals_numerical[idx,:]
clipart = Clipart(idx, int(logits_subtype[idx,:].argmax()), int(logits_depth[idx,:].argmax()), int(logits_flip[idx,:].argmax()), normed_x=nx, normed_y=ny)
cliparts.append(clipart)
episode.append(codraw_data.DrawGroup(cliparts))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
# %%
class BOWplusCanvasDrawer(Model, torch.nn.Module):
datagen_cls = BOWplusCanvasToMultiData
def init_full(self, d_embeddings=512, d_hidden=512):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
# Helps overcome class imbalance (most cliparts are not drawn most of
# the time)
self.positive_scaling_coeff = 3.
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
dg = self.datagen
self.lang_to_hidden = nn.Linear(d_embeddings, d_hidden)
self.canvas_binary_to_hidden = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(dg.NUM_BINARY, d_hidden, bias=False),
)
self.canvas_numerical_to_hidden = nn.Sequential(
nn.Linear(dg.NUM_INDEX * dg.NUM_NUMERICAL, d_hidden, bias=False),
)
d_out = dg.NUM_INDEX * (dg.NUM_ALL + 1)
self.hidden_to_clipart = nn.Sequential(
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
self.to(cuda_if_available)
def forward(self, example_batch):
dg = self.datagen
bow_feats = self.word_embs(example_batch['msg_idxs'], example_batch['offsets'])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
hidden_feats = (
self.lang_to_hidden(bow_feats)
+ self.canvas_binary_to_hidden(example_batch['canvas_binary'].float())
+ self.canvas_numerical_to_hidden(example_batch['canvas_numerical'])
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, dg.NUM_ALL + 1)
clipart_idx_scores = clipart_scores[:,:,0]
idx_losses = F.binary_cross_entropy_with_logits(clipart_idx_scores, example_batch['clipart_chosen_mask'].to(torch.float), reduce=False)
idx_losses = torch.where(example_batch['clipart_chosen_mask'], self.positive_scaling_coeff * idx_losses, idx_losses)
per_example_idx_loss = idx_losses.sum(1)
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
subtype_losses = F.cross_entropy(logits_subtype, correct_categorical[:,:,0].view((-1,)), reduce=False).view_as(correct_categorical[:,:,0])
depth_losses = F.cross_entropy(logits_depth, correct_categorical[:,:,1].view((-1,)), reduce=False).view_as(correct_categorical[:,:,1])
flip_losses = F.cross_entropy(logits_flip, correct_categorical[:,:,2].view((-1,)), reduce=False).view_as(correct_categorical[:,:,2])
vals_losses = F.mse_loss(vals_numerical, correct_numerical.view((-1, dg.NUM_NUMERICAL)), reduce=False).view_as(correct_numerical).sum(-1)
all_losses = torch.stack([subtype_losses, depth_losses, flip_losses, vals_losses], -1).sum(-1)
per_example_loss = torch.where(example_batch['clipart_chosen_mask'], all_losses, all_losses.new_zeros(1)).sum(-1)
loss = per_example_idx_loss.mean() + per_example_loss.mean()
return loss
@respond_to(codraw_data.ObserveCanvas)
def draw(self, episode):
dg = self.datagen
msg = episode.get_last(codraw_data.TellGroup).msg
# assert msg != ""
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
canvas_context = episode.get_last(codraw_data.ObserveCanvas).scene
canvas_binary = np.zeros((dg.NUM_INDEX, 1 + dg.NUM_DEPTH + dg.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, dg.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((dg.NUM_INDEX, dg.NUM_NUMERICAL))
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + dg.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)[None,:].to(cuda_if_available)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)[None,:].to(cuda_if_available)
bow_feats = self.word_embs(msg_idxs[None,:])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
hidden_feats = (
self.lang_to_hidden(bow_feats)
+ self.canvas_binary_to_hidden(canvas_binary.float())
+ self.canvas_numerical_to_hidden(canvas_numerical)
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, (dg.NUM_ALL + 1))
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
vals_numerical = vals_numerical.cpu().detach().numpy()
clipart_idx_scores = clipart_scores[0,:,0].cpu().detach().numpy()
cliparts = []
prior_idxs = set([c.idx for c in canvas_context])
for idx in np.where(clipart_idx_scores > 0)[0]:
if idx in prior_idxs: # XXX: break ties in favor of earlier actions
continue
nx, ny = vals_numerical[idx,:]
clipart = Clipart(idx, int(logits_subtype[idx,:].argmax()), int(logits_depth[idx,:].argmax()), int(logits_flip[idx,:].argmax()), normed_x=nx, normed_y=ny)
cliparts.append(clipart)
episode.append(codraw_data.DrawGroup(cliparts))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [drawer_observe_canvas, self.draw]
#%%
def load_baseline1():
baseline1_specs = torch_load(Path('models/baseline1_may31.pt'))
models = {}
for k, spec in baseline1_specs.items():
print(k)
models[k] = globals()[spec['class']](spec=spec)
return models
| codraw-models-master | baseline1_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
from attention import AttentionSeqToMasked
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
from model import make_fns, eval_fns
from model import Model
from baseline2_models import load_baseline2
from baseline3_models import load_baseline3
from baseline4_models import RLSceneToSeqTeller, collect_episodes
# %%
models_baseline2 = load_baseline2()
models_baseline3 = load_baseline3()
# %%
drawer_lstmaddonly_a, drawer_lstmaddonly_b = models_baseline2['drawer_lstmaddonly_a'], models_baseline2['drawer_lstmaddonly_b']
teller_scene2seq_aux2_a, teller_scene2seq_aux2_b = models_baseline3['teller_scene2seq_aux2_a'], models_baseline3['teller_scene2seq_aux2_b']
# %%
def train_teller(split, teller_pair, scenes,
utterance_penalty=0.1,
gamma=0.999,
uninformative_penalty=0.3,
batch_size=16,
num_batches=12500,
eval_every=2000,
lr=0.00007,
limit=100,
base_name="scene2seq_rl",
):
print("Training hyperparameters:")
for param in ['utterance_penalty',
'gamma',
'uninformative_penalty',
'batch_size',
'num_batches',
'lr',
'limit',
]:
print(param, '=', locals()[param])
drawer_pair = drawer_lstmaddonly_a, drawer_lstmaddonly_b
splits_pair = split + 'a', split + 'b'
if split == 'a':
teller = teller_pair[0]
elif split == 'b':
teller = teller_pair[1]
else:
assert False
teller.disable_dropout()
fns = make_fns(split + split, teller_pair, drawer_pair)
optimizer = torch.optim.Adam(teller.parameters(), lr=lr)
def validate():
for inference_method in ['greedy', 'sample']:
teller.inference_method = inference_method
for splits in splits_pair:
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
print(splits, f'[{inference_method}]', sims.mean())
validate()
teller.inference_method = 'sample'
for batch_num in range(num_batches):
optimizer.zero_grad()
teller.eval()
episodes, ex = collect_episodes(
fns,
teller.datagen,
scenes=scenes,
batch_size=batch_size,
utterance_penalty=utterance_penalty,
gamma=gamma,
uninformative_penalty=uninformative_penalty,
)
teller.train()
loss = teller.calc_rl_loss(ex)
loss.backward()
# grad_norm = nn.utils.clip_grad_norm_(teller.parameters(), float('inf'))
# XXX(nikita): clip gradients in an attempt to stabilize. Need to see if
# there's an underlying bug, though.
grad_norm = nn.utils.clip_grad_norm_(teller.parameters(), 1.5)
optimizer.step()
mean_reward = float(ex['brw_rewards'].sum().item() / ex['b_scene_mask'].shape[0])
mean_len = np.mean([
len([event for event in episode if isinstance(event, codraw_data.TellGroup)])
for episode in episodes])
sims = np.array([episode.scene_similarity() for episode in episodes])
mean_sim = sims.mean()
std_sim = sims.std()
print(f'batch {batch_num} mean-reward {mean_reward} loss {float(loss)} grad {float(grad_norm)} mean-len {mean_len} mean-sim {mean_sim} std-sim {std_sim}')
if batch_num % 5 == 0:
for event in episodes[-1]:
if isinstance(event, codraw_data.TellGroup):
print(' >', event.msg)
if batch_num % 50 == 0:
del episodes, ex, loss # clean up memory
validate()
if batch_num > 0 and batch_num % eval_every == 0:
teller.eval()
print("Printing representative sampled dialogs")
teller.inference_method = 'sample'
episodes, ex = collect_episodes(fns, teller.datagen, scenes=scenes[:1], batch_size=5)
for episode in episodes:
for event in episode:
if isinstance(event, codraw_data.TellGroup):
print(' >', event.msg)
print('similarity', episode.scene_similarity())
print('-----')
print("Evaluating on the full dev set")
for inference_method in ['greedy', 'sample']:
teller.inference_method = inference_method
for splits in splits_pair:
sims = eval_fns(make_fns(splits, (teller_rl_a, teller_rl_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, f'[{inference_method}]', sims.mean())
if base_name is not None:
print("Serializing teller to disk")
torch.save(teller.spec, Path(f'rl_models/{base_name}_{split}_{batch_num}.pt'))
# %%
# Change this to train a different teller
TELLER_SPLIT = 'a'
# TELLER_SPLIT = 'b'
# Reduce entropy: the uncertainty in the pre-trained model isn't ideal for
# starting RL. It may be possible to adjust label smoothing in the pre-training,
# but for now just rescale the linear layer prior to the softmax
SOFTMAX_RESCALE = 3.
# %%
teller_rl_a, teller_rl_b = None, None
if TELLER_SPLIT == 'a':
teller_rl_a = RLSceneToSeqTeller(spec=teller_scene2seq_aux2_a.spec)
teller_rl_a.word_project.weight.data *= SOFTMAX_RESCALE
teller_rl_a.word_project.bias.data *= SOFTMAX_RESCALE
else:
teller_rl_b = RLSceneToSeqTeller(spec=teller_scene2seq_aux2_b.spec)
teller_rl_b.word_project.weight.data *= SOFTMAX_RESCALE
teller_rl_b.word_project.bias.data *= SOFTMAX_RESCALE
# %%
print(f"Info: training on partition {TELLER_SPLIT}")
scenes = np.asarray(codraw_data.get_scenes(TELLER_SPLIT))
train_teller(
TELLER_SPLIT,
(teller_rl_a, teller_rl_b),
scenes,
utterance_penalty=0.0,
gamma=0.995,
uninformative_penalty=0.3,
batch_size=16,
num_batches=60000,
eval_every=2000,
lr=0.00003,
limit=100,
base_name="b5_utt0_lr3_clip15",
)
| codraw-models-master | baseline4_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import ast
from itertools import chain
import logging
import math
import os
import sys
import json
import hashlib
import editdistance
from argparse import Namespace
import numpy as np
import torch
from fairseq import checkpoint_utils, options, tasks, utils, distributed_utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.models import FairseqLanguageModel
from omegaconf import DictConfig
from pathlib import Path
import hydra
from hydra.core.config_store import ConfigStore
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
GenerationConfig,
FairseqDataclass,
)
from dataclasses import dataclass, field, is_dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from omegaconf import OmegaConf
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class OverrideConfig(FairseqDataclass):
noise_wav: Optional[str] = field(default=None, metadata={'help': 'noise wav file'})
noise_prob: float = field(default=0, metadata={'help': 'noise probability'})
noise_snr: float = field(default=0, metadata={'help': 'noise SNR in audio'})
modalities: List[str] = field(default_factory=lambda: [""], metadata={'help': 'which modality to use'})
data: Optional[str] = field(default=None, metadata={'help': 'path to test data directory'})
label_dir: Optional[str] = field(default=None, metadata={'help': 'path to test label directory'})
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
generation: GenerationConfig = GenerationConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
override: OverrideConfig = OverrideConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
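# Typical invocation (illustrative; the config name and all paths below are
# placeholders that depend on the local setup):
#   python -B infer_s2s.py --config-dir ./conf --config-name <decode_config> \
#       common_eval.path=<checkpoint.pt> common_eval.results_path=<out_dir> \
#       dataset.gen_subset=test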
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for recognition!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(cfg.common_eval.results_path, "decode.log")
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos, generator.pad}
def _main(cfg, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("hybrid.speech_recognize")
if output_file is not sys.stdout: # also print to stdout
logger.addHandler(logging.StreamHandler(sys.stdout))
utils.import_user_module(cfg.common)
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([cfg.common_eval.path])
models = [model.eval().cuda() for model in models]
saved_cfg.task.modalities = cfg.override.modalities
task = tasks.setup_task(saved_cfg.task)
task.build_tokenizer(saved_cfg.tokenizer)
task.build_bpe(saved_cfg.bpe)
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available()
# Set dictionary
dictionary = task.target_dictionary
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.cfg.noise_prob = cfg.override.noise_prob
task.cfg.noise_snr = cfg.override.noise_snr
task.cfg.noise_wav = cfg.override.noise_wav
if cfg.override.data is not None:
task.cfg.data = cfg.override.data
if cfg.override.label_dir is not None:
task.cfg.label_dir = cfg.override.label_dir
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
if cfg.generation.match_source_len:
logger.warning(
"The option match_source_len is not applicable to speech recognition. Ignoring it."
)
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {
"lm_model": lms[0],
"lm_weight": cfg.generation.lm_weight,
}
cfg.generation.score_reference = False #
save_attention_plot = cfg.generation.print_alignment is not None
cfg.generation.print_alignment = None #
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def decode_fn(x):
symbols_ignore = get_symbols_to_strip_from_output(generator)
symbols_ignore.add(dictionary.pad())
if hasattr(task.datasets[cfg.dataset.gen_subset].label_processors[0], 'decode'):
return task.datasets[cfg.dataset.gen_subset].label_processors[0].decode(x, symbols_ignore)
chars = dictionary.string(x, extra_symbols_to_ignore=symbols_ignore)
words = " ".join("".join(chars.split()).replace('|', ' ').split())
return words
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
result_dict = {'utt_id': [], 'ref': [], 'hypo': []}
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i in range(len(sample["id"])):
result_dict['utt_id'].append(sample['utt_id'][i])
ref_sent = decode_fn(sample['target'][i].int().cpu())
result_dict['ref'].append(ref_sent)
best_hypo = hypos[i][0]['tokens'].int().cpu()
hypo_str = decode_fn(best_hypo)
result_dict['hypo'].append(hypo_str)
logger.info(f"\nREF:{ref_sent}\nHYP:{hypo_str}\n")
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Recognized {:,} utterances ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
yaml_str = OmegaConf.to_yaml(cfg.generation)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
fid = fid % 1000000
result_fn = f"{cfg.common_eval.results_path}/hypo-{fid}.json"
json.dump(result_dict, open(result_fn, 'w'), indent=4)
n_err, n_total = 0, 0
assert len(result_dict['hypo']) == len(result_dict['ref'])
for hypo, ref in zip(result_dict['hypo'], result_dict['ref']):
hypo, ref = hypo.strip().split(), ref.strip().split()
n_err += editdistance.eval(hypo, ref)
n_total += len(ref)
wer = 100 * n_err / n_total
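    # Worked example (illustrative): hypo "a b c" vs ref "a x c d" has an edit
    # distance of 2 over 4 reference words, i.e. WER = 100 * 2 / 4 = 50.0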
wer_fn = f"{cfg.common_eval.results_path}/wer.{fid}"
with open(wer_fn, "w") as fo:
fo.write(f"WER: {wer}\n")
fo.write(f"err / num_ref_words = {n_err} / {n_total}\n\n")
fo.write(f"{yaml_str}")
logger.info(f"WER: {wer}%")
return
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
return
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| av_hubert-main | avhubert/infer_s2s.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any, Optional
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
# from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import (
LayerNorm,
PositionalEmbedding,
TransformerDecoderLayer,
)
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
# self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
# with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["padding_mask"] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
emb_mat = self.embed_tokens.weight if self.share_input_output_embed else self.embed_out
return torch.matmul(features, emb_mat.transpose(0, 1))
# if self.share_input_output_embed:
# return F.linear(features, self.embed_tokens.weight)
# else:
# return F.linear(features, self.embed_out)
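        # With share_input_output_embed this is the usual weight-tying trick: the
        # (vocab_size, embed_dim) input embedding matrix is reused, transposed, as the
        # output projection, mapping (B, T, embed_dim) features to (B, T, vocab_size) logits.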
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
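    # A small worked example of the causal mask built above (assumed dim=3): entries on
    # and below the diagonal stay 0, strictly-upper entries are -inf, so position t can
    # only attend to positions <= t.
    #
    #   torch.triu(utils.fill_with_neg_inf(torch.zeros(3, 3)), 1)
    #   -> [[0., -inf, -inf],
    #       [0.,   0., -inf],
    #       [0.,   0.,   0.]]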
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
| av_hubert-main | avhubert/decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os, glob
import sys
from typing import Dict, List, Optional, Tuple
import numpy as np
from dataclasses import dataclass, field
from fairseq import metrics, search
from fairseq.data import Dictionary, encoders
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING, II
import numpy as np
from argparse import Namespace
DBG = len(sys.argv) == 1
if DBG:
from hubert_dataset import AVHubertDataset
    from sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment
else:
from .hubert_dataset import AVHubertDataset
    from .sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False,
)
class LabelEncoderS2SToken(object):
def __init__(self, dictionary: Dictionary, bpe_tokenizer) -> None:
self.bpe_tokenizer = bpe_tokenizer
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
label = self.bpe_tokenizer.encode(label.lower())
return self.dictionary.encode_line(
label, append_eos=True, add_if_not_exist=False,
).long()
def decode(self, tok, symbols_ignore=None):
tok = self.dictionary.string(tok, extra_symbols_to_ignore=symbols_ignore)
if self.bpe_tokenizer:
tok = self.bpe_tokenizer.decode(tok)
return tok
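# A hedged usage sketch for the tokenizer wrapper above (the `dictionary`,
# `bpe_tokenizer` and the input string are assumed placeholders):
#
#   enc = LabelEncoderS2SToken(dictionary, bpe_tokenizer)
#   ids = enc("HELLO WORLD")   # lower-cased, BPE-encoded, eos-appended LongTensor of ids
#   txt = enc.decode(ids, symbols_ignore={dictionary.eos()})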
@dataclass
class AVHubertPretrainingConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to data directory"}
)
labels: List[str] = field(
default_factory=lambda: ["ltr"],
metadata={
"help": (
"extension of the label files to load, frame-level labels for"
" pre-training, and sequence-level label for fine-tuning"
)
},
)
label_dir: Optional[str] = field(
default=None,
metadata={
"help": "if set, looks for labels in this directory instead",
},
)
label_rate: int = field(
default=-1,
metadata={"help": "label frame rate. -1 for sequence label"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down "
"sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={
"help": "if set, normalizes input to have 0 mean and unit variance"
},
)
enable_padding: bool = field(
default=False,
metadata={"help": "pad shorter samples instead of cropping"},
)
max_sample_size: Optional[int] = field(
default=None,
metadata={"help": "max sample size to keep in training"},
)
min_sample_size: Optional[int] = field(
default=None,
metadata={"help": "min sample size to keep in training"},
)
max_trim_sample_size: Optional[int] = field(
default=II("task.max_sample_size"),
metadata={"help": "max sample size to trim to for batching"},
)
single_target: Optional[bool] = field(
default=False,
metadata={
"help": "if set, AddTargetDatasets outputs same keys "
"as AddTargetDataset"
},
)
random_crop: Optional[bool] = field(
default=True,
metadata={"help": "always crop from the beginning if false"},
)
pad_audio: Optional[bool] = field(
default=False,
metadata={"help": "pad audio to the longest one in the batch if true"},
)
pdb: Optional[bool] = field(
default=False,
metadata={"help": "pdb"},
)
stack_order_audio: int = field(
default=1,
metadata={"help": "concatenate n consecutive audio frames for one step"},
)
skip_verify: Optional[bool] = field(
default=False,
metadata={"help": "skip verifying label-audio alignment"},
)
image_aug: bool = field(default=False, metadata={'help': 'image data augmentation'})
image_crop_size: int = field(
default=88, metadata={"help": "image ROI size"})
image_mean: float = field(
default=0.421, metadata={"help": "image mean"})
image_std: float = field(
default=0.165, metadata={"help": "image std"})
modalities: Optional[List[str]] = field(default_factory=lambda: ["audio", "video"], metadata={'help': 'modalities to load'})
is_s2s: bool=field(default=False, metadata={'help': 'seq2seq fine-tuning only'})
tokenizer_bpe_name: Optional[str] = field(default=None, metadata={'help': 'tokenizer model name'})
tokenizer_bpe_model: Optional[str] = field(default=None, metadata={'help': 'tokenizer model path'})
noise_wav: Optional[str] = field(default=None, metadata={'help': 'manifest of noise wav files (one wav file path per line)'})
noise_prob: float = field(default=0, metadata={'help': 'noise probability'})
noise_snr: Optional[str] = field(default='0', metadata={'help': 'noise SNR in audio'})
noise_num: int = field(default=1, metadata={'help': 'number of noise wav files to mix'})
fine_tuning: bool = field(default=False, metadata={"help": "set to true if fine-tuning AV-Hubert"})
@register_task("av_hubert_pretraining", dataclass=AVHubertPretrainingConfig)
class AVHubertPretrainingTask(FairseqTask):
cfg: AVHubertPretrainingConfig
def __init__(
self,
cfg: AVHubertPretrainingConfig,
) -> None:
super().__init__(cfg)
logger.info(f"current directory is {os.getcwd()}")
logger.info(f"AVHubertPretrainingTask Config {cfg}")
self.fine_tuning = cfg.fine_tuning
if cfg.fine_tuning:
self.state.add_factory("target_dictionary", self.load_dictionaries)
if cfg.is_s2s:
self.state.add_factory("s2s_tokenizer", self.load_tokenizer)
else:
self.state.add_factory("dictionaries", self.load_dictionaries)
self.blank_symbol = "<s>"
@property
def source_dictionary(self) -> Optional[Dictionary]:
return None # self._source_dictionary
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self.state.target_dictionary # self._target_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return self.state.dictionaries
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries[0] if self.cfg.fine_tuning else dictionaries
def load_tokenizer(self):
bpe_args = Namespace(**{'bpe': self.cfg.tokenizer_bpe_name, f"{self.cfg.tokenizer_bpe_name}_model": self.cfg.tokenizer_bpe_model})
bpe_tokenizer = encoders.build_bpe(bpe_args)
return bpe_tokenizer
@property
def s2s_tokenizer(self):
return self.state.s2s_tokenizer
@classmethod
def setup_task(
cls, cfg: AVHubertPretrainingConfig, **kwargs
) -> "AVHubertPretrainingTask":
if cfg.pdb:
import pdb
pdb.set_trace()
return cls(cfg)
def get_label_dir(self) -> str:
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
def load_dataset(self, split: str, **kwargs) -> None:
manifest = f"{self.cfg.data}/{split}.tsv"
dictionaries = [self.target_dictionary] if self.fine_tuning else self.dictionaries
pad_list = [dictionary.pad() for dictionary in dictionaries]
eos_list = [dictionary.eos() for dictionary in dictionaries]
if not self.cfg.is_s2s:
procs = [LabelEncoder(dictionary) for dictionary in dictionaries]
else:
logger.info(f"Using tokenizer")
bpe_tokenizer = self.s2s_tokenizer
procs = [LabelEncoderS2SToken(dictionary, bpe_tokenizer) for dictionary in dictionaries]
paths = [
f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels
]
image_aug = self.cfg.image_aug if split == 'train' else False
noise_fn, noise_snr = f"{self.cfg.noise_wav}/{split}.tsv" if self.cfg.noise_wav is not None else None, eval(self.cfg.noise_snr)
        noise_num = self.cfg.noise_num
self.datasets[split] = AVHubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_sample_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_trim_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
stack_order_audio=self.cfg.stack_order_audio,
skip_verify=self.cfg.skip_verify,
image_mean=self.cfg.image_mean,
image_std=self.cfg.image_std,
image_crop_size=self.cfg.image_crop_size,
image_aug=image_aug,
modalities=self.cfg.modalities,
is_s2s=self.cfg.is_s2s,
noise_fn=noise_fn,
noise_prob=self.cfg.noise_prob,
noise_snr=noise_snr,
noise_num=noise_num
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(
self, indices: np.array, *args, **kwargs
) -> np.array:
return indices
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None,
):
"""
Build a :class:`~fairseq.SequenceGenerator` instance for this
task.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
args (fairseq.dataclass.configs.GenerationConfig):
configuration object (dataclass) for generation
extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass
through to SequenceGenerator
prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):
If provided, this function constrains the beam search to
allowed tokens only at each step. The provided function
should take 2 arguments: the batch ID (`batch_id: int`)
and a unidimensional tensor of token ids (`inputs_ids:
torch.Tensor`). It has to return a `List[int]` with the
allowed tokens for the next generation step conditioned
on the previously generated tokens (`inputs_ids`) and
the batch ID (`batch_id`). This argument is useful for
constrained generation conditioned on the prefix, as
described in "Autoregressive Entity Retrieval"
(https://arxiv.org/abs/2010.00904) and
https://github.com/facebookresearch/GENRE.
"""
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
constrained = getattr(args, "constraints", False)
if prefix_allowed_tokens_fn is None:
prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained:
search_strategy = search.LexicallyConstrainedBeamSearch(
self.target_dictionary, args.constraints
)
elif prefix_allowed_tokens_fn:
search_strategy = search.PrefixConstrainedBeamSearch(
self.target_dictionary, prefix_allowed_tokens_fn
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
| av_hubert-main | avhubert/hubert_pretraining.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hubert import * # noqa
from .hubert_asr import * # noqa
from .hubert_dataset import *
from .hubert_pretraining import *
from .hubert_criterion import *
| av_hubert-main | avhubert/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception("expected src_tokens or source in net input. input keys: " + str(net_input.keys()))
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
if src_tokens['audio'] is not None:
bsz, src_len = src_tokens['audio'].size()[:2]
src_device = src_tokens['audio'].device
else:
bsz, src_len = net_input['padding_mask'].size()
src_device = src_tokens['video'].device
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_device).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_device)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_device).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
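            # Worked example with assumed numbers: a hypothesis finalized at step 4 spans
            # 5 tokens; with cumulative log-prob -6.0 and len_penalty=1.0 its score becomes
            # -6.0 / 5**1.0 = -1.2, while len_penalty=2.0 gives -6.0 / 5**2.0 = -0.24, so
            # larger penalties favor longer hypotheses (matching the class docstring).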
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# set() is not supported in script export
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
# sentence index in the current (possibly reduced) batch
unfin_idx = idx // beam_size
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
if self.match_source_len and step > src_lengths[unfin_idx]:
score = torch.tensor(-math.inf).to(score)
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append(
{
"tokens": tokens_clone[i],
"score": score,
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, unfin_idx, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions")] + [sys.maxsize])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
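        # i.e. an arithmetic mean of the ensemble's probabilities taken in log space:
        # for two models, logsumexp([log p1, log p2]) - log 2 == log((p1 + p2) / 2).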
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
| av_hubert-main | avhubert/sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import re
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class AVHubertCriterionConfig(FairseqDataclass):
pred_masked_weight: float = field(
default=1.0,
metadata={"help": "weight for predictive loss for masked frames"},
)
pred_nomask_weight: float = field(
default=0.0,
metadata={"help": "weight for predictive loss for unmasked frames"},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
@register_criterion("av_hubert", dataclass=AVHubertCriterionConfig)
class AVHubertCriterion(FairseqCriterion):
def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None):
super().__init__(task)
self.pred_masked_weight = pred_masked_weight
self.pred_nomask_weight = pred_nomask_weight
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True, log_pred=False):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(target_list=sample["target_list"], **sample["net_input"])
loss = 0.
sample_size = 0
logging_output = {}
reduction = "sum" if reduce else "none"
loss_m_list = []
logp_m_list, targ_m_list = net_output['logit_m_list'], net_output['target_m_list']
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction)
loss_m_list.append(loss_m)
logging_output[f"loss_m_{i}"] = loss_m.detach().item()
if self.pred_masked_weight > 0:
loss += self.pred_masked_weight * sum(loss_m_list)
sample_size += targ_m_list[0].numel()
loss_u_list = []
logp_u_list, targ_u_list = net_output['logit_u_list'], net_output['target_u_list']
for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction)
loss_u_list.append(loss_u)
logging_output[f"loss_u_{i}"] = loss_u.detach().item()
if self.pred_nomask_weight > 0:
loss += self.pred_nomask_weight * sum(loss_u_list)
sample_size += targ_u_list[0].numel()
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses, names = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = [names]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, n, coef in zip(extra_losses, names, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
logging_output[f"loss_{n}"] = p.item()
logging_output = {
"loss": loss.item() if reduce else loss,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
**logging_output,
}
for lk in self.log_keys:
if lk in net_output:
logging_output[lk] = float((net_output[lk]))
with torch.no_grad():
for i, logp_m in enumerate(logp_m_list):
# corr_m, count_m = compute_correct(logp_m)
if logp_m.numel() == 0:
corr_m, count_m = 0, 0
else:
corr_m, count_m = (logp_m.argmax(dim=-1)==targ_m_list[i]).sum().item(), len(targ_m_list[i])
logging_output[f"correct_m_{i}"] = corr_m
logging_output[f"count_m_{i}"] = count_m
for i, logp_u in enumerate(logp_u_list):
if logp_u.numel() == 0:
corr_u, count_u = 0, 0
else:
corr_u, count_u = (logp_u.argmax(dim=-1)==targ_u_list[i]).sum().item(), len(targ_u_list[i])
logging_output[f"correct_u_{i}"] = corr_u
logging_output[f"count_u_{i}"] = count_u
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training (copied from normal cross entropy)."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg))
else:
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["loss"].avg))
counts = {}
for lk in logging_outputs[0].keys():
if lk.startswith("count_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val)
counts[lk] = val
for lk in logging_outputs[0].keys():
if lk.startswith("loss_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / sample_size / math.log(2), round=3)
elif lk.startswith("correct_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)])
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
raise NotImplementedError()
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
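# A minimal, self-contained sketch of how the masked/unmasked prediction losses above are
# combined. The random logits, target ids and weights below are assumptions chosen only
# for illustration; the real criterion consumes the model's logit_m_list / logit_u_list.
if __name__ == "__main__":
    torch.manual_seed(0)
    pred_masked_weight, pred_nomask_weight = 1.0, 0.0
    logp_m, targ_m = torch.randn(8, 100), torch.randint(0, 100, (8,))   # masked frames
    logp_u, targ_u = torch.randn(4, 100), torch.randint(0, 100, (4,))   # unmasked frames
    loss = (pred_masked_weight * F.cross_entropy(logp_m, targ_m, reduction="sum")
            + pred_nomask_weight * F.cross_entropy(logp_u, targ_u, reduction="sum"))
    sample_size = targ_m.numel()
    print(f"loss={loss.item():.3f} over sample_size={sample_size}")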
| av_hubert-main | avhubert/hubert_criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import torch
import torch.nn as nn
from collections import OrderedDict
import pdb
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def downsample_basic_block( inplanes, outplanes, stride ):
return nn.Sequential(
nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(outplanes),
)
def downsample_basic_block_v2( inplanes, outplanes, stride ):
return nn.Sequential(
nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),
nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(outplanes),
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, relu_type = 'relu' ):
super(BasicBlock, self).__init__()
assert relu_type in ['relu','prelu']
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
if relu_type == 'relu':
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
elif relu_type == 'prelu':
self.relu1 = nn.PReLU(num_parameters=planes)
self.relu2 = nn.PReLU(num_parameters=planes)
else:
raise Exception('relu type not implemented')
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu2(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, relu_type = 'relu', gamma_zero = False, avg_pool_downsample = False):
self.inplanes = 64
self.relu_type = relu_type
self.gamma_zero = gamma_zero
self.downsample_block = downsample_basic_block_v2 if avg_pool_downsample else downsample_basic_block
super(ResNet, self).__init__()
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if self.gamma_zero:
for m in self.modules():
if isinstance(m, BasicBlock ):
m.bn2.weight.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = self.downsample_block( inplanes = self.inplanes,
outplanes = planes * block.expansion,
stride = stride )
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, relu_type = self.relu_type))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, relu_type = self.relu_type))
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
class ResEncoder(nn.Module):
def __init__(self, relu_type, weights):
super(ResEncoder, self).__init__()
self.frontend_nout = 64
self.backend_out = 512
frontend_relu = nn.PReLU(num_parameters=self.frontend_nout) if relu_type == 'prelu' else nn.ReLU()
self.frontend3D = nn.Sequential(
nn.Conv3d(1, self.frontend_nout, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False),
nn.BatchNorm3d(self.frontend_nout),
frontend_relu,
nn.MaxPool3d( kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)))
self.trunk = ResNet(BasicBlock, [2, 2, 2, 2], relu_type=relu_type)
if weights is not None:
logger.info(f"Load {weights} for resnet")
std = torch.load(weights, map_location=torch.device('cpu'))['model_state_dict']
frontend_std, trunk_std = OrderedDict(), OrderedDict()
for key, val in std.items():
new_key = '.'.join(key.split('.')[1:])
if 'frontend3D' in key:
frontend_std[new_key] = val
if 'trunk' in key:
trunk_std[new_key] = val
self.frontend3D.load_state_dict(frontend_std)
self.trunk.load_state_dict(trunk_std)
def forward(self, x):
B, C, T, H, W = x.size()
x = self.frontend3D(x)
Tnew = x.shape[2]
x = self.threeD_to_2D_tensor(x)
x = self.trunk(x)
x = x.view(B, Tnew, x.size(1))
x = x.transpose(1, 2).contiguous()
return x
def threeD_to_2D_tensor(self, x):
n_batch, n_channels, s_time, sx, sy = x.shape
x = x.transpose(1, 2).contiguous()
return x.reshape(n_batch*s_time, n_channels, sx, sy)
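# Illustrative shape trace (batch/clip sizes are assumptions): a grayscale mouth-ROI
# clip of shape [B, 1, T, 88, 88] is reduced spatially by frontend3D (T is preserved),
# each frame is pooled to a 512-d vector by the ResNet trunk, and ResEncoder.forward
# returns per-frame features of shape [B, 512, T].
def _example_resencoder_shapes():
    import torch
    enc = ResEncoder(relu_type='prelu', weights=None)
    x = torch.zeros(2, 1, 5, 88, 88)  # [B, C, T, H, W]
    with torch.no_grad():
        out = enc(x)
    return out.shape  # torch.Size([2, 512, 5])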
| av_hubert-main | avhubert/resnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys,logging
import contextlib
import tempfile
from argparse import Namespace
from typing import Any, Optional
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, register_model
from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES
from fairseq.tasks import FairseqTask
from omegaconf import II, MISSING
DBG=True if len(sys.argv) == 1 else False
if DBG:
from hubert import AVHubertModel
from decoder import TransformerDecoder
else:
from .hubert import AVHubertModel
from .decoder import TransformerDecoder
logger = logging.getLogger(__name__)
@dataclass
class AVHubertAsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to hubert model"}
)
no_pretrained_weights: bool = field(
default=False,
metadata={"help": "if true, does not load pretrained weights"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={
"help": "dropout after transformer and before final projection"
},
)
dropout: float = field(
default=0.0,
metadata={"help": "dropout probability inside hubert model"},
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights "
"inside hubert model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside hubert model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
        default=10, metadata={"help": "length of the masked spans"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask "
"(normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
freeze_finetune_updates: int = field(
default=0,
metadata={"help": "dont finetune hubert for this many updates"},
)
feature_grad_mult: float = field(
default=0.0,
metadata={"help": "reset feature grad mult in hubert to this"},
)
layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a layer in hubert"},
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded hubert args
w2v_args: Any = None
@dataclass
class AVHubertCtcConfig(AVHubertAsrConfig):
pass
@register_model("av_hubert_ctc", dataclass=AVHubertCtcConfig)
class AVHubertCtc(BaseFairseqModel):
def __init__(self, cfg: AVHubertCtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: AVHubertCtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = HubertEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_logits(self, net_output):
logits = net_output["encoder_out"]
padding = net_output["encoder_padding_mask"]
        if padding is not None and padding.any():
            padding = padding.T
            # index logits directly so the masking is applied in place
            # (chained indexing like logits[padding][..., 0] writes into a copy)
            logits[padding, 0] = 0
            logits[padding, 1:] = float("-inf")
return logits
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
@dataclass
class AVHubertSeq2SeqConfig(AVHubertAsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(
default=6, metadata={"help": "num of decoder layers"}
)
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False,
metadata={"help": "apply layernorm before each decoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings "
"(outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights "
"inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False,
metadata={"help": "share decoder input and output embeddings"},
)
    no_scale_embedding: bool = field(default=True, metadata={'help': 'if True, do not scale decoder embeddings by sqrt(embed_dim)'})
class HubertEncoder(FairseqEncoder):
def __init__(self, cfg: AVHubertAsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(
cfg.w2v_path, arg_overrides
)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(
w2v_args
)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
model.load_state_dict(state["model"], strict=False)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = model.encoder.embedding_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_finetune(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out[
"encoder_out"
].index_select(1, new_order)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
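# Note on the returned dictionary (descriptive of the forward method above, which
# follows the fairseq encoder convention): "encoder_out" is time-major [T, B, C] once
# the tbc transpose has been applied, while "encoder_padding_mask" and "padding_mask"
# stay batch-major [B, T]; reorder_encoder_out therefore index-selects dim 1 for the
# features but dim 0 for the masks when beam search reorders hypotheses.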
class HubertEncoderWrapper(FairseqEncoder):
def __init__(self, w2v_model):
super().__init__(None)
self.w2v_model = w2v_model
def forward(self, source, padding_mask, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
}
x, padding_mask = self.w2v_model.extract_finetune(**w2v_args)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out[
"encoder_out"
].index_select(1, new_order)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
if encoder_out["padding_mask"] is not None:
encoder_out["padding_mask"] = encoder_out[
"padding_mask"
].index_select(0, new_order)
return encoder_out
@register_model("av_hubert_seq2seq", dataclass=AVHubertSeq2SeqConfig)
class AVHubertSeq2Seq(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder, tgt_dict, cfg):
super().__init__(encoder, decoder)
self.cfg = cfg
self.freeze_finetune_updates = cfg.freeze_finetune_updates
@classmethod
def build_model(cls, cfg, task):
"""Build a new model instance."""
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(
cfg.w2v_path, arg_overrides
)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(
w2v_args
)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
task_pretrain = tasks.setup_task(w2v_args.task)
if state is not None:
task_pretrain.load_state_dict(state['task_state'])
encoder_ = task_pretrain.build_model(w2v_args.model)
encoder = HubertEncoderWrapper(encoder_)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
del state['model']['mask_emb']
encoder.w2v_model.load_state_dict(state["model"], strict=False)
encoder.w2v_model.remove_pretraining_modules()
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
decoder = TransformerDecoder(cfg, tgt_dict, decoder_embed_tokens)
return AVHubertSeq2Seq(encoder, decoder, tgt_dict, cfg)
def forward(self, **kwargs):
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
output = self.encoder(**kwargs)
decoder_out = self.decoder(prev_output_tokens=kwargs['prev_output_tokens'], encoder_out=output)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
| av_hubert-main | avhubert/hubert_asr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2
import torch
import random
import numpy as np
from typing import Dict, List, Optional, Tuple
def load_video(path):
for i in range(3):
try:
cap = cv2.VideoCapture(path)
frames = []
while True:
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frames.append(frame)
else:
break
frames = np.stack(frames)
return frames
except Exception:
print(f"failed loading {path} ({i} / 3)")
if i == 2:
raise ValueError(f"Unable to load {path}")
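# Usage sketch (the path below is a placeholder): load_video decodes every frame of a
# clip to grayscale and stacks them, e.g.
#   frames = load_video("roi_clip.mp4")  # numpy.ndarray of shape [T, H, W], dtype uint8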
class Compose(object):
"""Compose several preprocess together.
Args:
preprocess (list of ``Preprocess`` objects): list of preprocess to compose.
"""
def __init__(self, preprocess):
self.preprocess = preprocess
def __call__(self, sample):
for t in self.preprocess:
sample = t(sample)
return sample
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.preprocess:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Normalize(object):
"""Normalize a ndarray image with mean and standard deviation.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, frames):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
frames = (frames - self.mean) / self.std
return frames
def __repr__(self):
return self.__class__.__name__+'(mean={0}, std={1})'.format(self.mean, self.std)
class CenterCrop(object):
"""Crop the given image at the center
"""
def __init__(self, size):
self.size = size
def __call__(self, frames):
"""
Args:
img (numpy.ndarray): Images to be cropped.
Returns:
numpy.ndarray: Cropped image.
"""
t, h, w = frames.shape
th, tw = self.size
delta_w = int(round((w - tw))/2.)
delta_h = int(round((h - th))/2.)
frames = frames[:, delta_h:delta_h+th, delta_w:delta_w+tw]
return frames
class RandomCrop(object):
"""Crop the given image at the center
"""
def __init__(self, size):
self.size = size
def __call__(self, frames):
"""
Args:
img (numpy.ndarray): Images to be cropped.
Returns:
numpy.ndarray: Cropped image.
"""
t, h, w = frames.shape
th, tw = self.size
delta_w = random.randint(0, w-tw)
delta_h = random.randint(0, h-th)
frames = frames[:, delta_h:delta_h+th, delta_w:delta_w+tw]
return frames
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class HorizontalFlip(object):
"""Flip image horizontally.
"""
def __init__(self, flip_ratio):
self.flip_ratio = flip_ratio
def __call__(self, frames):
"""
Args:
img (numpy.ndarray): Images to be flipped with a probability flip_ratio
Returns:
numpy.ndarray: Cropped image.
"""
t, h, w = frames.shape
if random.random() < self.flip_ratio:
for index in range(t):
frames[index] = cv2.flip(frames[index], 1)
return frames
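# Sketch of how these transforms are chained for training videos (it mirrors the
# composition used in hubert_dataset.py; the crop size and image statistics below are
# assumed placeholders, supplied by the task config in practice):
def _example_train_transform(image_crop_size=88, image_mean=0.421, image_std=0.165):
    return Compose([
        Normalize(0.0, 255.0),                            # scale raw pixels to [0, 1]
        RandomCrop((image_crop_size, image_crop_size)),
        HorizontalFlip(0.5),
        Normalize(image_mean, image_std),
    ])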
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,  # np.int was removed from recent NumPy; the builtin int is equivalent here
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
batch_indexes, starts, ends = [], [], []
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
vals, run_starts, run_lengths = find_runs(mask[i])
start_indices, lengths = run_starts[vals == True], run_lengths[vals == True]
starts.append(start_indices)
ends.append(start_indices+lengths)
batch_indexes.append(np.zeros([len(start_indices)])+i)
return mask, np.concatenate(starts).astype(np.int64), np.concatenate(ends).astype(np.int64), np.concatenate(batch_indexes).astype(np.int64)
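# Usage sketch (parameter values are assumptions): with mask_prob=0.65 and
# mask_length=10 over 100 frames, roughly 0.65 * 100 / 10 = 6.5 spans are drawn per
# sequence before overlapping spans merge and all sequences are truncated to a common
# total number of masked frames.
def _example_compute_mask_indices():
    mask, starts, ends, batch_indexes = compute_mask_indices(
        shape=(2, 100), padding_mask=None, mask_prob=0.65, mask_length=10
    )
    # mask: bool array of shape (2, 100); starts/ends/batch_indexes describe the
    # contiguous True runs found by find_runs, one entry per masked span.
    return mask.shape, len(starts) == len(ends) == len(batch_indexes)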
def find_runs(x):
"""Find runs of consecutive items in an array."""
# ensure array
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError('only 1D array supported')
n = x.shape[0]
# handle empty array
if n == 0:
return np.array([]), np.array([]), np.array([])
else:
# find run starts
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
# find run values
run_values = x[loc_run_start]
# find run lengths
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_starts, run_lengths
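# Usage sketch: find_runs reports each maximal run of equal values.
def _example_find_runs():
    values, starts, lengths = find_runs(np.array([False, False, True, True, True, False]))
    # values -> [False, True, False], starts -> [0, 2, 5], lengths -> [2, 3, 1]
    return values, starts, lengths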
| av_hubert-main | avhubert/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import sys
import time
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
from python_speech_features import logfbank
from scipy.io import wavfile
DBG=True if len(sys.argv) == 1 else False
if DBG:
import utils as custom_utils
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "DEBUG").upper(),
stream=sys.stdout,
)
else:
from . import utils as custom_utils
logger = logging.getLogger(__name__)
def load_audio_visual(manifest_path, max_keep, min_keep, frame_rate, label_paths, label_rates, tol=0.1):
def is_audio_label_aligned(audio_dur, label_durs):
return all([abs(audio_dur - label_dur)<tol for label_dur in label_durs])
n_long, n_short, n_unaligned = 0, 0, 0
names, inds, sizes = [], [], []
dur_from_label_list = []
is_seq_label = any([x==-1 for x in label_rates])
for label_path, label_rate in zip(label_paths, label_rates):
label_lengths = [len(line.rstrip().split())/label_rate for line in open(label_path).readlines()]
dur_from_label_list.append(label_lengths)
dur_from_label_list = list(zip(*dur_from_label_list))
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
            sz = int(items[-2])  # number of frames for this utterance
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
elif (not is_seq_label) and (not is_audio_label_aligned(sz/frame_rate, dur_from_label_list[ind])):
n_unaligned += 1
else:
video_path = items[1]
audio_path = items[2]
audio_id = items[0]
names.append((video_path, audio_path+':'+audio_id))
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long and {n_unaligned} unaligned, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
)
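# Illustrative check of the alignment arithmetic above (the 25 Hz video/label rates
# are assumptions; the real rates come from the task config): 100 frames vs 98 labels
# at 25 Hz differ by 0.08 s, within the default 0.1 s tolerance, whereas 95 labels
# (0.2 s off) would be reported as mismatched.
def _example_label_alignment_ok(num_frames=100, num_labels=98, frame_rate=25.0,
                                label_rate=25.0, tol=0.1):
    return abs(num_frames / frame_rate - num_labels / label_rate) <= tol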
class AVHubertDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_rates: Union[List[float], float], # -1 for sequence labels
pad_list: List[str],
eos_list: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
max_sample_size: Optional[int] = None,
shuffle: bool = True,
pad_audio: bool = False,
normalize: bool = False,
store_labels: bool = True,
random_crop: bool = False,
single_target: bool = False,
stack_order_audio: int=1,
skip_verify: bool=False,
image_mean: float=0,
image_std: float=1,
image_crop_size: int=88,
image_aug: bool=False,
modalities: Optional[List[str]]=None,
is_s2s=False,
noise_fn=None,
noise_prob=0,
noise_snr=0,
noise_num=1
):
self.label_rates = (
[label_rates for _ in range(len(label_paths))]
            if isinstance(label_rates, (int, float))  # a scalar rate is broadcast to all label sets
else label_rates
)
self.modalities = set(modalities)
self.audio_root, self.names, inds, tot, self.sizes = load_audio_visual(manifest_path, max_keep_sample_size, min_keep_sample_size, frame_rate=sample_rate, label_paths=label_paths, label_rates=self.label_rates)
self.sample_rate = sample_rate
self.stack_order_audio = stack_order_audio
self.shuffle = shuffle
self.random_crop = random_crop
self.num_labels = len(label_paths)
self.pad_list = pad_list
self.eos_list = eos_list
self.label_processors = label_processors
self.single_target = single_target
self.store_labels = store_labels
self.is_s2s = is_s2s
self.noise_wav, self.noise_prob, self.noise_snr, self.noise_num = [ln.strip() for ln in open(noise_fn).readlines()] if noise_fn is not None else [], noise_prob, noise_snr, noise_num
assert self.single_target == (self.label_rates[0] == -1), f"single target should be equivalent to sequence label (label_rate==-1)"
if store_labels:
self.label_list = [load_label(p, inds, tot) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot) for p in label_paths
]
assert (
label_processors is None
or len(label_processors) == self.num_labels
)
if not skip_verify:
for label_path, label_rate in zip(label_paths, self.label_rates):
verify_label_lengths(self.sizes, self.sample_rate, label_path, label_rate, inds, tot)
else:
logger.info(f"Skip label alignment verifying")
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.pad_audio = pad_audio
self.normalize = normalize
if image_aug:
self.transform = custom_utils.Compose([
custom_utils.Normalize( 0.0,255.0 ),
custom_utils.RandomCrop((image_crop_size, image_crop_size)),
custom_utils.HorizontalFlip(0.5),
custom_utils.Normalize(image_mean, image_std) ])
else:
self.transform = custom_utils.Compose([
custom_utils.Normalize( 0.0,255.0 ),
custom_utils.CenterCrop((image_crop_size, image_crop_size)),
custom_utils.Normalize(image_mean, image_std) ])
logger.info(f"image transform: {self.transform}")
logger.info(
f"pad_audio={pad_audio}, random_crop={random_crop}, "
f"normalize={normalize}, max_sample_size={self.max_sample_size}, "
f"seqs2seq data={self.is_s2s},")
logger.info(
f"Noise wav: {noise_fn}->{len(self.noise_wav)} wav, Prob: {self.noise_prob}, SNR: {self.noise_snr}, Number of mixture: {self.noise_num}"
)
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def load_feature(self, mix_name):
"""
Load image and audio feature
Returns:
video_feats: numpy.ndarray of shape [T, H, W, 1], audio_feats: numpy.ndarray of shape [T, F]
"""
def stacker(feats, stack_order):
"""
Concatenating consecutive audio frames
Args:
feats - numpy.ndarray of shape [T, F]
            stack_order - int (number of neighboring frames to concatenate)
Returns:
feats - numpy.ndarray of shape [T', F']
"""
feat_dim = feats.shape[1]
if len(feats) % stack_order != 0:
res = stack_order - len(feats) % stack_order
res = np.zeros([res, feat_dim]).astype(feats.dtype)
feats = np.concatenate([feats, res], axis=0)
feats = feats.reshape((-1, stack_order, feat_dim)).reshape(-1, stack_order*feat_dim)
return feats
video_fn, audio_fn = mix_name
if 'video' in self.modalities:
video_feats = self.load_video(video_fn) # [T, H, W, 1]
else:
video_feats = None
if 'audio' in self.modalities:
audio_fn = audio_fn.split(':')[0]
sample_rate, wav_data = wavfile.read(audio_fn)
assert sample_rate == 16_000 and len(wav_data.shape) == 1
if np.random.rand() < self.noise_prob:
wav_data = self.add_noise(wav_data)
audio_feats = logfbank(wav_data, samplerate=sample_rate).astype(np.float32) # [T, F]
audio_feats = stacker(audio_feats, self.stack_order_audio) # [T/stack_order_audio, F*stack_order_audio]
else:
audio_feats = None
if audio_feats is not None and video_feats is not None:
diff = len(audio_feats) - len(video_feats)
if diff < 0:
audio_feats = np.concatenate([audio_feats, np.zeros([-diff, audio_feats.shape[-1]], dtype=audio_feats.dtype)])
elif diff > 0:
audio_feats = audio_feats[:-diff]
return video_feats, audio_feats
def load_video(self, audio_name):
feats = custom_utils.load_video(os.path.join(self.audio_root, audio_name))
feats = self.transform(feats)
feats = np.expand_dims(feats, axis=-1)
return feats
def select_noise(self):
rand_indexes = np.random.randint(0, len(self.noise_wav), size=self.noise_num)
noise_wav = []
for x in rand_indexes:
noise_wav.append(wavfile.read(self.noise_wav[x])[1].astype(np.float32))
if self.noise_num == 1:
return noise_wav[0]
else:
min_len = min([len(x) for x in noise_wav])
noise_wav = [x[:min_len] for x in noise_wav]
noise_wav = np.floor(np.stack(noise_wav).mean(axis=0))
return noise_wav
def add_noise(self, clean_wav):
clean_wav = clean_wav.astype(np.float32)
noise_wav = self.select_noise()
if type(self.noise_snr) == int or type(self.noise_snr) == float:
snr = self.noise_snr
elif type(self.noise_snr) == tuple:
snr = np.random.randint(self.noise_snr[0], self.noise_snr[1]+1)
clean_rms = np.sqrt(np.mean(np.square(clean_wav), axis=-1))
if len(clean_wav) > len(noise_wav):
ratio = int(np.ceil(len(clean_wav)/len(noise_wav)))
noise_wav = np.concatenate([noise_wav for _ in range(ratio)])
if len(clean_wav) < len(noise_wav):
start = 0
noise_wav = noise_wav[start: start + len(clean_wav)]
noise_rms = np.sqrt(np.mean(np.square(noise_wav), axis=-1))
adjusted_noise_rms = clean_rms / (10**(snr/20))
adjusted_noise_wav = noise_wav * (adjusted_noise_rms / noise_rms)
mixed = clean_wav + adjusted_noise_wav
#Avoid clipping noise
max_int16 = np.iinfo(np.int16).max
min_int16 = np.iinfo(np.int16).min
if mixed.max(axis=0) > max_int16 or mixed.min(axis=0) < min_int16:
if mixed.max(axis=0) >= abs(mixed.min(axis=0)):
reduction_rate = max_int16 / mixed.max(axis=0)
else :
reduction_rate = min_int16 / mixed.min(axis=0)
mixed = mixed * (reduction_rate)
mixed = mixed.astype(np.int16)
return mixed
def __getitem__(self, index):
video_feats, audio_feats = self.load_feature(self.names[index])
audio_feats, video_feats = torch.from_numpy(audio_feats.astype(np.float32)) if audio_feats is not None else None, torch.from_numpy(video_feats.astype(np.float32)) if video_feats is not None else None
if self.normalize and 'audio' in self.modalities:
with torch.no_grad():
audio_feats = F.layer_norm(audio_feats, audio_feats.shape[1:])
labels = self.get_labels(index)
fid = self.names[index][1].split(':')[1]
return {"id": index, 'fid': fid, "video_source": video_feats, 'audio_source': audio_feats, "label_list": labels}
def __len__(self):
return len(self.sizes)
def crop_to_max_size(self, wav, target_size, start=None):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav, 0
# longer utterances
if start is None:
start, end = 0, target_size
if self.random_crop:
start = np.random.randint(0, diff + 1)
end = size - diff + start
else:
end = start + target_size
return wav[start:end], start
def collater(self, samples):
samples = [s for s in samples if s["id"] is not None]
if len(samples) == 0:
return {}
audio_source, video_source = [s["audio_source"] for s in samples], [s["video_source"] for s in samples]
if audio_source[0] is None:
audio_source = None
if video_source[0] is None:
video_source = None
if audio_source is not None:
audio_sizes = [len(s) for s in audio_source]
else:
audio_sizes = [len(s) for s in video_source]
if self.pad_audio:
audio_size = min(max(audio_sizes), self.max_sample_size)
else:
audio_size = min(min(audio_sizes), self.max_sample_size)
if audio_source is not None:
collated_audios, padding_mask, audio_starts = self.collater_audio(audio_source, audio_size)
else:
collated_audios, audio_starts = None, None
if video_source is not None:
collated_videos, padding_mask, audio_starts = self.collater_audio(video_source, audio_size, audio_starts)
else:
collated_videos = None
targets_by_label = [
[s["label_list"][i] for s in samples]
for i in range(self.num_labels)
]
targets_list, lengths_list, ntokens_list = self.collater_label(
targets_by_label, audio_size, audio_starts
)
source = {"audio": collated_audios, "video": collated_videos}
net_input = {"source": source, "padding_mask": padding_mask}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
"utt_id": [s['fid'] for s in samples]
}
if self.single_target:
batch["target_lengths"] = lengths_list[0]
batch["ntokens"] = ntokens_list[0]
if self.is_s2s:
batch['target'], net_input['prev_output_tokens'] = targets_list[0][0], targets_list[0][1]
else:
batch["target"] = targets_list[0]
else:
batch["target_lengths_list"] = lengths_list
batch["ntokens_list"] = ntokens_list
batch["target_list"] = targets_list
return batch
def collater_audio(self, audios, audio_size, audio_starts=None):
audio_feat_shape = list(audios[0].shape[1:])
collated_audios = audios[0].new_zeros([len(audios), audio_size]+audio_feat_shape)
padding_mask = (
            torch.BoolTensor(len(audios), audio_size).fill_(False)
)
start_known = audio_starts is not None
audio_starts = [0 for _ in audios] if not start_known else audio_starts
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
assert self.pad_audio
collated_audios[i] = torch.cat(
[audio, audio.new_full([-diff]+audio_feat_shape, 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_audios[i], audio_starts[i] = self.crop_to_max_size(
audio, audio_size, audio_starts[i] if start_known else None
)
if len(audios[0].shape) == 2:
collated_audios = collated_audios.transpose(1, 2) # [B, T, F] -> [B, F, T]
else:
collated_audios = collated_audios.permute((0, 4, 1, 2, 3)).contiguous() # [B, T, H, W, C] -> [B, C, T, H, W]
return collated_audios, padding_mask, audio_starts
def collater_frm_label(
self, targets, audio_size, audio_starts, label_rate, pad
):
assert label_rate > 0
s2f = label_rate / self.sample_rate # num label per sample
frm_starts = [int(round(s * s2f)) for s in audio_starts]
frm_size = int(round(audio_size * s2f))
if not self.pad_audio:
rem_size = [len(t) - s for t, s in zip(targets, frm_starts)]
frm_size = min(frm_size, *rem_size)
targets = [t[s: s + frm_size] for t, s in zip(targets, frm_starts)]
logger.debug(f"audio_starts={audio_starts}")
logger.debug(f"frame_starts={frm_starts}")
logger.debug(f"frame_size={frm_size}")
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(
targets, pad_idx=pad, left_pad=False
)
return targets, lengths, ntokens
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(
targets, pad_idx=pad, left_pad=False
)
return targets, lengths, ntokens
def collater_seq_label_s2s(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
pad, eos = self.label_processors[0].dictionary.pad(), self.label_processors[0].dictionary.eos()
targets_ = data_utils.collate_tokens(targets, pad_idx=pad, eos_idx=eos, left_pad=False)
prev_output_tokens = data_utils.collate_tokens(targets, pad_idx=pad, eos_idx=eos, left_pad=False, move_eos_to_beginning=True)
return (targets_, prev_output_tokens), lengths, ntokens
def collater_label(self, targets_by_label, audio_size, audio_starts):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, self.label_rates, self.pad_list)
for targets, label_rate, pad in itr:
if label_rate == -1:
if self.is_s2s:
targets, lengths, ntokens = self.collater_seq_label_s2s(targets, pad)
else:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
else:
targets, lengths, ntokens = self.collater_frm_label(
targets, audio_size, audio_starts, label_rate, pad
)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
if self.pad_audio:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
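# Note on ordered_indices (descriptive): np.lexsort sorts by its last key first, so
# with order = [random_permutation, sizes] the indices come back sorted by utterance
# size with ties broken by the shuffle, and the trailing [::-1] yields longest-first
# batching. A small sketch with assumed sizes:
def _example_ordered_indices():
    sizes = np.array([50, 20, 90, 20])
    perm = np.array([1, 0, 3, 2])  # stands in for np.random.permutation
    return np.lexsort([perm, sizes])[::-1]  # array([2, 0, 3, 1]): longest first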
| av_hubert-main | avhubert/hubert_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,sys
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2 import (
ConvFeatureExtractionModel,
TransformerEncoder,
)
from fairseq.modules import GradMultiply, LayerNorm
from copy import deepcopy
DBG=True if len(sys.argv) == 1 else False
if DBG:
from hubert_pretraining import (
AVHubertPretrainingConfig,
AVHubertPretrainingTask,
)
from resnet import ResEncoder
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
from utils import compute_mask_indices
from decoder import TransformerDecoder
else:
from .hubert_pretraining import (
AVHubertPretrainingConfig,
AVHubertPretrainingTask,
)
from .resnet import ResEncoder
from .utils import compute_mask_indices
from .decoder import TransformerDecoder
from omegaconf import II
logger = logging.getLogger(__name__)
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(
["static", "uniform", "normal", "poisson"]
)
@dataclass
class AVHubertConfig(FairseqDataclass):
label_rate: int = II("task.label_rate")
input_modality: str = II("task.input_modality")
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group "
"norm with d groups in the first conv block, whereas layer_norm "
"has layer norms in every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for the transformer"},
)
attention_dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for attention weights"},
)
activation_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability after activation in FFN"},
)
encoder_layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a tarnsformer layer"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={
"help": "dropout to apply to the features (after feat extr)"
},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many "
"dimensions. set to encoder_embed_dim is <= 0"
},
)
untie_final_proj: bool = field(
default=False,
metadata={"help": "use separate projection for each target"},
)
layer_norm_first: bool = field(
default=False,
metadata={"help": "apply layernorm first in the transformer"},
)
conv_feature_layers: str = field(
default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2",
metadata={
"help": "string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0,
metadata={"help": "multiply feature extractor var grads by this"},
)
# masking
mask_length_audio: int = field(default=10, metadata={"help": "mask length"})
mask_prob_audio: float = field(
default=0.65,
metadata={"help": "probability of replacing a token with mask"},
)
mask_length_image: int = field(default=10, metadata={"help": "mask length"})
mask_prob_image: float = field(
default=0.65,
metadata={"help": "probability of replacing a token with mask"},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={
"help": "min space between spans (if no overlap is enabled)"
},
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
mask_channel_min_space: int = field(
default=1,
metadata={
"help": "min space between spans (if no overlap is enabled)"
},
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={
"help": "number of filters for convolutional positional embeddings"
},
)
conv_pos_groups: int = field(
default=16,
metadata={
"help": "number of groups for convolutional positional embedding"
},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={"help": "legacy (to be removed)"},
)
# loss computation
skip_masked: bool = field(
default=False,
metadata={"help": "skip computing losses over masked frames"},
)
skip_nomask: bool = field(
default=False,
metadata={"help": "skip computing losses over unmasked frames"},
)
resnet_relu_type: str = field(default='prelu', metadata={"help": 'relu type for resnet'})
resnet_weights: Optional[str] = field(default=None, metadata={"help": 'resnet weights'})
sim_type: str = field(default='cosine', metadata={"help": 'similarity type'})
sub_encoder_layers: int = field(default=0, metadata={'help': 'number of transformer layers for single modality'})
audio_feat_dim: int = field(default=-1, metadata={'help': 'audio feature dimension'})
modality_dropout: float = field(default=0, metadata={'help': 'drop one modality'})
audio_dropout: float = field(default=0, metadata={'help': 'drop audio feature'})
modality_fuse: str = field(default='concat', metadata={'help': 'fusing two modalities: add,concat'})
    selection_type: str = field(default='same_other_seq', metadata={'help': 'type of selecting images, same_other_seq: replace masked span with span from another sequence, same_seq: replace masked span with span of the same sequence'})
masking_type : str = field(default='input', metadata={'help': 'input or feature masking'})
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(
default=6, metadata={"help": "num of decoder layers"}
)
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False,
metadata={"help": "apply layernorm before each decoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings "
"(outside self attention)"
},
)
decoder_dropout: float = field(
default=0.1, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.1,
metadata={
"help": "dropout probability for attention weights "
"inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False,
metadata={"help": "share decoder input and output embeddings"},
)
    no_scale_embedding: bool = field(default=True, metadata={'help': 'if True, do not scale decoder embeddings by sqrt(embed_dim)'})
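# Note on modality fusion (descriptive of the fields above and their use in
# AVHubertModel below): with modality_fuse='concat' the audio and video sub-features
# (encoder_embed_dim each, 768 by default) are concatenated into a 2*768-dim fused
# feature that post_extract_proj maps back to encoder_embed_dim, while
# modality_fuse='add' sums them and keeps the dimension at encoder_embed_dim.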
class SubModel(nn.Module):
def __init__(self, resnet=None, input_dim=None, cfg=None):
super().__init__()
self.resnet = resnet
self.proj = nn.Linear(input_dim, cfg.encoder_embed_dim)
self.encoder = TransformerEncoder(cfg) if cfg.encoder_layers > 0 else None
def forward(self, x):
if self.resnet is not None:
x = self.resnet(x)
x = self.proj(x.transpose(1, 2))
if self.encoder is not None:
x = self.encoder(x)[0].transpose(1, 2)
else:
x = x.transpose(1, 2)
return x
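# Shape sketch for SubModel (a minimal stand-in config is used below; providing only
# the fields the module reads is an assumption for illustration): the audio branch
# consumes stacked filterbanks as [B, F, T] and returns [B, encoder_embed_dim, T].
def _example_audio_submodel_shape():
    from argparse import Namespace
    cfg = Namespace(encoder_embed_dim=768, encoder_layers=0)  # 0 layers -> no sub-encoder
    sub = SubModel(resnet=None, input_dim=104, cfg=cfg)       # e.g. 26 filterbanks x 4 stacked frames
    x = torch.zeros(2, 104, 50)                               # [B, F, T]
    return sub(x).shape                                       # torch.Size([2, 768, 50])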
@register_model("av_hubert", dataclass=AVHubertConfig)
class AVHubertModel(BaseFairseqModel):
def __init__(
self,
cfg: AVHubertConfig,
task_cfg: AVHubertPretrainingConfig,
dictionaries: List[Dictionary],
**kwargs
) -> None:
super().__init__()
logger.info(f"HubertModel Config: {cfg}")
feature_ds_rate = 1
self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate
sub_cfg = deepcopy(cfg)
sub_cfg.encoder_layers = sub_cfg.sub_encoder_layers
resnet = ResEncoder(relu_type=cfg.resnet_relu_type, weights=cfg.resnet_weights)
self.feature_extractor_audio = SubModel(resnet=None, input_dim=cfg.audio_feat_dim, cfg=sub_cfg)
self.feature_extractor_video = SubModel(resnet=resnet, input_dim=resnet.backend_out, cfg=sub_cfg)
self.modality_dropout, self.audio_dropout = cfg.modality_dropout, cfg.audio_dropout
self.modality_fuse = cfg.modality_fuse
self.encoder_embed_dim = cfg.encoder_embed_dim
if self.modality_fuse == 'concat':
self.embed = cfg.encoder_embed_dim * 2
elif self.modality_fuse == 'add':
self.embed = cfg.encoder_embed_dim
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob_image, self.mask_prob_audio = cfg.mask_prob_image, cfg.mask_prob_audio
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length_image, self.mask_length_audio = cfg.mask_length_image, cfg.mask_length_audio
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.logit_temp = cfg.logit_temp
self.skip_masked = cfg.skip_masked
self.skip_nomask = cfg.skip_nomask
self.sim_type = cfg.sim_type
self.selection_type = cfg.selection_type
self.masking_type = cfg.masking_type
final_dim = (
cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.audio_feat_dim).uniform_() if self.masking_type == 'input' else torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.untie_final_proj = cfg.untie_final_proj
if self.untie_final_proj:
self.final_proj = nn.Linear(
cfg.encoder_embed_dim, final_dim * len(dictionaries)
)
else:
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
# modules below are not needed during fine-tuning
if any([d is None for d in dictionaries]):
logger.info(
"cannot find dictionary. assume will be used for fine-tuning"
)
else:
self.num_classes = [len(d) for d in dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), final_dim)
)
nn.init.uniform_(self.label_embs_concat)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: AVHubertConfig, task: AVHubertPretrainingTask):
"""Build a new model instance."""
kwargs = {}
model = AVHubertModel(cfg, task.cfg, task.dictionaries, **kwargs)
return model
def apply_input_mask(self, x, padding_mask, target_list):
B, C, T = x.shape[:3]
is_audio = True if len(x.shape) == 3 else False
if is_audio:
mask_prob, mask_length = self.mask_prob_audio, self.mask_length_audio
else:
mask_prob, mask_length = self.mask_prob_image, self.mask_length_image
if mask_prob > 0:
mask_indices, starts, ends, batch_indexes = compute_mask_indices(
(B, T),
padding_mask,
mask_prob,
mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices_np = mask_indices
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = x.transpose(1, 2).contiguous() # [B, T, C, H, W]
if B == 1:
x[mask_indices] = 0
elif is_audio:
x[mask_indices] = self.mask_emb
elif self.selection_type == 'same_other_seq':
perm = (torch.arange(B) + torch.randint(low=1, high=B, size=(1,))) % B
x_perm = x[perm]
x[mask_indices] = x_perm[mask_indices]
elif self.selection_type == 'same_seq':
batch_indexes_, other_indexes = [], []
for batch_index, start, end in zip(batch_indexes, starts, ends):
length = end-start
other_start = np.setdiff1d(np.arange(T), np.arange(max(0, start-length), end))
if len(other_start) > 0:
other_start = np.random.choice(other_start, size=1)
else:
other_start = 0
other_end = other_start + length
other_indexes.append(np.arange(other_start, other_end).clip(max=T-1))
batch_indexes_.append(np.zeros([length], dtype=np.int64)+batch_index)
batch_indexes, other_indexes = np.concatenate(batch_indexes_), np.concatenate(other_indexes)
x[mask_indices] = x[batch_indexes, other_indexes]
x = x.transpose(1, 2).contiguous()
else:
mask_indices = None
if self.mask_channel_prob > 0:
logger.info(f"No mask channel prob for input masking")
return x, mask_indices
def apply_feature_mask(self, x, padding_mask, target_list):
B, T, C = x.shape
        assert self.mask_prob_audio == self.mask_prob_image and self.mask_length_audio == self.mask_length_image, "masking prob/length for image/audio must be the same for feature masking"
mask_prob, mask_length = self.mask_prob_audio, self.mask_length_image
if mask_prob > 0:
mask_indices, _, _, _ = compute_mask_indices(
(B, T),
padding_mask,
mask_prob,
mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices, _, _, _ = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward_features(self, source: torch.Tensor, modality: str) -> torch.Tensor:
        extractor = getattr(self, f"feature_extractor_{modality}")
if self.feature_grad_mult > 0:
features = extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = extractor(source)
return features
def forward_targets(
self, features: torch.Tensor, mask_indices: torch.Tensor, target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
if mask_indices is not None:
mask_indices = mask_indices[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_list = [t[:, target_inds.long()] for t in target_list]
return features, mask_indices, target_list
def forward_padding_mask(
self, features: torch.Tensor, padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(
padding_mask.size(0), features.size(1), -1
)
padding_mask = padding_mask.all(-1)
return padding_mask
def compute_logits(self, feats, emb_mat):
# feats: [B, T, F], emb_mat: [V, F]
if self.sim_type == 'dot':
logits = torch.matmul(feats, emb_mat.transpose(0, 1))
elif self.sim_type == 'cosine':
batch_size, timesteps, emb_dim = feats.size()
feats_ = feats.view(-1, emb_dim)
nom = (feats_.unsqueeze(dim=1) * emb_mat.unsqueeze(dim=0)).sum(dim=-1) # [B*T, V]
denom = (feats_**2).sum(dim=-1).sqrt().unsqueeze(dim=1) * (emb_mat**2).sum(dim=-1).sqrt().unsqueeze(dim=0) # [B*T, V]
logits = (nom/denom.clamp(min=1e-6)).view(batch_size, timesteps, -1)
else:
raise NotImplementedError
logits = logits / self.logit_temp
return logits
def forward(
self,
source: torch.Tensor,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None
) -> Dict[str, torch.Tensor]:
"""output layer is 1-based"""
src_audio, src_video = source['audio'], source['video']
if mask and self.masking_type == 'input':
src_video, mask_indices_video = self.apply_input_mask(src_video, padding_mask, target_list)
src_audio, mask_indices_audio = self.apply_input_mask(src_audio, padding_mask, target_list)
mask_indices = torch.logical_or(mask_indices_audio, mask_indices_video)
else:
src_audio, src_video, mask_indices = src_audio, src_video, None
features_audio = self.forward_features(src_audio, modality='audio') # features: [B, F, T]
features_video = self.forward_features(src_video, modality='video')
modality_drop_prob, audio_drop_prob = np.random.random(), np.random.random()
if self.training:
if modality_drop_prob < self.modality_dropout:
if audio_drop_prob < self.audio_dropout:
features_audio = 0 * features_audio
else:
features_video = 0 * features_video
if self.modality_fuse == 'concat':
features = torch.cat([features_audio, features_video], dim=1)
elif self.modality_fuse == 'add':
features = features_audio + features_video
if target_list is not None:
features, mask_indices, target_list = self.forward_targets(features, mask_indices, target_list)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
if self.masking_type == 'feature' and mask:
x, mask_indices = self.apply_feature_mask(features, padding_mask, target_list)
else:
x = features
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1
)
if features_only:
return {"x": x, "padding_mask": padding_mask, "features": features}
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
proj_x = self.final_proj(x)
if self.untie_final_proj:
proj_x_list = proj_x.chunk(len(self.num_classes), dim=-1)
else:
proj_x_list = [proj_x for _ in self.num_classes]
logit_list = [self.compute_logits(proj, emb).view(-1, num_class) for proj, emb, num_class in zip(proj_x_list, label_embs_list, self.num_classes)] # [[B*T, V]]
mask, unmask = torch.logical_and(mask_indices, ~padding_mask).view(-1), torch.logical_and(~mask_indices, ~padding_mask).view(-1) # [B*T]
logit_m_list, logit_u_list = [logit[mask] for logit in logit_list], [logit[unmask] for logit in logit_list]
target_m_list, target_u_list = [target.view(-1)[mask].long() for target in target_list], [target.view(-1)[unmask].long() for target in target_list]
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"target_m_list": target_m_list,
"target_u_list": target_u_list,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
return result
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
res = self.forward(
source,
padding_mask=padding_mask,
mask=mask,
features_only=True,
output_layer=output_layer,
)
feature = res["features"] if ret_conv else res["x"]
return feature, res["padding_mask"]
def extract_finetune(self, source, padding_mask=None, mask=False, ret_conv=False, output_layer=None):
src_audio, src_video = source['audio'], source['video']
if mask and self.masking_type == 'input':
src_video, mask_indices_video = self.apply_input_mask(src_video, padding_mask, target_list=None)
src_audio, mask_indices_audio = self.apply_input_mask(src_audio, padding_mask, target_list=None)
mask_indices = torch.logical_or(mask_indices_audio, mask_indices_video) # mask_indices not used in fine-tuning
else:
src_audio, src_video, mask_indices = src_audio, src_video, None
if src_audio is not None and src_video is None:
features_audio = self.forward_features(src_audio, modality='audio') # features: [B, F, T]
features_video = features_audio.new_zeros(features_audio.size(0), self.encoder_embed_dim, features_audio.size(-1))
elif src_audio is None and src_video is not None:
features_video = self.forward_features(src_video, modality='video')
features_audio = features_video.new_zeros(features_video.size(0), self.encoder_embed_dim, features_video.size(-1))
elif src_audio is not None and src_video is not None:
features_video = self.forward_features(src_video, modality='video')
features_audio = self.forward_features(src_audio, modality='audio') # features: [B, F, T]
if self.modality_fuse == 'concat':
features = torch.cat([features_audio, features_video], dim=1)
elif self.modality_fuse == 'add':
features = features_audio + features_video
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
x = features
mask_indices = None
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1
)
return x, padding_mask
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
return extra_losses, names
def remove_pretraining_modules(self):
self.target_glu = None
self.final_proj = None
def get_logits(self, net_output, is_masked=True):
raise NotImplementedError
def get_targets(self, net_output, is_masked=True):
raise NotImplementedError
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(
x.float(), targets.float(), dim=-1
).type_as(x)
logits /= self.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
| av_hubert-main | avhubert/hubert.py |
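# Editor's note: a minimal, self-contained sketch (not part of the original repo)
# of the temperature-scaled cosine similarity used in AVHubertModel.compute_logits
# above. Tensor sizes and the logit_temp value below are illustrative assumptions.
import torch
def cosine_logits(feats: torch.Tensor, emb_mat: torch.Tensor, logit_temp: float = 0.1) -> torch.Tensor:
    # feats: [B, T, F] projected frame features; emb_mat: [V, F] label embeddings
    B, T, F = feats.shape
    feats_ = feats.reshape(-1, F)                                      # [B*T, F]
    nom = feats_ @ emb_mat.t()                                         # [B*T, V]
    denom = feats_.norm(dim=-1, keepdim=True) * emb_mat.norm(dim=-1)   # [B*T, V] via broadcasting
    logits = (nom / denom.clamp(min=1e-6)).view(B, T, -1)
    return logits / logit_temp
if __name__ == "__main__":
    x, emb = torch.randn(2, 5, 8), torch.randn(10, 8)   # 2 utterances, 5 frames, 8-dim feats; 10 labels
    print(cosine_logits(x, emb).shape)                  # torch.Size([2, 5, 10])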
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import argparse
import torch
from fairseq.data import Dictionary, encoders
def add_task_state(ckpt_path):
std = torch.load(ckpt_path)
cfg = std['cfg']
if cfg['model']['_name'] == 'av_hubert':
dictionaries = [Dictionary.load(f"{cfg['task']['label_dir']}/dict.{label}.txt") for label in cfg['task']['labels']]
std['cfg']['task']['fine_tuning'] = False
std['task_state'] = {'dictionaries': dictionaries}
print(dictionaries, std['cfg']['task'])
else:
prt = torch.load(std['cfg']['model']['w2v_path'])
std['cfg']['model']['w2v_args'] = prt['cfg']
std['cfg']['task']['fine_tuning'] = True
dictionaries = [Dictionary.load(f"{prt['cfg']['task']['label_dir']}/dict.{label}.txt") for label in prt['cfg']['task']['labels']]
target_dictionary = Dictionary.load(f"{cfg['task']['label_dir']}/dict.wrd.txt")
tokenizer_fn = std['cfg']['task']['tokenizer_bpe_model']
        bpe_args = argparse.Namespace(**{'bpe': 'sentencepiece', 'sentencepiece_model': tokenizer_fn})
bpe_tokenizer = encoders.build_bpe(bpe_args)
std['task_state'] = {'dictionaries': dictionaries, 'target_dictionary': target_dictionary, 's2s_tokenizer': bpe_tokenizer}
torch.save(std, ckpt_path)
return
if __name__ == '__main__':
ckpt_paths = sys.argv[1:]
for ckpt_path in ckpt_paths:
add_task_state(ckpt_path)
| av_hubert-main | avhubert/misc/fix_state.py |
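# Editor's note: a hedged sketch (not from the repo) of how one might inspect the
# task_state entry that fix_state.py writes back into a checkpoint. The checkpoint
# path is a placeholder.
import torch
ckpt = torch.load("/path/to/checkpoint.pt", map_location="cpu")  # placeholder path
print("fine_tuning:", ckpt["cfg"]["task"].get("fine_tuning"))
print("task_state keys:", list(ckpt.get("task_state", {}).keys()))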
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import joblib
import torch
import tqdm
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_km_label")
class ApplyKmeans(object):
def __init__(self, km_path):
self.km_model = joblib.load(km_path)
self.C_np = self.km_model.cluster_centers_.transpose()
self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
self.C = torch.from_numpy(self.C_np)
self.Cnorm = torch.from_numpy(self.Cnorm_np)
if torch.cuda.is_available():
self.C = self.C.cuda()
self.Cnorm = self.Cnorm.cuda()
def __call__(self, x):
if isinstance(x, torch.Tensor):
dist = (
x.pow(2).sum(1, keepdim=True)
- 2 * torch.matmul(x, self.C)
+ self.Cnorm
)
return dist.argmin(dim=1).cpu().numpy()
else:
dist = (
(x ** 2).sum(1, keepdims=True)
- 2 * np.matmul(x, self.C_np)
+ self.Cnorm_np
)
return np.argmin(dist, axis=1)
def get_feat_iterator(feat_dir, split, nshard, rank):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
def iterate():
feat = np.load(feat_path, mmap_mode="r")
assert feat.shape[0] == (offsets[-1] + lengs[-1])
for offset, leng in zip(offsets, lengs):
yield feat[offset: offset + leng]
return iterate, len(lengs)
def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir):
apply_kmeans = ApplyKmeans(km_path)
generator, num = get_feat_iterator(feat_dir, split, nshard, rank)
iterator = generator()
lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km"
os.makedirs(lab_dir, exist_ok=True)
with open(lab_path, "w") as f:
for feat in tqdm.tqdm(iterator, total=num):
# feat = torch.from_numpy(feat).cuda()
lab = apply_kmeans(feat).tolist()
f.write(" ".join(map(str, lab)) + "\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir")
parser.add_argument("split")
parser.add_argument("km_path")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("lab_dir")
args = parser.parse_args()
logging.info(str(args))
dump_label(**vars(args))
| av_hubert-main | avhubert/clustering/dump_km_label.py |
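# Editor's note: a numpy-only illustration (not part of the repo) of the assignment
# rule in ApplyKmeans above, based on the expansion ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2,
# where the ||x||^2 term does not affect the argmin. Feature dim and cluster count are made up.
import numpy as np
rng = np.random.RandomState(0)
X = rng.randn(100, 16).astype(np.float32)   # 100 frames, 16-dim features
C = rng.randn(8, 16).astype(np.float32)     # 8 cluster centroids
C_t = C.T                                   # [16, 8], analogous to ApplyKmeans.C_np
Cnorm = (C_t ** 2).sum(0, keepdims=True)    # [1, 8]
dist = (X ** 2).sum(1, keepdims=True) - 2 * X @ C_t + Cnorm
labels = dist.argmin(axis=1)                # one cluster id per frame
print(labels[:10])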
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import sys
import fairseq
import soundfile as sf
import torch
import torch.nn.functional as F
import tqdm
from npy_append_array import NpyAppendArray
import numpy as np
from python_speech_features import logfbank
from scipy.io import wavfile
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_hubert_feature")
class HubertFeatureReader(object):
def __init__(self, ckpt_path, layer, max_chunk=1600000, custom_utils=None):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer
self.max_chunk = max_chunk
self.stack_order_audio = self.task.cfg.stack_order_audio
image_crop_size, image_mean, image_std = self.task.cfg.image_crop_size, self.task.cfg.image_mean, self.task.cfg.image_std
self.transform = custom_utils.Compose([
            custom_utils.Normalize(0.0, 255.0),
            custom_utils.CenterCrop((image_crop_size, image_crop_size)),
            custom_utils.Normalize(image_mean, image_std)])
self.custom_utils = custom_utils
logger.info(f"TASK CONFIG:\n{self.task.cfg}")
logger.info(f" max_chunk = {self.max_chunk}")
logger.info(f"Transform: {self.transform}")
def load_feature(self, mix_name, ref_len=None):
def stacker(feats, stack_order):
feat_dim = feats.shape[1]
if len(feats) % stack_order != 0:
res = stack_order - len(feats) % stack_order
res = np.zeros([res, feat_dim]).astype(feats.dtype)
feats = np.concatenate([feats, res], axis=0)
feats = feats.reshape((-1, stack_order, feat_dim)).reshape(-1, stack_order*feat_dim)
return feats
video_fn, audio_fn = mix_name
video_feats = self.load_image(video_fn)
audio_fn = audio_fn.split(':')[0]
sample_rate, wav_data = wavfile.read(audio_fn)
assert sample_rate == 16_000 and len(wav_data.shape) == 1
audio_feats = logfbank(wav_data, samplerate=sample_rate).astype(np.float32)
audio_feats = stacker(audio_feats, self.stack_order_audio)
diff = len(audio_feats) - len(video_feats)
if diff < 0:
audio_feats = np.concatenate([audio_feats, np.zeros([-diff, audio_feats.shape[-1]], dtype=audio_feats.dtype)])
elif diff > 0:
audio_feats = audio_feats[:-diff]
return video_feats, audio_feats
    def load_image(self, video_name):
        feats = self.custom_utils.load_video(video_name)
feats = self.transform(feats)
feats = np.expand_dims(feats, axis=-1)
return feats
def get_feats(self, path, ref_len=None):
video_feats, audio_feats = self.load_feature(path, ref_len)
with torch.no_grad():
audio_feats, video_feats = torch.from_numpy(audio_feats.astype(np.float32)).cuda(), torch.from_numpy(video_feats.astype(np.float32)).cuda()
if self.task.cfg.normalize:
audio_feats = F.layer_norm(audio_feats, audio_feats.shape[1:])
video_feats = video_feats.unsqueeze(dim=0).permute((0, 4, 1, 2, 3)).contiguous()
audio_feats = audio_feats.unsqueeze(dim=0).transpose(1, 2)
source = {'audio': audio_feats, 'video': video_feats}
if self.layer == 0:
ret_conv, output_layer = True, None
else:
ret_conv, output_layer = False, self.layer
feat, _ = self.model.extract_features(
source=source,
padding_mask=None,
mask=False,
output_layer=output_layer,
ret_conv=ret_conv
# output_layer=self.layer,
)
return feat.squeeze(dim=0)
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
tot = len(lines)
shard_size = math.ceil(tot / nshard)
start, end = rank * shard_size, min((rank + 1) * shard_size, tot)
    assert start < end, f"start={start}, end={end}"
logger.info(
f"rank {rank} of {nshard}, process {end-start} "
f"({start}-{end}) out of {tot}"
)
lines = lines[start:end]
def iterate():
for line in lines:
items = line.strip().split("\t")
# audio_path = f"{items[1]}:{items[0]}"
yield (items[1], items[2]+':'+items[0]), int(items[3])
return iterate, len(lines)
def dump_feature(
tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk, custom_utils=None, **kwargs
):
reader = HubertFeatureReader(ckpt_path, layer, max_chunk, custom_utils=custom_utils)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
iterator = generator()
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
os.makedirs(feat_dir, exist_ok=True)
if os.path.exists(feat_path):
os.remove(feat_path)
feat_f = NpyAppendArray(feat_path)
with open(leng_path, "w") as leng_f:
for path, nsample in tqdm.tqdm(iterator, total=num):
feat = reader.get_feats(path, nsample)
feat_f.append(feat.cpu().numpy())
leng_f.write(f"{len(feat)}\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--max_chunk", type=int, default=1600000)
parser.add_argument("--user_dir", type=str, default=None)
args = parser.parse_args()
logger.info(args)
fairseq.utils.import_user_module(args)
sys.path.append(args.user_dir)
import utils as custom_utils
kwargs = vars(args)
kwargs.update({'custom_utils': custom_utils})
dump_feature(**kwargs)
| av_hubert-main | avhubert/clustering/dump_hubert_feature.py |
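# Editor's note: a standalone sketch (not from the repo) of the `stacker` helper in
# HubertFeatureReader.load_feature above: the log-filterbank sequence is zero-padded
# to a multiple of stack_order and consecutive frames are concatenated, bringing
# 100 Hz audio features down to the 25 fps video rate. Shapes are illustrative.
import numpy as np
def stack_frames(feats: np.ndarray, stack_order: int) -> np.ndarray:
    feat_dim = feats.shape[1]
    remainder = len(feats) % stack_order
    if remainder != 0:
        pad = np.zeros([stack_order - remainder, feat_dim], dtype=feats.dtype)
        feats = np.concatenate([feats, pad], axis=0)
    return feats.reshape(-1, stack_order * feat_dim)
fbank = np.random.randn(103, 26).astype(np.float32)  # 103 frames of 26-dim log-fbank
print(stack_frames(fbank, stack_order=4).shape)      # (26, 104)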
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os, subprocess
import submitit
import argparse
from argparse import Namespace
def dump_av_hubert(*args, **kwargs):
from dump_hubert_feature import dump_feature
import fairseq
import sys
av_hubert_dir = os.path.join(os.getcwd(), '..')
fairseq.utils.import_user_module(Namespace(user_dir=av_hubert_dir))
sys.path.append(av_hubert_dir)
import utils as custom_utils
kwargs.update({'custom_utils': custom_utils})
args = args[0]
dump_feature(*args, **kwargs)
return
def dump_mfcc(*args, **kwargs):
from dump_mfcc_feature import dump_feature
args = args[0]
dump_feature(*args, **kwargs)
return
def run_kmeans(*args, **kwargs):
import sys
from learn_kmeans import learn_kmeans
learn_kmeans(*args, **kwargs)
return
def apply_kmeans(*args, **kwargs):
import sys
from dump_km_label import dump_label
args = args[0]
dump_label(*args, **kwargs)
return
def concatenate(*args, **kwargs):
from concat import main as concat_fn
args = args[0]
concat_fn(*args, **kwargs)
return
def main():
parser = argparse.ArgumentParser(description='clustering', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--tsv', type=str, help='tsv dir')
parser.add_argument('--output', type=str, help='output dir (labels)')
parser.add_argument('--ckpt', type=str, help='checkpoint of last iteration')
parser.add_argument('--nlayer', type=int, default=12, help='layer index for clustering')
parser.add_argument('--ncluster', type=int, default=500, help='number of clusters')
parser.add_argument('--nshard', type=int, default=100, help='number of shards')
parser.add_argument('--percent', type=float, default=0.05, help='Percentage for clustering')
parser.add_argument('--mfcc', action='store_true', help='extracting MFCC feature')
parser.add_argument('--slurm-partition', type=str, help='slurm partitions')
args = parser.parse_args()
tsv_dir = args.tsv
output_dir = args.output
km_dir = output_dir
feat_dir = output_dir
ckpt_path = args.ckpt
nlayer = args.nlayer
nshard = args.nshard
n_clusters = args.ncluster
slurm_partition = args.slurm_partition
is_mfcc = args.mfcc
timeout_min = 240
    percent = args.percent
log_folder = "log_submit/%j"
km_path = f"{km_dir}/kmeans.mdl"
os.makedirs(output_dir, exist_ok=True)
ext = submitit.AutoExecutor(folder=log_folder)
args_array = []
if is_mfcc:
print(f"Dump MFCC feature")
for rank in range(nshard):
args = [tsv_dir, 'train', nshard, rank, output_dir]
args_array.append(args)
args_array.append([tsv_dir, 'valid', 1, 0, output_dir])
ext.update_parameters(timeout_min=60, slurm_partition=slurm_partition, cpus_per_task=1, slurm_array_parallelism=100)
jobs = ext.map_array(dump_mfcc, args_array)
else:
print(f"Dump AV-Hubert feature")
for rank in range(nshard):
args = [tsv_dir, 'train', ckpt_path, nlayer, nshard, rank, output_dir, 1600000]
args_array.append(args)
args_array.append([tsv_dir, 'valid', ckpt_path, nlayer, 1, 0, output_dir, 1600000])
ext.update_parameters(timeout_min=60, slurm_partition=slurm_partition, cpus_per_task=1, gpus_per_node=1, slurm_array_parallelism=100)
jobs = ext.map_array(dump_av_hubert, args_array)
[job.result() for job in jobs]
print(f"Learn K-means")
percent, batch_size = percent, 20000
ext.update_parameters(timeout_min=timeout_min, slurm_partition=slurm_partition, cpus_per_task=8, mem_gb=128)
args, kwargs = [feat_dir, 'train', nshard, km_path, n_clusters], vars(Namespace(seed=0, percent=percent, init="k-means++", max_iter=100, batch_size=batch_size, tol=0.0, n_init=20, reassignment_ratio=0.0, max_no_improvement=100))
print(args, kwargs)
job = ext.submit(run_kmeans, *args, **kwargs)
job.result()
print(f"Apply K-means")
args_array = []
for rank in range(nshard):
args = [feat_dir, 'train', km_path, nshard, rank, output_dir]
args_array.append(args)
args_array.append([feat_dir, 'valid', km_path, 1, 0, output_dir])
ext.update_parameters(timeout_min=10, slurm_partition=slurm_partition, cpus_per_task=1, slurm_array_parallelism=500)
jobs = ext.map_array(apply_kmeans, args_array)
[job.result() for job in jobs]
print(f"Concatenate labels")
cont = f"for rank in $(seq 0 {nshard-1}); do cat {output_dir}/train_${{rank}}_{nshard}.km; done > {output_dir}/train.km"
print(cont)
subprocess.call(cont, shell=True)
cont = f"cp {output_dir}/valid*.km {output_dir}/valid.km"
print(cont)
subprocess.call(cont, shell=True)
with open(f"{output_dir}/dict.km.txt", 'w') as fo:
for i in range(n_clusters):
fo.write(f"{i} {10000}\n")
print(f"Please delete intermediate files to save space: rm {output_dir}/*npy")
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/clustering/submit_cluster.py |
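# Editor's note: a pure-Python equivalent (not from the repo) of the shell one-liner
# printed above that concatenates per-shard .km label files into a single train.km.
# The output directory and shard count are placeholders.
output_dir, nshard = "/path/to/labels", 4   # placeholders
with open(f"{output_dir}/train.km", "w") as fo:
    for rank in range(nshard):
        with open(f"{output_dir}/train_{rank}_{nshard}.km") as fi:
            fo.write(fi.read())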
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
from sklearn.cluster import MiniBatchKMeans
import joblib
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("learn_kmeans")
def get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=1,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
def load_feature_shard(feat_dir, split, nshard, rank, percent):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
if percent < 0:
return np.load(feat_path, mmap_mode="r")
else:
nsample = int(np.ceil(len(lengs) * percent))
indices = np.random.choice(len(lengs), nsample, replace=False)
feat = np.load(feat_path, mmap_mode="r")
sampled_feat = np.concatenate(
[feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0
)
logger.info(
(
f"sampled {nsample} utterances, {len(sampled_feat)} frames "
f"from shard {rank}/{nshard}"
)
)
return sampled_feat
def load_feature(feat_dir, split, nshard, seed, percent):
assert percent <= 1.0
feat = np.concatenate(
[
load_feature_shard(feat_dir, split, nshard, r, percent)
for r in range(nshard)
],
axis=0,
)
logging.info(f"loaded feature with dimension {feat.shape}")
return feat
def learn_kmeans(
feat_dir,
split,
nshard,
km_path,
n_clusters,
seed,
percent,
init,
max_iter,
batch_size,
tol,
n_init,
reassignment_ratio,
max_no_improvement,
):
np.random.seed(seed)
feat = load_feature(feat_dir, split, nshard, seed, percent)
km_model = get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
)
km_model.fit(feat)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feat) / len(feat)
logger.info("total intertia: %.5f", inertia)
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir", type=str)
parser.add_argument("split", type=str)
parser.add_argument("nshard", type=int)
parser.add_argument("km_path", type=str)
parser.add_argument("n_clusters", type=int)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--percent", default=-1, type=float, help="sample a subset; -1 for all"
)
parser.add_argument("--init", default="k-means++")
parser.add_argument("--max_iter", default=100, type=int)
parser.add_argument("--batch_size", default=10000, type=int)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.0, type=float)
args = parser.parse_args()
logging.info(str(args))
learn_kmeans(**vars(args))
| av_hubert-main | avhubert/clustering/learn_kmeans.py |
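# Editor's note: a toy end-to-end run (not from the repo) of the same MiniBatchKMeans
# configuration used by learn_kmeans above, fit on random features instead of dumped
# AV-HuBERT features. Feature dim, cluster count and output path are assumptions.
import joblib
import numpy as np
from sklearn.cluster import MiniBatchKMeans
feat = np.random.rand(5000, 39).astype(np.float32)   # e.g. 39-dim MFCC+delta+ddelta
km = MiniBatchKMeans(n_clusters=50, init="k-means++", max_iter=100,
                     batch_size=1000, tol=0.0, max_no_improvement=100,
                     n_init=20, reassignment_ratio=0.0, compute_labels=False)
km.fit(feat)
joblib.dump(km, "/tmp/kmeans_toy.mdl")
print("avg inertia per frame:", -km.score(feat) / len(feat))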
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import sys
import soundfile as sf
import torch
import torchaudio
import tqdm
from npy_append_array import NpyAppendArray
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_mfcc_feature")
class MfccFeatureReader(object):
def __init__(self, sample_rate):
self.sample_rate = sample_rate
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
assert sr == self.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float()
x = x.view(1, -1)
mfccs = torchaudio.compliance.kaldi.mfcc(
waveform=x,
sample_frequency=self.sample_rate,
use_energy=False,
) # (time, freq)
mfccs = mfccs.transpose(0, 1) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
            concat = concat.transpose(0, 1).contiguous() # (time, freq)
return concat
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
tot = len(lines)
shard_size = math.ceil(tot / nshard)
start, end = rank * shard_size, min((rank + 1) * shard_size, tot)
    assert start < end, f"start={start}, end={end}"
logger.info(
f"rank {rank} of {nshard}, process {end-start} "
f"({start}-{end}) out of {tot}"
)
lines = lines[start:end]
def iterate():
for line in lines:
_, video_path, wav_path, nsample_video, nsample_wav = line.split("\t")
yield f"{root}/{wav_path}", int(nsample_wav)
return iterate, len(lines)
def dump_feature(tsv_dir, split, nshard, rank, feat_dir, sample_rate=16_000):
reader = MfccFeatureReader(sample_rate)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
iterator = generator()
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
os.makedirs(feat_dir, exist_ok=True)
if os.path.exists(feat_path):
os.remove(feat_path)
feat_f = NpyAppendArray(feat_path)
with open(leng_path, "w") as leng_f:
for path, nsample in tqdm.tqdm(iterator, total=num):
feat = reader.get_feats(path, nsample)
feat_f.append(feat.cpu().numpy())
leng_f.write(f"{len(feat)}\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--sample_rate", type=int, default=16000)
args = parser.parse_args()
logger.info(args)
dump_feature(**vars(args))
| av_hubert-main | avhubert/clustering/dump_mfcc_feature.py |
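# Editor's note: a self-contained sketch (not from the repo) of the 39-dim
# MFCC + delta + delta-delta features computed by MfccFeatureReader.get_feats above,
# run on a synthetic 1-second sine wave instead of an LRS3 utterance.
import math
import torch
import torchaudio
sample_rate = 16_000
t = torch.arange(sample_rate, dtype=torch.float32) / sample_rate
wav = torch.sin(2 * math.pi * 440.0 * t).view(1, -1)               # [1, num_samples]
mfccs = torchaudio.compliance.kaldi.mfcc(
    waveform=wav, sample_frequency=sample_rate, use_energy=False
).transpose(0, 1)                                                   # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
feat = torch.cat([mfccs, deltas, ddeltas], dim=0).transpose(0, 1)   # (time, 39)
print(feat.shape)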
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import shutil
import subprocess
from tqdm import tqdm
from pathlib import Path
from gen_subword import gen_vocab
from tempfile import NamedTemporaryFile
def main():
import argparse
parser = argparse.ArgumentParser(description='LRS3 tsv preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lrs3', type=str, help='lrs3 root dir')
parser.add_argument('--valid-ids', type=str, help='a list of valid ids')
    parser.add_argument('--vocab-size', type=int, default=1000, help='sentencepiece vocabulary size')
args = parser.parse_args()
file_list, label_list = f"{args.lrs3}/file.list", f"{args.lrs3}/label.list"
assert os.path.isfile(file_list) , f"{file_list} not exist -> run lrs3_prepare.py first"
assert os.path.isfile(label_list) , f"{label_list} not exist -> run lrs3_prepare.py first"
nframes_audio_file, nframes_video_file = f"{args.lrs3}/nframes.audio", f"{args.lrs3}/nframes.video"
assert os.path.isfile(nframes_audio_file) , f"{nframes_audio_file} not exist -> run count_frames.py first"
assert os.path.isfile(nframes_video_file) , f"{nframes_video_file} not exist -> run count_frames.py first"
print(f"Generating sentencepiece units")
vocab_size = args.vocab_size
vocab_dir = (Path(f"{args.lrs3}")/f"spm{vocab_size}").absolute()
# out_root = Path(vocab_dir).absolute()
vocab_dir.mkdir(exist_ok=True)
spm_filename_prefix = f"spm_unigram{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
label_text = [ln.strip() for ln in open(label_list).readlines()]
for t in label_text:
f.write(t.lower() + "\n")
gen_vocab(Path(f.name), vocab_dir/spm_filename_prefix, 'unigram', args.vocab_size)
vocab_path = (vocab_dir/spm_filename_prefix).as_posix()+'.txt'
audio_dir, video_dir = f"{args.lrs3}/audio", f"{args.lrs3}/video"
def setup_target(target_dir, train, valid, test):
for name, data in zip(['train', 'valid', 'test'], [train, valid, test]):
with open(f"{target_dir}/{name}.tsv", 'w') as fo:
fo.write('/\n')
for fid, _, nf_audio, nf_video in data:
fo.write('\t'.join([fid, os.path.abspath(f"{video_dir}/{fid}.mp4"), os.path.abspath(f"{audio_dir}/{fid}.wav"), str(nf_video), str(nf_audio)])+'\n')
with open(f"{target_dir}/{name}.wrd", 'w') as fo:
for _, label, _, _ in data:
fo.write(f"{label}\n")
shutil.copyfile(vocab_path, f"{target_dir}/dict.wrd.txt")
return
fids, labels = [x.strip() for x in open(file_list).readlines()], [x.strip().lower() for x in open(label_list).readlines()]
nfs_audio, nfs_video = [x.strip() for x in open(nframes_audio_file).readlines()], [x.strip() for x in open(nframes_video_file).readlines()]
valid_fids = set([x.strip() for x in open(args.valid_ids).readlines()])
train_all, train_sub, valid, test = [], [], [], []
for fid, label, nf_audio, nf_video in zip(fids, labels, nfs_audio, nfs_video):
part = fid.split('/')[0]
# print(part)
if part == 'test':
test.append([fid, label, nf_audio, nf_video])
else:
if fid in valid_fids:
valid.append([fid, label, nf_audio, nf_video])
else:
train_all.append([fid, label, nf_audio, nf_video])
if part == 'trainval':
train_sub.append([fid, label, nf_audio, nf_video])
dir_30h = f"{args.lrs3}/30h_data"
print(f"Set up 30h dir")
os.makedirs(dir_30h, exist_ok=True)
setup_target(dir_30h, train_sub, valid, test)
dir_433h = f"{args.lrs3}/433h_data"
print(f"Set up 433h dir")
os.makedirs(dir_433h, exist_ok=True)
setup_target(dir_433h, train_all, valid, test)
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/lrs3_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from scipy.io import wavfile
from tqdm import tqdm
def mix_audio(wav_fns):
wav_data = [wavfile.read(wav_fn)[1] for wav_fn in wav_fns]
wav_data_ = []
min_len = min([len(x) for x in wav_data])
for item in wav_data:
wav_data_.append(item[:min_len])
wav_data = np.stack(wav_data_).mean(axis=0).astype(np.int16)
return wav_data
def main():
import argparse
parser = argparse.ArgumentParser(description='Generating babble and speech noise from LRS3', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lrs3', type=str, help='lrs3 root dir')
args = parser.parse_args()
tsv_fn = os.path.join(args.lrs3, '433h_data', 'train.tsv')
output_wav = os.path.join(args.lrs3, 'noise', 'babble', 'noise.wav')
output_tsvs = [os.path.join(args.lrs3, 'noise', 'babble', 'valid.tsv'), os.path.join(args.lrs3, 'noise', 'babble', 'test.tsv')]
os.makedirs(os.path.dirname(output_wav), exist_ok=True)
for output_tsv in output_tsvs:
os.makedirs(os.path.dirname(output_tsv), exist_ok=True)
print(f"Generating babble noise -> {output_tsvs}")
num_samples = 30
sample_rate = 16_000
min_len = 15*sample_rate
lns = open(tsv_fn).readlines()[1:]
wav_fns = [(ln.strip().split('\t')[2], int(ln.strip().split('\t')[-1])) for ln in lns]
wav_fns = list(filter(lambda x: x[1]>min_len, wav_fns))
indexes = np.random.permutation(len(wav_fns))[:num_samples]
wav_fns = [wav_fns[i][0] for i in indexes]
wav_data = mix_audio(wav_fns)
wavfile.write(output_wav, sample_rate, wav_data)
for output_tsv in output_tsvs:
with open(output_tsv, 'w') as fo:
fo.write(os.path.abspath(output_wav)+'\n')
min_len = 20*sample_rate
speech_tsv_dir, speech_wav_dir = os.path.join(args.lrs3, 'noise', 'speech'), os.path.join(args.lrs3, 'noise', 'speech', 'wav')
os.makedirs(speech_tsv_dir, exist_ok=True)
os.makedirs(speech_wav_dir, exist_ok=True)
print(f'Generating speech noise -> {speech_tsv_dir}')
lns = open(tsv_fn).readlines()[1:]
wav_fns = [(ln.strip().split('\t')[2], int(ln.strip().split('\t')[-1])) for ln in lns]
wav_fns = list(filter(lambda x: x[1]>min_len, wav_fns))
wav_fns = [x[0] for x in wav_fns]
print(f"# speech noise audios: {len(wav_fns)}")
noise_fns = []
for wav_fn in tqdm(wav_fns):
sample_rate, wav_data = wavfile.read(wav_fn)
wav_data = wav_data[:min_len]
filename = '_'.join(wav_fn.split('/')[-2:])
noise_fn = f"{speech_wav_dir}/{filename}"
noise_fns.append(noise_fn)
wavfile.write(noise_fn, sample_rate, wav_data.astype(np.int16))
num_train, num_valid, num_test = int(len(noise_fns)*0.6), int(len(noise_fns)*0.2), int(len(noise_fns)*0.2)
prev = 0
for split in ['train', 'valid', 'test']:
split_fns = []
num_x, tsv_x = eval(f"num_{split}"), f"{speech_tsv_dir}/{split}.tsv"
for fn in noise_fns[prev: prev+num_x]:
split_fns.append(os.path.abspath(fn))
with open(tsv_x, 'w') as fo:
fo.write('\n'.join(split_fns)+'\n')
prev += num_x
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/lrs3_noise.py |
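# Editor's note: a tiny numpy illustration (not from the repo) of the mix_audio step
# above: waveforms are truncated to the shortest one and averaged into a single int16
# babble-noise track. The waveforms here are random stand-ins for real LRS3 audio.
import numpy as np
rng = np.random.RandomState(0)
wavs = [(rng.randn(16_000 * sec) * 1000).astype(np.int16) for sec in (16, 18, 20)]  # 16/18/20 s at 16 kHz
min_len = min(len(w) for w in wavs)
babble = np.stack([w[:min_len] for w in wavs]).mean(axis=0).astype(np.int16)
print(babble.shape, babble.dtype)   # (256000,) int16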
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os, sys, glob, subprocess, json, math
import numpy as np
from scipy.io import wavfile
from os.path import basename, dirname
from tqdm import tqdm
import tempfile, shutil
def get_filelist(root_dir):
fids = []
for split in ['dev', 'test']:
all_fns = glob.glob(f"{root_dir}/{split}/mp4/*/*/*mp4")
for fn in all_fns:
fids.append('/'.join(fn.split('/')[-5:])[:-4])
output_fn = f"{root_dir}/file.list"
with open(output_fn, 'w') as fo:
fo.write('\n'.join(fids)+'\n')
return
def prep_wav(root_dir, wav_dir, flist, ffmpeg, rank, nshard):
input_dir, output_dir = root_dir, wav_dir
os.makedirs(output_dir, exist_ok=True)
fids = [ln.strip() for ln in open(flist).readlines()]
num_per_shard = math.ceil(len(fids)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fids = fids[start_id: end_id]
print(f"{len(fids)} videos")
for i, fid in enumerate(tqdm(fids)):
video_fn = f"{input_dir}/{fid}.mp4"
audio_fn = f"{output_dir}/{fid}.wav"
os.makedirs(os.path.dirname(audio_fn), exist_ok=True)
cmd = ffmpeg + " -i " + video_fn + " -f wav -vn -y " + audio_fn + ' -loglevel quiet'
# print(cmd)
subprocess.call(cmd, shell=True)
# print(f"{video_fn} -> {audio_fn}")
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='VoxCeleb2 data preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vox', type=str, help='VoxCeleb2 dir')
parser.add_argument('--ffmpeg', type=str, help='ffmpeg path')
parser.add_argument('--step', type=int, help='Steps(1: get file list, 2: extract audio)')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
args = parser.parse_args()
if args.step == 1:
print(f"Get file list")
get_filelist(args.vox)
elif args.step == 2:
print(f"Extract audio")
output_dir = f"{args.vox}/audio"
manifest = f"{args.vox}/file.list"
prep_wav(args.vox, output_dir, manifest, args.ffmpeg, args.rank, args.nshard)
| av_hubert-main | avhubert/preparation/vox_prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2, math, os
import submitit
import tempfile
import shutil
from tqdm import tqdm
from scipy.io import wavfile
def count_frames(fids, audio_dir, video_dir):
total_num_frames = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
num_frames_audio = len(wavfile.read(wav_fn)[1])
cap = cv2.VideoCapture(video_fn)
num_frames_video = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
total_num_frames.append([num_frames_audio, num_frames_video])
return total_num_frames
def check(fids, audio_dir, video_dir):
missing = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
is_file = os.path.isfile(wav_fn) and os.path.isfile(video_fn)
if not is_file:
missing.append(fid)
return missing
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='count number of frames (on slurm)', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--root', type=str, help='root dir')
parser.add_argument('--manifest', type=str, help='a list of filenames')
parser.add_argument('--nshard', type=int, default=1, help='number of shards')
parser.add_argument('--slurm_partition', type=str, default='cpu', help='slurm partition')
args = parser.parse_args()
fids = [ln.strip() for ln in open(args.manifest).readlines()]
print(f"{len(fids)} files")
audio_dir, video_dir = f"{args.root}/audio", f"{args.root}/video"
tmp_dir = tempfile.mkdtemp(dir='./')
executor = submitit.AutoExecutor(folder=tmp_dir)
executor.update_parameters(slurm_array_parallelism=100, slurm_partition=args.slurm_partition, timeout_min=240)
ranks = list(range(0, args.nshard))
fids_arr = []
num_per_shard = math.ceil(len(fids)/args.nshard)
for rank in ranks:
sub_fids = fids[rank*num_per_shard: (rank+1)*num_per_shard]
if len(sub_fids) > 0:
fids_arr.append(sub_fids)
jobs = executor.map_array(check, fids_arr, [audio_dir for _ in fids_arr], [video_dir for _ in fids_arr])
missing_fids = [job.result() for job in jobs]
missing_fids = [x for item in missing_fids for x in item]
if len(missing_fids) > 0:
print(f"Some audio/video files not exist, see {args.root}/missing.list")
with open(f"{args.root}/missing.list", 'w') as fo:
fo.write('\n'.join(missing_fids)+'\n')
shutil.rmtree(tmp_dir)
else:
jobs = executor.map_array(count_frames, fids_arr, [audio_dir for _ in fids_arr], [video_dir for _ in fids_arr])
num_frames = [job.result() for job in jobs]
audio_num_frames, video_num_frames = [], []
for item in num_frames:
audio_num_frames.extend([x[0] for x in item])
video_num_frames.extend([x[1] for x in item])
with open(f"{args.root}/nframes.audio", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in audio_num_frames]))
with open(f"{args.root}/nframes.video", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in video_num_frames]))
shutil.rmtree(tmp_dir)
| av_hubert-main | avhubert/preparation/count_frames_slurm.py |
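# Editor's note: a single-file version (not from the repo) of the counting done in
# count_frames above, without the submitit/slurm sharding. The file paths are placeholders.
import cv2
from scipy.io import wavfile
wav_fn = "/path/to/audio/clip.wav"     # placeholder
video_fn = "/path/to/video/clip.mp4"   # placeholder
num_audio_samples = len(wavfile.read(wav_fn)[1])
cap = cv2.VideoCapture(video_fn)
num_video_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
print(num_audio_samples, num_video_frames)  # roughly 640 audio samples per video frame (16 kHz / 25 fps)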
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os, glob, subprocess, shutil, math
from datetime import timedelta
import tempfile
from collections import OrderedDict
from pydub import AudioSegment
from tqdm import tqdm
def read_csv(csv_file, delimit=','):
lns = open(csv_file, 'r').readlines()
keys = lns[0].strip().split(delimit)
df = {key: [] for key in keys}
for ln in lns[1:]:
ln = ln.strip().split(delimit)
for j, key in enumerate(keys):
df[key].append(ln[j])
return df
def make_short_manifest(pretrain_dir, output_fn):
subdirs = os.listdir(pretrain_dir)
min_interval = 0.4
max_duration = 15
df = {'fid': [], 'sent': [], 'start': [], 'end': []}
for subdir in tqdm(subdirs):
txt_fns = glob.glob(os.path.join(pretrain_dir, subdir+'/*txt'))
for txt_fn in txt_fns:
fid = os.path.relpath(txt_fn, pretrain_dir)[:-4]
lns = open(txt_fn).readlines()
raw_text = lns[0].strip().split(':')[-1].strip()
conf = lns[1].strip().split(':')[-1].strip()
word_intervals = []
for i_line, ln in enumerate(lns):
if ln[:4] == 'WORD':
start_index = i_line
break
for ln in lns[start_index+1:]:
word, start, end, score = ln.strip().split()
word_intervals.append([word, float(start), float(end)])
if word_intervals[-1][-1] < max_duration:
df['fid'].append(fid)
df['sent'].append(raw_text)
df['start'].append(0)
df['end'].append(-1)
continue
sents, cur_sent = [], []
for i_word, (word, start, end) in enumerate(word_intervals):
if i_word == 0:
cur_sent.append([word, start, end])
else:
assert start >= cur_sent[-1][-1], f"{fid} , {word}, start-{start}, prev-{cur_sent[-1][-1]}"
if start - cur_sent[-1][-1] > min_interval:
sents.append(cur_sent)
cur_sent = [[word, start, end]]
else:
cur_sent.append([word, start, end])
if len(cur_sent) > 0:
sents.append(cur_sent)
for i_sent, sent in enumerate(sents):
df['fid'].append(fid+'_'+str(i_sent))
sent_words = ' '.join([x[0] for x in sent])
if i_sent == 0:
sent_start = 0
else:
sent_start = (sent[0][1] + sents[i_sent-1][-1][2])/2
if i_sent == len(sents)-1:
sent_end = -1
else:
sent_end = (sent[-1][2] + sents[i_sent+1][0][1])/2
df['sent'].append(sent_words)
df['start'].append(sent_start)
df['end'].append(sent_end)
durations = [y-x for x, y in zip(df['start'], df['end'])]
num_long = len(list(filter(lambda x: x > 15, durations)))
print(f"Percentage of >15 second: {100*num_long/len(durations)}%")
num_long = len(list(filter(lambda x: x > 20, durations)))
print(f"Percentage of >20 second: {100*num_long/len(durations)}%")
with open(output_fn, 'w') as fo:
fo.write('id,text,start,end\n')
for i in range(len(df['fid'])):
fo.write(','.join([df['fid'][i], df['sent'][i], '%.3f' % (df['start'][i]), '%.3f' % (df['end'][i])])+'\n')
return
def trim_video_frame(csv_fn, raw_dir, output_dir, ffmpeg, rank, nshard):
df = read_csv(csv_fn)
raw2fid = OrderedDict()
decimal, fps = 9, 25
for fid, start, end in zip(df['id'], df['start'], df['end']):
if '_' in fid:
raw_fid = '_'.join(fid.split('_')[:-1])
else:
raw_fid = fid
if raw_fid in raw2fid:
raw2fid[raw_fid].append([fid, start, end])
else:
raw2fid[raw_fid] = [[fid, start, end]]
i_raw = -1
num_per_shard = math.ceil(len(raw2fid.keys())/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fid_info_shard = list(raw2fid.items())[start_id: end_id]
print(f"Total videos in current shard: {len(fid_info_shard)}/{len(raw2fid.keys())}")
for raw_fid, fid_info in tqdm(fid_info_shard):
i_raw += 1
raw_path = os.path.join(raw_dir, raw_fid+'.mp4')
tmp_dir = tempfile.mkdtemp()
cmd = ffmpeg + " -i " + raw_path + " " + tmp_dir + '/%0' + str(decimal) + 'd.png -loglevel quiet'
subprocess.call(cmd, shell=True)
num_frames = len(glob.glob(tmp_dir+'/*png'))
for fid, start_sec, end_sec in fid_info:
sub_dir = os.path.join(tmp_dir, fid)
os.makedirs(sub_dir, exist_ok=True)
start_sec, end_sec = float(start_sec), float(end_sec)
if end_sec == -1:
end_sec = 24*3600
start_frame_id, end_frame_id = int(start_sec*fps), min(int(end_sec*fps), num_frames)
imnames = [tmp_dir+'/'+str(x+1).zfill(decimal)+'.png' for x in range(start_frame_id, end_frame_id)]
for ix, imname in enumerate(imnames):
shutil.copyfile(imname, sub_dir+'/'+str(ix).zfill(decimal)+'.png')
output_path = os.path.join(output_dir, fid+'.mp4')
os.makedirs(os.path.dirname(output_path), exist_ok=True)
cmd = [ffmpeg, "-i", sub_dir+'/%0'+str(decimal)+'d.png', "-y", "-crf", "20", output_path, "-loglevel", "quiet"]
            pipe = subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
shutil.rmtree(tmp_dir)
return
def trim_audio(csv_fn, raw_dir, output_dir, ffmpeg, rank, nshard):
df = read_csv(csv_fn)
raw2fid = OrderedDict()
for fid, start, end in zip(df['id'], df['start'], df['end']):
if '_' in fid:
raw_fid = '_'.join(fid.split('_')[:-1])
else:
raw_fid = fid
if raw_fid in raw2fid:
raw2fid[raw_fid].append([fid, start, end])
else:
raw2fid[raw_fid] = [[fid, start, end]]
i_raw = -1
num_per_shard = math.ceil(len(raw2fid.keys())/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fid_info_shard = list(raw2fid.items())[start_id: end_id]
print(f"Total audios in current shard: {len(fid_info_shard)}/{len(raw2fid.keys())}")
for raw_fid, fid_info in tqdm(fid_info_shard):
i_raw += 1
tmp_dir = tempfile.mkdtemp()
wav_path = os.path.join(tmp_dir, 'tmp.wav')
cmd = ffmpeg + " -i " + os.path.join(raw_dir, raw_fid+'.mp4') + " -f wav -vn -y " + wav_path + ' -loglevel quiet'
subprocess.call(cmd, shell=True)
raw_audio = AudioSegment.from_wav(wav_path)
for fid, start_sec, end_sec in fid_info:
start_sec, end_sec = float(start_sec), float(end_sec)
if end_sec == -1:
end_sec = 24*3600
t1, t2 = int(start_sec*1000), int(end_sec*1000)
new_audio = raw_audio[t1: t2]
output_path = os.path.join(output_dir, fid+'.wav')
os.makedirs(os.path.dirname(output_path), exist_ok=True)
new_audio.export(output_path, format="wav")
shutil.rmtree(tmp_dir)
return
def trim_pretrain(root_dir, ffmpeg, rank=0, nshard=1, step=1):
pretrain_dir = os.path.join(root_dir, 'pretrain')
print(f"Trim original videos in pretrain")
csv_fn = os.path.join(root_dir, 'short-pretrain.csv')
if step == 1:
print(f"Step 1. Make csv file {csv_fn}")
make_short_manifest(pretrain_dir, csv_fn)
else:
print(f"Step 2. Trim video and audio")
output_video_dir, output_audio_dir = os.path.join(root_dir, 'short-pretrain'), os.path.join(root_dir, 'audio/short-pretrain/')
os.makedirs(output_video_dir, exist_ok=True)
os.makedirs(output_audio_dir, exist_ok=True)
trim_video_frame(csv_fn, pretrain_dir, output_video_dir, ffmpeg, rank, nshard)
trim_audio(csv_fn, pretrain_dir, output_audio_dir, ffmpeg, rank, nshard)
return
def prep_wav(lrs3_root, ffmpeg, rank, nshard):
output_dir = f"{lrs3_root}/audio/"
video_fns = glob.glob(lrs3_root + '/trainval/*/*mp4') + glob.glob(lrs3_root + '/test/*/*mp4')
video_fns = sorted(video_fns)
num_per_shard = math.ceil(len(video_fns)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
video_fns = video_fns[start_id: end_id]
print(f"{len(video_fns)} videos")
# subdirs = os.listdir(input_dir)
for video_fn in tqdm(video_fns):
base_name = '/'.join(video_fn.split('/')[-3:])
audio_fn = os.path.join(output_dir, base_name.replace('mp4', 'wav'))
os.makedirs(os.path.dirname(audio_fn), exist_ok=True)
cmd = ffmpeg + " -i " + video_fn + " -f wav -vn -y " + audio_fn + ' -loglevel quiet'
subprocess.call(cmd, shell=True)
return
def get_file_label(lrs3_root):
video_ids_total, labels_total = [], []
for split in ['trainval', 'test']:
subdirs = os.listdir(os.path.join(lrs3_root, split))
for subdir in tqdm(subdirs):
video_fns = glob.glob(os.path.join(lrs3_root, split, subdir, '*mp4'))
video_ids = ['/'.join(x.split('/')[-3:])[:-4] for x in video_fns]
for video_id in video_ids:
txt_fn = os.path.join(lrs3_root, video_id+'.txt')
label = open(txt_fn).readlines()[0].split(':')[1].strip()
labels_total.append(label)
video_ids_total.append(video_id)
pretrain_csv = os.path.join(lrs3_root, 'short-pretrain.csv')
df = read_csv(pretrain_csv)
for video_id, label in zip(df['id'], df['text']):
video_ids_total.append(os.path.join('short-pretrain', video_id))
labels_total.append(label)
video_id_fn, label_fn = os.path.join(lrs3_root, 'file.list'), os.path.join(lrs3_root, 'label.list')
print(video_id_fn, label_fn)
with open(video_id_fn, 'w') as fo:
fo.write('\n'.join(video_ids_total)+'\n')
with open(label_fn, 'w') as fo:
fo.write('\n'.join(labels_total)+'\n')
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='LRS3 preprocess pretrain dir', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lrs3', type=str, help='lrs3 root dir')
parser.add_argument('--ffmpeg', type=str, help='path to ffmpeg')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
parser.add_argument('--step', type=int, help='Steps (1: split labels, 2: trim video/audio, 3: prep audio for trainval/test, 4: get labels and file list)')
args = parser.parse_args()
if args.step <= 2:
trim_pretrain(args.lrs3, args.ffmpeg, args.rank, args.nshard, step=args.step)
elif args.step == 3:
print(f"Extracting audio for trainval/test")
prep_wav(args.lrs3, args.ffmpeg, args.rank, args.nshard)
elif args.step == 4:
get_file_label(args.lrs3)
| av_hubert-main | avhubert/preparation/lrs3_prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys,os,pickle,math
import cv2,dlib,time
import numpy as np
from tqdm import tqdm
def load_video(path):
videogen = skvideo.io.vread(path)
frames = np.array([frame for frame in videogen])
return frames
def detect_face_landmarks(face_predictor_path, cnn_detector_path, root_dir, landmark_dir, flist_fn, rank, nshard):
def detect_landmark(image, detector, cnn_detector, predictor):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
rects = detector(gray, 1)
if len(rects) == 0:
rects = cnn_detector(gray)
rects = [d.rect for d in rects]
coords = None
for (_, rect) in enumerate(rects):
shape = predictor(gray, rect)
coords = np.zeros((68, 2), dtype=np.int32)
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
detector = dlib.get_frontal_face_detector()
cnn_detector = dlib.cnn_face_detection_model_v1(cnn_detector_path)
predictor = dlib.shape_predictor(face_predictor_path)
    input_dir = root_dir
    output_dir = landmark_dir
fids = [ln.strip() for ln in open(flist_fn).readlines()]
num_per_shard = math.ceil(len(fids)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fids = fids[start_id: end_id]
print(f"{len(fids)} files")
for fid in tqdm(fids):
output_fn = os.path.join(output_dir, fid+'.pkl')
video_path = os.path.join(input_dir, fid+'.mp4')
frames = load_video(video_path)
landmarks = []
for frame in frames:
landmark = detect_landmark(frame, detector, cnn_detector, predictor)
landmarks.append(landmark)
os.makedirs(os.path.dirname(output_fn), exist_ok=True)
pickle.dump(landmarks, open(output_fn, 'wb'))
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='detecting facial landmarks', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--root', type=str, help='root dir')
parser.add_argument('--landmark', type=str, help='landmark dir')
parser.add_argument('--manifest', type=str, help='a list of filenames')
parser.add_argument('--cnn_detector', type=str, help='path to cnn detector (download and unzip from: http://dlib.net/files/mmod_human_face_detector.dat.bz2)')
parser.add_argument('--face_predictor', type=str, help='path to face predictor (download and unzip from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2)')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
parser.add_argument('--ffmpeg', type=str, help='ffmpeg path')
args = parser.parse_args()
import skvideo
skvideo.setFFmpegPath(os.path.dirname(args.ffmpeg))
print(skvideo.getFFmpegPath())
import skvideo.io
detect_face_landmarks(args.face_predictor, args.cnn_detector, args.root, args.landmark, args.manifest, args.rank, args.nshard)
| av_hubert-main | avhubert/preparation/detect_landmark.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from tempfile import NamedTemporaryFile
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import numpy as np
# import pandas as pd
import sentencepiece as sp
# from fairseq.data.audio.audio_utils import (
# _convert_to_mono, _get_kaldi_fbank, _get_torchaudio_fbank
# )
# import torch
from tqdm import tqdm
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
return
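# Minimal usage sketch (hypothetical paths): calling
#   gen_vocab(Path("train.wrd"), Path("spm_unigram1000"), "unigram", 1000)
# trains a SentencePiece model, writing spm_unigram1000.{model,vocab} plus a
# fairseq-style dictionary spm_unigram1000.txt with one "<piece> 1" line per
# subword, special tokens excluded.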
| av_hubert-main | avhubert/preparation/gen_subword.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
## Based on: https://github.com/mpc001/Lipreading_using_Temporal_Convolutional_Networks/blob/master/preprocessing/crop_mouth_from_video.py
""" Crop Mouth ROIs from videos for lipreading"""
import os,pickle,shutil,tempfile
import math
import cv2
import glob
import subprocess
import argparse
import numpy as np
from collections import deque
from skimage import transform as tf
from tqdm import tqdm
# -- Landmark interpolation:
def linear_interpolate(landmarks, start_idx, stop_idx):
start_landmarks = landmarks[start_idx]
stop_landmarks = landmarks[stop_idx]
delta = stop_landmarks - start_landmarks
for idx in range(1, stop_idx-start_idx):
landmarks[start_idx+idx] = start_landmarks + idx/float(stop_idx-start_idx) * delta
return landmarks
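# Frames with missing landmarks between two detected frames are filled in by
# linear interpolation: frame start_idx+k receives start + (k / (stop_idx - start_idx)) * delta.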
# -- Face Transformation
def warp_img(src, dst, img, std_size):
tform = tf.estimate_transform('similarity', src, dst) # find the transformation matrix
warped = tf.warp(img, inverse_map=tform.inverse, output_shape=std_size) # warp
    warped = warped * 255 # note: output from warp is a double image (value range [0,1])
warped = warped.astype('uint8')
return warped, tform
def apply_transform(transform, img, std_size):
warped = tf.warp(img, inverse_map=transform.inverse, output_shape=std_size)
warped = warped * 255 # note output from warp is double image (value range [0,1])
warped = warped.astype('uint8')
return warped
def get_frame_count(filename):
cap = cv2.VideoCapture(filename)
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
return total
def read_video(filename):
cap = cv2.VideoCapture(filename)
while(cap.isOpened()):
ret, frame = cap.read() # BGR
if ret:
yield frame
else:
break
cap.release()
# -- Crop
def cut_patch(img, landmarks, height, width, threshold=5):
center_x, center_y = np.mean(landmarks, axis=0)
if center_y - height < 0:
center_y = height
if center_y - height < 0 - threshold:
raise Exception('too much bias in height')
if center_x - width < 0:
center_x = width
if center_x - width < 0 - threshold:
raise Exception('too much bias in width')
if center_y + height > img.shape[0]:
center_y = img.shape[0] - height
if center_y + height > img.shape[0] + threshold:
raise Exception('too much bias in height')
if center_x + width > img.shape[1]:
center_x = img.shape[1] - width
if center_x + width > img.shape[1] + threshold:
raise Exception('too much bias in width')
cutted_img = np.copy(img[ int(round(center_y) - round(height)): int(round(center_y) + round(height)),
int(round(center_x) - round(width)): int(round(center_x) + round(width))])
return cutted_img
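# cut_patch crops a (2*height) x (2*width) window centred on the mean of the
# given landmarks, clamping the centre to the image border; it raises only if
# the required shift exceeds `threshold` pixels.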
def write_video_ffmpeg(rois, target_path, ffmpeg):
os.makedirs(os.path.dirname(target_path), exist_ok=True)
decimals = 10
fps = 25
tmp_dir = tempfile.mkdtemp()
for i_roi, roi in enumerate(rois):
cv2.imwrite(os.path.join(tmp_dir, str(i_roi).zfill(decimals)+'.png'), roi)
list_fn = os.path.join(tmp_dir, "list")
with open(list_fn, 'w') as fo:
fo.write("file " + "'" + tmp_dir+'/%0'+str(decimals)+'d.png' + "'\n")
## ffmpeg
if os.path.isfile(target_path):
os.remove(target_path)
cmd = [ffmpeg, "-f", "concat", "-safe", "0", "-i", list_fn, "-q:v", "1", "-r", str(fps), '-y', '-crf', '20', target_path]
pipe = subprocess.run(cmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
# rm tmp dir
shutil.rmtree(tmp_dir)
return
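# write_video_ffmpeg dumps the ROIs as zero-padded PNGs into a temporary
# directory, re-encodes them into an mp4 at 25 fps through ffmpeg's concat
# demuxer, and then removes the temporary directory.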
def load_args(default_config=None):
parser = argparse.ArgumentParser(description='Lipreading Pre-processing', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--video-direc', default=None, help='raw video directory')
parser.add_argument('--landmark-direc', default=None, help='landmark directory')
parser.add_argument('--filename-path', help='list of detected video and its subject ID')
parser.add_argument('--save-direc', default=None, help='the directory of saving mouth ROIs')
# -- mean face utils
parser.add_argument('--mean-face', type=str, help='reference mean face (download from: https://github.com/mpc001/Lipreading_using_Temporal_Convolutional_Networks/blob/master/preprocessing/20words_mean_face.npy)')
# -- mouthROIs utils
parser.add_argument('--crop-width', default=96, type=int, help='the width of mouth ROIs')
parser.add_argument('--crop-height', default=96, type=int, help='the height of mouth ROIs')
parser.add_argument('--start-idx', default=48, type=int, help='the start of landmark index')
parser.add_argument('--stop-idx', default=68, type=int, help='the end of landmark index')
parser.add_argument('--window-margin', default=12, type=int, help='window margin for smoothed_landmarks')
parser.add_argument('--ffmpeg', type=str, help='ffmpeg path')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
args = parser.parse_args()
return args
def crop_patch(video_pathname, landmarks, mean_face_landmarks, stablePntsIDs, STD_SIZE, window_margin, start_idx, stop_idx, crop_height, crop_width):
"""Crop mouth patch
    :param str video_pathname: pathname of the input video
:param list landmarks: interpolated landmarks
"""
frame_idx = 0
num_frames = get_frame_count(video_pathname)
frame_gen = read_video(video_pathname)
margin = min(num_frames, window_margin)
while True:
try:
frame = frame_gen.__next__() ## -- BGR
except StopIteration:
break
if frame_idx == 0:
q_frame, q_landmarks = deque(), deque()
sequence = []
q_landmarks.append(landmarks[frame_idx])
q_frame.append(frame)
if len(q_frame) == margin:
smoothed_landmarks = np.mean(q_landmarks, axis=0)
cur_landmarks = q_landmarks.popleft()
cur_frame = q_frame.popleft()
# -- affine transformation
trans_frame, trans = warp_img( smoothed_landmarks[stablePntsIDs, :],
mean_face_landmarks[stablePntsIDs, :],
cur_frame,
STD_SIZE)
trans_landmarks = trans(cur_landmarks)
# -- crop mouth patch
sequence.append( cut_patch( trans_frame,
trans_landmarks[start_idx:stop_idx],
crop_height//2,
crop_width//2,))
if frame_idx == len(landmarks)-1:
while q_frame:
cur_frame = q_frame.popleft()
# -- transform frame
trans_frame = apply_transform( trans, cur_frame, STD_SIZE)
# -- transform landmarks
trans_landmarks = trans(q_landmarks.popleft())
# -- crop mouth patch
sequence.append( cut_patch( trans_frame,
trans_landmarks[start_idx:stop_idx],
crop_height//2,
crop_width//2,))
return np.array(sequence)
frame_idx += 1
return None
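# crop_patch smooths the landmarks over a rolling window of up to
# `window_margin` frames before estimating a similarity transform to the mean
# face, crops the mouth ROI from each warped frame, flushes the queued tail
# frames with the last transform once the final landmark index is reached, and
# returns None only if the video ends before that point.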
def landmarks_interpolate(landmarks):
"""Interpolate landmarks
    :param list landmarks: landmarks detected in raw videos
"""
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
if not valid_frames_idx:
return None
for idx in range(1, len(valid_frames_idx)):
if valid_frames_idx[idx] - valid_frames_idx[idx-1] == 1:
continue
else:
landmarks = linear_interpolate(landmarks, valid_frames_idx[idx-1], valid_frames_idx[idx])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
# -- Corner case: keep frames at the beginning or at the end failed to be detected.
if valid_frames_idx:
landmarks[:valid_frames_idx[0]] = [landmarks[valid_frames_idx[0]]] * valid_frames_idx[0]
landmarks[valid_frames_idx[-1]:] = [landmarks[valid_frames_idx[-1]]] * (len(landmarks) - valid_frames_idx[-1])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
assert len(valid_frames_idx) == len(landmarks), "not every frame has landmark"
return landmarks
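# landmarks_interpolate fills interior gaps by linear interpolation and pads
# undetected leading/trailing frames by repeating the nearest detection;
# it returns None when no frame has a landmark at all.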
if __name__ == '__main__':
args = load_args()
# -- mean face utils
STD_SIZE = (256, 256)
mean_face_landmarks = np.load(args.mean_face)
stablePntsIDs = [33, 36, 39, 42, 45]
lines = open(args.filename_path).readlines()
fids = [ln.strip() for ln in lines]
num_per_shard = math.ceil(len(fids)/args.nshard)
start_id, end_id = num_per_shard*args.rank, num_per_shard*(args.rank+1)
fids = fids[start_id: end_id]
for filename_idx, filename in enumerate(tqdm(fids)):
video_pathname = os.path.join(args.video_direc, filename+'.mp4')
landmarks_pathname = os.path.join(args.landmark_direc, filename+'.pkl')
dst_pathname = os.path.join(args.save_direc, filename+'.mp4')
assert os.path.isfile(video_pathname), "File does not exist. Path input: {}".format(video_pathname)
assert os.path.isfile(landmarks_pathname), "File does not exist. Path input: {}".format(landmarks_pathname)
if os.path.exists(dst_pathname):
continue
landmarks = pickle.load(open(landmarks_pathname, 'rb'))
# -- pre-process landmarks: interpolate frames not being detected.
preprocessed_landmarks = landmarks_interpolate(landmarks)
if not preprocessed_landmarks:
print(f"resizing {filename}")
frame_gen = read_video(video_pathname)
frames = [cv2.resize(x, (args.crop_width, args.crop_height)) for x in frame_gen]
write_video_ffmpeg(frames, dst_pathname, args.ffmpeg)
continue
# -- crop
sequence = crop_patch(video_pathname, preprocessed_landmarks, mean_face_landmarks, stablePntsIDs, STD_SIZE, window_margin=args.window_margin, start_idx=args.start_idx, stop_idx=args.stop_idx, crop_height=args.crop_height, crop_width=args.crop_width)
assert sequence is not None, "cannot crop from {}.".format(filename)
# -- save
os.makedirs(os.path.dirname(dst_pathname), exist_ok=True)
write_video_ffmpeg(sequence, dst_pathname, args.ffmpeg)
print('Done.')
| av_hubert-main | avhubert/preparation/align_mouth.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tempfile
import shutil
import submitit
import os, sys, subprocess, glob, re
import numpy as np
from collections import defaultdict
from scipy.io import wavfile
from tqdm import tqdm
def split_musan(musan_root, rank, nshard):
wav_fns = glob.glob(f"{musan_root}/speech/*/*wav") + glob.glob(f"{musan_root}/music/*/*wav") + glob.glob(f"{musan_root}/noise/*/*wav")
num_per_shard = math.ceil(len(wav_fns)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
wav_fns = wav_fns[start_id: end_id]
print(f"{len(wav_fns)} raw audios")
output_dir = f"{musan_root}/short-musan"
dur = 10
for wav_fn in tqdm(wav_fns):
sample_rate, wav_data = wavfile.read(wav_fn)
assert sample_rate == 16_000 and len(wav_data.shape) == 1
if len(wav_data) > dur * sample_rate:
num_split = int(np.ceil(len(wav_data) / (dur*sample_rate)))
for i in range(num_split):
filename = '/'.join(wav_fn.split('/')[-3:])[:-4]
output_wav_fn = os.path.join(output_dir, filename + f'-{i}.wav')
sub_data = wav_data[i*dur*sample_rate: (i+1)*dur*sample_rate]
os.makedirs(os.path.dirname(output_wav_fn), exist_ok=True)
wavfile.write(output_wav_fn, sample_rate, sub_data.astype(np.int16))
return
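# split_musan cuts every MUSAN wav longer than 10 s into consecutive 10-second
# 16 kHz chunks under <musan_root>/short-musan, mirroring the original
# speech/music/noise directory layout; clips of at most 10 s are skipped.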
def mix_audio(wav_fns):
wav_data = [wavfile.read(wav_fn)[1] for wav_fn in wav_fns]
wav_data_ = []
min_len = min([len(x) for x in wav_data])
for item in wav_data:
wav_data_.append(item[:min_len])
wav_data = np.stack(wav_data_).mean(axis=0).astype(np.int16)
return wav_data
def get_speaker_info(musan_root):
wav_fns = glob.glob(f"{musan_root}/speech/*/*wav")
spk2wav = {}
for wav_fn in tqdm(wav_fns):
speaker = '-'.join(os.path.basename(wav_fn).split('-')[:-1])
if speaker not in spk2wav:
spk2wav[speaker] = []
spk2wav[speaker].append(wav_fn)
speakers = sorted(list(spk2wav.keys()))
print(f"{len(speakers)} speakers")
np.random.shuffle(speakers)
output_dir = f"{musan_root}/speech/"
num_train, num_valid = int(len(speakers)*0.8), int(len(speakers)*0.1)
train_speakers, valid_speakers, test_speakers = speakers[:num_train], speakers[num_train: num_train+num_valid], speakers[num_train+num_valid:]
for split in ['train', 'valid', 'test']:
speakers = eval(f"{split}_speakers")
with open(f"{output_dir}/spk.{split}", 'w') as fo:
fo.write('\n'.join(speakers)+'\n')
return
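# get_speaker_info groups the speech wavs by speaker id (filename prefix),
# shuffles the speakers and writes an 80/10/10 split to
# <musan_root>/speech/spk.{train,valid,test}.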
def make_musan_babble(musan_root, rank, nshard):
babble_dir = f"{musan_root}/babble/wav/"
num_per_mixture = 30
sample_rate = 16_000
num_train, num_valid, num_test = 8000, 1000, 1000
os.makedirs(babble_dir, exist_ok=True)
wav_fns = glob.glob(f"{musan_root}/speech/*/*wav")
spk2wav = {}
for wav_fn in tqdm(wav_fns):
speaker = '-'.join(os.path.basename(wav_fn).split('-')[:-1])
if speaker not in spk2wav:
spk2wav[speaker] = []
spk2wav[speaker].append(wav_fn)
for split in ['train', 'valid', 'test']:
speakers = [ln.strip() for ln in open(f"{musan_root}/speech/spk.{split}").readlines()]
num_split = eval(f"num_{split}")
wav_fns = []
for x in speakers:
wav_fns.extend(spk2wav[x])
print(f"{split} -> # speaker {len(speakers)}, # wav {len(wav_fns)}")
num_per_shard = math.ceil(num_split/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
for i in tqdm(range(num_split)):
if not (i >= start_id and i < end_id):
continue
np.random.seed(i)
perm = np.random.permutation(len(wav_fns))[:num_per_mixture]
output_fn = f"{babble_dir}/{split}-{str(i+1).zfill(5)}.wav"
wav_data = mix_audio([wav_fns[x] for x in perm])
wavfile.write(output_fn, sample_rate, wav_data)
return
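# make_musan_babble synthesises 8000/1000/1000 babble clips for
# train/valid/test; mixture i averages 30 utterances drawn (with a fixed
# np.random.seed(i)) from speakers of the matching split, so the output is
# deterministic across shards.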
def count_frames(wav_fns, rank, nshard):
num_per_shard = math.ceil(len(wav_fns)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
wav_fns = wav_fns[start_id: end_id]
nfs = []
for wav_fn in tqdm(wav_fns):
sample_rate, wav_data = wavfile.read(wav_fn)
nfs.append(len(wav_data))
return nfs
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='MUSAN audio preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--musan', type=str, help='MUSAN root')
parser.add_argument('--nshard', type=int, default=1, help='number of shards')
parser.add_argument('--slurm_partition', type=str, default='cpu', help='slurm partition')
args = parser.parse_args()
tmp_dir = tempfile.mkdtemp(dir='./')
executor = submitit.AutoExecutor(folder=tmp_dir)
executor.update_parameters(slurm_array_parallelism=100, slurm_partition=args.slurm_partition, timeout_min=240)
ranks = list(range(0, args.nshard))
print(f"Split raw audio")
jobs = executor.map_array(split_musan, [args.musan for _ in ranks], ranks, [args.nshard for _ in ranks])
[job.result() for job in jobs]
short_musan = os.path.join(args.musan, 'short-musan')
print(f"Get speaker info")
get_speaker_info(short_musan)
print(f"Mix audio")
jobs = executor.map_array(make_musan_babble, [short_musan for _ in ranks], ranks, [args.nshard for _ in ranks])
[job.result() for job in jobs]
print(f"Count number of frames")
wav_fns = glob.glob(f"{short_musan}/babble/*/*wav") + glob.glob(f"{short_musan}/music/*/*wav") + glob.glob(f"{short_musan}/noise/*/*wav")
jobs = executor.map_array(count_frames, [wav_fns for _ in ranks], ranks, [args.nshard for _ in ranks])
nfs = [job.result() for job in jobs]
nfs_ = []
for nf in nfs:
nfs_.extend(nf)
nfs = nfs_
num_frames_fn = f"{short_musan}/nframes.audio"
with open(num_frames_fn, 'w') as fo:
for wav_fn, nf in zip(wav_fns, nfs):
fo.write(os.path.abspath(wav_fn)+'\t'+str(nf)+'\n')
shutil.rmtree(tmp_dir)
| av_hubert-main | avhubert/preparation/musan_prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math, time
import os, sys, subprocess, glob, re
import numpy as np
from collections import defaultdict
from scipy.io import wavfile
from tqdm import tqdm
def make_musan_tsv(musan_root, output_dir):
os.makedirs(output_dir, exist_ok=True)
sample_rate = 16_000
min_dur, max_dur = 3*sample_rate, 11*sample_rate
part_ratios = zip(['train', 'valid', 'test'], [0.8, 0.1, 0.1])
all_fns = {}
nfs = f"{musan_root}/nframes.audio"
nfs = dict([x.strip().split('\t') for x in open(nfs).readlines()])
for category in ['babble', 'music', 'noise']:
wav_fns = glob.glob(f"{musan_root}/{category}/*/*wav")
target_fns = []
for wav_fn in tqdm(wav_fns):
dur = int(nfs[os.path.abspath(wav_fn)])
if dur >= min_dur and dur < max_dur:
target_fns.append(wav_fn)
print(f"{category}: {len(target_fns)}/{len(wav_fns)}")
all_fns[category] = target_fns
output_subdir = f"{output_dir}/{category}"
os.makedirs(output_subdir, exist_ok=True)
num_train, num_valid, num_test = int(0.8*len(target_fns)), int(0.1*len(target_fns)), int(0.1*len(target_fns))
if category in {'music', 'noise'}:
np.random.shuffle(target_fns)
train_fns, valid_fns, test_fns = target_fns[:num_train], target_fns[num_train:num_train+num_valid], target_fns[num_train+num_valid:]
elif category == 'babble':
train_fns, valid_fns, test_fns = [], [], []
for wav_fn in target_fns:
split_id = os.path.basename(wav_fn)[:-4].split('-')[0]
if split_id == 'train':
train_fns.append(wav_fn)
elif split_id == 'valid':
valid_fns.append(wav_fn)
elif split_id == 'test':
test_fns.append(wav_fn)
for x in ['train', 'valid', 'test']:
x_fns = eval(f"{x}_fns")
x_fns = [os.path.abspath(x_fn) for x_fn in x_fns]
print(os.path.abspath(output_subdir), x, len(x_fns))
with open(f"{output_subdir}/{x}.tsv", 'w') as fo:
fo.write('\n'.join(x_fns)+'\n')
return
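# make_musan_tsv keeps clips between 3 s and 11 s, splits music/noise randomly
# 80/10/10 and babble by its filename prefix (train-/valid-/test-), and writes
# absolute paths to <output_dir>/<category>/{train,valid,test}.tsv.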
def combine(input_tsv_dirs, output_dir):
output_subdir = f"{output_dir}/all"
os.makedirs(output_subdir, exist_ok=True)
num_train_per_cat = 20_000
train_fns, valid_fns, test_fns = [], [], []
for input_tsv_dir in input_tsv_dirs:
train_fn, valid_fn, test_fn = [ln.strip() for ln in open(f"{input_tsv_dir}/train.tsv").readlines()], [ln.strip() for ln in open(f"{input_tsv_dir}/valid.tsv").readlines()], [ln.strip() for ln in open(f"{input_tsv_dir}/test.tsv").readlines()]
num_repeats = int(np.ceil(num_train_per_cat/len(train_fn)))
train_fn_ = []
for i in range(num_repeats):
train_fn_.extend(train_fn)
train_fn = train_fn_[:num_train_per_cat]
train_fns.extend(train_fn)
valid_fns.extend(valid_fn)
test_fns.extend(test_fn)
for x in ['train', 'valid', 'test']:
x_fns = eval(f"{x}_fns")
print(os.path.abspath(output_subdir), x, len(x_fns))
with open(f"{output_subdir}/{x}.tsv", 'w') as fo:
fo.write('\n'.join(x_fns)+'\n')
return
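# combine oversamples each category's train list to 20,000 entries by
# repetition, concatenates the per-category train/valid/test lists and writes
# them to <output_dir>/all/{train,valid,test}.tsv.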
def main():
import argparse
parser = argparse.ArgumentParser(description='Set up noise manifest', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--musan', type=str, help='MUSAN root')
parser.add_argument('--lrs3', type=str, help='LRS3 root')
args = parser.parse_args()
short_musan, output_tsv_dir = f"{args.musan}/short-musan", f"{args.musan}/tsv"
print(f"Make tsv for babble, music, noise")
make_musan_tsv(short_musan, output_tsv_dir)
print(f"Combine tsv")
input_tsv_dirs = [f"{output_tsv_dir}/{x}" for x in ['noise', 'music', 'babble']] + [f"{args.lrs3}/noise/speech"]
combine(input_tsv_dirs, output_tsv_dir)
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/noise_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2, math, os
import tempfile
import shutil
from tqdm import tqdm
from scipy.io import wavfile
def count_frames(fids, audio_dir, video_dir):
total_num_frames = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
num_frames_audio = len(wavfile.read(wav_fn)[1])
cap = cv2.VideoCapture(video_fn)
num_frames_video = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
total_num_frames.append([num_frames_audio, num_frames_video])
return total_num_frames
def check(fids, audio_dir, video_dir):
missing = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
is_file = os.path.isfile(wav_fn) and os.path.isfile(video_fn)
if not is_file:
missing.append(fid)
return missing
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='count number of frames', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--root', type=str, help='root dir')
parser.add_argument('--manifest', type=str, help='a list of filenames')
parser.add_argument('--nshard', type=int, default=1, help='number of shards')
parser.add_argument('--rank', type=int, default=0, help='rank id')
args = parser.parse_args()
fids = [ln.strip() for ln in open(args.manifest).readlines()]
print(f"{len(fids)} files")
audio_dir, video_dir = f"{args.root}/audio", f"{args.root}/video"
ranks = list(range(0, args.nshard))
fids_arr = []
num_per_shard = math.ceil(len(fids)/args.nshard)
for rank in ranks:
sub_fids = fids[rank*num_per_shard: (rank+1)*num_per_shard]
if len(sub_fids) > 0:
fids_arr.append(sub_fids)
if args.rank >= len(fids_arr):
open(f"{args.root}/nframes.audio.{args.rank}", 'w').write('')
open(f"{args.root}/nframes.video.{args.rank}", 'w').write('')
else:
fids = fids_arr[args.rank]
missing_fids = check(fids, audio_dir, video_dir)
if len(missing_fids) > 0:
print(f"Some audio/video files not exist, see {args.root}/missing.list.{args.rank}")
with open(f"{args.root}/missing.list.{args.rank}", 'w') as fo:
fo.write('\n'.join(missing_fids)+'\n')
else:
num_frames = count_frames(fids, audio_dir, video_dir)
audio_num_frames = [x[0] for x in num_frames]
video_num_frames = [x[1] for x in num_frames]
with open(f"{args.root}/nframes.audio.{args.rank}", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in audio_num_frames]))
with open(f"{args.root}/nframes.video.{args.rank}", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in video_num_frames]))
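# Each rank writes <root>/nframes.audio.<rank> and <root>/nframes.video.<rank>
# (one count per line, in manifest order), or <root>/missing.list.<rank> when
# any audio/video file is absent. Illustrative invocation (paths are placeholders):
#   python count_frames.py --root $LRS3 --manifest $LRS3/file.list --rank 0 --nshard 10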
| av_hubert-main | avhubert/preparation/count_frames.py |