# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script trains a model that uses ECOC (error-correcting output codes). It defines several model types (baseline and ensemble).
To train a model, uncomment the final two lines of the corresponding model-definition "code block" below and run this script.
Then run "AttackModel" to attack the trained model.
"""
# IMPORTS
import tensorflow as tf;
import numpy as np
from tensorflow.keras.datasets import mnist, cifar10
from Model_Implementations import Model_Softmax_Baseline, \
Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, \
Model_Tanh_Baseline
import scipy.linalg
# GENERAL PARAMETERS - SET THESE APPROPRIATELY
model_path = 'checkpoints' # path to save model weights to
weight_save_freq = 10 # how frequently (in epochs, e.g. every 10 epochs) to save weights to disk
tf.set_random_seed(1)
########DATASET-SPECIFIC PARAMETERS: CHOOSE THIS BLOCK FOR MNIST
# DATA_DESC = 'MNIST'; (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Y_train = np.squeeze(Y_train); Y_test = np.squeeze(Y_test)
# num_channels = 1; inp_shape = (28,28,1); num_classes=10
##MODEL-SPECIFIC PARAMETERS: MNIST
##PARAMETERS RELATED TO SGD OPTIMIZATION
# epochs=150; batch_size=200; lr=3e-4;
##MODEL DEFINITION PARAMETERS
# num_filters_std = [64, 64, 64]; num_filters_ens=[32, 32, 32]; num_filters_ens_2=4;
# dropout_rate_std=0.0; dropout_rate_ens=0.0; weight_decay = 0
# noise_stddev = 0.3; blend_factor=0.3;
# model_rep_baseline=1; model_rep_ens=2;
# DATA_AUGMENTATION_FLAG=0; BATCH_NORMALIZATION_FLAG=0
########END: DATASET-SPECIFIC PARAMETERS: MNIST
##########DATASET-SPECIFIC PARAMETERS: CHOOSE THIS BLOCK FOR CIFAR10
DATA_DESC = 'CIFAR10';
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
Y_train = np.squeeze(Y_train);
Y_test = np.squeeze(Y_test)
num_channels = 3;
inp_shape = (32, 32, 3);
num_classes = 10
# MODEL-SPECIFIC PARAMETERS: CIFAR10
# PARAMETERS RELATED TO SGD OPTIMIZATION
epochs = 400;
batch_size = 200;
lr = 2e-4;
# MODEL DEFINITION PARAMETERS
num_filters_std = [32, 64, 128];
num_filters_ens = [32, 64, 128];
num_filters_ens_2 = 16;
dropout_rate_std = 0.0;
dropout_rate_ens = 0.0;
weight_decay = 0
noise_stddev = 0.032;
blend_factor = 0.032;
model_rep_baseline = 2;
model_rep_ens = 2;
DATA_AUGMENTATION_FLAG = 1;
BATCH_NORMALIZATION_FLAG = 1
##########END: DATASET-SPECIFIC PARAMETERS: CIFAR10
# DATA PRE-PROCESSING
X_train = (X_train / 255).astype(np.float32);
X_test = (X_test / 255).astype(np.float32); # scale data to (0,1)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2],
num_channels);
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],
num_channels)
X_valid = X_test[0:1000];
Y_valid = Y_test[
0:1000]; # validation data (to monitor accuracy during training)
X_train = X_train - 0.5;
X_test = X_test - 0.5;
X_valid = X_valid - 0.5; # map to range (-0.5,0.5)
data_dict = {'X_train': X_train, 'Y_train_cat': Y_train, 'X_test': X_test,
'Y_test_cat': Y_test}
### TRAIN MODEL. Each block below corresponds to one of the models in Table 1 of the paper. To train a model,
# uncomment the final two lines of the block of interest and then run this script.
"""
### BASELINE SOFTMAX MODEL DEFINITION
name = 'softmax_baseline'+'_'+DATA_DESC; num_chunks=1
M = np.eye(num_classes).astype(np.float32)
output_activation = 'softmax'; base_model=None
params_dict = {'weight_decay':weight_decay, 'num_filters_std':num_filters_std, 'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'model_rep':model_rep_baseline, 'base_model':base_model, 'num_chunks':num_chunks, 'output_activation':output_activation, 'batch_size':batch_size, 'epochs':epochs, 'lr':lr, 'dropout_rate':dropout_rate_std, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
#m0 = Model_Softmax_Baseline(data_dict, params_dict)
#m0.defineModel(); m0.trainModel()
## BASELINE LOGISTIC MODEL DEFINITION
name = 'logistic_baseline'+'_'+DATA_DESC; num_chunks=1
M = np.eye(num_classes).astype(np.float32)
output_activation = 'sigmoid'; base_model=None
params_dict = {'weight_decay':weight_decay, 'num_filters_std':num_filters_std, 'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'model_rep':model_rep_baseline, 'base_model':base_model, 'num_chunks':num_chunks, 'output_activation':output_activation, 'batch_size':batch_size, 'epochs':epochs, 'lr':lr, 'dropout_rate':dropout_rate_std, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
#m1 = Model_Logistic_Baseline(data_dict, params_dict)
#m1.defineModel(); m1.trainModel()
## BASELINE TANH MODEL DEFINITION
name = 'Tanh_baseline_16'+'_'+DATA_DESC; seed = 59; num_chunks=1; code_length=16; num_codes=num_classes; code_length_true=code_length
M = scipy.linalg.hadamard(code_length).astype(np.float32)
M[np.arange(0, num_codes,2), 0]= -1#replace first col, which for this Hadamard construction is always 1, hence not a useful bit
np.random.seed(seed); np.random.shuffle(M)
idx=np.random.permutation(code_length)
M = M[0:num_codes, idx[0:code_length_true]]
base_model=None
def output_activation(x):
return tf.nn.tanh(x)
params_dict = {'weight_decay':weight_decay, 'num_filters_std':num_filters_std, 'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'model_rep':model_rep_baseline, 'base_model':base_model, 'num_chunks':num_chunks, 'output_activation':output_activation, 'batch_size':batch_size, 'epochs':epochs, 'dropout_rate':dropout_rate_std, 'lr':lr, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
#m2 = Model_Tanh_Baseline(data_dict, params_dict)
#m2.defineModel(); m2.trainModel()
## ENSEMBLE LOGISTIC MODEL DEFINITION
name = 'logistic_diverse'+'_'+DATA_DESC; num_chunks=2
M = np.eye(num_classes).astype(np.float32)
base_model=None
def output_activation(x):
return tf.nn.sigmoid(x)
params_dict = {'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'base_model':base_model, 'num_chunks':num_chunks, 'model_rep': model_rep_ens, 'output_activation':output_activation, 'num_filters_ens':num_filters_ens, 'num_filters_ens_2':num_filters_ens_2,'batch_size':batch_size, 'epochs':epochs, 'dropout_rate':dropout_rate_ens, 'lr':lr, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
#m3 = Model_Logistic_Ensemble(data_dict, params_dict)
#m3.defineModel(); m3.trainModel()
#COMMENTS FOR ALL TANH ENSEMBLE MODELS:
#1. num_chunks refers to how many models comprise the ensemble (4 is used in the paper); code_length/num_chunks should be an integer
#2. output_activation is the function to apply to the logits
#   a. one can use anything which gives support to positive and negative values (since the output code has +1/-1 elements); tanh or the identity map both work
#   b. to alleviate potential concerns of gradient masking with tanh, one can also use the identity
#3. M is the actual coding matrix (referred to in the paper as H). Each row is a codeword.
#   Note that any random shuffle of a Hadamard matrix's rows or columns is still orthogonal.
#4. There is nothing particularly special about the seed (which effectively determines the coding matrix).
#   We tried several seeds from 0-60 and found that all give comparable model performance (e.g. benign and adversarial accuracy).
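# Illustrative sketch (not part of the original code blocks): why a shuffled Hadamard
# matrix still gives mutually orthogonal codewords, as noted in comment 3 above.
# The names below (n_demo, H_demo) are hypothetical and used nowhere else.
#   n_demo = 16
#   H_demo = scipy.linalg.hadamard(n_demo).astype(np.float32)
#   np.random.seed(0); np.random.shuffle(H_demo)                  # shuffle rows
#   H_demo = H_demo[:, np.random.permutation(n_demo)]             # shuffle columns
#   assert np.allclose(H_demo @ H_demo.T, n_demo * np.eye(n_demo))  # rows stay orthogonal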
## ENSEMBLE TANH 16 MODEL DEFINITION
name = 'tanh_16_diverse'+'_'+DATA_DESC; seed = 59; code_length=16; num_codes=code_length; num_chunks=4; base_model=None;
def output_activation(x):
return tf.nn.tanh(x)
M = scipy.linalg.hadamard(code_length).astype(np.float32)
M[np.arange(0, num_codes,2), 0]= -1#replace first col, which for scipy's Hadamard construction is always 1, hence not a useful classifier; this change still ensures all codewords have dot product <=0; since our decoder ignores negative correlations anyway, this has no net effect on probability estimation
np.random.seed(seed)
np.random.shuffle(M)
idx=np.random.permutation(code_length)
M = M[0:num_codes, idx[0:code_length]]
params_dict = {'BATCH_NORMALIZATION_FLAG':BATCH_NORMALIZATION_FLAG, 'DATA_AUGMENTATION_FLAG':DATA_AUGMENTATION_FLAG, 'M':M, 'base_model':base_model, 'num_chunks':num_chunks, 'model_rep': model_rep_ens, 'output_activation':output_activation, 'num_filters_ens':num_filters_ens, 'num_filters_ens_2':num_filters_ens_2,'batch_size':batch_size, 'epochs':epochs, 'dropout_rate':dropout_rate_ens, 'lr':lr, 'blend_factor':blend_factor, 'inp_shape':inp_shape, 'noise_stddev':noise_stddev, 'weight_save_freq':weight_save_freq, 'name':name, 'model_path':model_path}
#m4 = Model_Tanh_Ensemble(data_dict, params_dict)
#m4.defineModel(); m4.trainModel()
"""
## ENSEMBLE TANH 32 MODEL DEFINITION
name = 'tanh_32_diverse' + '_' + DATA_DESC;
seed = 59;
code_length = 32;
num_codes = code_length;
num_chunks = 4;
base_model = None;
def output_activation(x):
return tf.nn.tanh(x)
M = scipy.linalg.hadamard(code_length).astype(np.float32)
# Replace the first column, which for scipy's Hadamard construction is always 1 and
# hence not a useful classifier. This change still ensures all codewords have dot
# product <= 0; since our decoder ignores negative correlations anyway, it has no net
# effect on probability estimation.
M[np.arange(0, num_codes, 2), 0] = -1
np.random.seed(seed)
np.random.shuffle(M)
idx = np.random.permutation(code_length)
M = M[0:num_codes, idx[0:code_length]]
params_dict = {'BATCH_NORMALIZATION_FLAG': BATCH_NORMALIZATION_FLAG,
'DATA_AUGMENTATION_FLAG': DATA_AUGMENTATION_FLAG, 'M': M,
'base_model': base_model, 'num_chunks': num_chunks,
'model_rep': model_rep_ens,
'output_activation': output_activation,
'num_filters_ens': num_filters_ens,
'num_filters_ens_2': num_filters_ens_2, 'batch_size': batch_size,
'epochs': epochs, 'dropout_rate': dropout_rate_ens, 'lr': lr,
'blend_factor': blend_factor, 'inp_shape': inp_shape,
'noise_stddev': noise_stddev,
'weight_save_freq': weight_save_freq, 'name': name,
'model_path': model_path}
m5 = Model_Tanh_Ensemble(data_dict, params_dict)
m5.defineModel();
m5.trainModel()
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This defines a general "Model", i.e. the architecture and decoding operations. It is an abstract base class for all models,
e.g. the baseline softmax model or the ensemble Tanh model.
"""
import tensorflow as tf
from utils_keras import KerasModelWrapper as CleverHansKerasModelWrapper
from tensorflow.keras.layers import BatchNormalization, Dropout, Lambda, Input, Dense, Conv2D, Flatten, Activation, Concatenate, GaussianNoise
from tensorflow.keras.utils import plot_model
from tensorflow.keras import regularizers
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Model as KerasModel
import pickle
import numpy as np
from ClassBlender import ClassBlender
from DataAugmenter import DataAugmenter
from Clipper import Clipper
from Grayscaler import Grayscaler
class WeightsSaver(Callback):
def __init__(self, N):
self.N = N
self.epoch = 0
def specifyFilePath(self, path):
self.full_path = path #full path to file, including file name
def on_epoch_end(self, epoch, logs={}):
if self.epoch % self.N == 0:
print("SAVING WEIGHTS")
w= self.model.get_weights()
pklfile= self.full_path + '_' + str(self.epoch) + '.pkl'
fpkl= open(pklfile, 'wb')
pickle.dump(w, fpkl)
fpkl.close()
self.epoch += 1
#Abstract base class for all model classes
class Model(object):
def __init__(self, data_dict, params_dict):
self.data_dict = data_dict
self.params_dict = params_dict
self.input = Input(shape=self.params_dict['inp_shape'], name='input')
self.TRAIN_FLAG=1
if len(data_dict) > 0:
self.encodeData()
else:
import warnings
warnings.warn("no data passed; cannot encode it")
#map categorical class labels (numbers) to encoded (e.g., one hot or ECOC) vectors
def encodeData(self):
self.Y_train = np.zeros((self.data_dict['X_train'].shape[0], self.params_dict['M'].shape[1]))
self.Y_test = np.zeros((self.data_dict['X_test'].shape[0], self.params_dict['M'].shape[1]))
for k in np.arange(self.params_dict['M'].shape[1]):
self.Y_train[:,k] = self.params_dict['M'][self.data_dict['Y_train_cat'], k]
self.Y_test[:,k] = self.params_dict['M'][self.data_dict['Y_test_cat'], k]
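    # Note (added for clarity): the loop above just gathers one codeword row per label,
    # i.e. each row of self.Y_train is the codeword M[class_label] of that sample
    # (and likewise for self.Y_test).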
#define the neural network
def defineModel(self):
outputs=[]
self.penultimate = []
self.penultimate2 = []
features = []
n = int(self.params_dict['M'].shape[1]/self.params_dict['num_chunks'])
for k in np.arange(0,self.params_dict['num_chunks']):
x = self.input
if self.params_dict.get('zero_one_input', False):
x = x - 0.5
if self.params_dict['inp_shape'][2]>1:
x_gs = Grayscaler()(x)
else:
x_gs = x
if (self.TRAIN_FLAG==1):
x = GaussianNoise(self.params_dict['noise_stddev'], input_shape=self.params_dict['inp_shape'])(x)
x_gs = GaussianNoise(self.params_dict['noise_stddev'], input_shape=self.params_dict['inp_shape'])(x_gs)
if self.params_dict['DATA_AUGMENTATION_FLAG']>0:
x = DataAugmenter(self.params_dict['batch_size'])(x)
x_gs = DataAugmenter(self.params_dict['batch_size'])(x_gs)
x = ClassBlender(self.params_dict['blend_factor'], self.params_dict['batch_size'])(x)
x_gs = ClassBlender(self.params_dict['blend_factor'], self.params_dict['batch_size'])(x_gs)
#x = Lambda(lambda x: x-0.5)(x)
x = Clipper()(x)
x_gs = Clipper()(x_gs)
        # TODO: verify that this modification makes sense
# Added trainable=self.TRAIN_FLAG==1 for all batchnorm layers to make
# sure they stay fixed during eval (modification by AUTHOR)
for rep in np.arange(self.params_dict['model_rep']):
x = Conv2D(self.params_dict['num_filters_ens'][0], (5,5), activation='elu', padding='same')(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
x = Conv2D(self.params_dict['num_filters_ens'][0], (3,3), strides=(2,2), activation='elu', padding='same')(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
for rep in np.arange(self.params_dict['model_rep']):
x = Conv2D(self.params_dict['num_filters_ens'][1], (3, 3), activation='elu', padding='same')(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
x = Conv2D(self.params_dict['num_filters_ens'][1], (3,3), strides=(2,2), activation='elu', padding='same')(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
for rep in np.arange(self.params_dict['model_rep']):
x = Conv2D(self.params_dict['num_filters_ens'][2], (3, 3), activation='elu', padding='same')(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
x = Conv2D(self.params_dict['num_filters_ens'][2], (3,3), strides=(2,2), activation='elu', padding='same')(x)
#x = BatchNormalization()(x)
pens = []
out=[]
for k2 in np.arange(n):
x0 = Conv2D(self.params_dict['num_filters_ens_2'], (5, 5), strides=(2,2), activation='elu', padding='same')(x_gs)
x0 = Conv2D(self.params_dict['num_filters_ens_2'], (3, 3), strides=(2,2), activation='elu', padding='same')(x0)
x0 = Conv2D(self.params_dict['num_filters_ens_2'], (3, 3), strides=(2,2), activation='elu', padding='same')(x0)
x_= Concatenate()([x0, x])
x_ = Conv2D(self.params_dict['num_filters_ens_2'], (2, 2), activation='elu', padding='same')(x_)
x_ = Conv2D(self.params_dict['num_filters_ens_2'], (2, 2), activation='elu', padding='same')(x_)
x_ = Flatten()(x_)
features.append(x_)
x_ = Dense(16, activation='elu')(x_)
x_ = Dense(8, activation='elu')(x_)
x_ = Dense(4, activation='elu')(x_)
x0 = Dense(2, activation='linear')(x_)
pens += [x0]
x1 = Dense(1, activation='linear', name='w_'+str(k)+'_'+str(k2)+'_'+self.params_dict['name'], kernel_regularizer=regularizers.l2(0.0))(x0)
out += [x1]
self.penultimate += [pens]
if len(pens) > 1:
self.penultimate2 += [Concatenate()(pens)]
else:
self.penultimate2 += pens
if len(out)>1:
outputs += [Concatenate()(out)]
else:
outputs += out
self.features = features
self.model = KerasModel(inputs=self.input, outputs=outputs)
# print(self.model.summary())
#plot_model(self.model, to_file=self.params_dict['model_path'] + '/' + self.params_dict['name'] + '.png')
return outputs
def defineLoss(self):
error = "Sub-classes must implement defineLoss."
raise NotImplementedError(error)
def defineMetric(self):
error = "Sub-classes must implement defineMetric."
raise NotImplementedError(error)
def trainModel(self):
opt = Adam(lr=self.params_dict['lr'])
self.model.compile(optimizer=opt, loss=[self.defineLoss(k) for k in np.arange(self.params_dict['num_chunks'])], metrics=self.defineMetric())
WS = WeightsSaver(self.params_dict['weight_save_freq'])
WS.specifyFilePath(self.params_dict['model_path'] + self.params_dict['name'])
Y_train_list=[]
Y_test_list=[]
start = 0
for k in np.arange(self.params_dict['num_chunks']):
end = start + int(self.params_dict['M'].shape[1]/self.params_dict['num_chunks'])
Y_train_list += [self.Y_train[:,start:end]]
Y_test_list += [self.Y_test[:,start:end]]
start=end
self.model.fit(self.data_dict['X_train'], Y_train_list,
epochs=self.params_dict['epochs'],
batch_size=self.params_dict['batch_size'],
shuffle=True,
validation_data=[self.data_dict['X_test'], Y_test_list],
callbacks=[WS])
self.saveModel()
def resumeTrainModel(self):
Y_train_list=[]
Y_test_list=[]
start = 0
for k in np.arange(self.params_dict['num_chunks']):
end = start + int(self.params_dict['M'].shape[1]/self.params_dict['num_chunks'])
Y_train_list += [self.Y_train[:,start:end]]
Y_test_list += [self.Y_test[:,start:end]]
start=end
def hinge_loss(y_true, y_pred):
loss = tf.reduce_mean(tf.maximum(1.0-y_true*y_pred, 0))
return loss
def hinge_pred(y_true, y_pred):
corr = tf.to_float((y_pred*y_true)>0)
return tf.reduce_mean(corr)
self.model = load_model(self.params_dict['model_path'] + self.params_dict['name'] + '_final.h5', custom_objects={'DataAugmenter':DataAugmenter, 'ClassBlender':ClassBlender, 'Clipper':Clipper, 'Grayscaler':Grayscaler, 'hinge_loss':hinge_loss, 'hinge_pred':hinge_pred})
WS = WeightsSaver(self.params_dict['weight_save_freq'])
WS.specifyFilePath(self.params_dict['model_path'] + self.params_dict['name'])
self.model.fit(self.data_dict['X_train'], Y_train_list,
epochs=self.params_dict['epochs'],
batch_size=self.params_dict['batch_size'],
shuffle=True,
validation_data=[self.data_dict['X_test'], Y_test_list],
callbacks=[WS])
self.saveModel()
    #this function takes the output of the NN and maps it into logits (which are then passed through softmax to give a prob. dist.)
    #It effectively does a Hamming-style decoding by taking the inner product of the output with each row of the coding matrix (M)
    #obviously, the better the match, the larger the dot product between the output and a given codeword (row)
    #it is simply a log of the ReLU'd correlations
def outputDecoder(self, x, M=None):
if M is None:
M = self.params_dict['M']
mat1 = tf.matmul(x, M, transpose_b=True)
if not self.params_dict['adaptive_attack']:
mat1 = tf.maximum(mat1, 0)
mat1 = tf.log(mat1+1e-6) #floor negative values
return mat1
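    # Illustrative (commented) NumPy sketch of the decoding above; the toy names below
    # are hypothetical and not used elsewhere:
    #   M_toy = np.array([[1,  1,  1,  1],
    #                     [1, -1,  1, -1]], dtype=np.float32)       # rows = codewords
    #   out = np.array([[0.9, 0.8, -0.7, 0.6]], dtype=np.float32)   # network output
    #   corr = out @ M_toy.T                          # correlation with each codeword
    #   logits = np.log(np.maximum(corr, 0) + 1e-6)   # floor negatives, then log
    #   # softmax(logits) then yields the class probabilities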
def defineBinarizedModel(self):
assert hasattr(self, "penultimate2"), "model needs to be defined first"
readouts = []
individual_logits = []
for k in range(len(self.features)):
readout = Dense(1, activation='linear',
name='binarized_readout_' + str(k),
kernel_regularizer=regularizers.l2(0.0)
)
logit = readout(self.features[k])
logit = Lambda(self.params_dict['output_activation'])(logit)
readouts.append(readout)
individual_logits.append(logit)
if len(individual_logits)>1:
logits = Concatenate()(individual_logits)
else: #if only a single chunk
logits = individual_logits[0]
M = np.stack([np.ones(logits.shape[-1]), -np.ones(logits.shape[-1])], 0).astype(np.float32)
logits = Lambda(
lambda x: self.outputDecoder(
x,
M=M
))(logits)
probs = Activation('softmax')(logits) #return probs
self.binarized_logit = logits
self.binarized_probs = probs
self.binarized_readouts = readouts
self.model_binarized = KerasModel(inputs=self.input, outputs=self.binarized_probs)
def defineFullModel(self):
self.TRAIN_FLAG=0
outputs = self.defineModel()
if len(outputs)>1:
self.raw_output = Concatenate()(outputs)
else: #if only a single chunk
self.raw_output = outputs[0]
#pass output logits through activation
for idx,o in enumerate(outputs):
outputs[idx] = Lambda(self.params_dict['output_activation'])(o)
if len(outputs)>1:
x = Concatenate()(outputs)
else: #if only a single chunk
x = outputs[0]
x = Lambda(self.outputDecoder)(x) #logits
logits = x
x = Activation('softmax')(x) #return probs
self.logits = logits
self.probabilities = x
if self.params_dict['base_model'] == None:
self.model_full = KerasModel(inputs=self.input, outputs=x)
else:
self.model_full = KerasModel(inputs=self.params_dict['base_model'].input, outputs=x)
#CleverHans model wrapper; returns a model that CH can attack
def modelCH(self):
return CleverHansKerasModelWrapper(self.model_full)
def modelBinarizedCH(self):
return CleverHansKerasModelWrapper(self.model_binarized)
def saveModel(self):
w= self.model.get_weights()
pklfile= self.params_dict['model_path'] + self.params_dict['name'] + '_final.pkl'
fpkl= open(pklfile, 'wb')
pickle.dump(w, fpkl)
fpkl.close()
self.model.save(self.params_dict['model_path'] + self.params_dict['name'] + '_final.h5')
def loadModel(self):
pklfile= self.params_dict['model_path'] + self.params_dict['name'] + '_final.pkl'
f= open(pklfile, 'rb')
weigh= pickle.load(f);
f.close();
self.defineModel()
self.model.set_weights(weigh)
def loadFullModel(self):
pklfile= self.params_dict['model_path'] + self.params_dict['name'] + '_final.pkl'
f= open(pklfile, 'rb')
weigh= pickle.load(f);
f.close();
self.defineFullModel()
self.model_full.set_weights(weigh)
def predict(self, X):
return self.model_full(X)
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run this to attack a model trained via TrainModel.
Use the "loadFullModel" method to load an already-trained model (trained via TrainModel).
The main attack function is "runAttacks", which runs attacks on the trained models.
"""
import pdb
from cleverhans.attacks import Noise, CarliniWagnerL2, MaxConfidence, FastGradientMethod, BasicIterativeMethod, DeepFool, MomentumIterativeMethod, ProjectedGradientDescent
from Model_Implementations import Model_Softmax_Baseline, Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, Model_Tanh_Baseline
from tensorflow.keras.datasets import mnist, cifar10
from tensorflow.keras import backend
import tensorflow as tf; import numpy as np
import scipy.linalg
from scipy import stats
import matplotlib.pyplot as plt
model_path = 'checkpoints/ECOC/tanh32/checkpoints' #path with saved model parameters
sess = backend.get_session()
backend.set_learning_phase(0) #need to do this to get CleverHans to work with batchnorm
#Dataset-specific parameters - should be same as those used in TrainModel
DATA_DESC = 'CIFAR10'; (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
epochs=None; weight_save_freq=None
num_classes=10 #how many classes (categories) are in this dataset?
Y_train = np.squeeze(Y_train); Y_test = np.squeeze(Y_test)
num_filters_std = [32, 64, 128]; num_filters_ens=[32, 64, 128]; num_filters_ens_2=16; dropout_rate_std=0.0; dropout_rate_ens=0.0; weight_decay = 0
model_rep_baseline=2; model_rep_ens=2; DATA_AUGMENTATION_FLAG=1; BATCH_NORMALIZATION_FLAG=1
num_channels = 3; inp_shape = (32,32,3); lr=1e-4; batch_size=80;
noise_stddev = 0.032; blend_factor = .032
#Attack parameters
eps_val = 8/255.0; PGD_iters = 200; eps_iter=(2/3)*eps_val;
eps_range = np.linspace(0, 0.33, 10)
noise_eps=0.1
# DATA PRE-PROCESSING
X_train = (X_train/255).astype(np.float32); X_test = (X_test/255).astype(np.float32)
#reshape (add third (image) channel)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2],num_channels); X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],num_channels)
X_valid = X_test[1000:2000]; Y_valid = Y_test[1000:2000]; #validation data, used to attack model
#X_train = X_train-0.5; X_test = X_test-0.5; X_valid = X_valid-0.5; #map to range (-0.5,0.5)
data_dict = {'X_train':X_train, 'Y_train_cat':Y_train, 'X_test':X_test, 'Y_test_cat':Y_test}
X_random = np.random.rand(X_valid.shape[0],X_valid.shape[1],X_valid.shape[2],X_valid.shape[3])-0.5; X_random = X_random.astype(np.float32)
#Model definition of the model we want to attack; should be same as the definition used in TrainModel
## ENSEMBLE TANH 32 MODEL DEFINITION
name = 'tanh_32_diverse' + '_' + DATA_DESC;
seed = 59;
code_length = 32;
num_codes = code_length;
num_chunks = 4;
base_model = None;
def output_activation(x):
return tf.nn.tanh(x)
M = scipy.linalg.hadamard(code_length).astype(np.float32)
# Replace the first column, which for scipy's Hadamard construction is always 1 and
# hence not a useful classifier. This change still ensures all codewords have dot
# product <= 0; since our decoder ignores negative correlations anyway, it has no net
# effect on probability estimation.
M[np.arange(0, num_codes, 2), 0] = -1
np.random.seed(seed)
np.random.shuffle(M)
idx = np.random.permutation(code_length)
M = M[0:num_codes, idx[0:code_length]]
params_dict = {'BATCH_NORMALIZATION_FLAG': BATCH_NORMALIZATION_FLAG,
'DATA_AUGMENTATION_FLAG': DATA_AUGMENTATION_FLAG, 'M': M,
'base_model': base_model, 'num_chunks': num_chunks,
'model_rep': model_rep_ens,
'output_activation': output_activation,
'num_filters_ens': num_filters_ens,
'num_filters_ens_2': num_filters_ens_2, 'batch_size': batch_size,
'epochs': epochs, 'dropout_rate': dropout_rate_ens, 'lr': lr,
'blend_factor': blend_factor, 'inp_shape': inp_shape,
'noise_stddev': noise_stddev,
'weight_save_freq': weight_save_freq, 'name': name,
'model_path': model_path,
'zero_one_input': True
}
m4 = Model_Tanh_Ensemble({}, params_dict)
m4.loadFullModel() # load in the saved model, which should have already been trained first via TrainModel
m4.legend = 'TEns32';
m4.X_valid = X_valid; m4.Y_valid = Y_valid;
m4.X_test = X_test; m4.Y_test = Y_test;
m4.X_random = X_random;
#m4.minval = -0.5; m4.maxval = 0.5
m4.minval = 0; m4.maxval = 1
def benignAccuracy(model, X, Y):
acc_vec=[]; probs_benign_list=[]
for rep in np.arange(0, X.shape[0], 1000):
x = X[rep:rep+1000]
probs_benign = sess.run(model.predict(tf.convert_to_tensor(x)))
print(probs_benign.shape)
acc= np.mean(np.argmax(probs_benign, 1)==Y[rep:rep+1000])
acc_vec += [acc]
probs_benign_list += list(np.max(probs_benign, 1))
acc = np.mean(acc_vec)
print("Accuracy for model " + model.params_dict['name'] + " : ", acc)
return probs_benign_list
def wbAttack(model, attack, att_params, X, Y):
sess = backend.get_session()
modelCH = model.modelCH()
adv_model = attack(modelCH, sess=sess)
acc_vec=[]; probs_adv_list=[]
inc=64
for rep in np.arange(0, X.shape[0], inc):
x = X[rep:rep+inc]
y = Y[rep:rep+inc]
X_adv = adv_model.generate(tf.convert_to_tensor(x), **att_params).eval(session=sess)
temp = sess.run(model.predict(tf.convert_to_tensor(X_adv)))
print(temp.shape)
preds = np.argmax(temp, 1)
acc = np.mean(np.equal(preds, y))
probs_adv = np.max(sess.run(model.predict(tf.convert_to_tensor(X_adv))), 1)
probs_adv = probs_adv[preds != y]
acc_vec += [acc]
probs_adv_list += list(probs_adv)
acc = np.mean(acc_vec)
print("Adv accuracy for model " + model.params_dict['name'] + " : ", acc)
return probs_adv_list, acc, X_adv
def runAttacks(models_list):
#CW attack
for model in models_list:
print(""); print(""); print("");
print("Running tests on model: ", model.params_dict['name'])
print("Clean accuracy of model:")
probs_benign = benignAccuracy(model, model.X_test, model.Y_test)
print("")
print("Running PGD attack:")
att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'eps':eps_val, 'eps_iter':eps_iter, 'nb_iter':PGD_iters,'ord':np.inf}
probs_adv, junk, X_adv = wbAttack(model, ProjectedGradientDescent, att_params, model.X_valid, model.Y_valid)
print("")
# print("Running CW attack:")
# att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'binary_search_steps':10, 'learning_rate':1e-3}
# probs_adv, junk, X_adv = wbAttack(model, CarliniWagnerL2, att_params, model.X_valid[0:100], model.Y_valid[0:100])
# print("")
#
# print("Running Blind Spot attack, alpha=0.8:")
# att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'binary_search_steps':10, 'learning_rate':1e-3}
# probs_adv, junk, X_adv = wbAttack(model, CarliniWagnerL2, att_params, 0.8*model.X_valid[0:100], model.Y_valid[0:100])
# print("")
#Random ATTACK (0 SNR inputs)
print("Running random attack:")
probs_random = np.max(sess.run(model.predict(tf.convert_to_tensor(model.X_random))), 1)
print('Prob. that ', model.params_dict['name'], ' < 0.9 on random data: ', np.mean(probs_random<0.9))
#Noise ATTACK (low SNR inputs)
print("Running Noise attack:")
att_params = {'clip_min': model.minval, 'clip_max':model.maxval, 'eps':noise_eps}
probs_noise, junk, X_adv = wbAttack(model, Noise, att_params, model.X_valid, model.Y_valid)
print("")
return probs_benign, probs_adv, probs_noise
models_list = [m4]
probs_benign, probs_adv, probs_noise = runAttacks(models_list)
plt.figure(1)
kernel = stats.gaussian_kde(probs_benign, bw_method=0.5)
plt.plot(np.arange(0, 1, .01), kernel.pdf(np.arange(0, 1, .01)), linewidth=4)
plt.figure(2)
kernel = stats.gaussian_kde(probs_adv, bw_method=0.5)
plt.plot(np.arange(0, 1, .01), kernel.pdf(np.arange(0, 1, .01)), linewidth=4)
plt.figure(3)
kernel = stats.gaussian_kde(probs_noise, bw_method=0.5)
plt.plot(np.arange(0, 1, .01), kernel.pdf(np.arange(0, 1, .01)), linewidth=4)
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model construction utilities based on keras
"""
from distutils.version import LooseVersion
# modified to work with error-correcting codes
#import keras
#from keras.models import Sequential
#from keras.layers import Dense, Activation, Flatten
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten
from cleverhans.model import Model, NoSuchLayerError
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
  from tensorflow.keras.layers import Conv2D
else:
from keras.layers import Convolution2D
def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
"""
Defines the right convolutional layer according to the
version of Keras that is installed.
  :param filters: (required integer) the dimensionality of the output
                  space (i.e. the number of output filters in the
                  convolution)
  :param kernel_shape: (required tuple or list of 2 integers) specifies
                       the height and width of the convolution kernel.
  :param strides: (required tuple or list of 2 integers) specifies
                  the strides of the convolution along the height and
                  width.
:param padding: (required string) can be either 'valid' (no padding around
input or feature map) or 'same' (pad to ensure that the
output feature map size is identical to the layer input)
:param input_shape: (optional) give input shape if this is the first
layer of the model
:return: the Keras layer
"""
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
if input_shape is not None:
return Conv2D(filters=filters, kernel_size=kernel_shape,
strides=strides, padding=padding,
input_shape=input_shape)
else:
return Conv2D(filters=filters, kernel_size=kernel_shape,
strides=strides, padding=padding)
else:
if input_shape is not None:
return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
subsample=strides, border_mode=padding,
input_shape=input_shape)
else:
return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
subsample=strides, border_mode=padding)
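# Illustrative usage (not from the original file): a 5x5, stride-2, 'same'-padded
# convolution with 32 filters for 32x32x3 inputs:
#   layer = conv_2d(32, (5, 5), (2, 2), "same", input_shape=(32, 32, 3))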
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
  :param img_rows: number of rows in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
input_shape=input_shape),
Activation('relu'),
conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
Activation('relu'),
conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
Activation('relu'),
Flatten(),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
if logits:
logits_tensor = model(input_ph)
model.add(Activation('softmax'))
if logits:
return model, logits_tensor
else:
return model
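# Illustrative usage (hypothetical): build the default MNIST-sized CNN and, optionally,
# also obtain the logits tensor for a given input tensor `x_ph`:
#   model = cnn_model()
#   model_with_logits, logits = cnn_model(logits=True, input_ph=x_ph)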
class KerasModelWrapper(Model):
"""
An implementation of `Model` that wraps a Keras model. It
specifically exposes the hidden features of a model by creating new models.
The symbolic graph is reused and so there is little overhead. Splitting
in-place operations can incur an overhead.
"""
def __init__(self, model):
"""
Create a wrapper for a Keras model
:param model: A Keras model
"""
super(KerasModelWrapper, self).__init__(None, None, {})
if model is None:
raise ValueError('model argument must be supplied.')
self.model = model
self.keras_model = None
def _get_softmax_name(self):
"""
Looks for the name of the softmax layer.
:return: Softmax layer name
"""
for layer in self.model.layers:
cfg = layer.get_config()
if 'activation' in cfg and cfg['activation'] == 'softmax':
return layer.name
raise Exception("No softmax layers found")
def _get_logits_name(self):
"""
Looks for the name of the layer producing the logits.
:return: name of layer producing the logits
"""
softmax_name = self._get_softmax_name()
softmax_layer = self.model.get_layer(softmax_name)
if not isinstance(softmax_layer, Activation):
# In this case, the activation is part of another layer
return softmax_name
if not hasattr(softmax_layer, '_inbound_nodes'):
raise RuntimeError("Please update keras to version >= 2.1.3")
node = softmax_layer._inbound_nodes[0]
# modified for error-correcting codes, first line was original
if isinstance(node.inbound_layers, (list, tuple)):
logits_name = node.inbound_layers[0].name
else:
logits_name = node.inbound_layers.name
return logits_name
def get_logits(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the logits
"""
logits_name = self._get_logits_name()
logits_layer = self.get_layer(x, logits_name)
# Need to deal with the case where softmax is part of the
# logits layer
if logits_name == self._get_softmax_name():
softmax_logit_layer = self.get_layer(x, logits_name)
# The final op is the softmax. Return its input
logits_layer = softmax_logit_layer._op.inputs[0]
return logits_layer
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs
"""
name = self._get_softmax_name()
return self.get_layer(x, name)
def get_layer_names(self):
"""
:return: Names of all the layers kept by Keras
"""
layer_names = [x.name for x in self.model.layers]
return layer_names
def fprop(self, x):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
# modified to work with error-correcting codes defense
from tensorflow.keras.models import Model as KerasModel
if self.keras_model is None:
# Get the input layer
new_input = self.model.get_input_at(0)
# Make a new model that returns each of the layers as output
out_layers = [x_layer.output for x_layer in self.model.layers]
self.keras_model = KerasModel(new_input, out_layers)
# and get the outputs for that model on the input x
outputs = self.keras_model(x)
    # Keras only returns a list when there is more than one output; if the
    # model has only one layer, wrap its single output in a list
if len(self.model.layers) == 1:
outputs = [outputs]
# compute the dict to return
fprop_dict = dict(zip(self.get_layer_names(), outputs))
return fprop_dict
def get_layer(self, x, layer):
"""
Expose the hidden features of a model given a layer name.
:param x: A symbolic representation of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation of the hidden features
:raise: NoSuchLayerError if `layer` is not in the model.
"""
# Return the symbolic representation for this layer.
output = self.fprop(x)
try:
requested = output[layer]
except KeyError:
raise NoSuchLayerError()
    return requested
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run this to attack a model trained via TrainModel.
Use the "loadFullModel" method to load an already-trained model (trained via TrainModel).
The main attack function is "runAttacks", which runs attacks on the trained models.
"""
import warnings
import torch
from cleverhans.attacks import ProjectedGradientDescent
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from Model_Implementations import Model_Softmax_Baseline, \
Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, \
Model_Tanh_Baseline
from tensorflow.keras.datasets import cifar10
from tensorflow.keras import backend
import tensorflow as tf;
import numpy as np
import scipy.linalg
from adversarial_evaluation import setup_model_and_data, patch_pgd_loss
from active_tests import decision_boundary_binarization as dbb
from functools import partial
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
from utils import build_dataloader_from_arrays
def train_classifier(
n_features: int,
train_loader: DataLoader,
raw_train_loader: DataLoader,
logits: torch.Tensor,
device: str,
rescale_logits: dbb.LogitRescalingType,
model,
sess,
):
del raw_train_loader
# fit a linear readout for each of the submodels of the ensemble
assert len(train_loader.dataset.tensors[0].shape) == 3
assert train_loader.dataset.tensors[0].shape[1] == len(model.binarized_readouts)
classifier_weights = []
classifier_biases = []
for i in range(len(model.binarized_readouts)):
x_ = train_loader.dataset.tensors[0][:, i]
y_ = train_loader.dataset.tensors[1]
cls = dbb._train_logistic_regression_classifier(
n_features,
DataLoader(TensorDataset(x_, y_), batch_size=train_loader.batch_size),
logits[:, i] if logits is not None else None,
"sklearn",
20000,
device,
n_classes=2,
rescale_logits=rescale_logits,
solution_goodness="good",
class_weight="balanced"
)
classifier_weights.append(cls.weight.data.cpu().numpy().transpose()[:, [0]])
classifier_biases.append(cls.bias.data.cpu().numpy()[0])
# update weights of the binary models
for l, vw, vb in zip(model.binarized_readouts, classifier_weights, classifier_biases):
l.set_weights([vw, vb.reshape((1,))])
return BinarizedModelWrapper(model, sess)
class BinarizedModelWrapper:
def __init__(self, model, sess):
self.model = model
self.sess = sess
def __call__(self, x):
x = x.numpy()
x = x.transpose((0, 2, 3, 1))
p = self.sess.run(self.model.binarized_probs, {self.model.input: x})
return torch.tensor(p)
def main():
sess = backend.get_session()
backend.set_learning_phase(
0) # need to do this to get CleverHans to work with batchnorm
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--eps", type=float, default=8, help="in 0-255")
parser.add_argument("--pgd-n-steps", default=200, type=int)
parser.add_argument("--pgd-step-size", type=float, default=2 / 3 * 8, help="in 0-255")
parser.add_argument("--n-samples", type=int, default=512)
parser.add_argument("--adaptive-attack", action="store_true")
parser.add_argument("-n-inner-points", type=int, default=999)
parser.add_argument("-n-boundary-points", type=int, default=1)
parser.add_argument("--sample-from-corners", action="store_true")
args = parser.parse_args()
model, (X_valid, Y_valid), (X_test, Y_test) = setup_model_and_data(adaptive_attack=args.adaptive_attack)
model.defineBinarizedModel()
binarized_model_ch = model.modelBinarizedCH()
if args.adaptive_attack:
patch_pgd_loss()
attack = ProjectedGradientDescent(binarized_model_ch, sess=sess)
att_params = {'clip_min': 0.0, 'clip_max': 1.0,
'eps': args.eps / 255.0, 'eps_iter': args.pgd_step_size / 255.0,
'nb_iter': args.pgd_n_steps, 'ord': np.inf}
x_ph = tf.placeholder(shape=model.input.shape, dtype=tf.float32)
x_adv_op = attack.generate(x_ph, **att_params)
def _model_forward_pass(x_np, features_only=False, features_and_logits=False):
x_np = np.transpose(x_np, (0, 2, 3, 1))
if features_only:
f = sess.run(model.features, {model.input : x_np})
f = np.stack(f, 1)
return f
elif features_and_logits:
f, l = sess.run((model.features,
model.logits), {model.input : x_np})
f = np.stack(f, 1)
return f, l
else:
l = sess.run(model.logits, {model.input : x_np})
return l
def run_attack(m, l, sess):
model = m.model
for x, y in l:
assert len(x) == 1
x, y = x.numpy(), y.numpy()
x = x.transpose((0, 2, 3, 1))
x_adv = sess.run(x_adv_op, {x_ph: x})
warnings.warn("ATTENTION: Clipping perturbation just to TEST something. Remove this again!")
delta = x_adv - x
delta[delta > 0] = args.eps / 255.0
delta[delta < 0] = -args.eps / 255.0
x_adv = np.clip(x + delta, 0, 1)
logits, probs = sess.run((model.binarized_logit, model.binarized_probs),
{model.input: x_adv})
is_adv = np.argmax(probs) != y
return is_adv, (torch.tensor(x_adv.transpose((0, 3, 1, 2))), torch.tensor(logits))
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=_model_forward_pass,
logit_forward_and_backward_pass=None
)
test_indices = list(range(len(X_test)))
np.random.shuffle(test_indices)
X_test, Y_test = X_test[test_indices], Y_test[test_indices]
X_test = np.transpose(X_test, (0, 3, 1, 2))
test_loader = build_dataloader_from_arrays(X_test, Y_test, batch_size=32)
from argparse_utils import DecisionBoundaryBinarizationSettings
scores_logit_differences_and_validation_accuracies = \
dbb.interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, kwargs: run_attack(m, l, sess),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=args.eps / 255.0,
norm="linf",
lr=25000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=args.n_samples,
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
train_classifier_fn=partial(
train_classifier,
model=model,
sess=sess
),
fail_on_exception=False,
# needs to be set to None as logit rescaling introduces a weird behavior
# of very high R. ASR (probably due to the log in the logit calculation)
rescale_logits=None,
decision_boundary_closeness=0.999,
sample_training_data_from_corners=args.sample_from_corners
)
print(dbb.format_result(scores_logit_differences_and_validation_accuracies,
args.n_samples))
if __name__ == "__main__":
    main()
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted and heavily modified from
https://github.com/wielandbrendel/adaptive_attacks_paper/blob/master/01_kwta/kwta_attack.ipynb
"""
from typing import Callable
from typing import Optional
import numpy as np
import torch
import torch.nn.functional
from tqdm import tqdm
import utils as ut
def __best_other_classes(logits: torch.Tensor,
exclude: torch.Tensor) -> torch.Tensor:
other_logits = logits - torch.nn.functional.one_hot(exclude,
num_classes=logits.shape[
-1]) * np.inf
return other_logits.argmax(axis=-1)
def __logit_diff_loss_fn(model: Callable, x: torch.Tensor,
classes: torch.Tensor,
targeted: bool):
with torch.no_grad():
logits = model(x)
if targeted:
c_minimize = classes
c_maximize = __best_other_classes(logits, classes)
else:
c_minimize = __best_other_classes(logits, classes)
c_maximize = classes
N = len(x)
rows = range(N)
logits_diffs = logits[rows, c_minimize] - logits[rows, c_maximize]
assert logits_diffs.shape == (N,)
return logits_diffs
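# Descriptive note (added): the helper below estimates the gradient of loss_fn at x via
# antithetic Gaussian sampling (an evolution-strategies-style finite difference): for each
# noise draw it evaluates the loss at x + sigma*noise and x - sigma*noise and accumulates
# the loss difference times the noise direction.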
def __es_gradient_estimator(loss_fn: Callable, x: torch.Tensor, y: torch.Tensor,
n_samples: int, sigma: float, clip=False, bounds=(0, 1)):
assert len(x) == len(y)
assert n_samples > 0
gradient = torch.zeros_like(x)
with torch.no_grad():
for k in range(n_samples // 2):
noise = torch.randn_like(x)
pos_theta = x + sigma * noise
neg_theta = x - sigma * noise
if clip:
pos_theta = pos_theta.clip(*bounds)
neg_theta = neg_theta.clip(*bounds)
pos_loss = loss_fn(pos_theta, y)
neg_loss = loss_fn(neg_theta, y)
gradient += (pos_loss - neg_loss)[:, None, None, None] * noise
gradient /= 2 * sigma * 2 * n_samples
return gradient
def gradient_estimator_pgd(model: Callable,
x: torch.Tensor, y: torch.Tensor,
n_steps: int,
step_size: float, epsilon: float, norm: ut.NormType,
loss_fn: Optional[Callable] = None,
random_start: bool = True,
early_stopping: bool = False, targeted: bool = False):
if loss_fn is None:
loss_fn = lambda x, y: __logit_diff_loss_fn(model, x, y, targeted)
assert len(x) == len(y)
if random_start:
delta = torch.rand_like(x)
delta = ut.normalize(delta, norm)
x_adv, delta = ut.clipping_aware_rescaling(x, delta, epsilon, norm=norm,
growing=False, return_delta=True)
else:
x_adv = x
delta = torch.zeros_like(x)
if targeted:
is_adversarial_fn = lambda x: model(x).argmax(-1) == y
else:
is_adversarial_fn = lambda x: model(x).argmax(-1) != y
mask = ~is_adversarial_fn(x_adv)
if not early_stopping:
mask = torch.ones_like(mask)
else:
if mask.sum() == 0:
return x_adv.detach(), ~mask.detach()
if len(x) > 1:
iterator = tqdm(range(n_steps))
else:
iterator = range(n_steps)
for it in iterator:
if it < 0.6 * n_steps:
n_samples = 100
elif it < 0.8 * n_steps:
n_samples = 1000
elif it >= 0.8 * n_steps:
n_samples = 20000
pert_x = (x + delta).clip(0, 1)
grad_x = __es_gradient_estimator(loss_fn, pert_x[mask], y[mask], n_samples,
epsilon)
grad_x = ut.normalize(grad_x, norm)
# update only subportion of deltas
delta[mask] = delta[mask] - step_size * grad_x
# project back to feasible set
x_adv, delta = ut.clipping_aware_rescaling(x, delta, epsilon, norm=norm,
growing=False, return_delta=True)
mask = ~is_adversarial_fn(x_adv)
# new_logit_diffs = loss_fn(x_adv, y)
# mask = new_logit_diffs >= 0
if not early_stopping:
mask = torch.ones_like(mask)
if early_stopping and mask.sum() == 0:
break
return x_adv.detach(), ~mask.detach()
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
import torch
import utils as ut
def general_pgd(loss_fn: Callable, is_adversarial_fn: Callable, x: torch.Tensor,
y: torch.Tensor, n_steps: int,
step_size: float, epsilon: float, norm: ut.NormType,
random_start: bool = True,
early_stopping: bool = False,
n_averaging_steps: int = 1):
"""Performs a projected gradient descent (PGD) for an arbitrary loss function
and success criterion.
:param loss_fn: Loss function to minimize.
:param is_adversarial_fn: Check if examples are adversarial
:param x: Input images.
:param y: Ground-truth labels.
:param n_steps: Number of steps.
:param step_size: Size of the steps/learning rate.
:param epsilon: Maximum size of the perturbation measured by the norm.
:param norm: Norm to use for measuring the size of the perturbation.
:param random_start: Randomly start within the epsilon ball.
  :param early_stopping: Stop once an adversarial perturbation has been found
      for all examples.
  :param n_averaging_steps: Number of repetitions for every gradient
      calculation.
:return: (Adversarial examples, attack success for each sample)
"""
assert norm in ("linf", "l2", "l1")
x_orig = x
x = x.clone()
if random_start:
delta = torch.rand_like(x)
delta = ut.normalize(delta, norm)
x = ut.clipping_aware_rescaling(x_orig, delta, epsilon, norm=norm,
growing=False)
for step in range(n_steps):
x = x.requires_grad_()
# check early stopping
with torch.no_grad():
is_adv = is_adversarial_fn(x, y)
if early_stopping and torch.all(is_adv): #
return x.detach(), is_adv.detach()
grad_x = torch.zeros_like(x)
for _ in range(n_averaging_steps):
# get gradient of cross-entropy wrt to input
loss = loss_fn(x, y)
grad_x += torch.autograd.grad(loss, x)[0].detach() / n_averaging_steps
# normalize gradient
grad_x = ut.normalize(grad_x, norm)
# perform step
delta = (x - x_orig).detach() - step_size * grad_x.detach()
# project back to feasible set
x = ut.clipping_aware_rescaling(x_orig, delta, epsilon, norm=norm,
growing=False)
del loss, grad_x
with torch.no_grad():
is_adv = is_adversarial_fn(x, y)
return x.detach(), is_adv.detach()
def pgd(model: Callable, x: torch.Tensor, y: torch.Tensor, n_steps: int,
step_size: float, epsilon: float, norm: ut.NormType,
random_start: bool = True,
early_stopping: bool = False,
targeted: bool = False,
n_averaging_steps: int = 1):
"""Performs a standard projected gradient descent (PGD) with a cross-entropy
objective.
  :param model: Inference function of the model yielding logits.
  :param x: Input images.
:param y: Ground-truth labels.
:param n_steps: Number of steps.
:param step_size: Size of the steps/learning rate.
:param epsilon: Maximum size of the perturbation measured by the norm.
:param norm: Norm to use for measuring the size of the perturbation.
:param random_start: Randomly start within the epsilon ball.
  :param early_stopping: Stop once an adversarial perturbation has been found
      for all examples.
  :param targeted: Perform a targeted adversarial attack.
  :param n_averaging_steps: Number of repetitions for every gradient
      calculation.
:return: (Adversarial examples, attack success for each sample)
"""
assert norm in ("linf", "l2", "l1")
criterion = torch.nn.CrossEntropyLoss()
sign = 1 if targeted else -1
return general_pgd(loss_fn=lambda x, y: sign * criterion(model(x), y),
is_adversarial_fn=lambda x, y: model(x).argmax(
-1) == y if targeted else model(x).argmax(-1) != y,
x=x, y=y, n_steps=n_steps, step_size=step_size,
epsilon=epsilon, norm=norm, random_start=random_start,
n_averaging_steps=n_averaging_steps,
                     early_stopping=early_stopping)
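# Illustrative usage sketch (not part of the original module); `net`, `x`, and `y` are
# hypothetical: a PyTorch classifier returning logits for inputs in [0, 1], plus a batch
# of images and labels:
#   x_adv, is_adv = pgd(net, x, y, n_steps=200, step_size=2/255, epsilon=8/255,
#                       norm="linf", early_stopping=True)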
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Callable
import utils as ut
from autoattack import fab_pt
def fab(model: Callable, x: torch.Tensor, y: torch.Tensor,
n_steps: int,
epsilon: float, norm: ut.NormType,
targeted: bool = False,
n_restarts: int = 1,
n_classes: int = 10):
"""Runs the Fast Adaptive Boundary Attack (Linf, L2, L1).
:param model: Inference function of the model yielding logits.
:param x: Input images.
:param y: Ground-truth labels.
:param n_steps: Number of steps.
:param epsilon: Maximum size of the perturbation measured by the norm.
  :param norm: Norm to use for measuring the size of the perturbation.
  :param targeted: Perform a targeted adversarial attack.
  :param n_restarts: How often to restart the attack.
  :param n_classes: Number of classes of the classifier (used for targeted attacks).
:return: (Adversarial examples, attack success for each sample,
target labels (optional))
"""
assert norm in ("linf", "l2", "l1")
norm = {"linf": "Linf", "l2": "L2", "l1": "L1"}[norm]
n_restarts += 1
optional_kwargs = {}
if targeted:
optional_kwargs["n_target_classes"] = n_classes - 1
attack = fab_pt.FABAttack_PT(
predict=model, n_iter=n_steps, norm=norm,
n_restarts=n_restarts, eps=epsilon,
device=x.device, targeted=targeted,
**optional_kwargs)
x_adv = attack.perturb(x, y)
y_pred = model(x_adv).argmax(-1)
if targeted:
is_adv = y_pred == y
else:
is_adv = y_pred != y
if targeted:
return x_adv.detach(), is_adv.detach(), attack.y_target.detach()
else:
return x_adv.detach(), is_adv.detach()
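# Illustrative usage sketch (hypothetical names): an untargeted Linf FAB run with an
# 8/255 budget against a PyTorch classifier `net` on a batch (x, y):
#   x_adv, is_adv = fab(net, x, y, n_steps=100, epsilon=8/255, norm="linf",
#                       n_restarts=1, n_classes=10)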
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
import numpy as np
import torch
import utils as ut
def general_thermometer_ls_pgd(
loss_fn: Callable, is_adversarial_fn: Callable,
x: torch.Tensor,
y: torch.Tensor, n_steps: int,
step_size: float, epsilon: float, norm: ut.NormType,
l: int, temperature: float = 1.0, annealing_factor=1.0 / 1.2,
random_start: bool = True,
n_restarts=0,
early_stopping: bool = False,
n_averaging_steps: int = 1):
"""Performs a logit-space projected gradient descent (PGD) for an arbitrary loss function
and success criterion for a thermometer-encoded model.
:param loss_fn: Loss function to minimize.
:param is_adversarial_fn: Check if examples are adversarial
:param x: Input images.
:param y: Ground-truth labels.
:param n_steps: Number of steps.
:param step_size: Size of the steps/learning rate.
:param epsilon: Maximum size of the perturbation measured by the norm.
:param norm: Norm to use for measuring the size of the perturbation.
:param l: Number of levels in the thermometer encoding.
:param temperature: Initial softmax temperature used to relax the discrete
encoding.
:param annealing_factor: Multiplicative factor applied to the temperature
after every step.
:param random_start: Randomly start within the epsilon ball.
:param n_restarts: Number of additional random restarts.
:param early_stopping: Stop once an adversarial perturbation has been found
for all examples.
:param n_averaging_steps: Number of repetitions for every gradient
calculation.
:return: (Adversarial examples, attack success for each sample)
"""
assert norm in ("linf",), "LS-PGD only supports linf norm"
assert random_start, "LS-PGD only works with random starts"
#assert epsilon < 1.0 / l, f"Epsilon ({epsilon}) must be smaller " \
# f"than 1.0/l ({1.0 / l})"
def one_hot(y):
L = torch.arange(l, dtype=x.dtype, device=x.device)
L = L.view((1, 1, l, 1, 1)) / l
y = torch.unsqueeze(y, 2)
y = torch.logical_and(
y >= L,
y <= L + 1 / l).float()
return y
def init_mask(x):
# Compute the mask over the bits that we are allowed to attack
mask = torch.zeros((len(x), 3, l, x.shape[-2], x.shape[-1]), dtype=x.dtype,
device=x.device)
for alpha in np.linspace(0, 1, l):
mask += one_hot(torch.maximum(torch.zeros_like(x), x - alpha * epsilon))
mask += one_hot(torch.minimum(torch.ones_like(x), x + alpha * epsilon))
mask = (mask > 0).float()
return mask
def get_final_x(u):
x = torch.argmax(u, 2) / l
# now move x as close as possible to x_orig without changing
# the argmax of the logits
delta = x - x_orig
delta[delta > 0] = torch.floor(delta[delta > 0] * l) / l
delta[delta < 0] = torch.ceil(delta[delta < 0] * l) / l
# only relevant for debugging:
# assert torch.all(torch.abs(delta) <= 1.0/l)
delta = torch.minimum(torch.ones_like(delta) * epsilon, delta)
delta = torch.maximum(-torch.ones_like(delta) * epsilon, delta)
x = x_orig + delta
# only relevant for debugging:
# project back to feasible set (if everything was correct before,
# this shouldn't change anything)
# x2 = ut.clipping_aware_rescaling(x_orig, delta, epsilon, norm=norm,
# growing=False)
# assert torch.all(torch.abs(x - x2) < 1e-8)
return x
x_final = x.clone()
x_orig = x
mask = init_mask(x_orig)
for _ in range(n_restarts + 1):
x_logits = torch.randn_like(mask)
for step in range(n_steps):
# mask u so that x(u) is within the epsilon ball
x_logits = x_logits * mask - (1.0 - mask) * 1e12
# check early stopping
x = get_final_x(x_logits)
is_adv = is_adversarial_fn(x, y)
# print(is_adv[:32].long())
if early_stopping and torch.all(is_adv): #
return x.detach(), is_adv.detach()
x_logits = x_logits.requires_grad_()
x_thermometer = torch.softmax(x_logits / temperature, 2)
# convert something like [0, 0, 1, 0, .., 0] to [1, 1, 1, 0, ..., 0]
x_thermometer = torch.flip(
torch.cumsum(torch.flip(x_thermometer, (2,)), 2), (2,))
x_thermometer = x_thermometer.view((
x_thermometer.shape[0], -1, x_thermometer.shape[-2],
x_thermometer.shape[-1]))
grad_x_logits = torch.zeros_like(x_logits)
for _ in range(n_averaging_steps):
# get gradient of cross-entropy wrt to the thermometer encoded input
loss = loss_fn(x_thermometer, y)
# print(step, loss.item(), is_adv.sum().item())
grad_x_logits += torch.autograd.grad(loss, x_logits)[0] / n_averaging_steps
# perform step
x_logits = (x_logits - step_size * torch.sign(grad_x_logits)).detach()
temperature *= annealing_factor
x = get_final_x(x_logits)
is_adv = is_adversarial_fn(x, y)
x_final[is_adv] = x[is_adv]
is_adv = is_adversarial_fn(x_final, y)
return x_final.detach(), is_adv.detach()
def thermometer_ls_pgd(model: Callable, x: torch.Tensor, y: torch.Tensor,
n_steps: int,
step_size: float, epsilon: float, norm: ut.NormType,
l: int,
temperature: float = 1.0,
annealing_factor=1.0 / 1.2,
random_start: bool = True,
early_stopping: bool = False,
targeted: bool = False,
n_averaging_steps: int = 1):
"""Performs a logit-space projected gradient descent (PGD) with a cross-entropy
objective for a thermometer-encoded model.
:param model: Inference function of the model yielding logits; it operates
on thermometer-encoded inputs.
:param x: Input images.
:param y: Ground-truth labels.
:param n_steps: Number of steps.
:param step_size: Size of the steps/learning rate.
:param epsilon: Maximum size of the perturbation measured by the norm.
:param norm: Norm to use for measuring the size of the perturbation.
:param l: Number of levels in the thermometer encoding.
:param temperature: Initial softmax temperature used to relax the discrete
encoding.
:param annealing_factor: Multiplicative factor applied to the temperature
after every step.
:param random_start: Randomly start within the epsilon ball.
:param early_stopping: Stop once an adversarial perturbation has been found
for all examples.
:param targeted: Perform a targeted adversarial attack.
:param n_averaging_steps: Number of repetitions for every gradient
calculation.
:return: (Adversarial examples, attack success for each sample)
"""
assert norm in ("linf", "l2", "l1")
criterion = torch.nn.CrossEntropyLoss()
sign = 1 if targeted else -1
return general_thermometer_ls_pgd(
loss_fn=lambda x, y: sign * criterion(model(x), y),
is_adversarial_fn=lambda x, y: model(x).argmax(
-1) == y if targeted else model(x).argmax(-1) != y,
x=x, y=y, n_steps=n_steps, step_size=step_size,
epsilon=epsilon, norm=norm,
l=l, temperature=temperature,
annealing_factor=annealing_factor,
random_start=random_start,
n_averaging_steps=n_averaging_steps,
early_stopping=early_stopping)
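# Minimal usage sketch for `thermometer_ls_pgd`, assuming a model that takes
# thermometer-encoded inputs and returns logits. `model`, `x_batch`, and
# `y_batch` are hypothetical placeholders, and `l=16` is only an example; it
# must match the number of thermometer levels the model was trained with.
def _thermometer_ls_pgd_usage_sketch(model, x_batch, y_batch):
  x_adv, is_adv = thermometer_ls_pgd(
      model, x_batch, y_batch, n_steps=50, step_size=0.01,
      epsilon=8 / 255, norm="linf", l=16,
      temperature=1.0, annealing_factor=1.0 / 1.2)
  return x_adv, is_adv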
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing_extensions import Literal
from typing import Tuple
import torch
from typing import Callable
import utils as ut
from autoattack import autopgd_base
class __PatchedAPGDAttack(autopgd_base.APGDAttack):
def dlr_loss(self, x, y):
"""Patched DLR loss that works with less than 3 classes. Taken and modified
from: https://github.com/fra31/auto-attack/blob/master/autoattack/
autopgd_base.py#L567"""
x_sorted, ind_sorted = x.sort(dim=1)
ind = (ind_sorted[:, -1] == y).float()
u = torch.arange(x.shape[0])
if x_sorted.shape[-1] > 2:
# normal dlr loss
return -(x[u, y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (
1. - ind)) / (x_sorted[:, -1] - x_sorted[:, -3] + 1e-12)
else:
# modified dlr loss (w/o the normalizer)
return -(x[u, y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (
1. - ind))
class __PatchedAPGDAttack_targeted(autopgd_base.APGDAttack_targeted):
def dlr_loss_targeted(self, x, y):
"""Patched DLR loss that works with less than 3 classes. Taken and modified
from: https://github.com/fra31/auto-attack/blob/master/autoattack/
autopgd_base.py#L606"""
x_sorted, ind_sorted = x.sort(dim=1)
u = torch.arange(x.shape[0])
if x_sorted.shape[-1] > 2:
# normal dlr loss
return -(x[u, y] - x[u, self.y_target]) / (x_sorted[:, -1] - .5 * (
x_sorted[:, -3] + x_sorted[:, -4]) + 1e-12)
else:
# modified dlr loss (w/o the normalizer)
return -(x[u, y] - x[u, self.y_target])
def auto_pgd(model: Callable, x: torch.Tensor, y: torch.Tensor,
n_steps: int,
epsilon: float, norm: ut.NormType,
loss: Literal["ce", "logit-diff"] = "ce",
targeted: bool = False,
n_restarts: int = 1,
n_averaging_steps: int = 1,
n_classes: int = 10):
"""Performs a standard projected gradient descent (PGD) with a cross-entropy
objective.
:param model: Inference function of the model yielding logits.
:param x: Input images.
:param y: Ground-truth labels.
:param n_steps: Number of steps.
:param epsilon: Maximum size of the perturbation measured by the norm.
:param norm: Norm to use for measuring the size of the perturbation.
examples have been found.
:param targeted: Perform a targeted adversarial attack.
:param n_restarts: How often to restart attack.
:param n_averaging_steps: Number over repetitions for every gradient
calculation.
:return: (Adversarial examples, attack success for each sample,
target labels (optional))
"""
assert norm in ("linf", "l2", "l1")
norm = {"linf": "Linf", "l2": "L2", "l1": "L1"}[norm]
attack_cls = __PatchedAPGDAttack_targeted if targeted \
else __PatchedAPGDAttack
n_restarts += 1
optional_kwargs = {}
if targeted:
optional_kwargs["n_target_classes"] = n_classes - 1
attack = attack_cls(predict=model, n_iter=n_steps, norm=norm,
n_restarts=n_restarts, eps=epsilon,
eot_iter=n_averaging_steps, device=x.device,
seed=None, **optional_kwargs)
attack.loss = "ce" if loss == "ce" else "dlr"
if targeted:
attack.loss += "-targeted"
x_adv = attack.perturb(x, y)
y_pred = model(x_adv).argmax(-1)
if targeted:
is_adv = y_pred == y
else:
is_adv = y_pred != y
if targeted:
return x_adv.detach(), is_adv.detach(), attack.y_target.detach()
else:
return x_adv.detach(), is_adv.detach()
def fix_autoattack(attack):
attack.apgd_targeted = __PatchedAPGDAttack_targeted(
attack.model, n_restarts=attack.apgd_targeted.n_restarts, n_iter=attack.apgd_targeted.n_iter,
verbose=attack.apgd_targeted.verbose, eps=attack.apgd_targeted.eps, norm=attack.apgd_targeted.norm,
eot_iter=attack.apgd_targeted.eot_iter, rho=attack.apgd_targeted.thr_decr, seed=attack.apgd_targeted.seed,
device=attack.apgd_targeted.device, is_tf_model=attack.apgd_targeted.is_tf_model,
logger=attack.apgd_targeted.logger)
attack.apgd = __PatchedAPGDAttack(
attack.model, n_restarts=attack.apgd.n_restarts, n_iter=attack.apgd.n_iter, verbose=attack.apgd.verbose,
eps=attack.apgd.eps, norm=attack.apgd.norm, eot_iter=attack.apgd.eot_iter, rho=attack.apgd.thr_decr,
seed=attack.apgd.seed, device=attack.apgd.device, is_tf_model=attack.apgd.is_tf_model, logger=attack.apgd.logger)
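# Minimal usage sketch for `auto_pgd`, assuming a PyTorch `model` that returns
# logits and a batch `x_batch`, `y_batch`; these names are hypothetical
# placeholders. With `targeted=False` only two values are returned.
def _auto_pgd_usage_sketch(model, x_batch, y_batch):
  x_adv, is_adv = auto_pgd(model, x_batch, y_batch, n_steps=100,
                           epsilon=8 / 255, norm="linf", loss="ce",
                           targeted=False, n_restarts=1)
  return x_adv, is_adv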
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Miscellaneous utilities that don't fit anywhere else."""
import colorsys
import dataclasses
import re
from typing import Any, Callable, Iterable, TypeVar
import numpy as np
import torch as t
InputTensor = t.Tensor | np.ndarray | int | float | Iterable
TorchDevice = t.device | str | None
T = TypeVar("T")
class TensorContainerMixin:
"""Allows unified operation on all tensors contained in a dataclass."""
def _apply(self, fn: Callable[[t.Tensor], t.Tensor]):
result = []
for field in dataclasses.fields(self):
field = getattr(self, field.name)
if t.is_tensor(field):
field = fn(field)
elif isinstance(field, list) or isinstance(field, tuple):
field = [fn(e) if t.is_tensor(e) else e for e in field]
elif isinstance(field, TensorContainerMixin):
field = field._apply(fn)
result.append(field)
return type(self)(*result)
def cuda(self: T) -> T:
return self._apply(lambda v: v.cuda())
def cpu(self: T) -> T:
return self._apply(lambda v: v.cpu())
def detach(self: T) -> T:
return self._apply(lambda v: v.detach())
def numpy(self: T) -> T:
return self._apply(lambda v: v.numpy())
def to(self: T, device: TorchDevice) -> T:
return self._apply(lambda v: v.to(device))
def __getitem__(self: T, index: Any) -> T:
return self._apply(lambda v: v[index])
def get_structure(self) -> dict[str, str]:
"""Debugging routine, returns type and shape of the each field."""
result = {}
for field in dataclasses.fields(self):
v = getattr(self, field.name)
if t.is_tensor(v):
v: t.Tensor = v.detach()
dtype = re.sub(r"^torch\.", "", str(v.dtype))
structure = f"t.{dtype}{list(v.shape)}({v.device})"
elif isinstance(v, np.ndarray):
structure = f"np.{v.dtype.name}{list(v.shape)}"
elif isinstance(v, list):
structure = f"list[{len(v)}]"
elif isinstance(v, tuple):
structure = f"tuple[{len(v)}]"
else:
structure = f"{type(v).__name__}"
result[field.name] = structure
return result
def to_tensor(v: InputTensor, dtype: t.dtype,
device: t.device | str | None = None) -> t.Tensor:
"""Converts a value to tensor, checking the type.
Args:
v: The value to convert. If it is already a tensor or an array, this
function checks that the type is equal to dtype. Otherwise, uses
torch.as_tensor to convert it to tensor.
dtype: The required type.
device: The target tensor device (optional).
Returns:
The resulting tensor
"""
if not t.is_tensor(v):
if hasattr(v, "__array_interface__"):
# Preserve the types of arrays. They must match the given type.
v = t.as_tensor(v)
else:
v = t.as_tensor(v, dtype=dtype)
if v.dtype != dtype:
raise ValueError(f"Expecting type '{dtype}', found '{v.dtype}'")
if device is not None:
v = v.to(device)
return v
def dynamic_tile(partition_lengths: t.Tensor) -> t.Tensor:
"""Computes dynamic tiling with the given partition lengths.
Args:
partition_lengths: The partition lengths, int64[num_partitions]
Returns:
A 1D int tensor, containing partition_lengths[0] zeros,
followed by partition_lengths[1] ones, followed by
partition_lengths[2] twos, and so on.
"""
partition_lengths = t.as_tensor(partition_lengths)
non_zero_idx = partition_lengths.nonzero()[:, 0]
partition_lengths = partition_lengths[non_zero_idx]
start_index = partition_lengths.cumsum(0)
if start_index.shape == (0,):
return start_index
start_index, num_elements = start_index[:-1], start_index[-1]
result: t.Tensor = partition_lengths.new_zeros([num_elements.item()])
start_index = start_index[start_index < num_elements]
result[start_index] = 1
result = result.cumsum(0, dtype=partition_lengths.dtype)
return non_zero_idx[result]
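# Small sketch of `dynamic_tile` and `to_tensor`, following the docstrings
# above: partition lengths [2, 0, 3] expand to the indices of the non-empty
# partitions, and `to_tensor` performs a type-checked conversion.
def _misc_util_sketch():
  lengths = t.tensor([2, 0, 3], dtype=t.int64)
  tiled = dynamic_tile(lengths)  # -> tensor([0, 0, 2, 2, 2])
  values = to_tensor([1.0, 2.0], dtype=t.float32)
  return tiled, values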
def get_palette():
"""Creates a color palette with 32 entries."""
color_palette = []
for h in t.arange(0., 1., 1 / 32):
color_palette.append(colorsys.hsv_to_rgb(h, 1, 0.7))
color_palette.append(colorsys.hsv_to_rgb(h, 0.5, 0.7))
color_palette = t.tensor(color_palette, dtype=t.float32)
g = t.Generator()
g.manual_seed(1)
color_palette = color_palette[t.randperm(color_palette.shape[0], generator=g)]
return color_palette
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Functions to compute transformation matrices."""
import torch as t
from torch.nn import functional as F
def scale(v: t.Tensor) -> t.Tensor:
"""Computes homogeneous scale matrices from scale vectors.
Args:
v: Scale vectors, `float32[B*, N]`
Returns:
Scale matrices, `float32[B*, N+1, N+1]`
"""
v = t.as_tensor(v, dtype=t.float32)
batch_dims = v.shape[:-1]
v = v.reshape([-1, (v.shape[-1])])
index_batch_flat = t.arange(v.shape[0], dtype=t.int64, device=v.device)
index_diag = t.arange(v.shape[1], dtype=t.int64, device=v.device)
index_batch, index_diag = t.meshgrid(index_batch_flat, index_diag,
indexing="ij")
index_batch = index_batch.reshape([-1])
index_diag = index_diag.reshape([-1])
result = v.new_zeros([v.shape[0], v.shape[1] + 1, v.shape[1] + 1])
result[index_batch, index_diag, index_diag] = v.reshape([-1])
result[index_batch_flat, v.shape[-1], v.shape[-1]] = 1
result = result.reshape(batch_dims + result.shape[-2:])
return result
def translate(v: t.Tensor) -> t.Tensor:
"""Computes a homogeneous translation matrices from translation vectors.
Args:
v: Translation vectors, `float32[B*, N]`
Returns:
Translation matrices, `float32[B*, N+1, N+1]`
"""
result = t.as_tensor(v, dtype=t.float32)
dimensions = result.shape[-1]
result = result[..., None, :].transpose(-1, -2)
result = t.constant_pad_nd(result, [dimensions, 0, 0, 1])
id_matrix = t.diag(result.new_ones([dimensions + 1]))
id_matrix = id_matrix.expand_as(result)
result = result + id_matrix
return result
def rotate(
angle: t.Tensor,
axis: t.Tensor,
) -> t.Tensor:
"""Computes a 3D rotation matrices from angle and axis inputs.
The formula used in this function is explained here:
https://en.wikipedia.org/wiki/Rotation_matrix#Conversion_from_and_to_axis–angle
Args:
angle: The rotation angles, `float32[B*]`
axis: The rotation axes, `float32[B*, 3]`
Returns:
The rotation matrices, `float32[B*, 4, 4]`
"""
axis = t.as_tensor(axis, dtype=t.float32)
angle = t.as_tensor(angle, dtype=t.float32)
axis = F.normalize(axis, dim=-1)
sin_axis = t.sin(angle)[..., None] * axis
cos_angle = t.cos(angle)
cos1_axis = (1.0 - cos_angle)[..., None] * axis
_, axis_y, axis_z = t.unbind(axis, dim=-1)
cos1_axis_x, cos1_axis_y, _ = t.unbind(cos1_axis, dim=-1)
sin_axis_x, sin_axis_y, sin_axis_z = t.unbind(sin_axis, dim=-1)
tmp = cos1_axis_x * axis_y
m01 = tmp - sin_axis_z
m10 = tmp + sin_axis_z
tmp = cos1_axis_x * axis_z
m02 = tmp + sin_axis_y
m20 = tmp - sin_axis_y
tmp = cos1_axis_y * axis_z
m12 = tmp - sin_axis_x
m21 = tmp + sin_axis_x
zero = t.zeros_like(m01)
one = t.ones_like(m01)
diag = cos1_axis * axis + cos_angle[..., None]
diag_x, diag_y, diag_z = t.unbind(diag, dim=-1)
matrix = t.stack((diag_x, m01, m02, zero, m10, diag_y, m12, zero, m20, m21,
diag_z, zero, zero, zero, zero, one), dim=-1)
output_shape = axis.shape[:-1] + (4, 4)
result = matrix.reshape(output_shape)
return result
def transform_points_homogeneous(points: t.Tensor, matrix: t.Tensor,
w: float) -> t.Tensor:
"""Transforms 3D points with a homogeneous matrix.
Args:
points: The points to transform, `float32[B*, N, 3]`
matrix: The transformation matrices, `float32[B*, 4, 4]`
w: The W value to add to the points to make them homogeneous. Should be 1
for affine points and 0 for vectors.
Returns:
The transformed points in homogeneous space (with a 4th coordinate),
`float32[B*, N, 4]`
"""
batch_dims = points.shape[:-2]
# Fold all batch dimensions into a single one
points = points.reshape([-1] + list(points.shape[-2:]))
matrix = matrix.reshape([-1] + list(matrix.shape[-2:]))
points = t.constant_pad_nd(points, [0, 1], value=w)
result = t.einsum("bnm,bvm->bvn", matrix, points)
result = result.reshape(batch_dims + result.shape[-2:])
return result
def transform_mesh(mesh: t.Tensor, matrix: t.Tensor,
vertices_are_points=True) -> t.Tensor:
"""Transforms a single 3D mesh.
Args:
mesh: The mesh's triangle vertices, `float32[B*, N, 3, 3]`
matrix: The transformation matrix, `float32[B*, 4, 4]`
vertices_are_points: Whether to interpret the vertices as affine points
or vectors.
Returns:
The transformed mesh, `float32[B*, N, 3, 3]`
"""
original_shape = mesh.shape
mesh = mesh.reshape([-1, mesh.shape[-3] * 3, 3])
matrix = matrix.reshape([-1, 4, 4])
w = 1 if vertices_are_points else 0
mesh = transform_points_homogeneous(mesh, matrix, w=w)
if vertices_are_points:
mesh = mesh[..., :3] / mesh[..., 3:4]
else:
mesh = mesh[..., :3]
return mesh.reshape(original_shape)
def transform_points(points: t.Tensor, matrix: t.Tensor) -> t.Tensor:
"""Transforms points.
Args:
points: The points to transform, `float32[B*, N, 3]`
matrix: Transformation matrices, `float32[B*, 4, 4]`
Returns:
The transformed points, `float32[B*, N, 3]`
"""
result = transform_points_homogeneous(points, matrix, w=1)
result = result[..., :3] / result[..., 3:4]
return result
def chain(transforms: list[t.Tensor], reverse=True) -> t.Tensor:
"""Chains transformations expressed as matrices.
Args:
transforms: The list of transformations to chain
reverse: The order in which transformations are applied. If true, the last
transformation is applied first (which matches matrix multiplication
order). False matches natural order, where the first transformation is
applied first.
Returns:
Matrix combining all transformations.
"""
assert transforms
if not reverse:
transforms = transforms[::-1]
result = transforms[0]
for transform in transforms[1:]:
result = result @ transform
return result
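# Small sketch combining `translate`, `rotate`, `chain`, and `transform_points`.
# The values are arbitrary and only illustrate the expected shapes: with
# `reverse=True` (the default), the rotation is applied to the points first,
# followed by the translation.
def _chain_usage_sketch():
  points = t.rand([1, 5, 3])                                # float32[1, 5, 3]
  rot = rotate(t.tensor([0.5]), t.tensor([[0., 0., 1.]]))   # float32[1, 4, 4]
  trn = translate(t.tensor([[1., 2., 3.]]))                 # float32[1, 4, 4]
  world_matrix = chain([trn, rot])
  return transform_points(points, world_matrix)             # float32[1, 5, 3]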
def gl_projection_matrix_from_intrinsics( #
width: t.Tensor, height: t.Tensor, fx: t.Tensor, fy: t.Tensor, cx: t.Tensor,
cy: t.Tensor, znear: float = 0.001, zfar: float = 20.) -> t.Tensor:
"""Computes the camera projection matrix for rendering square images.
Args:
width: Image's `width`, `float32[B*]`.
height: Image's `height`, `float32[B*]`.
fx: Camera's `fx`, `float32[B*]`.
fy: Camera's `fy`, `float32[B*]`.
cx: Camera's `cx`, `float32[B*]`.
cy: Camera's `cy`, `float32[B*]`.
znear: The near plane location.
zfar: The far plane location.
Returns:
World to OpenGL's normalized device coordinates transformation matrices,
`float32[B*, 4, 4]`.
"""
z = t.zeros_like(t.as_tensor(fx))
o = t.ones_like(z)
zn = znear * o
zf = zfar * o
# yapf: disable
result = [
2 * fx / width, z, 2 * (cx / width) - 1, z,
z, 2 * fy / height, 2 * (cy / height) - 1, z,
z, z, (zf + zn) / (zf - zn), -2 * zn * zf / (zf - zn),
z, z, o, z
]
# yapf: enable
result = t.stack([t.as_tensor(v, dtype=t.float32)
for v in result]).reshape((4, 4) + z.shape)
result = result.permute(tuple(range(len(result.shape)))[2:] + (0, 1))
return result
def quaternion_to_rotation_matrix(q: t.Tensor) -> t.Tensor:
"""Computes a rotation matrix from a quaternion.
Args:
q: Rotation quaternions, `float32[B*, 4]`
Returns:
Rotation matrices, `float32[B*, 4, 4]`
"""
q = t.as_tensor(q, dtype=t.float32)
w, x, y, z = t.unbind(q, dim=-1)
zz = t.zeros_like(z)
oo = t.ones_like(z)
s = 2.0 / (q * q).sum(dim=-1)
# yapf: disable
return t.stack([
1 - s * (y ** 2 + z ** 2), s * (x * y - z * w), s * (x * z + y * w), zz,
s * (x * y + z * w), 1 - s * (x ** 2 + z ** 2), s * (y * z - x * w), zz,
s * (x * z - y * w), s * (y * z + x * w), 1 - s * (x ** 2 + y ** 2), zz,
zz, zz, zz, oo
], dim=-1).reshape(q.shape[:-1] + (4, 4))
# yapf: enable
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Type annotations for the input files structures."""
from typing import TypedDict
import numpy as np
# Camera extrinsics, consisting of rotation, followed by translation.
# Used in `Frame` below.
CameraExtrinsics = TypedDict(
"CameraExtrinsics", {
# 3D translation `(x, y, z)`
"translation": tuple[float, float, float],
# Rotation quaternion `(w, x, y, z)`
"rotation": tuple[float, float, float, float]
})
# A single frame annotation, used in `FramesFile` below
Frame = TypedDict(
"Frame", {
# Frame timestamp
"timestamp": int,
# Whether tracks were completed manually on this frame.
"key_frame": bool,
# The camera intrinsics `(fx, fy, cx, cy)`.
"intrinsics": tuple[float, float, float, float],
# The camera extrinsics.
"extrinsics": CameraExtrinsics
})
# A single annotated point correspondence, used in `AnnotatedObject` below.
PointCorrespondence = TypedDict("PointCorrespondence", {
# 2D point on the video frame (normalized)
"2d": tuple[float, float],
# 3D point on the CAD model (in object space)
"3d": tuple[float, float, float]
})
# Track annotations on a frame, used in `AnnotatedObject` below.
TrackEntry = TypedDict(
"TrackEntry", {
# The frame timestamp
"timestamp": int,
# The track box, in normalized coordinates
"box": tuple[float, float, float, float],
# The annotated 2D <=> 3D correspondences
"correspondences": list[PointCorrespondence] | None
})
# An annotated object
AnnotatedObject = TypedDict(
"AnnotatedObject", {
# Unique track/object ID
"track_id": str,
# Whether the object track was annotated automatically or manually
"is_track_automatic": bool,
# ShapeNet model ID. Can be absent, if the annotation pipeline failed
# to propose relevant 3D models to the annotator.
"cad_id": str,
# ShapeNet class of the track
"class": str,
# Object offset in world space `(x, y, z)`. Absent, if the
# aligned 3D object did not pass verification. Order of transformations:
# mirror => scale => rotation => translation
"translation": tuple[float, float, float],
# The rotation quaternion in object space, `(w, x, y, z)`. Absent if
# the aligned 3D object did not pass verification.
"rotation": tuple[float, float, float, float],
# Scale in object space, `(sx, sy, sz)`. Absent if the
# aligned 3D object did not pass verification.
"scale": tuple[float, float, float],
# Whether the object needs to be mirrored in object space
"is_mirrored": bool,
# Track annotations
"track": list[TrackEntry]
})
# Describes the structure of an `objects.json` file.
ObjectsFile = TypedDict("ObjectsFile", {
"clip_name": str,
"objects": list[AnnotatedObject]
})
# Describes the structure of a `frames.json` file.
FramesFile = TypedDict(
"FramesFile", {
# The clip name
"clip_name": str,
# Image size, `(height, width)`
"image_size": tuple[float, float] | None,
# The frame annotations
"frames": list[Frame],
})
# Describes the structure of a `room_structure.npz` file.
RoomStructureFile = TypedDict(
"RoomStructureFile", {
# The clip name, `str[]`
"clip_name": np.ndarray,
# Structural element triangles, `float32[NUM_STRUCT_TRI, 3, 3]`.
# Triangles of each structural element are stored sequentially.
"layout_triangles": np.ndarray,
# Additional per-triangle flags, `int64[NUM_STRUCT_TRI, 3]`.
# Bit 1 indicates the triangle is part of a window frame.
# Bit 2 -- part of a closed door.
"layout_triangle_flags": np.ndarray,
# Number of triangles for each structural element,
# `int64[NUM_STRUCT_ELEM]`. The first `num_tri[0]` triangles in
# `triangles` belong to the first structural element, the next
# `num_tri[1]` -- to the second, and so on.
"layout_num_tri": np.ndarray,
# Semantic labels of the structural elements, `int64[NUM_STRUCT_ELEM]`.
# STRUCTURAL_ELEMENT_TYPES maps these to class names.
"layout_labels": np.ndarray,
# Timestamps of the annotated frames, `int64[NUM_ANN_FRAMES]`.
"annotated_timestamps": np.ndarray,
})
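# A minimal example value for `FramesFile`, with made-up numbers, showing how
# the structures above nest (one frame with intrinsics and extrinsics).
_EXAMPLE_FRAMES_FILE: FramesFile = {
    "clip_name": "videoid_0000000",
    "image_size": (720.0, 1280.0),
    "frames": [{
        "timestamp": 0,
        "key_frame": True,
        "intrinsics": (0.9, 1.6, 0.5, 0.5),
        "extrinsics": {
            "translation": (0.0, 0.0, 0.0),
            "rotation": (1.0, 0.0, 0.0, 0.0),
        },
    }],
}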
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
"""Library for loading room structures."""
import asyncio
import dataclasses
import re
import torch as t
from cad_estate import file_system as fs
from cad_estate import frames as frame_lib
from cad_estate import input_file_structures as input_struct_lib
from cad_estate import misc_util
# We use the following symbols to describe tensors below:
# NUM_FRAMES : Number of loaded frames in a video
# NUM_STRUCT_ELEM : Number of structural elements
# NUM_STRUCT_TRI : The total number of triangles in the room structure
# NUM_ANN_FRAMES : The number of annotated frames
# AH, AW : Height and width of the annotations
STRUCTURAL_ELEMENT_TYPES = ["<ignore>", "wall", "floor", "ceiling", "slanted"]
@dataclasses.dataclass
class RoomStructure(misc_util.TensorContainerMixin):
"""3D structural elements for a clip."""
clip_name: str
"""The RealEstate-10K clip name.
Uniquely identifies the scene. Consists of a YouTube video ID,
followed by "_", and then by a start timestamp
"""
triangles: t.Tensor
"""Structural element triangles, `float32[NUM_STRUCT_TRI, 3, 3]`.
Triangles of each structural element are stored sequentially.
"""
num_tri: t.Tensor
"""Number of triangles for each structural element, `int64[NUM_STRUCT_ELEM]`.
The first `num_tri[0]` triangles in `triangles` belong to the first
structural element, the next `num_tri[1]` -- to the second, and so on.
"""
triangle_flags: t.Tensor
"""Additional per-triangle flags, `int64[NUM_STRUCT_TRI, 3]`.
Bit 1 indicates the triangle is part of a window frame. Bit 2 -- part of a
closed door.
"""
labels: t.Tensor
"""Semantic labels of the structural elements, `int64[NUM_STRUCT_ELEM]`.
STRUCTURAL_ELEMENT_TYPES maps these to class names.
"""
annotated_timestamps: t.Tensor
"""Timestamps of the annotated frames, `int64[NUM_ANN_FRAMES]`."""
@dataclasses.dataclass
class StructureAnnotations(misc_util.TensorContainerMixin):
"""Structural element annotations."""
structural_element_masks: t.Tensor
"""Amodal structural element masks, `uint8[NUM_FRAMES, AH, AW]`."""
visible_parts_masks: t.Tensor
"""Structural elements visible parts, `uint8[NUM_FRAMES, AH, AW]`."""
def load_room_structure(struct_npz: input_struct_lib.RoomStructureFile):
"""Loads room structures."""
# Load the frame information
clip_name = struct_npz["clip_name"].item()
return RoomStructure(
clip_name=clip_name,
triangles=t.as_tensor(struct_npz["layout_triangles"], dtype=t.float32),
triangle_flags=t.as_tensor(struct_npz["layout_triangle_flags"],
dtype=t.int64),
num_tri=t.as_tensor(struct_npz["layout_num_tri"], dtype=t.int64),
labels=t.as_tensor(struct_npz["layout_labels"], dtype=t.int64),
annotated_timestamps=t.as_tensor(struct_npz["annotated_timestamps"],
dtype=t.int64),
)
async def _load_annotation(target_ref: list[t.Tensor], num_frames: int,
index: int, path: str):
img = await frame_lib.read_and_decode_image(path)
if img.dtype == t.int16:
if img.min() < 0 or img.max() > 255:
raise ValueError("Mask contains values outside of [0, 255].")
img = img.to(t.uint8)
target, = target_ref
if target is None:
h, w = img.shape
target_ref[0] = t.full([num_frames, h, w], -1, dtype=t.uint8)
target, = target_ref
target[index] = img
async def load_annotations(room: RoomStructure, frames: frame_lib.Frames,
annotation_dir: str, raw: bool = False):
"""Loads the room structure annotations."""
assert frames.clip_name == room.clip_name
num_frames, c, h, w = frames.frame_images.shape
assert c == 3
structure_ref, visible_ref = [None], [None]
annotated_timestamps = set(room.annotated_timestamps.tolist())
video_name = re.match(r"^(.+)_\d+$", room.clip_name).group(1)
prefix = "raw" if raw else "processed"
tasks = []
for i, timestamp in enumerate(frames.frame_timestamps.tolist()):
if timestamp not in annotated_timestamps:
continue
root_dir = fs.join(annotation_dir, room.clip_name, "structure_annotations")
struct_path = fs.join(root_dir,
f"{prefix}_structure_{video_name}_{timestamp}.png")
tasks.append(_load_annotation(structure_ref, num_frames, i, struct_path))
visible_path = fs.join(root_dir,
f"{prefix}_visible_{video_name}_{timestamp}.png")
tasks.append(_load_annotation(visible_ref, num_frames, i, visible_path))
if tasks:
await asyncio.gather(*tasks)
structure_masks, = structure_ref
visible_masks, = visible_ref
else:
structure_masks = t.full([num_frames, h, w], -1, dtype=t.uint8)
visible_masks = t.full([num_frames, h, w], -1, dtype=t.uint8)
return StructureAnnotations(structural_element_masks=structure_masks,
visible_parts_masks=visible_masks)
def annotated_frames_mask(room: RoomStructure, frames: frame_lib.Frames):
mask = room.annotated_timestamps[None, :] == frames.frame_timestamps[:, None]
return mask.any(dim=1)
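# Minimal usage sketch: `struct_npz` stands for the result of `np.load` on a
# `room_structure.npz` file and `frames` for metadata loaded with
# `frame_lib.load_metadata`; both are placeholders supplied by the caller.
def _room_structure_usage_sketch(struct_npz, frames: frame_lib.Frames):
  room = load_room_structure(struct_npz)
  annotated = annotated_frames_mask(room, frames)  # bool[NUM_FRAMES]
  return room, annotated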
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Dataset classes for reading CAD-Estate annotations and their videos.
Example usage during training:
```python
from cad_estate import objects as obj_lib
from cad_estate import datasets as dataset_lib
cfg = dataset_lib.ObjectDatasetConfig(
split_file_path=fs.abspath("~/prj/cad_estate/data/obj_test.txt"),
annotation_directory=fs.abspath("~/prj/cad_estate/data/annotations"),
frames_directory=fs.abspath("~/prj/cad_estate/data/frames"), num_frames=12)
shapenet_meta = obj_lib.load_shapenet_metadata(
fs.abspath("~/prj/cad_estate/data/shapenet_npz"))
for epoch in range(num_epochs):
dataset = ObjectDataset(cfg, shapenet_meta, seed=epoch)
data_loader = DataLoader(dataset)
for batch in data_loader:
...
```
Example usage during evaluation:
```python
dataset = ObjectDataset(cfg, shapenet_meta, seed=None)
data_loader = DataLoader(dataset)
for batch in data_loader:
...
```
"""
import asyncio
import dataclasses
import io
import json
import math
import re
from typing import Generic, TypeVar
import numpy as np
import torch as t
import torch.utils.data
from cad_estate import file_system as fs
from cad_estate import frames as frame_lib
from cad_estate import objects as obj_lib
from cad_estate import room_structure as struct_lib
@dataclasses.dataclass
class DatasetCommonConfig:
"""Configures the common part between structure and object datasets."""
split_file_path: str
"""Path to a text file containing the scene names that are part of the
dataset split (one scene per line)."""
annotation_directory: str
"""Path to directory containing the annotations."""
frames_directory: str
"""Path to directory containing the frame images."""
num_frames: int
"""Number of frames to sample for a clip."""
replication_factor: float = 1.0
"""Allows replicating/trimming the dataset."""
znear: float = 0.1
"""ZNear, used to compute an OpenGL compatible projection matrix."""
zfar: float = 50
"""ZFar, used to compute an OpenGL compatible projection matrix."""
@dataclasses.dataclass
class ObjectDatasetConfig(DatasetCommonConfig):
"""Configures an object dataset."""
read_tracks: bool = False
"""Whether to read the 2D object tracks."""
only_frames_with_manual_tracks: bool = False
"""If there are frames with manual track completion, sample only from them."""
@dataclasses.dataclass
class RoomStructureDatasetConfig(DatasetCommonConfig):
"""Configures a room structures dataset."""
load_raw_annotations: bool | None = None
"""Whether to load raw or processed annotations.
If None, annotations are not loaded."""
only_frames_with_annotations: bool = False
"""Whether to sample only from frames with annotations."""
@dataclasses.dataclass
class ObjectsElement:
"""Element returned when iterating an objects dataset."""
frames: frame_lib.Frames
"""The loaded frames."""
objects: obj_lib.Objects
"""The loaded objects."""
track_boxes: t.Tensor | None
"""The track boxes, `float32[NUM_OBJ, NUM_FRAMES, 4]`. See the
`cad_estate.objects.load_track_boxes` for more details."""
@dataclasses.dataclass
class RoomStructuresElement:
"""Element returned when iterating a room structure dataset."""
frames: frame_lib.Frames
"""The loaded frames."""
room: struct_lib.RoomStructure
"""The loaded room structure."""
annotations: struct_lib.StructureAnnotations | None
"""The room structure annotations."""
_DATASET_CACHE: dict[str, str] = {}
def _read_cached(path: str, cache_enabled: bool) -> str:
if path not in _DATASET_CACHE or not cache_enabled:
_DATASET_CACHE[path] = fs.read_text(path)
return _DATASET_CACHE[path]
Config = TypeVar("Config", bound=DatasetCommonConfig)
class DatasetBase(Generic[Config]):
"""Contains the common functionality between objects and room structures."""
def __init__(self, config: Config, seed: int | None,
cache_dataset_description: bool):
"""Initializes the dataset.
Args:
config: The dataset config
seed: Seed for sampling frames and shuffling scenes (see below).
cache_dataset_description: Whether to cache the contents of
`config.split_file_path` in memory, for faster re-initialization.
The scene iteration order and the frames sampled for a scene, depend only
on the construction arguments (configuration and seed). Iterating over two
datasets with identical construction arguments, or iterating over a dataset
multiple times will yield the exact same results.
To sample different frames and to visit the scenes in a different order
during training, create a new dataset for each epoch:
```
for epoch in range(num_epochs):
data_loader = DataLoader(ObjectDataset(config, shapenet_meta, epoch))
for batch in data_loader:
...
```
Frames are chosen by sampling their indices in a stratified manner.
If `seed` is `None`, the indices are evenly spaced.
The `_compute_indices` and `_sample_frames` methods can be overridden for
a different shuffling and sampling behavior.
"""
self.config: Config = config
split_description = _read_cached(self.config.split_file_path,
cache_dataset_description)
self.original_scene_names = self._read_scene_names(split_description)
self.seed = seed
self.indices = self._compute_indices()
def _read_scene_names(self, split_description: str):
result = [v for v in split_description.splitlines() if v]
result = [v for v in result if not re.match(r"^(\s*#.*|\s*)$", v)]
return np.array(result, dtype=np.str_)
def _compute_indices(self) -> t.Tensor:
num_scenes = len(self.original_scene_names)
if self.seed is None:
_i = lambda: t.arange(num_scenes, device="cpu")
else:
g = t.Generator()
g.manual_seed(self.seed)
_i = lambda: t.randperm(num_scenes, generator=g, device="cpu")
max_repeats = int(math.ceil(self.config.replication_factor))
indices = t.cat([_i() for _ in range(max_repeats)])
indices_len = int(num_scenes * self.config.replication_factor)
indices = indices[:indices_len]
return indices
@property
def scene_names(self):
"""Returns the already shuffled scene names."""
return self.original_scene_names[self.indices]
def __len__(self):
return self.indices.shape[0]
def _sample_frames(self, frames: frame_lib.Frames):
if self.seed is None:
return frame_lib.sample_regular(frames, self.config.num_frames)
g = t.Generator()
g.manual_seed(int(self.indices[self.seed]))
return frame_lib.sample_stratified(frames, self.config.num_frames, g)
def _get_frame_metadata(self, index: int):
scene_name = self.scene_names[index]
json_path = fs.join(self.config.annotation_directory, scene_name,
"frames.json")
frames = frame_lib.load_metadata(json.loads(fs.read_text(json_path)))
return frames
class ObjectDataset(DatasetBase[ObjectDatasetConfig],
torch.utils.data.Dataset[ObjectsElement]):
def __init__(self, config: ObjectDatasetConfig,
shapenet_meta: obj_lib.ShapeNetMetadata, seed: int | None,
cache_dataset_description: bool = True):
super().__init__(config, seed, cache_dataset_description)
self.shapenet_meta = shapenet_meta
def __getitem__(self, index: int) -> ObjectsElement:
scene_name = self.scene_names[index]
json_path = fs.join(self.config.annotation_directory, scene_name,
"objects.json")
obj_json = json.loads(fs.read_text(json_path))
objects = asyncio.run(obj_lib.load_objects(obj_json, self.shapenet_meta))
frames = self._get_frame_metadata(index)
if (self.config.only_frames_with_manual_tracks
and frames.manual_track_annotations.any()):
frames = frame_lib.filter(frames, frames.manual_track_annotations)
frames = frame_lib.filter(frames, self._sample_frames(frames))
frames = asyncio.run(
frame_lib.load_images(frames, self.config.frames_directory))
tracks = None
if self.config.read_tracks:
tracks = obj_lib.load_track_boxes(obj_json, frames)
return ObjectsElement(frames, objects, tracks)
class RoomStructuresDataset(
DatasetBase[RoomStructureDatasetConfig],
torch.utils.data.Dataset[RoomStructuresElement],
):
def __init__(self, config: RoomStructureDatasetConfig, seed: int | None,
cache_dataset_description: bool = True):
super().__init__(config, seed, cache_dataset_description)
def __getitem__(self, index: int) -> RoomStructuresElement:
scene_name = self.scene_names[index]
room_bytes = fs.read_bytes(
fs.join(self.config.annotation_directory, scene_name,
"room_structure.npz"))
room = struct_lib.load_room_structure(np.load(io.BytesIO(room_bytes)))
frames = self._get_frame_metadata(index)
if self.config.only_frames_with_annotations:
mask = struct_lib.annotated_frames_mask(room, frames)
frames = frame_lib.filter(frames, mask)
frames = frame_lib.filter(frames, self._sample_frames(frames))
frames = asyncio.run(
frame_lib.load_images(frames, self.config.frames_directory))
annotations = None
if self.config.load_raw_annotations is not None:
annotations = asyncio.run(
struct_lib.load_annotations(room, frames,
self.config.annotation_directory,
self.config.load_raw_annotations))
return RoomStructuresElement(frames, room, annotations)
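# Minimal usage sketch for the room structure dataset. The paths are
# hypothetical placeholders; `batch_size=None` makes the DataLoader return
# dataset elements directly, without collating the dataclasses.
def _room_structures_dataset_sketch():
  cfg = RoomStructureDatasetConfig(
      split_file_path="/path/to/split.txt",
      annotation_directory="/path/to/annotations",
      frames_directory="/path/to/frames",
      num_frames=8, only_frames_with_annotations=True)
  dataset = RoomStructuresDataset(cfg, seed=0)
  loader = torch.utils.data.DataLoader(dataset, batch_size=None)
  for element in loader:
    _ = element.room.triangles  # float32[NUM_STRUCT_TRI, 3, 3]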
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Various debugging helpers."""
import base64
import io
import logging
import re
import typing
import numpy as np
import PIL.Image
import torch as t
from IPython.core import display
log = logging.getLogger(__name__)
def to_hwc_rgb8(imgarr: typing.Any) -> np.ndarray:
if t.is_tensor(imgarr): # Torch -> Numpy
imgarr = imgarr.detach().cpu().numpy()
if hasattr(imgarr, "numpy"): # TF -> Numpy
imgarr = imgarr.numpy()
if len(imgarr.shape) == 2: # Monochrome -> RGB
imgarr = np.stack([imgarr] * 3, -1)
if (len(imgarr.shape) == 3 and imgarr.shape[0] <= 4
and (imgarr.shape[1] > 4 or imgarr.shape[2] > 4)): # CHW -> HWC
imgarr = np.transpose(imgarr, [1, 2, 0])
if len(imgarr.shape) == 3 and imgarr.shape[-1] == 4: # RGBA -> RGB
imgarr = imgarr[:, :, :3]
if len(imgarr.shape) == 3 and imgarr.shape[-1] == 1: # Monochrome -> RGB
imgarr = np.concatenate([imgarr] * 3, -1)
if imgarr.dtype == np.float32 or imgarr.dtype == np.float64:
imgarr = np.minimum(np.maximum(imgarr * 255, 0), 255).astype(np.uint8)
if imgarr.dtype == np.int32 or imgarr.dtype == np.int64:
imgarr = np.minimum(np.maximum(imgarr, 0), 255).astype(np.uint8)
if imgarr.dtype == np.bool_:
imgarr = imgarr.astype(np.uint8) * 255
if (len(imgarr.shape) != 3 or imgarr.shape[-1] != 3
or imgarr.dtype != np.uint8):
raise ValueError(
"Cannot display image from array with type={} and shape={}".format(
imgarr.dtype, imgarr.shape))
return imgarr[..., :3]
def image_as_url(imgarr: np.ndarray, fmt: str = "png") -> str:
img = PIL.Image.fromarray(imgarr, "RGB")
buf = io.BytesIO()
img.save(buf, fmt)
b64 = base64.encodebytes(buf.getvalue()).decode("utf8")
b64 = "data:image/png;base64,{}".format(b64)
return b64
class Image(typing.NamedTuple):
image: typing.Any
label: str
width: int
def get_html_for_images(*orig_images, fmt="png", w=None):
table_template = """
<div style="display: inline-flex; flex-direction: row; flex-wrap:wrap">
{}
</div>
"""
item_template = """
<div style="display: inline-flex; flex-direction: column; flex-wrap:
nowrap; align-items: center">
<img style="margin-right: 0.5em" src="{image}" width="{width}"/>
<div style="margin-bottom: 0.5em; margin-right: 0.5em">{label}</div>
</div>
"""
images = []
def append_image(image):
image = to_hwc_rgb8(image)
width = image.shape[1] if not w else w
images.append(Image(label="Image {}".format(idx), image=image, width=width))
for idx, item in enumerate(orig_images):
if isinstance(item, str) and images:
images[-1] = images[-1]._replace(label=item)
elif isinstance(item, bytes):
image = np.array(PIL.Image.open(io.BytesIO(item)))
append_image(image)
elif isinstance(item, PIL.Image.Image):
append_image(np.array(item))
elif isinstance(item, int) and images:
images[-1] = images[-1]._replace(width=item)
else:
append_image(item)
images = [v._replace(image=image_as_url(v.image, fmt)) for v in images]
table = [item_template.format(**v._asdict()) for v in images]
table = table_template.format("".join(table))
return table
def display_images(*orig_images, **kwargs):
"""Display images in a IPython environment"""
display.display(display.HTML(get_html_for_images(*orig_images, **kwargs)))
def print_tensor(v: t.Tensor):
v = v.detach()
dtype = re.sub(r"^torch\.", "", str(v.dtype))
sep = "\n" if len(v.shape) > 1 else " "
return f"{dtype}{list(v.shape)}({v.device}){{{sep}{v.cpu().numpy()}{sep}}}"
def better_tensor_display():
"""Better string representation of tensors for python debuggers."""
np.set_printoptions(4, suppress=True)
t.set_printoptions(4, sci_mode=False)
t.Tensor.__repr__ = print_tensor
def better_jupyter_display():
try:
def _print_key_dict(v, p, cycle):
p.text(str(list(v)))
formatters = get_ipython().display_formatter.formatters['text/plain']
formatters.for_type("collections.abc.KeysView", _print_key_dict)
except Exception:
log.exception("Unable to instrument Jupyter notebook")
def dump_state(path: str, **kwargs):
state_dict = {}
for k, v in kwargs.items():
assert isinstance(k, str)
if t.is_tensor(v):
v = v.detach().cpu()
state_dict[k] = v
t.save(state_dict, path)
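# Minimal usage sketch for the helpers above: converts a random CHW float
# image, displays it (only meaningful inside an IPython/Jupyter session), and
# dumps it to a hypothetical path.
def _debug_sketch():
  img = t.rand([3, 64, 64])
  display_images(img, "random noise", w=128)
  dump_state("/tmp/debug_state.pt", image=img)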
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Library for loading and manipulating video frames."""
import dataclasses
import io
import re
import PIL.Image
import numpy as np
import torch as t
from cad_estate import file_system as fs
from cad_estate import misc_util
from cad_estate import input_file_structures as input_struct_lib
from cad_estate import transformations as transform_lib
async def read_and_decode_image(image_path: str):
"""Asynchronously reads and decodes an image."""
image_bytes = await fs.read_bytes_async(image_path)
image = PIL.Image.open(io.BytesIO(image_bytes))
image = t.as_tensor(np.array(image), dtype=t.uint8)
if len(image.shape) == 3:
if image.shape[-1] != 3:
raise ValueError("Only RGB and GrayScale images supported!")
image = image.permute([2, 0, 1])
elif len(image.shape) != 2:
raise ValueError("Only RGB and GrayScale images supported!")
return image
# We use the following symbols to describe tensors below:
# IH, IW : Image height and width
# NUM_FRAMES : Number of loaded frames in a video
@dataclasses.dataclass
class Frames(misc_util.TensorContainerMixin):
"""Contains frames of a video."""
clip_name: str
"""The RealEstate-10K clip name.
Uniquely identifies the scene. Consists of a YouTube video ID,
followed by "_", and then by a start timestamp.
"""
frame_timestamps: t.Tensor
"""Frame timestamps (microseconds since video start), `int[NUM_FRAMES]`."""
frame_images: t.Tensor | None
"""The frame images, `uint8[NUM_FRAMES, 3, IH, IW]`.
None, if the frames are not loaded yet.
"""
camera_intrinsics: t.Tensor
"""Camera intrinsics (view->screen transform), `float32[NUM_FRAMES, 4, 4]`.
Entries correspond to frames.
"""
camera_extrinsics: t.Tensor
"""Camera extrinsics (world->view transform), `float32[NUM_FRAMES, 4, 4]`.
Entries correspond to frames.
"""
manual_track_annotations: t.Tensor
"""Whether tracks were completed manually on a frame, `bool[NUM_FRAMES]`."""
def load_metadata(frames_json: input_struct_lib.FramesFile, z_near=0.1,
z_far=200.0):
"""Loads the frames metadata."""
clip_name = frames_json["clip_name"]
frame_timestamps, manual_track_annotations = [], []
camera_intrinsics, camera_t, camera_r = [], [], []
for frame in frames_json["frames"]:
frame_timestamps.append(frame["timestamp"])
camera_intrinsics.append(frame["intrinsics"])
camera_t.append(frame["extrinsics"]["translation"])
camera_r.append(frame["extrinsics"]["rotation"])
manual_track_annotations.append(frame["key_frame"])
h, w = frames_json["image_size"]
fx, fy, cx, cy = t.as_tensor(camera_intrinsics, dtype=t.float32).unbind(-1)
camera_intrinsics = transform_lib.gl_projection_matrix_from_intrinsics(
w, h, fx, fy, cx, cy, z_near, z_far)
camera_extrinsics = transform_lib.chain([
transform_lib.translate(camera_t),
transform_lib.quaternion_to_rotation_matrix(camera_r)
])
frame_timestamps = t.as_tensor(frame_timestamps)
manual_track_annotations = t.as_tensor(manual_track_annotations)
return Frames(clip_name=clip_name, frame_timestamps=frame_timestamps,
frame_images=None, camera_intrinsics=camera_intrinsics,
camera_extrinsics=camera_extrinsics,
manual_track_annotations=manual_track_annotations)
async def load_images(frames: Frames, frames_root_dir: str) -> Frames:
"""Reads the frame images in parallel (using async)."""
video_name = re.match(r"^(.+)_\d+$", frames.clip_name).group(1)
image_paths = [
fs.join(frames_root_dir, video_name, f"{video_name}_{v}.jpg")
for v in frames.frame_timestamps.cpu()
]
# Load images in parallel to mask latencies
images = await fs.await_in_parallel(
[read_and_decode_image(v) for v in image_paths])
images = t.stack(images, dim=0)
return dataclasses.replace(frames, frame_images=images)
def filter(frames: Frames, keep: t.Tensor) -> Frames:
"""Filters the frames and their metadata.
Args:
frames: The input frames
keep: Which frames to keep. Either a boolean mask (`bool[NUM_FRAMES]`) or a
list of indices (`int64[NUM_FRAMES_TO_KEEP]`).
Returns:
The frames object, with frames filtered according to the arguments.
"""
frame_images = frames.frame_images
if frame_images is not None:
frame_images = frame_images[keep]
return Frames(
clip_name=frames.clip_name,
frame_timestamps=frames.frame_timestamps[keep],
frame_images=frame_images,
camera_intrinsics=frames.camera_intrinsics[keep],
camera_extrinsics=frames.camera_extrinsics[keep],
manual_track_annotations=frames.manual_track_annotations[keep],
)
def _regular_indices_impl(frames: Frames, num_frames_to_keep: int,
offset: t.Tensor) -> t.Tensor:
"""Returns evenly spaced frame indices."""
device = frames.frame_timestamps.device
frame_index = t.arange(num_frames_to_keep, dtype=t.float32, device=device)
if offset is not None:
frame_index = frame_index + offset
num_frames = len(frames.frame_timestamps)
frame_index = frame_index / num_frames_to_keep * num_frames
frame_index = frame_index.floor().clip(0, num_frames - 1).to(t.int64)
return frame_index
def sample_stratified(frames: Frames, num_frames_to_keep: int,
rng: t.Generator | None = None) -> t.Tensor:
"""Samples frame indices in a stratified manned, returns boolean mask."""
offset = t.rand(num_frames_to_keep, generator=rng,
device=frames.frame_timestamps.device)
return _regular_indices_impl(frames, num_frames_to_keep, offset)
def sample_regular(frames: Frames, num_frames_to_keep: int) -> t.Tensor:
"""Samples evenly spaced frame indices, returns boolean mask."""
return _regular_indices_impl(frames, num_frames_to_keep, None)
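# Minimal usage sketch: `frames_json` stands for an already parsed
# `frames.json` dict and `frames_dir` for the frame image directory; both are
# placeholders supplied by the caller.
async def _frames_usage_sketch(frames_json, frames_dir: str) -> Frames:
  frames = load_metadata(frames_json)
  frames = filter(frames, sample_regular(frames, num_frames_to_keep=8))
  return await load_images(frames, frames_dir)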
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Library for handling command line flags in a structured way."""
import argparse
import dataclasses
import enum
import re
import typing
from typing import Any, Sequence, Type, TypeVar
T = TypeVar('T')
class ArgType(enum.Enum):
"""The possible argument types"""
FLAG = 1 # Flag, prefixed by a "--"
POSITIONAL = 2 # Positional argument
REMAINDER = 3 # The remaining arguments
FLAG = ArgType.FLAG
POSITIONAL = ArgType.POSITIONAL
REMAINDER = ArgType.REMAINDER
def flag(help_message: str, *, default: Any = None,
arg_type: ArgType = ArgType.FLAG, short_name: str | None = None):
"""Marks a dataclass field as a command line flag.
Args:
help_message: The help message.
default: The default value. If `None`, the flag becomes required. Has no
effect on multi-value flags (i.e., fields with a list type).
arg_type: Whether this is a positional argument
short_name: An optional alternative short name for the flag. "-" will be
automatically prepended to this name.
Returns:
A dataclass field, populated with metadata corresponding to the arguments
of this function.
The flag name will match the field name, with "--" prepended to it.
Supported field/flag types: `str`, `int`, `float`, `bool`, `List[str]`,
`List[int]`, `List[float]`.
"""
return dataclasses.field(
default=default, metadata={
"help": help_message,
"arg_type": arg_type,
"short_name": short_name
})
def parse_flags(flag_struct_type: Type[T],
flags: Sequence[str] | None = None) -> T:
"""Parses command line flags into a structured dataclass representation.
Args:
flag_struct_type: The class of the flags dataclass structure. The docstring
of this class becomes the program description. Each field must be marked
as a flag, using the `flag` function above.
flags: The flags passed to the program. Will be taken from `sys.argv` by
default.
Returns:
The parsed flags, filled into a new object of type `arg_type`.
"""
list_flag_marker = object()
list_default_args = {}
parser = argparse.ArgumentParser(description=flag_struct_type.__doc__)
for field in dataclasses.fields(flag_struct_type):
field_meta = field.metadata
help_message = field_meta["help"]
short_name = field_meta["short_name"]
arg_type = field_meta["arg_type"]
if arg_type in {ArgType.POSITIONAL, ArgType.REMAINDER}:
flag_name = [field.name]
else:
flag_name = ["--" + field.name]
if short_name:
flag_name += ["-" + short_name]
default_value = field.default
is_required = field.default is None
flag_type = field.type
is_list = typing.get_origin(field.type) == list
if is_list:
flag_type, = typing.get_args(flag_type)
list_default_args[field.name] = default_value or []
default_value = list_flag_marker
is_required = False
if flag_type in {str, int, float}:
if arg_type == ArgType.POSITIONAL:
kwargs = dict(nargs=("*" if is_list else None))
elif arg_type == ArgType.REMAINDER:
kwargs = dict(nargs="...")
else:
kwargs = dict(required=is_required, nargs=("*" if is_list else None))
parser.add_argument(*flag_name, type=flag_type, default=default_value,
help=help_message, **kwargs)
elif field.type == bool:
assert not is_list
group = parser.add_mutually_exclusive_group(required=is_required)
group.add_argument(*flag_name, default=default_value, dest=field.name,
action="store_true", help=help_message)
neg_flag_name = [re.sub(r"^(--?)", r"\1no", v) for v in flag_name]
group.add_argument(*neg_flag_name, default=default_value, dest=field.name,
action="store_false", help=help_message)
else:
raise ValueError(
f"Unsupported type '{field.type}' for argument '{field.name}'")
result_args = parser.parse_args(args=flags)
result_args = {
k: (v if v != list_flag_marker else list_default_args[k])
for k, v in vars(result_args).items()
}
return flag_struct_type(**result_args)
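# Example (illustrative sketch, not part of this library): defining a flag
# structure and parsing arguments with it. The dataclass and field names below
# are hypothetical.
#
#   @dataclasses.dataclass(frozen=True)
#   class ExampleArgs:
#     """An example program."""
#     input_path: str = flag("Path to the input file.")
#     num_workers: int = flag("Number of worker threads.", default=4)
#     verbose: bool = flag("Enable verbose output.", default=False)
#
#   args = parse_flags(ExampleArgs, ["--input_path", "/tmp/in.txt", "--verbose"])
#   # args.input_path == "/tmp/in.txt", args.num_workers == 4, args.verbose is True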
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Library for loading aligned objects."""
import dataclasses
import io
import json
import numpy as np
import torch as t
from cad_estate import file_system as fs
from cad_estate import frames as frame_lib
from cad_estate import input_file_structures as input_struct_lib
from cad_estate import misc_util
from cad_estate import transformations as transform_lib
# We use the following symbols to describe tensors below:
# NUM_OBJ : Number of objects
# NUM_OBJ_TRI : Total number of triangles in all objects
# NUM_FRAMES : Number of loaded frames in a video
@dataclasses.dataclass
class Objects(misc_util.TensorContainerMixin):
"""3D objects inside a video clip."""
clip_name: str
"""The RealEstate-10K clip name.
Uniquely identifies the scene. Consists of a YouTube video ID,
followed by "_", and then by a start timestamp
"""
triangles: t.Tensor
"""The triangles of all objects, `float32[NUM_OBJ_TRI, 3, 3]`.
Coordinates are in world space. The second dimension is triangle vertices,
the third dimension is 3D coordinates (`X`, `Y`, `Z`). Triangles of each
object instance are stored sequentially.
"""
num_tri: t.Tensor
"""The number of triangles in each object, `int64[NUM_OBJ]`.
The first `num_tri[0]` triangles in `triangles` belong to the first
object, the next `num_tri[1]` belong to the second object, and so on.
"""
object_to_world: t.Tensor
"""Object->world transformation matrices, `float32[NUM_OBJ, 4, 4]`.
Applying the inverse matrix to the triangles of an object will result in the
original ShapeNet geometry.
"""
mesh_ids: np.ndarray
"""The ShapeNet model IDs for the objects, `str[NUM_OBJ]`."""
symmetries: t.Tensor
"""The object symmetries, `int64[NUM_OBJ]`.
  A value of N means that the object is N-way symmetric. That is, rotating it
  by `2 * pi / N` radians (`360 / N` degrees) results in the same geometry.
  This value is 1 for asymmetric objects.
"""
labels: np.ndarray
"""The semantic labels of the objects, `int64[NUM_OBJ]`."""
from_automatic_track: t.Tensor
"""Whether an object was created from an automatic or a manual track,
`bool[NUM_OBJ]`."""
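# Example (illustrative sketch): triangles of a single object can be recovered
# from the packed `Objects.triangles` tensor using `num_tri`, e.g. for a
# hypothetical object index `i`:
#
#   offsets = t.cumsum(objects.num_tri, 0) - objects.num_tri
#   tri_i = objects.triangles[offsets[i]:offsets[i] + objects.num_tri[i]]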
@dataclasses.dataclass
class ShapeNetMetadata:
"""Metadata for the ShapeNet dataset."""
shapenet_root_dir: str
"""Root directory for all ShapeNet shapes."""
label_synset: list[str]
"""Maps integer labels to ShapeNet Synsets."""
label_human_readable: list[str]
"""Maps integer labels to human readable class names."""
synset_to_index: dict[str, int]
"""Maps ShapeNet Synsets to integer labels"""
symmetry_dict: dict[str, int]
"""The symmetry for each ShapeNet shape. Symmetry `K` means that the shape
does not change when rotated by `360/K` degrees around the up axis.
"""
def load_shapenet_metadata(shapenet_root_dir: str) -> ShapeNetMetadata:
"""Loads saved ShapeNet metadata."""
labels = json.loads(
fs.read_text(fs.join(shapenet_root_dir, "shapenet_classes.json")))
label_synset = [v["id"] for v in labels]
label_human_readable = [v["human_readable"] for v in labels]
synset_to_index = {k: i for i, k in enumerate(label_synset)}
symmetry_dict = json.loads(
fs.read_text(fs.join(shapenet_root_dir, "symmetry_dict.json")))
return ShapeNetMetadata(shapenet_root_dir=shapenet_root_dir,
label_synset=label_synset,
label_human_readable=label_human_readable,
synset_to_index=synset_to_index,
symmetry_dict=symmetry_dict)
async def load_objects(obj_json: input_struct_lib.ObjectsFile,
shapenet_meta: ShapeNetMetadata):
"""Loads objects from a JSON."""
clip_name = obj_json["clip_name"]
obj_paths, obj_labels, obj_mesh_ids = [], [], []
from_automatic_track = []
for obj in obj_json["objects"]:
if "translation" in obj:
obj_paths.append(
fs.join(shapenet_meta.shapenet_root_dir, obj["class"],
obj["cad_id"] + ".npz"))
obj_mesh_ids.append(obj["cad_id"])
else:
obj_paths.append("")
obj_mesh_ids.append("")
obj_labels.append(shapenet_meta.synset_to_index[obj["class"]])
from_automatic_track.append(obj["is_track_automatic"])
obj_labels = t.as_tensor(obj_labels, dtype=t.int64)
obj_mesh_ids = np.array(obj_mesh_ids, dtype=np.str_)
from_automatic_track = t.as_tensor(from_automatic_track, dtype=t.bool)
unique_cad_paths = sorted(set([v for v in obj_paths if v]))
npz_bytes = await fs.read_all_bytes_async(unique_cad_paths)
meshes = [np.load(io.BytesIO(v))["vertices"] for v in npz_bytes]
meshes = [t.as_tensor(v, dtype=t.float32) for v in meshes]
meshes = {k: v for k, v in zip(unique_cad_paths, meshes)}
obj_num_tri, obj_triangles, object_to_world = [], [], []
obj_symmetries = []
for obj_idx, obj in enumerate(obj_json["objects"]):
if "translation" in obj:
mesh = meshes[obj_paths[obj_idx]]
obj_num_tri.append(mesh.shape[0])
o2w = [
transform_lib.translate(obj["translation"]),
transform_lib.quaternion_to_rotation_matrix(obj["rotation"]),
transform_lib.scale(obj["scale"]),
]
if obj["is_mirrored"]:
o2w.append(transform_lib.scale([-1, 1, 1]))
o2w = transform_lib.chain(o2w)
object_to_world.append(o2w)
obj_triangles.append(transform_lib.transform_mesh(mesh, o2w))
obj_symmetries.append(
shapenet_meta.symmetry_dict[f"{obj['class']}_{obj['cad_id']}"])
else:
obj_num_tri.append(0)
object_to_world.append(t.eye(4))
obj_symmetries.append(1)
obj_num_tri = t.as_tensor(obj_num_tri, dtype=t.int64)
if obj_triangles:
obj_triangles = t.concat(obj_triangles, 0)
object_to_world = t.stack(object_to_world)
obj_symmetries = t.as_tensor(obj_symmetries, dtype=t.int64)
else:
obj_triangles = t.empty([0, 3, 3], dtype=t.float32)
object_to_world = t.empty([0, 4, 4], dtype=t.float32)
obj_symmetries = t.empty([0], dtype=t.int64)
return Objects(clip_name=clip_name, triangles=obj_triangles,
num_tri=obj_num_tri, object_to_world=object_to_world,
labels=obj_labels, mesh_ids=obj_mesh_ids,
symmetries=obj_symmetries,
from_automatic_track=from_automatic_track)
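# Example (illustrative sketch, assuming `import asyncio`): loading the objects
# of a clip. The paths below are hypothetical; `obj_json` is the parsed object
# annotation file of the clip.
#
#   shapenet_meta = load_shapenet_metadata("/data/shapenet_npz")
#   obj_json = json.loads(fs.read_text("/data/cad_estate/<clip>/objects.json"))
#   objects = asyncio.run(load_objects(obj_json, shapenet_meta))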
def load_track_boxes(obj_json: input_struct_lib.ObjectsFile,
frames: frame_lib.Frames):
"""Loads the annotated object tracks, `float32[NUM_OBJ, NUM_FRAMES, 4]`.
`result[i, j]` contains the bounding box (`ymin, xmin, ymax, xmax`) of
object `i` on frame `j`. Coordinates are relative, in the range `[0, 1]`.
If there is no track annotation for an object/frame pair, all box
coordinates are set to -1.
"""
num_obj = len(obj_json["objects"])
num_frames = frames.frame_timestamps.shape[0]
track_boxes = t.full((num_obj, num_frames, 4), -1, dtype=t.float32)
time_stamp_to_index = {
int(v): i for i, v in enumerate(frames.frame_timestamps)
}
for obj_idx, obj in enumerate(obj_json["objects"]):
for track_entry in obj["track"]:
frame_idx = time_stamp_to_index.get(track_entry["timestamp"], -1)
if frame_idx >= 0:
track_boxes[obj_idx, frame_idx] = t.as_tensor(track_entry["box"],
dtype=t.float32)
return track_boxes
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""File library with support for local and GCS file systems."""
import asyncio
import contextlib
import fnmatch
import glob
import logging
import os
import re
import typing as t
import aiofiles
import aiohttp
import aiohttp.client_exceptions
import backoff
import gcloud.aio.storage as aio_storage
import google.api_core.exceptions
from google.cloud import storage
_gcs_client: storage.Client | None = None
_gcs_async_client: aio_storage.Storage | None = None
log = logging.getLogger(__name__)
T = t.TypeVar("T")
NUM_GCS_RETRIES = 3
RECOVERABLE_ERRORS = (aiohttp.ClientResponseError,
aiohttp.client_exceptions.ClientError,
aiohttp.client_exceptions.ClientResponseError,
asyncio.TimeoutError)
def _should_giveup(e: Exception):
if isinstance(e, aiohttp.ClientResponseError) and e.status == 404:
return True
return False
backoff_decorator = backoff.on_exception(
backoff.expo, RECOVERABLE_ERRORS, max_tries=NUM_GCS_RETRIES,
jitter=backoff.full_jitter, backoff_log_level=logging.DEBUG,
giveup_log_level=logging.DEBUG, giveup=_should_giveup)
@contextlib.contextmanager
def auto_close_async_session():
  global _gcs_async_client
  try:
    yield None
  finally:
    if _gcs_async_client:
      asyncio.get_event_loop().run_until_complete(_gcs_async_client.close())
    _gcs_async_client = None
def is_gs_path(p: str):
return p.startswith("gs://")
def splitall(path: str):
"""Splits a path into all of its components."""
result = []
if is_gs_path(path):
result.append(path[:5])
path = path[5:]
while True:
head, tail = os.path.split(path)
if head == path:
result.append(head)
break
if tail == path:
result.append(tail)
break
else:
path = head
result.append(tail)
result.reverse()
return result
def parse_gs_path(p: str):
assert p.startswith("gs://")
p = p[5:]
parts = splitall(p)
bucket, path = parts[0], "/".join(parts[1:])
return bucket, path
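# Example (illustrative): parse_gs_path("gs://my-bucket/some/dir/file.txt")
# returns ("my-bucket", "some/dir/file.txt").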
def get_gcs_client():
global _gcs_client
if not _gcs_client:
_gcs_client = storage.Client()
return _gcs_client
def get_gcs_async_client():
global _gcs_async_client
if not _gcs_async_client:
_gcs_async_client = aio_storage.Storage()
return _gcs_async_client
def repeat_if_error(fn: t.Callable[[], T], num_tries, not_found_ok=False) -> T:
for try_index in range(num_tries - 1):
try:
return fn()
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
if isinstance(e, google.api_core.exceptions.NotFound) and not_found_ok:
return None
log.exception(f"Error in file operation, try={try_index}. Retrying ...")
return fn()
def read_bytes(path: str) -> bytes:
if is_gs_path(path):
bucket_name, gcs_path = parse_gs_path(path)
def _impl():
bucket = get_gcs_client().get_bucket(bucket_name)
return bucket.blob(gcs_path).download_as_string()
return repeat_if_error(_impl, NUM_GCS_RETRIES)
else:
with open(path, "rb") as fl:
return fl.read()
@backoff_decorator
async def read_bytes_async(path: str) -> bytes:
if is_gs_path(path):
bucket_name, gcs_path = parse_gs_path(path)
client = get_gcs_async_client()
return await client.download(bucket_name, gcs_path)
else:
async with aiofiles.open(path, "rb") as fl:
return await fl.read()
async def await_in_parallel(awaitables: t.Collection[t.Awaitable[T]],
max_parallelism=50) -> list[T]:
"""Awaits tasks in parallel, with an upper bound on parallelism."""
results = [None] * len(awaitables)
async def await_result(index: int, awaitable: t.Awaitable[T]):
results[index] = await awaitable
remaining_tasks = [await_result(i, v) for i, v in enumerate(awaitables)]
current_tasks = []
while True:
if current_tasks:
done, pending = await asyncio.wait(current_tasks, timeout=5)
else:
done, pending = [], []
for v in done:
if e := v.exception():
raise e
current_tasks = list(pending)
new_tasks = remaining_tasks[:max_parallelism]
remaining_tasks = remaining_tasks[len(new_tasks):]
current_tasks += [asyncio.create_task(v) for v in new_tasks]
if not current_tasks:
break
return results
async def read_all_bytes_async(
file_paths: t.Sequence[str], max_parallel_read_tasks: int = 50,
progress_callback: t.Callable[[], None] | None = None) -> list[bytes]:
"""Reads binary files in parallel, using the async interface."""
async def read_file(path: str):
result = await read_bytes_async(path)
if progress_callback:
progress_callback()
return result
tasks = [read_file(v) for v in file_paths]
return await await_in_parallel(tasks, max_parallel_read_tasks)
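# Example (illustrative sketch, assuming `import asyncio`): reading several
# files in parallel. The paths below are hypothetical.
#
#   paths = ["gs://my-bucket/a.npz", "/tmp/b.npz"]
#   blobs = asyncio.run(read_all_bytes_async(paths, max_parallel_read_tasks=10))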
def read_text(path: str) -> str:
return read_bytes(path).decode()
async def read_text_async(path: str) -> str:
return (await read_bytes_async(path)).decode()
def write_bytes(path: str, contents: bytes):
if is_gs_path(path):
bucket_name, gcs_path = parse_gs_path(path)
def _impl():
bucket = get_gcs_client().get_bucket(bucket_name)
bucket.blob(gcs_path).upload_from_string(contents)
repeat_if_error(_impl, NUM_GCS_RETRIES)
else:
with open(path, "wb") as fl:
fl.write(contents)
@backoff_decorator
async def write_bytes_async(path: str, contents: bytes):
if is_gs_path(path):
bucket_name, gcs_path = parse_gs_path(path)
client = get_gcs_async_client()
await client.upload(bucket_name, gcs_path, contents)
else:
async with aiofiles.open(path, "wb") as fl:
await fl.write(contents)
async def write_all_bytes_async( #
paths_and_bytes: t.Iterable[t.Tuple[str, bytes]]):
await asyncio.gather(*[write_bytes_async(p, b) for p, b in paths_and_bytes])
def write_all_bytes(paths_and_bytes: t.Iterable[t.Tuple[str, bytes]]):
for k, v in paths_and_bytes:
write_bytes(k, v)
def write_text(path: str, text: str):
write_bytes(path, text.encode())
async def write_text_async(path: str, text: str):
await write_bytes_async(path, text.encode())
def glob_pattern(pattern: str) -> t.Iterable[str]:
if is_gs_path(pattern):
bucket_name, gcs_path = parse_gs_path(pattern)
parts = splitall(gcs_path)
prefix = ""
for part in parts:
if re.match(r".*[?*\[].*", part):
break
prefix = os.path.join(prefix, part)
def _impl():
blobs = get_gcs_client().list_blobs(bucket_name, prefix=prefix)
result = [
f"gs://{bucket_name}/{v.name}" for v in blobs
if fnmatch.fnmatch(v.name, gcs_path)
]
return result
return repeat_if_error(_impl, NUM_GCS_RETRIES)
else:
return glob.glob(pattern)
def unlink_file(path: str):
if is_gs_path(path):
bucket_name, gcs_path = parse_gs_path(path)
return repeat_if_error(
lambda: get_gcs_client().bucket(bucket_name).blob(gcs_path).delete(),
NUM_GCS_RETRIES, not_found_ok=True)
else:
os.unlink(path)
def rename_file(old_path: str, new_path: str):
if is_gs_path(old_path) != is_gs_path(new_path):
log.error("Invalid rename (different file systems): "
f"'{old_path}'->'{new_path}'")
raise ValueError("Both files must be on the same file system")
if is_gs_path(old_path):
bucket_name, old_gcs_path = parse_gs_path(old_path)
_, new_gcs_path = parse_gs_path(new_path)
def _impl():
bucket = get_gcs_client().bucket(bucket_name)
bucket.rename_blob(bucket.blob(old_gcs_path), new_gcs_path)
return repeat_if_error(_impl, NUM_GCS_RETRIES)
else:
os.rename(old_path, new_path)
def make_dirs(path: str):
if is_gs_path(path):
return
os.makedirs(path, exist_ok=True)
def join(*args):
if len(args) == 1:
return args[0]
for i, v in enumerate(args[1:]):
if is_gs_path(v):
return join(*args[i + 1:])
return os.path.join(*args)
def normpath(p: str):
if is_gs_path(p):
return f"gs://{os.path.normpath(p[5:])}"
return os.path.normpath(p)
def isabs(p: str):
if is_gs_path(p):
return True
return os.path.isabs(p)
def dirname(p: str):
return os.path.dirname(p)
def abspath(p: str):
if is_gs_path(p):
return p
return os.path.abspath(os.path.expanduser(p))
def basename(p: str):
return os.path.basename(p)
def relpath(p: str, prefix: str) -> str:
if is_gs_path(p) != is_gs_path(prefix):
raise ValueError("Both paths have to be on the same storage system "
"(either GCS or local)")
if is_gs_path(p):
p = p[5:]
prefix = prefix[5:]
return os.path.relpath(p, prefix)
def splitext(p: str):
return os.path.splitext(p)
@backoff_decorator
def exists(path: str):
if is_gs_path(path):
bucket_name, gcs_path = parse_gs_path(path)
bucket = get_gcs_client().bucket(bucket_name)
return bucket.blob(gcs_path).exists()
else:
return os.path.exists(path)
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Converts ShapeNet CAD models to binary format."""
import dataclasses
import logging
import io
import numpy as np
import ray
import tqdm
from cad_estate import structured_arg_parser as arg_lib
from cad_estate import file_system as fs
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class Args:
"""Converts ShapeNet CAD models to binary format."""
shapenet_root: str = arg_lib.flag("Path to ShapeNet's root directory.")
output_root: str = arg_lib.flag("Path to the output root directory.")
def read_obj(obj_path: str):
"""A simple OBJ file reader."""
vertices = []
faces = []
  for line in fs.read_text(obj_path).split("\n"):
parts = line.strip().split()
if not parts:
continue
if parts[0] == 'v':
vertices.append([float(v) for v in parts[1:4]])
if parts[0] == 'f':
faces.append([int(p.split('/')[0]) - 1 for p in parts[1:4]])
vertices = np.array(vertices, np.float32)
faces = np.array(faces, np.int32)
return vertices[faces]
def cleanup_mesh(mesh: np.ndarray):
"""Removes degenerate triangles from a mesh."""
s1 = mesh[:, 2] - mesh[:, 0]
s2 = mesh[:, 1] - mesh[:, 0]
l1 = np.linalg.norm(s1, axis=-1)
l2 = np.linalg.norm(s2, axis=-1)
eps = 1e-27
is_degenerate = (l1 < eps) | (l2 < eps)
l1 = np.maximum(l1, eps)
  l2 = np.maximum(l2, eps)
s1 /= l1[..., None]
s2 /= l2[..., None]
g = np.cross(s1, s2, axis=-1)
lg = np.linalg.norm(g, axis=-1)
is_degenerate |= lg < 1e-10
keep_indices, = np.where(~is_degenerate)
mesh = mesh[keep_indices]
return mesh
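# Example (illustrative): a zero-area (collinear) triangle is removed by
# cleanup_mesh.
#
#   tri = np.array([[[0, 0, 0], [1, 0, 0], [2, 0, 0]]], np.float32)
#   cleanup_mesh(tri).shape  # -> (0, 3, 3)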
def process_mesh(input_path: str, output_root: str):
log.info(f"Processing {input_path}...")
fn_parts = fs.splitall(input_path)
label = fn_parts[-4]
mesh_id = fn_parts[-3]
mesh = read_obj(input_path)
mesh = cleanup_mesh(mesh)
npz_path = fs.join(output_root, label, mesh_id + ".npz")
np.savez_compressed(fl := io.BytesIO(), vertices=mesh, label=label,
mesh_id=mesh_id)
fs.make_dirs(fs.dirname(npz_path))
fs.write_bytes(npz_path, fl.getvalue())
def main():
args = arg_lib.parse_flags(Args)
sn_root_dir = fs.normpath(fs.abspath(args.shapenet_root))
print("Reading mesh file names ...")
obj_files = sorted(
fs.glob_pattern(fs.join(sn_root_dir, "*/*/models/model_normalized.obj")))
out_dir = fs.normpath(fs.abspath(args.output_root))
print(f"Converting {len(obj_files)} meshes from {sn_root_dir} to {out_dir}")
ray.init()
process_fn = ray.remote(process_mesh)
tasks = [process_fn.remote(v, out_dir) for v in obj_files]
progress_bar = tqdm.tqdm(total=len(tasks))
while tasks:
done, tasks = ray.wait(tasks, num_returns=len(tasks), timeout=0.3)
progress_bar.update(len(done))
if __name__ == '__main__':
main()
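# Example invocation (illustrative; the script name and directories are
# hypothetical):
#
#   python convert_shapenet_meshes.py \
#       --shapenet_root /data/ShapeNetCore.v2 --output_root /data/shapenet_npz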
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Downloads CAD-estate videos and extracts their frames."""
import asyncio
import collections
import contextlib
import dataclasses
import datetime
import functools
import json
import logging
import pathlib
import re
import shutil
import subprocess
import tempfile
from concurrent import futures
from typing import Iterable
import appdirs
import numpy as np
import pandas
import tqdm
import yt_dlp
from cad_estate import structured_arg_parser as arg_lib
log = logging.getLogger(__name__)
log_ytdlp = logging.getLogger(yt_dlp.__name__)
FFMPEG_PATH = "/usr/bin/ffmpeg"
def download_video(video_id: str, out_path: pathlib.Path | None = None):
video_url = f"https://www.youtube.com/watch?v={video_id}"
if not out_path:
out_path = (
pathlib.Path(appdirs.user_cache_dir("cad_estate")) / "videos" /
f"{video_id}.mp4")
out_path.parent.mkdir(parents=True, exist_ok=True)
if not out_path.exists():
log.info(f"Downloading video to '{out_path}'")
ytdl = yt_dlp.YoutubeDL(
params=dict(format="bv", logger=log_ytdlp, paths=dict(
home=str(out_path.parent)), outtmpl=dict(default=out_path.name)))
ytdl.download(video_url)
else:
log.info(f"Using previously downloaded video in '{out_path}'")
return out_path
def extract_frames(video_path: pathlib.Path, out_dir: pathlib.Path,
frame_timestamps: Iterable[int],
tmp_dir: pathlib.Path | None = None,
ffmpeg_path=FFMPEG_PATH):
frame_discrepancy_path = out_dir / "frame_discrepancy.json"
if frame_discrepancy_path.exists():
return json.loads(frame_discrepancy_path.read_text())
if not tmp_dir:
tmp_dir_ctx = tempfile.TemporaryDirectory()
tmp_dir = pathlib.Path(tmp_dir_ctx.name)
else:
tmp_dir_ctx = contextlib.nullcontext()
with tmp_dir_ctx:
# Extract the video frames
assert tmp_dir.exists()
if any(tmp_dir.iterdir()):
raise ValueError("Unpack directory must be empty!")
args = [
ffmpeg_path, "-hide_banner", "-loglevel", "error", "-y", "-vsync",
"vfr", "-i", video_path, "-frame_pts", "1", "-r", "1000000", "-f",
"image2", "-qscale:v", "2", f"{tmp_dir}/out%d.jpg"
]
log.info(f"Extracting frames of '{video_path}', using:\n", " ".join(args))
ff_proc = subprocess.run(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, text=True)
if ff_proc.returncode != 0:
raise ValueError(f"ffmpeg failed to extract frames:\n{ff_proc.stdout}")
# Match disk timestamps to annotation timestamps
    out_dir.mkdir(parents=True, exist_ok=True)
jpg_files = tmp_dir.glob("out*.jpg")
disk_timestamps = sorted(
[int(v.stem.removeprefix("out")) for v in jpg_files])
disk_timestamps = np.array(disk_timestamps, np.int64)
frame_timestamps = np.array(frame_timestamps, np.int64)
idx = np.searchsorted(disk_timestamps, frame_timestamps, side="right")
idx1 = np.clip(idx - 1, 0, len(disk_timestamps) - 1)
idx2 = np.clip(idx, 0, len(disk_timestamps) - 1)
dist1 = np.abs(disk_timestamps[idx1] - frame_timestamps)
dist2 = np.abs(disk_timestamps[idx2] - frame_timestamps)
found = disk_timestamps[np.where(dist1 < dist2, idx1, idx2)]
frame_discrepancy = np.abs(found - frame_timestamps)
frame_discrepancy_stats = {
"min": float(frame_discrepancy.min()),
"max": float(frame_discrepancy.max()),
"mean": float(frame_discrepancy.mean()),
"std": float(frame_discrepancy.std())
}
src_paths = [tmp_dir / f"out{v}.jpg" for v in found]
dst_paths = [
out_dir / f"{video_path.stem}_{v}.jpg" for v in frame_timestamps
]
for src_path, dst_path in zip(src_paths, dst_paths, strict=True):
shutil.copy(src_path, dst_path)
frame_discrepancy_path.write_text(json.dumps(frame_discrepancy_stats))
return frame_discrepancy_stats
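# Worked example (illustrative) of the nearest-timestamp matching above: with
# disk_timestamps = [0, 33000, 66000] and frame_timestamps = [34000],
# searchsorted gives idx = 2, so idx1 = 1 and idx2 = 2; dist1 = 1000 < dist2 =
# 32000, so the frame extracted at 33000 is copied and the recorded
# discrepancy is 1000 microseconds.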
def get_video_timestamps(json_path: pathlib.Path):
ann_json = json.loads(json_path.read_text())
clip_name = ann_json["clip_name"]
video_id = re.match(r"^(.+)_\d+$", clip_name).group(1)
timestamps = [v["timestamp"] for v in ann_json["frames"]]
return video_id, timestamps
@dataclasses.dataclass
class Args:
cad_estate_dir: str = arg_lib.flag("Root directory of CAD estate.")
skip_download: bool = arg_lib.flag("Skip the video download step.",
default=False)
skip_extract: bool = arg_lib.flag("Skip the frame extraction step.",
default=False)
parallel_extract_tasks: int = arg_lib.flag(
"How many frame extraction tasks to run in parallel", default=1)
  debug_run: bool = arg_lib.flag("Run in debug mode (process fewer videos).",
default=False)
class CadEstateDirs:
def __init__(self, root_dir: str | pathlib.Path):
self.root = pathlib.Path(root_dir)
self.annotations = self.root / "annotations"
self.raw_videos = self.root / "raw_videos"
self.frames = self.root / "frames"
def download_video_and_catch_errors(video_id: str, args: Args):
try:
dirs = CadEstateDirs(args.cad_estate_dir)
video_path = dirs.raw_videos / f"{video_id}.mp4"
download_video(video_id, video_path)
return (video_id, True, "Download successful.")
except Exception as e:
return video_id, False, "; ".join(e.args)
def extract_frames_and_catch_errors(video_id: str, timestamps: list[int],
args: Args):
try:
dirs = CadEstateDirs(args.cad_estate_dir)
video_path = dirs.raw_videos / f"{video_id}.mp4"
out_dir = dirs.frames / video_path.stem
out_dir.mkdir(exist_ok=True, parents=True)
frame_stats = extract_frames(video_path, out_dir, timestamps)
return (video_id, True, frame_stats["max"], "Timestamp discrepancy (µs): " +
" ".join(f"{k}={float(v):.0f}" for k, v in frame_stats.items()))
except Exception as e:
return video_id, False, -1, "; ".join(e.args)
async def extract_frames_in_parallel(frames_of_interest: dict[str, list[int]],
args: Args):
extract_results = []
with futures.ThreadPoolExecutor(
max_workers=args.parallel_extract_tasks) as pool:
loop = asyncio.get_running_loop()
tasks = [
loop.run_in_executor(
pool,
functools.partial(extract_frames_and_catch_errors, video_id,
time_stamps, args))
for video_id, time_stamps in frames_of_interest.items()
]
iter_with_progress = tqdm.tqdm(
asyncio.as_completed(tasks), "Extracting frames",
total=len(frames_of_interest))
for f in iter_with_progress:
extract_results.append(await f)
return extract_results
def main():
args = arg_lib.parse_flags(Args)
dirs = CadEstateDirs(args.cad_estate_dir)
now = datetime.datetime.now()
log_path = dirs.root / f"log_{now:%y_%m_%d__%H_%M}.txt"
logging.basicConfig(filename=str(log_path), level=logging.INFO)
annotation_paths = sorted(dirs.annotations.glob("*/frames.json"))
if args.debug_run:
annotation_paths = annotation_paths[:50]
video_timestamps = [
get_video_timestamps(v)
for v in tqdm.tqdm(annotation_paths, "Loading frame timestamps")
]
frames_of_interest = collections.defaultdict(lambda: set())
for video_id, timestamps in video_timestamps:
frames_of_interest[video_id] |= set(timestamps)
frames_of_interest = {k: sorted(v) for k, v in frames_of_interest.items()}
if not args.skip_download:
download_results = [
download_video_and_catch_errors(video_id, args)
for video_id in tqdm.tqdm(frames_of_interest, "Downloading videos")
]
df = pandas.DataFrame(download_results,
columns=["video_id", "is_successful", "log"])
csv_path = dirs.root / f"download_results_{now:%y_%m_%d__%H_%M}.csv"
csv_path.write_text(df.set_index("video_id").to_csv())
print(f"Downloaded {df.shape[0]} videos, "
f"with {(~df.is_successful).sum()} errors.")
if not args.skip_extract:
extract_results = asyncio.run(
extract_frames_in_parallel(frames_of_interest, args))
df = pandas.DataFrame(
extract_results,
columns=["video_id", "is_successful", "max_frame_discrepancy", "log"])
df.sort_values("video_id", inplace=True)
csv_path = dirs.root / f"process_results_{now:%y_%m_%d__%H_%M}.csv"
csv_path.write_text(df.set_index("video_id").to_csv())
downloaded_videos: set[str] = set(df[df.is_successful].video_id.tolist())
scene_list = sorted([
v.parent.name
for v in annotation_paths
if re.match(r"^(.+)_\d+$", v.parent.name).group(1) in downloaded_videos
])
scene_list_path = dirs.root / f"scene_list_{now:%y_%m_%d__%H_%M}.txt"
scene_list_path.write_text("\n".join(scene_list))
print(f"Extracted frames for {df.shape[0]} videos, "
f"with {(~df.is_successful).sum()} errors.")
if __name__ == "__main__":
main()
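# Example invocation (illustrative; the exact flag values are an assumption):
#
#   python -m cad_estate.download_and_extract_frames \
#       --cad_estate_dir /data/cad_estate --parallel_extract_tasks 4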
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""High level routines for rendering scenes."""
import io
from importlib import resources
from typing import Iterable
import numpy as np
import PIL.Image
import torch as t
from cad_estate import misc_util
from cad_estate.gl import rasterizer
from cad_estate.gl import shaders
InputTensor = t.Tensor | np.ndarray | int | float | Iterable | None
def load_textures(
encoded_images: Iterable[bytes],
texture_size: tuple[int, int],
) -> tuple[t.Tensor, t.Tensor]:
"""Composes a texture array from encoded images contained in strings.
Args:
encoded_images: The encoded images, string[num_images]. Each entry must
either be a valid image (e.g. PNG or JPEG) or an empty string.
texture_size: Tuple (height, width) giving the desired dimensions of the
output texture array.
Returns:
texture_array: uint8[num_non_empty_images, height, width, 3] tensor
containing the decoded images from the non-empty entries in
encoded_images. All images are scaled to the desired height and width
and flipped along the Y axis.
image_indices: int32[num_images] tensor that defines the mapping between
encoded_images and texture_array. The j-th entry in encoded_images
will be decoded to texture_array[image_indices[j]]. If encoded_images[j]
is empty then image_indices[j] = -1.
"""
# The empty string maps to -1
image_to_index = {b"": -1}
image_indices = []
height, width = texture_size
texture_array = []
for encoded_image in encoded_images:
if encoded_image not in image_to_index:
image_to_index[encoded_image] = len(image_to_index) - 1
pil_image = (PIL.Image.open(io.BytesIO(encoded_image))
) # type: PIL.Image.Image
image = np.array(
pil_image.convert("RGB").resize((width, height),
resample=PIL.Image.BICUBIC))
assert (len(image.shape) == 3 and image.shape[-1] == 3
and image.dtype == np.uint8)
texture_array.append(image)
image_indices.append(image_to_index[encoded_image])
image_indices = misc_util.to_tensor(image_indices, t.int32, "cpu")
if texture_array:
texture_array = misc_util.to_tensor(texture_array, t.uint8, "cpu")
else:
texture_array = t.zeros([1, 1, 1, 3], dtype=t.uint8)
texture_array = texture_array.flip(1).contiguous()
return texture_array, image_indices
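# Example (illustrative sketch): building a texture array from two encoded
# images and one untextured entry. `png_a` and `png_b` are hypothetical
# PNG/JPEG byte strings.
#
#   textures, indices = load_textures([png_a, b"", png_b], texture_size=(64, 64))
#   # textures.shape == (2, 64, 64, 3); indices == tensor([0, -1, 1])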
def render_scene(
vertex_positions: InputTensor,
view_projection_matrix: InputTensor,
image_size: tuple[int, int] = (256, 256),
*,
normals: InputTensor = None,
vertex_colors: InputTensor = None,
tex_coords: InputTensor = None,
material_ids: InputTensor = None,
diffuse_coefficients: InputTensor = None,
diffuse_textures: InputTensor = None,
diffuse_texture_indices: InputTensor = None,
specular_coefficient: InputTensor = None,
ambient_coefficients: InputTensor = None,
cull_back_facing=True,
light_position: InputTensor = None,
light_color: InputTensor = (1.0, 1.0, 1.0),
ambient_light_color: InputTensor = (0.2, 0.2, 0.2),
clear_color: InputTensor = (0, 0, 0, 1),
output_type=t.uint8,
vertex_shader=None,
geometry_shader=None,
fragment_shader=None,
debug_io_buffer=None,
return_rgb=True,
device=None,
):
"""Renders the given scene.
Args:
vertex_positions: The triangle geometry, specified through the triangle
vertex positions, float32[num_triangles, 3, 3]
view_projection_matrix: The view projection matrix, float32[4, 4]
image_size: Desired output image size, (height, width),
normals: Per-vertex shading normals, float32[num_triangles, 3, 3]. If set to
None, normals will be computed from the vertex positions.
vertex_colors: Optional per-vertex colors, float32[num_triangles, 3, 3].
tex_coords: Texture coordinate, float32[num_triangles, 3, 2]. If set to
None, all texture coordinates will be 0.
material_ids: Per-triangle material indices used to index in the various
coefficient tensors below, int32[num_triangles]. If set to None, all
triangles will have the same default material.
diffuse_coefficients: The diffuse coefficients, one per material,
float32[num_materials, 3]. Cannot be None if material_ids is not None.
Must be None if material_ids is None.
diffuse_textures: uint8[num_textures, height, width, 3]. Can be None if
there are no textures used in the mesh.
diffuse_texture_indices: Diffuse texture indices, one per material,
int32[num_materials]. If set to None, the texture indices for all
materials will be -1.
specular_coefficient: Specular coefficients, one per material,
float32[num_materials, 4]. The first 3 channels are the R, G, and B
specular coefficients, the last channel is the specular power. If set to
None, R, G, and B will be 0 for all materials and power will be 2048.
ambient_coefficients: float32[num_materials, 3]. The ambient coefficients.
If None, all ambient coefficient will be 0.05.
cull_back_facing: whether to cull backfacing triangles.
light_position: float32[3], the light position. If set to None, the light
will be placed at the camera origin.
light_color: The light diffuse RGB color, float32[3]
ambient_light_color: The light ambient RGB color, float32[3]
clear_color: The RGB color to use when clearing the image, float32[3]
    output_type: The desired output type. Either t.uint8 or t.float32.
vertex_shader: The vertex shader to use. If empty, uses a default shader.
geometry_shader: The geometry shader. If empty, uses a default shader.
fragment_shader: The fragment shader. If empty, uses a default shader.
debug_io_buffer: Aids debugging of shaders. Shaders can communicate with
host programs through OpenGL input/output buffers. Any tensor passed in
this argument will be forwarded to the shaders as buffer with name
"debug_io".
return_rgb: If true, returns a 3 channel image, otherwise returns a 4
channel image.
device: The index of the GPU to use, given as CUDA device
Returns:
The rendered image, dt[height, width, c] where dt is either float32 or uint8
depending on the value of output_type and c is either 3 or 4, depending on
return_rgb. If the debug_io_buffer argument was not None, returns a
tuple containing the rendered image, and the shader output from the
"debug_io" buffer. The second element of the tuple has the same shape
and type as debug_io_buffer.
"""
if device is None:
device = t.cuda.current_device()
height, width = image_size
vertex_positions = misc_util.to_tensor(vertex_positions, t.float32, device)
assert (len(vertex_positions.shape) == 3
and vertex_positions.shape[1:] == (3, 3))
num_triangles = vertex_positions.shape[0]
view_projection_matrix = misc_util.to_tensor(view_projection_matrix,
t.float32, device)
assert view_projection_matrix.shape == (4, 4)
has_normals = True
if normals is None:
# normals = t.zeros_like(vertex_positions)
normals = t.zeros([1, 3, 3], device=device)
has_normals = False
  else:
    normals = misc_util.to_tensor(normals, t.float32, device)
    assert normals.shape == (num_triangles, 3, 3)
if vertex_colors is None:
vertex_colors = t.zeros((1, 3, 3), dtype=t.float32, device=device)
has_vertex_colors = False
  else:
    has_vertex_colors = True
    vertex_colors = misc_util.to_tensor(vertex_colors, t.float32, device)
    assert vertex_colors.shape == (num_triangles, 3, 3)
if tex_coords is None:
tex_coords = t.zeros([1, 3, 2], dtype=t.float32)
else:
tex_coords = misc_util.to_tensor(tex_coords, t.float32, device)
assert tex_coords.shape == (num_triangles, 3, 2)
if material_ids is None:
material_ids = t.zeros([num_triangles], dtype=t.int32)
material_ids = misc_util.to_tensor(material_ids, t.int32, device)
assert material_ids.shape == (num_triangles,)
num_used_materials = material_ids.max().cpu().numpy() + 1 # type: int
def create_coefficient_array(cur_tensor: InputTensor, num_channels,
default_value):
arr = cur_tensor
if arr is None:
arr = (
t.ones([num_used_materials, num_channels], dtype=t.float32) *
t.tensor(default_value))
arr = misc_util.to_tensor(arr, t.float32, device)
assert len(arr.shape) == 2
arr = arr[:num_used_materials]
assert arr.shape == (num_used_materials, num_channels)
return arr
diffuse_coefficients = create_coefficient_array(diffuse_coefficients, 3, 0.8)
ambient_coefficients = create_coefficient_array(ambient_coefficients, 3, 0.05)
specular_coefficient = create_coefficient_array(specular_coefficient, 4,
(0, 0, 0, 2048.0))
if diffuse_texture_indices is None:
diffuse_texture_indices = t.ones([num_used_materials], dtype=t.int32) * -1
diffuse_texture_indices = misc_util.to_tensor(diffuse_texture_indices,
t.int32, device)
assert len(diffuse_texture_indices.shape) == 1
diffuse_texture_indices = diffuse_texture_indices[:num_used_materials]
assert diffuse_texture_indices.shape == (num_used_materials,)
num_used_textures = diffuse_texture_indices.max().cpu().numpy() + 1
num_used_textures = max(num_used_textures, 1)
if diffuse_textures is None:
diffuse_textures = t.ones([num_used_textures, 1, 1, 3], dtype=t.uint8)
diffuse_textures = misc_util.to_tensor(diffuse_textures, t.uint8, device)
assert len(diffuse_textures.shape) == 4
diffuse_textures = diffuse_textures[:num_used_textures]
assert (diffuse_textures.shape[0] == num_used_textures
and diffuse_textures.shape[3] == 3)
# The projection center transforms to (0, 0, -a, 0) in NDC space, assuming
  # default GL conventions for the projection matrix (i.e. its last column is
  # (0, 0, -a, 0)). To recover its position in world space, we multiply by
  # the inverse view-projection matrix. The value of `a` doesn't matter; we
  # use 1.
camera_position = t.mv(
t.inverse(view_projection_matrix),
t.tensor([0, 0, -1, 0], dtype=t.float32, device=device))
camera_position = camera_position[:3] / camera_position[3]
if light_position is None:
light_position = camera_position
light_position = misc_util.to_tensor(light_position, t.float32, device)
assert light_position.shape == (3,)
light_color = misc_util.to_tensor(light_color, t.float32, device)
assert light_color.shape == (3,)
ambient_light_color = misc_util.to_tensor(ambient_light_color, t.float32,
device)
assert ambient_light_color.shape == (3,)
ambient_coefficients = t.constant_pad_nd(ambient_coefficients, [0, 1])
diffuse_coefficients = t.cat([
diffuse_coefficients,
diffuse_texture_indices.to(t.float32)[:, np.newaxis]
], -1)
materials = t.cat(
[ambient_coefficients, diffuse_coefficients, specular_coefficient],
dim=-1)
render_args = [
rasterizer.Uniform("view_projection_matrix", view_projection_matrix),
rasterizer.Uniform("light_position", light_position),
rasterizer.Uniform("has_normals", has_normals),
rasterizer.Uniform("has_vertex_colors", has_vertex_colors),
rasterizer.Uniform("has_texcoords", True),
rasterizer.Buffer(0, vertex_positions.reshape([-1])),
rasterizer.Buffer(1, normals.reshape([-1])),
rasterizer.Buffer(2, vertex_colors.reshape([-1])),
rasterizer.Buffer(3, tex_coords.reshape([-1])),
rasterizer.Buffer(4, material_ids.reshape([-1])),
rasterizer.Buffer(5, materials.reshape([-1])),
rasterizer.Texture("textures", diffuse_textures, bind_as_array=True),
rasterizer.Uniform("light_color", light_color),
rasterizer.Uniform("camera_position", camera_position),
rasterizer.Uniform("ambient_light_color", ambient_light_color),
rasterizer.Uniform("cull_backfacing", cull_back_facing),
]
if debug_io_buffer is not None:
render_args.append(rasterizer.Buffer(5, debug_io_buffer, is_io=True))
if not geometry_shader:
geometry_shader = resources.read_text(shaders, "triangle_renderer.geom")
if not vertex_shader:
vertex_shader = resources.read_text(shaders, "noop.vert")
if not fragment_shader:
fragment_shader = resources.read_text(shaders,
"point_light_illumination.frag")
result = rasterizer.gl_simple_render(
rasterizer.RenderInput(
num_points=num_triangles,
arguments=render_args,
output_resolution=(height, width),
clear_color=clear_color,
output_type=output_type,
vertex_shader=vertex_shader,
geometry_shader=geometry_shader,
fragment_shader=fragment_shader,
), cuda_device=device)
c = 3 if return_rgb else 4
if debug_io_buffer is None:
return result[..., :c]
else:
return result[..., :c], render_args[-1].value
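# Example (illustrative sketch): rendering object triangles with a camera
# matrix built from frame intrinsics and extrinsics, as in the visualization
# helpers. The `frames`/`objects` variables are hypothetical and a CUDA GPU
# with EGL support is required.
#
#   cam_mat = frames.camera_intrinsics[0] @ frames.camera_extrinsics[0]
#   image = render_scene(objects.triangles, cam_mat, image_size=(256, 256))
#   # image: uint8[256, 256, 3]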
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""An OpenGL rasterizer for PyTorch."""
import dataclasses
import importlib
import logging
from typing import Iterable
import glcontext
import moderngl
import numpy as np
import torch as t
from cad_estate.gl import egl_context
importlib.reload(glcontext)
egl_context.monkey_patch_moderngl()
InputTensor = t.Tensor | np.ndarray | int | float | bool | Iterable
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class Uniform:
name: str
value: InputTensor
@dataclasses.dataclass()
class Buffer:
binding: int
value: InputTensor
is_io: bool = False
@dataclasses.dataclass(frozen=True)
class Texture:
name: str
value: InputTensor
bind_as_array: bool = False
@dataclasses.dataclass(frozen=True)
class RenderInput:
# The number of points to render. The geometry shader can then convert these
# into other geometry.
num_points: int
# The parameters to pass to the shaders.
arguments: Iterable[Uniform | Buffer | Texture]
# The vertex shader
vertex_shader: str
# The geometry shader
geometry_shader: str
# The fragment shader
fragment_shader: str
# The output resolution, tuple (height, width)
output_resolution: tuple[int, int] = (256, 256)
# The clear color, tuple (R, G, B) or (R, G, B, A).
clear_color: Iterable[float] = (0, 0, 0)
# The output type (either uint8 or float32).
output_type: t.dtype = t.uint8
# Whether depth testing is enabled.
depth_test_enabled: bool = True
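# Example (illustrative): `arguments` mixes the three parameter kinds, e.g.
# (`vertex_positions` and `texture_array` below are hypothetical tensors):
#
#   args = [Uniform("view_projection_matrix", np.eye(4, dtype=np.float32)),
#           Buffer(0, vertex_positions.reshape([-1])),
#           Texture("textures", texture_array, bind_as_array=True)]
#
# See `scene_renderer.render_scene` for a full, working argument list.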
class _EglRenderer:
_context_cache: dict[int, "_EglRenderer"] = {}
def __init__(self, gl_context: moderngl.Context, cuda_device: int):
self._program_cache: dict[str, moderngl.Program] = {}
self._gl_context = gl_context
self.cuda_device = cuda_device
@classmethod
def get_instance(cls, cuda_device: int):
if cuda_device not in cls._context_cache:
# Cuda has to be initialized for the cuda_egl backend to work
t.cuda.init()
ctx = moderngl.create_standalone_context(backend="cuda_egl",
cuda_device=cuda_device)
cls._context_cache[cuda_device] = _EglRenderer(ctx, cuda_device)
return cls._context_cache[cuda_device]
def _check_gl_error(self):
err = self._gl_context.error
if err != "GL_NO_ERROR":
raise ValueError(f"OpenGL error encountered: {err}")
def _upload_uniform(self, program: moderngl.Program, parameter: Uniform):
if parameter.name not in program:
log.info(f"Uniform {parameter.name} not found in program")
else:
value = t.as_tensor(parameter.value).cpu()
if len(value.shape) == 2:
value = tuple(value.transpose(0, 1).reshape([-1]))
elif len(value.shape) == 1:
value = tuple(value)
elif value.shape == ():
value = value.item()
else:
raise ValueError("Only supports 0, 1, and 2 dim tensors.")
program[parameter.name].value = value
def _upload_buffer(self, parameter: Buffer) -> moderngl.Buffer:
value = t.as_tensor(parameter.value)
value = value.reshape([-1]).cpu().numpy()
buffer = self._gl_context.buffer(value)
buffer.bind_to_storage_buffer(parameter.binding)
return buffer
def _download_buffer(self, parameter: Buffer, buffer: moderngl.Buffer):
value = parameter.value
np_dtype = {t.float32: np.float32, t.int32: np.int32}[value.dtype]
temp_buffer = np.zeros(value.shape, np_dtype)
assert isinstance(buffer, moderngl.Buffer)
buffer.read_into(temp_buffer)
parameter.value = t.as_tensor(temp_buffer)
def render(self, render_input: RenderInput):
inp = render_input
with self._gl_context:
program_key = (
f"{inp.vertex_shader}|{inp.geometry_shader}|{inp.fragment_shader}")
if program_key not in self._program_cache:
self._program_cache[program_key] = self._gl_context.program(
vertex_shader=inp.vertex_shader,
fragment_shader=inp.fragment_shader,
geometry_shader=inp.geometry_shader)
program = self._program_cache[program_key]
objects_to_delete = []
buffer_bindings: dict[int, moderngl.Buffer] = {}
texture_location = 0
try:
for parameter in inp.arguments:
if isinstance(parameter, Uniform):
self._upload_uniform(program, parameter)
elif isinstance(parameter, Buffer):
buffer = self._upload_buffer(parameter)
buffer_bindings[parameter.binding] = buffer
objects_to_delete.append(buffer)
elif isinstance(parameter, Texture):
if not parameter.bind_as_array:
raise NotImplementedError()
if parameter.name not in program:
log.info(f"Uniform {parameter.name} not found in program")
else:
val = parameter.value.cpu().numpy()
texture = self._gl_context.texture_array(
val.shape[1:3] + val.shape[0:1], val.shape[3], val)
texture.repeat_x = True
texture.repeat_y = True
texture.use(location=texture_location)
program.get(parameter.name, None).value = texture_location
texture_location += 1
objects_to_delete.append(texture)
else:
raise ValueError("Unknown parameter type")
self._check_gl_error()
h, w = inp.output_resolution
gl_dtype, np_dtype = {
t.uint8: ("f1", np.uint8),
t.float32: ("f4", np.float32)
}[inp.output_type]
render_buffer = self._gl_context.renderbuffer((w, h), components=4,
samples=0, dtype=gl_dtype)
objects_to_delete.append(render_buffer)
depth_buffer = self._gl_context.depth_renderbuffer((w, h), samples=0)
objects_to_delete.append(depth_buffer)
framebuffer = self._gl_context.framebuffer(render_buffer, depth_buffer)
objects_to_delete.append(framebuffer)
framebuffer.use()
vertex_array = self._gl_context.vertex_array(program, ())
objects_to_delete.append(vertex_array)
self._gl_context.clear(*inp.clear_color)
self._gl_context.disable(moderngl.CULL_FACE)
if inp.depth_test_enabled:
self._gl_context.enable(moderngl.DEPTH_TEST)
self._gl_context.depth_func = "<="
else:
self._gl_context.disable(moderngl.DEPTH_TEST)
vertex_array.render(mode=moderngl.POINTS, vertices=inp.num_points)
self._check_gl_error()
result = np.zeros([h, w, 4], dtype=np_dtype)
framebuffer.read_into(result, components=4, dtype=gl_dtype)
self._check_gl_error()
for parameter in inp.arguments:
if isinstance(parameter, Buffer) and parameter.is_io:
self._download_buffer(parameter, buffer_bindings[parameter.binding])
return t.as_tensor(result)
finally:
for v in objects_to_delete:
v.release()
def get_egl_device(cuda_device: int) -> int:
"""Returns the EGL device corresponding to a CUDA device (-1 if none)."""
pass
def gl_simple_render(render_input: RenderInput, cuda_device: int | None = None):
"""Renders the supplied configuration with OpenGL.
Args:
render_input: The render input
cuda_device: The GPU to use, given as a CUDA device number
Returns:
The rendered image, output_type[height, width, 4]
Buffer parameters with is_io set to True will be read back. This can either
happen in the original buffer or in a new buffer. The implementation will
update the value field in the buffer parameter in the latter case.
There is no guarantee about the device of both the rendered image and the
buffers that are read back.
"""
if cuda_device is None:
cuda_device = t.cuda.current_device()
instance = _EglRenderer.get_instance(cuda_device)
return instance.render(render_input)
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
#
"""Routines for working with OpenGL EGL contexts."""
import ctypes
import logging
import os
import glcontext
log = logging.getLogger(__name__)
# Keeps the OpenGL context active for the specified CUDA device (if >= 0).
keep_context_active_for_cuda_device = -1
class EGL:
EGLAttrib = ctypes.c_ssize_t
EGLBoolean = ctypes.c_bool
EGLConfig = ctypes.c_void_p
EGLContext = ctypes.c_void_p
EGLDeviceEXT = ctypes.c_void_p
EGLDisplay = ctypes.c_void_p
EGLSurface = ctypes.c_void_p
EGLenum = ctypes.c_uint
EGLint = ctypes.c_int32
EGL_BLUE_SIZE = 0x3022
EGL_CUDA_DEVICE_NV = 0x323A
EGL_DEPTH_SIZE = 0x3025
EGL_DRM_DEVICE_FILE_EXT = 0x3233
EGL_EXTENSIONS = 0x3055
EGL_GREEN_SIZE = 0x3023
EGL_NONE = 0x3038
EGL_NO_CONTEXT = 0
EGL_NO_SURFACE = 0
EGL_OPENGL_API = 0x30A2
EGL_OPENGL_BIT = 0x0008
EGL_PBUFFER_BIT = 0x0001
EGL_PLATFORM_DEVICE_EXT = 0x313F
EGL_RED_SIZE = 0x3024
EGL_RENDERABLE_TYPE = 0x3040
EGL_SUCCESS = 0x3000
EGL_SURFACE_TYPE = 0x3033
EGL_VENDOR = 0x3053
def __init__(self):
egl_lib_path = os.getenv("LIB_EGL_PATH", "libEGL.so.1")
log.debug(f"Initializing EGL using library {egl_lib_path}")
self.egl_lib = ctypes.cdll.LoadLibrary(egl_lib_path)
self.eglGetProcAddress = self.egl_lib.eglGetProcAddress
self.eglGetProcAddress.argtypes = [ctypes.c_char_p]
self.eglGetProcAddress.restype = ctypes.c_void_p
self.eglGetError = ctypes.CFUNCTYPE(EGL.EGLint)(
self.load_function(b"eglGetError"))
self.eglQueryDevicesEXT = ctypes.CFUNCTYPE(
EGL.EGLBoolean, EGL.EGLint, ctypes.POINTER(EGL.EGLDeviceEXT),
ctypes.POINTER(EGL.EGLint))(
self.load_function(b"eglQueryDevicesEXT"))
self.eglQueryDeviceAttribEXT = ctypes.CFUNCTYPE(
EGL.EGLBoolean, EGL.EGLDeviceEXT, EGL.EGLint,
ctypes.POINTER(EGL.EGLAttrib))(
self.load_function(b"eglQueryDeviceAttribEXT"))
self.eglQueryDeviceStringEXT = ctypes.CFUNCTYPE(
ctypes.c_char_p, EGL.EGLDeviceEXT, EGL.EGLint)(
self.load_function(b"eglQueryDeviceStringEXT"))
self.eglGetPlatformDisplayEXT = ctypes.CFUNCTYPE(
EGL.EGLDisplay, EGL.EGLenum, EGL.EGLDeviceEXT,
ctypes.POINTER(EGL.EGLint))(
self.load_function(b"eglGetPlatformDisplayEXT"))
self.eglQueryString = ctypes.CFUNCTYPE(
ctypes.c_char_p, EGL.EGLDisplay, EGL.EGLint)(
self.load_function(b"eglQueryString"))
self.eglInitialize = ctypes.CFUNCTYPE(
EGL.EGLBoolean, EGL.EGLDisplay, ctypes.POINTER(EGL.EGLint),
ctypes.POINTER(EGL.EGLint))(
self.load_function(b"eglInitialize"))
self.eglBindAPI = ctypes.CFUNCTYPE(EGL.EGLBoolean, EGL.EGLenum)(
self.load_function(b"eglBindAPI"))
self.eglChooseConfig = ctypes.CFUNCTYPE(
EGL.EGLBoolean, EGL.EGLDisplay, ctypes.POINTER(EGL.EGLint),
ctypes.POINTER(EGL.EGLConfig), EGL.EGLint, ctypes.POINTER(EGL.EGLint))(
self.load_function(b"eglChooseConfig"))
self.eglCreateContext = ctypes.CFUNCTYPE(
EGL.EGLContext, EGL.EGLDisplay, EGL.EGLConfig, EGL.EGLContext,
ctypes.POINTER(EGL.EGLint))(
self.load_function(b"eglCreateContext"))
self.eglMakeCurrent = ctypes.CFUNCTYPE(
EGL.EGLBoolean, EGL.EGLDisplay, EGL.EGLSurface, EGL.EGLSurface,
EGL.EGLContext)(
self.load_function(b"eglMakeCurrent"))
self.eglGetCurrentContext = ctypes.CFUNCTYPE(EGL.EGLContext)(
self.load_function(b"eglGetCurrentContext"))
self.eglDestroyContext = ctypes.CFUNCTYPE(
EGL.EGLBoolean, EGL.EGLDisplay, EGL.EGLContext)(
self.load_function(b"eglDestroyContext"))
def load_function(self, name: bytes):
result = self.eglGetProcAddress(name)
if not result:
raise ValueError(f"Failed to load function '{name.decode()}'")
return result
_eglInstance: EGL | None = None
def _egl():
global _eglInstance
if not _eglInstance:
_eglInstance = EGL()
return _eglInstance
class ContextError(Exception):
pass
class EglContext:
_context_cache: dict[int, EGL.EGLDisplay] = {}
_cuda_to_egl_mapping: dict[int, EGL.EGLDeviceEXT] = {}
def __init__(self, cuda_device: int):
if cuda_device in EglContext._context_cache:
log.debug(f"Using cached EGL context for cuda device {cuda_device}")
self._display = EglContext._context_cache[cuda_device]
else:
log.debug(f"Creating EGL context for cuda device {cuda_device}")
self._display = self._create_display(cuda_device)
EglContext._context_cache[cuda_device] = self._display
self._context = self._create_context(self._display)
self._cuda_device = cuda_device
self.__enter__()
@classmethod
def _egl_check_success(cls, pred: bool, msg: str, level=logging.FATAL):
err = _egl().eglGetError()
if not pred or err != EGL.EGL_SUCCESS:
msg = f"{msg}. EGL error is: 0x{err:x}."
log.log(level, msg)
if level >= logging.FATAL:
raise ContextError(msg)
return False
return True
@classmethod
def _get_egl_device_map(cls):
if not cls._cuda_to_egl_mapping:
max_devices = 64
devices = (EGL.EGLDeviceEXT * max_devices)()
num_devices = EGL.EGLint()
cls._egl_check_success(
_egl().eglQueryDevicesEXT(max_devices, devices,
ctypes.pointer(num_devices)),
"Failed to retrieve EGL devices")
log.debug(f"Found {num_devices} GPUs with EGL support.")
for device_idx, device in enumerate(list(devices[:num_devices.value])):
device_extensions = (_egl().eglQueryDeviceStringEXT(
device, EGL.EGL_EXTENSIONS)) # type: bytes
cls._egl_check_success(
bool(device_extensions), "Unable to retrieve device extensions")
device_extensions_set = set(device_extensions.split(b" "))
if b"EGL_NV_device_cuda" not in device_extensions_set:
log.debug(f"Ignoring EGL device {device_idx}. "
f"No support for EGL_NV_device_cuda.")
continue
cuda_device_attr = EGL.EGLAttrib(-1)
st = _egl().eglQueryDeviceAttribEXT(device, EGL.EGL_CUDA_DEVICE_NV,
ctypes.pointer(cuda_device_attr))
if not st or _egl().eglGetError() != EGL.EGL_SUCCESS:
log.debug(f"Unable to get CUDA device for EGL device {device_idx}")
continue
cuda_device = cuda_device_attr.value
cls._cuda_to_egl_mapping[cuda_device] = device
return cls._cuda_to_egl_mapping
def _create_display(self, cuda_device: int):
devices = self._get_egl_device_map()
if cuda_device not in devices:
raise ContextError(
f"Could not find EGL device for CUDA device {cuda_device}")
device = devices[cuda_device]
display = _egl().eglGetPlatformDisplayEXT(EGL.EGL_PLATFORM_DEVICE_EXT,
device, None)
self._egl_check_success(bool(display), "Unable to create EGL display")
major, minor = EGL.EGLint(), EGL.EGLint()
self._egl_check_success(
_egl().eglInitialize(display, ctypes.pointer(major),
ctypes.pointer(minor)),
"Unable to initialize display")
self._egl_check_success(_egl().eglBindAPI(EGL.EGL_OPENGL_API),
"Unable to bind OpenGL API to display")
return display
def _create_context(self, display: EGL.EGLDisplay):
config_attrib_list = [
EGL.EGL_SURFACE_TYPE, EGL.EGL_PBUFFER_BIT, EGL.EGL_RED_SIZE, 8,
EGL.EGL_GREEN_SIZE, 8, EGL.EGL_BLUE_SIZE, 8, EGL.EGL_DEPTH_SIZE, 8,
EGL.EGL_RENDERABLE_TYPE, EGL.EGL_OPENGL_BIT, EGL.EGL_NONE
]
ct_config_attrib_list = (EGL.EGLint *
len(config_attrib_list))(*config_attrib_list)
config = EGL.EGLConfig()
num_config = EGL.EGLint()
is_successful = _egl().eglChooseConfig(display, ct_config_attrib_list,
ctypes.pointer(config), 1,
ctypes.pointer(num_config))
self._egl_check_success(is_successful and num_config.value == 1,
"Unable to choose EGL config")
context_attrib = EGL.EGLint(EGL.EGL_NONE)
context = _egl().eglCreateContext(display, config, None,
ctypes.pointer(context_attrib))
self._egl_check_success(context, "Unable to create context")
return context
def load(self, name: str) -> int:
log.debug(f"Loading function {name}")
return _egl().load_function(name.encode())
def _check_valid_context(self):
if not self._context:
raise ContextError("Context already released!")
def __enter__(self):
self._check_valid_context()
if _egl().eglGetCurrentContext() == self._context:
return
self._egl_check_success(
_egl().eglMakeCurrent(self._display, EGL.EGL_NO_SURFACE,
EGL.EGL_NO_SURFACE, self._context),
"Unable to make context current")
def __exit__(self, *args):
if (_egl().eglGetCurrentContext() == self._context
and self._cuda_device == keep_context_active_for_cuda_device):
return
self._egl_check_success(
_egl().eglMakeCurrent(self._display, EGL.EGL_NO_SURFACE,
EGL.EGL_NO_SURFACE, EGL.EGL_NO_CONTEXT),
"Unable to release context")
def release(self):
self._check_valid_context()
    if _egl().eglGetCurrentContext() == self._context:
self._egl_check_success(
_egl().eglMakeCurrent(self._display, EGL.EGL_NO_SURFACE,
EGL.EGL_NO_SURFACE, EGL.EGL_NO_CONTEXT),
"Unable to release context")
self._egl_check_success(
_egl().eglDestroyContext(self._display, self._context),
"Unable to destroy context")
self._context = None
def create_moderngl_context(*args, **kwargs):
assert len(args) == 0
if "cuda_device" not in kwargs:
raise ContextError("cuda_device must be specified.")
cuda_device = kwargs["cuda_device"]
return EglContext(cuda_device)
def monkey_patch_moderngl():
old_fn = glcontext.get_backend_by_name
def get_backend_by_name(name):
if name == "cuda_egl":
return create_moderngl_context
else:
return old_fn(name)
glcontext.get_backend_by_name = get_backend_by_name
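# Example (illustrative, assuming `import moderngl`): after
# monkey_patch_moderngl(), a standalone moderngl context bound to a specific
# CUDA device can be created through the custom "cuda_egl" backend, as done in
# the rasterizer module:
#
#   monkey_patch_moderngl()
#   ctx = moderngl.create_standalone_context(backend="cuda_egl", cuda_device=0)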
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
import enum
import io
import pathlib
import re
import appdirs
import ipywidgets
import numpy as np
import torch as t
import torchvision.transforms.functional as tvtF
from cad_estate import download_and_extract_frames
from cad_estate import file_system as fs
from cad_estate import frames as frame_lib
from cad_estate import misc_util
from cad_estate import room_structure as struct_lib
from cad_estate.gl import scene_renderer
def download_frames(frames: frame_lib.Frames, frames_dir: str | None):
if frames_dir:
frames_dir = fs.abspath(frames_dir)
return frames_dir
video_id = re.match(r"^(.+)_\d+$", frames.clip_name).group(1)
cache_dir = pathlib.Path(appdirs.user_cache_dir())
frames_dir = cache_dir / "cad_estate" / "frames"
video_path = cache_dir / "cad_estate" / "videos" / f"{video_id}.mp4"
download_and_extract_frames.download_video(video_id, video_path)
clip_frames_dir = frames_dir / video_path.stem
clip_frames_dir.mkdir(parents=True, exist_ok=True)
download_and_extract_frames.extract_frames(video_path, clip_frames_dir,
frames.frame_timestamps)
return frames_dir
def render_frame(frames: frame_lib.Frames, room: struct_lib.RoomStructure,
frame_index: int, width: int):
"""Renders a single frame."""
palette = t.cat([misc_util.get_palette()] * 12)
num_palette_colors = palette.shape[0]
palette_darker = palette * 0.8
palette_lighter = palette * 1.2
palette = t.concat([palette, palette_darker, palette_lighter])
material_ids = misc_util.dynamic_tile(room.num_tri)
is_window_frame = (room.triangle_flags & 1) == 1
is_door = (room.triangle_flags & 2) == 2
material_ids[is_window_frame] += 2 * num_palette_colors
material_ids[is_door] += num_palette_colors
_, _, h, w = frames.frame_images.shape
h = h * width // w
w = width
cam_mat = (
frames.camera_intrinsics[frame_index]
@ frames.camera_extrinsics[frame_index])
synth = scene_renderer.render_scene( #
room.triangles, cam_mat, (h, w), diffuse_coefficients=palette,
material_ids=material_ids.to(t.int32), cull_back_facing=False)
synth = tvtF.convert_image_dtype(synth.permute([2, 0, 1]), t.float32)
rgb = tvtF.resize(frames.frame_images[frame_index], (h, w), antialias=True)
rgb = tvtF.convert_image_dtype(rgb, t.float32)
return synth, rgb
def render_annotations(annotations: struct_lib.StructureAnnotations,
frame_index: int, width: int):
_, sh, sw = annotations.structural_element_masks.shape
h = sh * width // sw
w = width
palette_str = t.cat([misc_util.get_palette()] * 12)
palette_vis = palette_str.clone()
palette_vis[0, :] = 0
palette_vis[1, :] = palette_vis.new_tensor([0, 0, 1.0])
str_ann = annotations.structural_element_masks[frame_index].to(t.int64)
vis_ann = annotations.visible_parts_masks[frame_index].to(t.int64)
str_ann = palette_str[str_ann.reshape([-1, 1])].reshape([sh, sw, 3])
vis_ann = palette_vis[vis_ann.reshape([-1, 1])].reshape([sh, sw, 3])
ann_tup = [str_ann, vis_ann]
ann_tup = [
tvtF.resize(v.permute([2, 0, 1]), (h, w), antialias=True) for v in ann_tup
]
ann_tup = [tvtF.convert_image_dtype(v, t.float32) for v in ann_tup]
return ann_tup
def get_legend_html(room: struct_lib.RoomStructure):
html = "Legend: "
style = ("color:black; font-weight: bold; padding: .2em; "
"display: inline-block")
for l, c in zip(room.labels[1:], misc_util.get_palette()[1:]):
text = struct_lib.STRUCTURAL_ELEMENT_TYPES[l]
c = (c * 255).to(t.int32).tolist()
html += f"<div style='{style}; background: rgb{tuple(c)};'>{text}</div>\n"
return html
class AnnotationType(enum.Enum):
  """How to display the annotations."""
  # Don't display annotations
  NONE = "None"
  # Display annotations as drawn by the annotators
  RAW = "Raw"
  # Display processed annotations, where instance labels match geometry,
  # visible parts and structure are combined, and doors/windows are in-painted
  PROCESSED = "Processed"
def create_interactive_widgets(example_scenes: list[str], num_frames: int,
annotations_dir: str):
"""Setup the interactive widgets."""
scene_name = ipywidgets.Dropdown(options=example_scenes)
frame_index = ipywidgets.IntSlider(0, 0, num_frames - 1, 1)
annotation_type = ipywidgets.Dropdown(options=AnnotationType)
def compute_frame_index_bounds(unused):
if annotation_type.value == AnnotationType.NONE:
frame_index.max = num_frames - 1
else:
# When annotations are displayed, the frames match the annotated ones
room_npz = fs.read_bytes(
fs.join(annotations_dir, scene_name.value, "room_structure.npz"))
room = struct_lib.load_room_structure(np.load(io.BytesIO(room_npz)))
frame_index.max = room.annotated_timestamps.shape[0] - 1
annotation_type.observe(compute_frame_index_bounds)
scene_name.observe(compute_frame_index_bounds)
return scene_name, frame_index, annotation_type
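# Illustrative notebook wiring (a sketch under assumptions; the accompanying
# notebook may differ): the returned widgets would typically drive a display
# callback that calls render_frame / render_annotations for the selection.
# Here show_frame is a hypothetical callback, not defined in this module:
#
#   scene_name, frame_index, annotation_type = create_interactive_widgets(
#       example_scenes, num_frames=100, annotations_dir="/path/to/annotations")
#   ipywidgets.interact(show_frame, scene=scene_name, frame=frame_index,
#                       ann_type=annotation_type)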
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: [email protected] (Stefan Popov)
import json
import pathlib
import re
import appdirs
import ipywidgets
import torch as t
import torchvision.utils as tvU
from cad_estate import download_and_extract_frames
from cad_estate import file_system as fs
from cad_estate import frames as frame_lib
from cad_estate import misc_util
from cad_estate import objects as obj_lib
from cad_estate.gl import scene_renderer
def download_frames(frames: frame_lib.Frames, frames_dir: str | None):
if frames_dir:
frames_dir = fs.abspath(frames_dir)
return frames_dir
video_id = re.match(r"^(.+)_\d+$", frames.clip_name).group(1)
cache_dir = pathlib.Path(appdirs.user_cache_dir())
frames_dir = cache_dir / "cad_estate" / "frames"
video_path = cache_dir / "cad_estate" / "videos" / f"{video_id}.mp4"
download_and_extract_frames.download_video(video_id, video_path)
clip_frames_dir = frames_dir / video_path.stem
clip_frames_dir.mkdir(parents=True, exist_ok=True)
download_and_extract_frames.extract_frames(video_path, clip_frames_dir,
frames.frame_timestamps)
return frames_dir
def create_interactive_widgets(example_scenes: list[str], num_frames: int,
annotations_dir: str):
"""Setup the interactive widgets."""
w_scene_name = ipywidgets.Dropdown(options=example_scenes)
w_frame_index = ipywidgets.IntSlider(0, 0, num_frames - 1, 1)
w_show_tracks = ipywidgets.Checkbox()
def compute_frame_index_bounds(unused):
if w_show_tracks.value:
# When tracks are displayed, the set of frames matches the ones with
# manual track completion
frames_json = json.loads(
fs.read_text(
fs.join(annotations_dir, w_scene_name.value, "frames.json")))
frames = frame_lib.load_metadata(frames_json)
w_frame_index.max = (
int(frames.manual_track_annotations.to(t.int64).sum()) - 1)
else:
w_frame_index.max = num_frames - 1
w_show_tracks.observe(compute_frame_index_bounds)
w_scene_name.observe(compute_frame_index_bounds)
return w_scene_name, w_frame_index, w_show_tracks
def render_objects(objects: obj_lib.Objects, frames: frame_lib.Frames,
frame_index: int):
  palette = misc_util.get_palette()
rgb = frames.frame_images[frame_index]
cam_mat = (
frames.camera_intrinsics[frame_index]
@ frames.camera_extrinsics[frame_index])
if objects.num_tri.sum() == 0:
synth = t.zeros_like(rgb)
else:
synth: t.Tensor = scene_renderer.render_scene(
objects.triangles, cam_mat, rgb.shape[-2:], cull_back_facing=False,
        diffuse_coefficients=palette[1:],
material_ids=misc_util.dynamic_tile(objects.num_tri).to(t.int32))
synth = synth.permute([2, 0, 1])
return synth, rgb
def render_tracks(img: t.Tensor, objects: obj_lib.Objects,
track_boxes: t.Tensor | None, frame_index: int):
if track_boxes is None:
return img
  palette = misc_util.get_palette()
_, h, w = img.shape
boxes = track_boxes[:, frame_index] * track_boxes.new_tensor([h, w, h, w])
mask = ((boxes >= 0).all(dim=1) & (objects.num_tri == 0) &
~objects.from_automatic_track)
boxes = boxes[mask][:, [1, 0, 3, 2]].to(t.int32)
if not boxes.shape[0]:
return img
  box_colors = t.cat([palette] * 12)[mask.nonzero()[:, 0]]
box_colors = [tuple(v) for v in (box_colors * 255).to(t.int32)]
return tvU.draw_bounding_boxes(img, boxes, width=2, colors=box_colors)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configargparse
def config_parser():
parser = configargparse.ArgumentParser()
parser.add_argument('--config', is_config_file=True, help='config file path')
# general
parser.add_argument('--rootdir', type=str, default='./',
help='the path to the project root directory.')
parser.add_argument("--expname", type=str, default='exp', help='experiment name')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--distributed', action='store_true', help='if use distributed training')
parser.add_argument("--local_rank", type=int, default=0, help='rank for distributed training')
parser.add_argument("--eval_mode", action='store_true', help='if in eval mode')
########## dataset options ##########
# train and eval dataset
parser.add_argument("--train_dataset", type=str, default='vimeo',
help='the training dataset')
parser.add_argument("--dataset_weights", nargs='+', type=float, default=[],
help='the weights for training datasets, used when multiple datasets are used.')
parser.add_argument('--eval_dataset', type=str, default='vimeo', help='the dataset to evaluate')
parser.add_argument("--batch_size", type=int, default=1, help='batch size, currently only support 1')
########## network architecture ##########
parser.add_argument("--feature_dim", type=int, default=32, help='the dimension of the extracted features')
########## training options ##########
parser.add_argument("--use_inpainting_mask_for_feature", action='store_true')
parser.add_argument("--inpainting", action='store_true', help='if do inpainting')
parser.add_argument("--train_raft", action='store_true', help='if train raft')
parser.add_argument('--boundary_crop_ratio', type=float, default=0, help='crop the image before computing loss')
parser.add_argument("--vary_pts_radius", action='store_true', help='if vary point radius as augmentation')
parser.add_argument("--adaptive_pts_radius", action='store_true', help='if use adaptive point radius')
parser.add_argument("--use_mask_for_decoding", action='store_true', help='if use mask for decoding')
########## rendering/evaluation ##########
parser.add_argument("--use_depth_for_feature", action='store_true',
help='if use depth map when extracting features')
parser.add_argument("--use_depth_for_decoding", action='store_true',
help='if use depth map when decoding')
parser.add_argument("--point_radius", type=float, default=1.5,
help='point radius for rasterization')
parser.add_argument("--input_dir", type=str, default='', help='input folder that contains a pair of images')
parser.add_argument("--visualize_rgbda_layers", action='store_true',
help="if visualize rgbda layers, save in out dir")
########### iterations & learning rate options & loss ##########
parser.add_argument("--n_iters", type=int, default=250000, help='num of iterations')
parser.add_argument("--lr", type=float, default=3e-4, help='learning rate for feature extractor')
parser.add_argument("--lr_raft", type=float, default=5e-6, help='learning rate for raft')
parser.add_argument("--lrate_decay_factor", type=float, default=0.5,
help='decay learning rate by a factor every specified number of steps')
parser.add_argument("--lrate_decay_steps", type=int, default=50000,
help='decay learning rate by a factor every specified number of steps')
parser.add_argument('--loss_mode', type=str, default='lpips',
help='the loss function to use')
########## checkpoints ##########
parser.add_argument("--ckpt_path", type=str, default="",
help='specific weights npy file to reload for coarse network')
parser.add_argument("--no_reload", action='store_true',
help='do not reload weights from saved ckpt')
parser.add_argument("--no_load_opt", action='store_true',
help='do not load optimizer when reloading')
parser.add_argument("--no_load_scheduler", action='store_true',
help='do not load scheduler when reloading')
########## logging/saving options ##########
parser.add_argument("--i_print", type=int, default=100, help='frequency of console printout and metric loggin')
parser.add_argument("--i_img", type=int, default=500, help='frequency of tensorboard image logging')
parser.add_argument("--i_weights", type=int, default=10000, help='frequency of weight ckpt saving')
args = parser.parse_args()
return args
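# Illustrative usage (assumed, not prescribed by this file): configargparse
# also reads values from a config file passed via --config, with command-line
# flags taking precedence over file values. A minimal config might look like:
#
#   # configs/example.txt
#   expname = my_experiment
#   train_dataset = vimeo
#   lr = 3e-4
#
# and would be used as:
#
#   python train.py --config configs/example.txt --distributed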
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from networks.resunet import ResUNet
from networks.img_decoder import ImgDecoder
from third_party.RAFT.core.raft import RAFT
from utils import de_parallel
class Namespace:
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
if not isinstance(other, Namespace):
return NotImplemented
return vars(self) == vars(other)
def __contains__(self, key):
return key in self.__dict__
def get_raft_model(args):
flow_args = Namespace()
setattr(flow_args, 'model', 'third_party/RAFT/models/raft-things.pth')
setattr(flow_args, 'small', False)
setattr(flow_args, 'mixed_precision', False)
setattr(flow_args, 'alternate_corr', False)
device = "cuda:{}".format(args.local_rank)
if args.distributed:
model = RAFT(flow_args).to(device)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
)
else:
model = torch.nn.DataParallel(RAFT(flow_args)).to(device)
model.load_state_dict(torch.load(flow_args.model, map_location='cuda:{}'.format(args.local_rank)))
return model
########################################################################################################################
# creation/saving/loading of the model
########################################################################################################################
class SpaceTimeModel(object):
def __init__(self, args):
self.args = args
load_opt = not args.no_load_opt
load_scheduler = not args.no_load_scheduler
device = torch.device('cuda:{}'.format(args.local_rank))
# initialize feature extraction network
feat_in_ch = 4
if args.use_inpainting_mask_for_feature:
feat_in_ch += 1
if args.use_depth_for_feature:
feat_in_ch += 1
self.feature_net = ResUNet(args, in_ch=feat_in_ch, out_ch=args.feature_dim).to(device)
# initialize decoder
decoder_in_ch = args.feature_dim + 4
decoder_out_ch = 3
if args.use_depth_for_decoding:
decoder_in_ch += 1
if args.use_mask_for_decoding:
decoder_in_ch += 1
self.img_decoder = ImgDecoder(args, in_ch=decoder_in_ch, out_ch=decoder_out_ch).to(device)
self.raft = get_raft_model(args)
learnable_params = list(self.feature_net.parameters())
learnable_params += list(self.img_decoder.parameters())
self.learnable_params = learnable_params
if args.train_raft:
self.optimizer = torch.optim.Adam([
{'params': learnable_params},
{'params': filter(lambda p: p.requires_grad, self.raft.parameters()), 'lr': self.args.lr_raft}],
lr=args.lr, weight_decay=1e-4, betas=(0.9, 0.999))
else:
self.raft.eval()
self.optimizer = torch.optim.Adam(learnable_params, lr=args.lr, weight_decay=1e-4, betas=(0.9, 0.999))
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
step_size=args.lrate_decay_steps,
gamma=args.lrate_decay_factor)
out_folder = os.path.join(args.rootdir, 'out', args.expname)
self.start_step = self.load_from_ckpt(out_folder,
load_opt=load_opt,
load_scheduler=load_scheduler)
if args.distributed:
self.feature_net = torch.nn.parallel.DistributedDataParallel(
self.feature_net,
device_ids=[args.local_rank],
output_device=args.local_rank,
)
self.img_decoder = torch.nn.parallel.DistributedDataParallel(
self.img_decoder,
device_ids=[args.local_rank],
output_device=args.local_rank,
)
def switch_to_eval(self):
self.feature_net.eval()
self.img_decoder.eval()
self.raft.eval()
def switch_to_train(self):
self.feature_net.train()
self.img_decoder.train()
if self.args.train_raft:
self.raft.train()
def save_model(self, filename):
to_save = {'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'feature_net': de_parallel(self.feature_net).state_dict(),
'img_decoder': de_parallel(self.img_decoder).state_dict(),
'raft': self.raft.state_dict()
}
torch.save(to_save, filename)
def load_model(self, filename, load_opt=True, load_scheduler=True):
if self.args.distributed:
to_load = torch.load(filename, map_location='cuda:{}'.format(self.args.local_rank))
else:
to_load = torch.load(filename)
if load_opt:
self.optimizer.load_state_dict(to_load['optimizer'])
if load_scheduler:
self.scheduler.load_state_dict(to_load['scheduler'])
self.feature_net.load_state_dict(to_load['feature_net'])
self.img_decoder.load_state_dict(to_load['img_decoder'])
if 'raft' in to_load.keys():
self.raft.load_state_dict(to_load['raft'])
def load_from_ckpt(self, out_folder,
load_opt=True,
load_scheduler=True,
force_latest_ckpt=False):
'''
load model from existing checkpoints and return the current step
:param out_folder: the directory that stores ckpts
:return: the current starting step
'''
# all existing ckpts
ckpts = []
if os.path.exists(out_folder):
ckpts = [os.path.join(out_folder, f)
for f in sorted(os.listdir(out_folder)) if f.endswith('.pth')]
if self.args.ckpt_path is not None and not force_latest_ckpt:
if os.path.isfile(self.args.ckpt_path): # load the specified ckpt
ckpts = [self.args.ckpt_path]
if len(ckpts) > 0 and not self.args.no_reload:
fpath = ckpts[-1]
self.load_model(fpath, load_opt, load_scheduler)
step = int(fpath[-10:-4])
print('Reloading from {}, starting at step={}'.format(fpath, step))
else:
print('No ckpts found, training from scratch...')
step = 0
return step
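# Illustrative construction sketch (assumes the argument namespace produced by
# config.config_parser() and an available CUDA device):
#
#   args = config.config_parser()
#   model = SpaceTimeModel(args)   # builds feature net, image decoder and RAFT
#   model.switch_to_eval()         # inference mode
#   ...
#   model.save_model(os.path.join('out', args.expname, 'model_000001.pth'))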
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from datetime import datetime
import shutil
TINY_NUMBER = 1e-6 # float32 only has 7 decimal digits precision
def de_parallel(model):
return model.module if hasattr(model, 'module') else model
def cycle(iterable):
while True:
for x in iterable:
yield x
def dict_to_device(dict_):
for k in dict_.keys():
if type(dict_[k]) == torch.Tensor:
dict_[k] = dict_[k].cuda()
return dict_
def save_current_code(outdir):
now = datetime.now() # current date and time
date_time = now.strftime("%m_%d-%H:%M:%S")
src_dir = '.'
code_out_dir = os.path.join(outdir, 'code')
os.makedirs(code_out_dir, exist_ok=True)
dst_dir = os.path.join(code_out_dir, '{}'.format(date_time))
shutil.copytree(src_dir, dst_dir,
ignore=shutil.ignore_patterns('pretrained*', '*logs*', 'out*', '*.png', '*.mp4', 'eval*',
'*__pycache__*', '*.git*', '*.idea*', '*.zip', '*.jpg'))
def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
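    # Emulates torch.nan_to_num (handy on older PyTorch versions): the
    # unsqueeze(0).nansum(0) trick replaces NaN entries with 0, and the clamp
    # maps +/-inf to finite bounds (the dtype max/min unless posinf/neginf
    # are given).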
# assert isinstance(input, torch.Tensor)
if posinf is None:
posinf = torch.finfo(input.dtype).max
if neginf is None:
neginf = torch.finfo(input.dtype).min
assert nan == 0
return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
def img2mse(x, y, mask=None):
'''
:param x: img 1, [(...), 3]
:param y: img 2, [(...), 3]
:param mask: optional, [(...)]
:return: mse score
'''
if mask is None:
return torch.mean((x - y) * (x - y))
else:
return torch.sum((x - y) * (x - y) * mask.unsqueeze(-1)) / (torch.sum(mask) * x.shape[-1] + TINY_NUMBER)
mse2psnr = lambda x: -10. * np.log(x+TINY_NUMBER) / np.log(10.)
def img2psnr(x, y, mask=None):
return mse2psnr(img2mse(x, y, mask).item())
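# Worked example (for illustration): with images in [0, 1], an MSE of 0.01
# gives mse2psnr(0.01) = -10 * log10(0.01 + 1e-6) ~= 20.0 dB, and halving the
# MSE raises the PSNR by about 3 dB:
#
#   x = torch.rand(4, 4, 3)
#   y = x + 0.1
#   img2psnr(x, y)  # == mse2psnr(0.01) ~= 20.0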
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import config
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
import time
import torch.utils.data.distributed
import torch.distributed as dist
from tensorboardX import SummaryWriter
from data_loaders import dataset_dict
from data_loaders.create_training_dataset import create_training_dataset
from utils import *
from model import SpaceTimeModel
from core.utils import *
from criterion import Criterion
from core.scene_flow import SceneFlowEstimator
from core.renderer import ImgRenderer
from core.inpainter import Inpainter
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def train(args):
device = "cuda:{}".format(args.local_rank)
out_folder = os.path.join(args.rootdir, 'out', args.expname)
print('outputs will be saved to {}'.format(out_folder))
os.makedirs(out_folder, exist_ok=True)
if args.local_rank == 0:
save_current_code(out_folder)
# save the args and config files
f = os.path.join(out_folder, 'args.txt')
with open(f, 'w') as file:
for arg in sorted(vars(args)):
attr = getattr(args, arg)
file.write('{} = {}\n'.format(arg, attr))
if args.config is not None:
f = os.path.join(out_folder, 'config.txt')
if not os.path.isfile(f):
shutil.copy(args.config, f)
# create training dataset
train_dataset, train_sampler = create_training_dataset(args)
assert args.batch_size == 1, "only support batch size == 1"
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
worker_init_fn=lambda _: np.random.seed(),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
shuffle=True if train_sampler is None else False)
# create validation dataset
val_dataset = dataset_dict[args.eval_dataset](args, 'val')
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1)
val_loader_iterator = iter(cycle(val_loader))
model = SpaceTimeModel(args)
scene_flow_estimator = SceneFlowEstimator(args, model.raft)
inpainter = Inpainter(args, device=device)
renderer = ImgRenderer(args, model, scene_flow_estimator, inpainter, device)
tb_dir = os.path.join(args.rootdir, 'logs/', args.expname)
if args.local_rank == 0:
writer = SummaryWriter(tb_dir)
print('saving tensorboard files to {}'.format(tb_dir))
scalars_to_log = {}
global_step = model.start_step + 1
epoch = 0
criterion = Criterion(args)
while global_step < model.start_step + args.n_iters + 1:
np.random.seed()
if args.distributed:
train_sampler.set_epoch(epoch)
for data in train_loader:
if (data['src_depth1'].min() <= 1e-2) or (data['src_depth2'].min() <= 1e-2):
continue
start = time.time()
res_dict = renderer.get_prediction(data)
pred_img = res_dict['pred_img']
gt_img = res_dict['gt_img']
direct_rgb_out = res_dict['direct_rgb_out']
src_img1 = res_dict['src_img1']
src_img2 = res_dict['src_img2']
mask = res_dict['mask']
if res_dict['skip']:
continue
### loss term
model.optimizer.zero_grad()
loss, scalars_to_log = criterion(pred_img, gt_img, mask, data['multi_view'][0],
res_dict, scalars_to_log, global_step)
loss.backward()
for param in model.learnable_params:
if param.grad is not None:
nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
model.optimizer.step()
model.scheduler.step()
duration = time.time() - start
# log
scalars_to_log['loss'] = loss.item()
scalars_to_log['psnr'] = img2psnr(pred_img, gt_img)
scalars_to_log['psnr_direct'] = img2psnr(direct_rgb_out, gt_img)
scalars_to_log['multi_view'] = data['multi_view'][0]
scalars_to_log['lr'] = model.scheduler.get_last_lr()[0]
scalars_to_log['time'] = duration
# Rest is logging
if args.local_rank == 0:
if global_step % args.i_print == 0 or global_step < 10:
logstr = '{} Epoch: {} step: {} '.format(args.expname, epoch, global_step)
for k in scalars_to_log.keys():
logstr += ' {}: {:.6f}'.format(k, scalars_to_log[k])
writer.add_scalar(k, scalars_to_log[k], global_step)
print(logstr)
if global_step % args.i_weights == 0:
print('Saving checkpoints at {} to {}...'.format(global_step, out_folder))
fpath = os.path.join(out_folder, 'model_{:06d}.pth'.format(global_step))
model.save_model(fpath)
if global_step % args.i_img == 0:
# only the first example in the batch
writer.add_image('train/gt_pred_img',
torch.cat([gt_img[0], direct_rgb_out[0], pred_img[0]], dim=2),
global_step, dataformats='CHW')
# ref and src images
writer.add_image('train/src_imgs',
torch.cat([src_img1[0], src_img2[0]], dim=2),
global_step, dataformats='CHW')
val_data = next(val_loader_iterator)
torch.cuda.empty_cache()
model.switch_to_eval()
with torch.no_grad():
val_res_dict = renderer.get_prediction(val_data)
val_pred_img = val_res_dict['pred_img']
val_gt_img = val_res_dict['gt_img']
val_src_img1 = val_res_dict['src_img1']
val_src_img2 = val_res_dict['src_img2']
val_direct_rgb_out = val_res_dict['direct_rgb_out']
model.switch_to_train()
writer.add_image('val/gt_pred_img',
torch.cat([val_gt_img[0], val_direct_rgb_out[0], val_pred_img[0]], dim=2),
global_step, dataformats='CHW')
# ref and src images
writer.add_image('val/src_imgs',
torch.cat([val_src_img1[0], val_src_img2[0]], dim=2),
global_step, dataformats='CHW')
global_step += 1
if global_step > model.start_step + args.n_iters + 1:
break
epoch += 1
if __name__ == '__main__':
args = config.config_parser()
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
synchronize()
train(args)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lpips
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from core.utils import crop_boundary
def masked_mse_loss(pred, gt, mask=None):
if mask is None:
return F.mse_loss(pred, gt)
else:
sum_loss = F.mse_loss(pred, gt, reduction='none')
ndim = sum_loss.shape[1]
return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8)
def masked_l1_loss(pred, gt, mask=None):
if mask is None:
return F.l1_loss(pred, gt)
else:
if mask.shape[-2:] != pred.shape[-2:]:
mask = F.interpolate(mask, size=pred.shape[-2:])
sum_loss = F.l1_loss(pred, gt, reduction='none')
ndim = sum_loss.shape[1]
return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8)
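# Illustrative note on the masking convention above (typical shapes are an
# assumption, not enforced by these helpers): pred/gt are [B, C, H, W] and
# mask is [B, 1, H, W]; the loss is averaged over C times the number of
# masked-in pixels, so an all-ones mask reduces to the unmasked loss:
#
#   pred, gt = torch.rand(1, 3, 8, 8), torch.rand(1, 3, 8, 8)
#   mask = torch.ones(1, 1, 8, 8)
#   masked_l1_loss(pred, gt, mask)  # ~= F.l1_loss(pred, gt)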
class Vgg16(nn.Module):
def __init__(self):
super(Vgg16, self).__init__()
features = models.vgg16(pretrained=True).features
self.to_relu_1_2 = nn.Sequential()
self.to_relu_2_2 = nn.Sequential()
self.to_relu_3_3 = nn.Sequential()
self.to_relu_4_3 = nn.Sequential()
for x in range(4):
self.to_relu_1_2.add_module(str(x), features[x])
for x in range(4, 9):
self.to_relu_2_2.add_module(str(x), features[x])
for x in range(9, 16):
self.to_relu_3_3.add_module(str(x), features[x])
for x in range(16, 23):
self.to_relu_4_3.add_module(str(x), features[x])
# don't need the gradients, just want the features
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
h = self.to_relu_1_2(x)
h_relu_1_2 = h
h = self.to_relu_2_2(h)
h_relu_2_2 = h
h = self.to_relu_3_3(h)
h_relu_3_3 = h
h = self.to_relu_4_3(h)
h_relu_4_3 = h
out = [h_relu_1_2, h_relu_2_2, h_relu_3_3, h_relu_4_3]
return out
class Vgg19(nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
h_relu1 = self.slice1(x)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class VGGLoss(nn.Module):
def __init__(self, model='vgg19', device='cuda'):
super().__init__()
if model == 'vgg16':
self.vgg = Vgg16().to(device)
self.weights = [1.0/16, 1.0/8, 1.0/4, 1.0]
elif model == 'vgg19':
self.vgg = Vgg19().to(device)
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
# self.weights = [1/2.6, 1/4.8, 1/3.7, 1/5.6, 10/1.5]
# self.weights = [1/2.6, 1/4.8, 1/3.7, 1/5.6, 2/1.5]
# self.criterion = nn.L1Loss()
self.loss_func = masked_l1_loss
@staticmethod
def preprocess(x, size=224):
# B, C, H, W
device = x.device
mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
std = torch.tensor([0.229, 0.224, 0.225]).to(device)
x = (x - mean.reshape(1, 3, 1, 1)) / std.reshape(1, 3, 1, 1)
return x
def forward(self, x, y, mask=None, size=224):
x = self.preprocess(x, size=size) # assume x, y are inside (0, 1)
y = self.preprocess(y, size=size)
if mask is not None:
if min(mask.shape[-2:]) <= size:
mode = 'bilinear'
align_corners = True
else:
mode = 'area'
align_corners = None
mask = F.interpolate(mask, size=size, mode=mode, align_corners=align_corners)
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
# loss = 0
loss = self.loss_func(x, y, mask)
for i in range(len(x_vgg)):
loss += self.weights[i] * self.loss_func(x_vgg[i], y_vgg[i], mask)
return loss
def normalize_minus_one_to_one(x):
x_min = x.min()
x_max = x.max()
return 2. * (x - x_min) / (x_max - x_min) - 1.
def get_flow_smoothness_loss(flow, alpha):
    # finite differences between neighboring positions along each spatial axis
    flow_gradient_x = flow[:, :, :, 1:, :] - flow[:, :, :, :-1, :]
    flow_gradient_y = flow[:, :, :, :, 1:] - flow[:, :, :, :, :-1]
cost_x = (alpha[:, :, :, 1:, :] * torch.norm(flow_gradient_x, dim=2, keepdim=True)).sum()
cost_y = (alpha[:, :, :, :, 1:] * torch.norm(flow_gradient_y, dim=2, keepdim=True)).sum()
avg_cost = (cost_x + cost_y) / (2 * alpha.sum() + 1e-6)
return avg_cost
class Criterion(nn.Module):
def __init__(self, args):
super(Criterion, self).__init__()
device = "cuda:{}".format(args.local_rank)
self.args = args
self.crop_ratio = args.boundary_crop_ratio
self.loss_mode = args.loss_mode
if 'vgg' in self.loss_mode:
self.loss_func = VGGLoss(model=self.loss_mode, device=device)
elif self.loss_mode == 'lpips':
self.loss_func = lpips.LPIPS(net='vgg').to(device)
elif self.loss_mode == 'mse':
self.loss_func = masked_mse_loss
elif self.loss_mode == 'l1':
self.loss_func = masked_l1_loss
else:
raise NotImplementedError
def forward(self, pred, target, mask, is_multi_view, res_dict, scalar_to_log, step):
if self.crop_ratio > 0:
pred = crop_boundary(pred, self.crop_ratio)
target = crop_boundary(target, self.crop_ratio)
if self.loss_mode == 'lpips':
pred_normed = 2 * pred - 1.
target_normed = 2 * target - 1.
loss = self.loss_func(pred_normed, target_normed)
scalar_to_log['loss_perceptual'] = loss.item()
if is_multi_view == 0:
l1_loss = masked_l1_loss(pred, target, mask)
loss = loss + l1_loss
scalar_to_log['loss_l1'] = l1_loss.item()
else:
loss = self.loss_func(pred, target, mask)
return loss, scalar_to_log
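# Illustrative sketch (assumes args produced by config.config_parser() with
# loss_mode='lpips'): the criterion returns both the scalar loss and the
# updated logging dict:
#
#   criterion = Criterion(args)
#   loss, scalars = criterion(pred_img, gt_img, mask, is_multi_view=0,
#                             res_dict={}, scalar_to_log={}, step=0)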
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import time
import imageio
import cv2
import config
import torchvision
import torch.utils.data.distributed
from tqdm import tqdm
from model import get_raft_model
from third_party.RAFT.core.utils.utils import InputPadder
from third_party.DPT.run_monodepth import run_dpt
from utils import *
from model import SpaceTimeModel
from core.utils import *
from core.scene_flow import SceneFlowEstimator
from core.renderer import ImgRenderer
from core.inpainter import Inpainter
from data_loaders.data_utils import resize_img
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def process_boundary_mask(mask):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=5)
dilation = cv2.dilate(closing, kernel, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0.)
return dilation
def compute_optical_flow(args, img1, img2, return_np_array=False):
raft_model = get_raft_model(args)
with torch.no_grad():
img1 = torch.from_numpy(img1).float().to("cuda:{}".format(args.local_rank)).permute(2, 0, 1)[None, ...]
img2 = torch.from_numpy(img2).float().to("cuda:{}".format(args.local_rank)).permute(2, 0, 1)[None, ...]
padder = InputPadder(img1.shape)
image1, image2 = padder.pad(img1, img2)
flow_low, flow_up = raft_model.module(image1, image2, iters=20, test_mode=True, padder=padder)
del raft_model
torch.cuda.empty_cache()
if return_np_array:
return flow_up.cpu().numpy().transpose(0, 2, 3, 1)
return flow_up.permute(0, 2, 3, 1).detach() # [B, h, w, 2]
# homography alignment
def homography_warp_pairs(args):
input_dir = args.input_dir
print('processing input folder {}...'.format(input_dir))
img_files = sorted(glob.glob(os.path.join(input_dir, '*.png'))) + \
sorted(glob.glob(os.path.join(input_dir, '*.jpg')))
assert len(img_files) == 2, 'input folder must contain 2 images, found {} images instead'.format(len(img_files))
warped_out_dir = os.path.join(input_dir, 'warped')
os.makedirs(warped_out_dir, exist_ok=True)
mask_out_dir = os.path.join(warped_out_dir, 'mask')
os.makedirs(mask_out_dir, exist_ok=True)
dpt_out_dir = os.path.join(input_dir, 'dpt_depth')
os.makedirs(dpt_out_dir, exist_ok=True)
warped_dpt_out_dir = os.path.join(warped_out_dir, 'dpt_depth')
os.makedirs(warped_dpt_out_dir, exist_ok=True)
dpt_model_path = 'third_party/DPT/weights/dpt_hybrid-midas-501f0c75.pt'
run_dpt(input_path=input_dir, output_path=dpt_out_dir, model_path=dpt_model_path, optimize=False)
disp_files = sorted(glob.glob(os.path.join(dpt_out_dir, '*.png')))
img1 = imageio.imread(img_files[0])
img2 = imageio.imread(img_files[1])
disp1 = imageio.imread(disp_files[0])
disp2 = imageio.imread(disp_files[1])
img_h = img1.shape[0]
img_w = img1.shape[1]
x = np.arange(img_h)
y = np.arange(img_w)
coords = np.stack(np.meshgrid(y, x), -1)
print('=========================aligning the two input images via a homography...=========================')
flow12 = compute_optical_flow(args, img1, img2, return_np_array=True)[0]
flow12_norm = np.linalg.norm(flow12, axis=-1)
mask_valid = (flow12_norm < np.inf) * (disp1 > 0)
mask_small_flow = flow12_norm < np.percentile(flow12_norm[mask_valid], 75)
mask_valid *= mask_small_flow
coords1 = coords + flow12
pt1 = coords[mask_valid][::10]
pt2 = coords1[mask_valid][::10]
H, mask = cv2.findHomography(pt2, pt1, method=cv2.RANSAC, ransacReprojThreshold=1)
np.savetxt(os.path.join(input_dir, 'H.txt'), H)
img2_warped = cv2.warpPerspective(img2, H, (img_w, img_h), flags=cv2.INTER_LINEAR)
imageio.imwrite(os.path.join(warped_out_dir, os.path.basename(img_files[0])), img1)
imageio.imwrite(os.path.join(warped_out_dir, os.path.basename(img_files[1])), img2_warped)
print('finished')
disp2_warped = cv2.warpPerspective(disp2, H, (img_w, img_h), flags=cv2.INTER_LINEAR)
scale21 = np.mean(mask_valid * (disp2_warped / np.clip(disp1, a_min=1e-6, a_max=np.inf))) / np.mean(mask_valid)
if scale21 < 1:
disp1 = (disp1 * scale21).astype(np.uint16)
else:
disp2_warped = (disp2_warped / scale21).astype(np.uint16)
imageio.imwrite(os.path.join(warped_dpt_out_dir, os.path.basename(disp_files[0])), disp1)
imageio.imwrite(os.path.join(warped_dpt_out_dir, os.path.basename(disp_files[1])), disp2_warped)
# generate mask
mask_save_dir = os.path.join(warped_out_dir, 'mask')
os.makedirs(mask_save_dir, exist_ok=True)
mask = 255 * np.ones((img_h, img_w), dtype=np.uint8)
mask_warped = cv2.warpPerspective(mask, H, (img_w, img_h))
imageio.imwrite(os.path.join(mask_save_dir, '0.png'), mask)
imageio.imwrite(os.path.join(mask_save_dir, '1.png'), mask_warped)
def get_input_data(args, ds_factor=1):
to_tensor = torchvision.transforms.ToTensor()
input_dir = os.path.join(args.input_dir, 'warped')
img_files = sorted(glob.glob(os.path.join(input_dir, '*.png'))) + \
sorted(glob.glob(os.path.join(input_dir, '*.jpg')))
img_file1, img_file2 = img_files
src_img1 = imageio.imread(img_file1) / 255.
src_img2 = imageio.imread(img_file2) / 255.
src_img1 = resize_img(src_img1, ds_factor)
src_img2 = resize_img(src_img2, ds_factor)
h1, w1 = src_img1.shape[:2]
h2, w2 = src_img2.shape[:2]
src_disp1 = imageio.imread(os.path.join(input_dir, 'dpt_depth', os.path.basename(img_file1))) / 65535.
src_disp2 = imageio.imread(os.path.join(input_dir, 'dpt_depth', os.path.basename(img_file2))) / 65535.
src_disp1 = remove_noise_in_dpt_disparity(src_disp1)
src_disp2 = remove_noise_in_dpt_disparity(src_disp2)
src_depth1 = 1. / np.maximum(src_disp1, 1e-2)
src_depth2 = 1. / np.maximum(src_disp2, 1e-2)
src_depth1 = resize_img(src_depth1, ds_factor)
src_depth2 = resize_img(src_depth2, ds_factor)
intrinsic1 = np.array([[max(h1, w1), 0, w1 // 2],
[0, max(h1, w1), h1 // 2],
[0, 0, 1]])
intrinsic2 = np.array([[max(h2, w2), 0, w2 // 2],
[0, max(h2, w2), h2 // 2],
[0, 0, 1]])
pose = np.eye(4)
return {
'src_img1': to_tensor(src_img1).float()[None],
'src_img2': to_tensor(src_img2).float()[None],
'src_depth1': to_tensor(src_depth1).float()[None],
'src_depth2': to_tensor(src_depth2).float()[None],
'intrinsic1': torch.from_numpy(intrinsic1).float()[None],
'intrinsic2': torch.from_numpy(intrinsic2).float()[None],
'tgt_intrinsic': torch.from_numpy(intrinsic2).float()[None],
'pose': torch.from_numpy(pose).float()[None],
'scale_shift1': torch.tensor([1., 0.]).float()[None],
'scale_shift2': torch.tensor([1., 0.]).float()[None],
'src_rgb_file1': [img_file1],
'src_rgb_file2': [img_file2],
'multi_view': [False]
}
def render(args):
device = "cuda:{}".format(args.local_rank)
homography_warp_pairs(args)
print('=========================run 3D Moments...=========================')
data = get_input_data(args)
rgb_file1 = data['src_rgb_file1'][0]
rgb_file2 = data['src_rgb_file2'][0]
frame_id1 = os.path.basename(rgb_file1).split('.')[0]
frame_id2 = os.path.basename(rgb_file2).split('.')[0]
scene_id = rgb_file1.split('/')[-3]
video_out_folder = os.path.join(args.input_dir, 'out')
os.makedirs(video_out_folder, exist_ok=True)
model = SpaceTimeModel(args)
if model.start_step == 0:
raise Exception('no pretrained model found! please check the model path.')
scene_flow_estimator = SceneFlowEstimator(args, model.raft)
inpainter = Inpainter(args)
renderer = ImgRenderer(args, model, scene_flow_estimator, inpainter, device)
model.switch_to_eval()
with torch.no_grad():
renderer.process_data(data)
pts1, pts2, rgb1, rgb2, feat1, feat2, mask, side_ids, optical_flow = \
renderer.render_rgbda_layers_with_scene_flow(return_pts=True)
num_frames = [60, 60, 60, 90]
video_paths = ['up-down', 'zoom-in', 'side', 'circle']
Ts = [
define_camera_path(num_frames[0], 0., -0.08, 0., path_type='double-straight-line', return_t_only=True),
define_camera_path(num_frames[1], 0., 0., -0.24, path_type='straight-line', return_t_only=True),
define_camera_path(num_frames[2], -0.09, 0, -0, path_type='double-straight-line', return_t_only=True),
define_camera_path(num_frames[3], -0.04, -0.04, -0.09, path_type='circle', return_t_only=True),
]
crop = 32
for j, T in enumerate(Ts):
T = torch.from_numpy(T).float().to(renderer.device)
time_steps = np.linspace(0, 1, num_frames[j])
frames = []
for i, t_step in tqdm(enumerate(time_steps), total=len(time_steps),
desc='generating video of {} camera trajectory'.format(video_paths[j])):
pred_img, _, meta = renderer.render_pcd(pts1, pts2, rgb1, rgb2,
feat1, feat2, mask, side_ids,
t=T[i], time=t_step)
frame = (255. * pred_img.detach().cpu().squeeze().permute(1, 2, 0).numpy()).astype(np.uint8)
# mask out fuzzy image boundaries due to no outpainting
img_boundary_mask = (meta['acc'] > 0.5).detach().cpu().squeeze().numpy().astype(np.uint8)
img_boundary_mask_cleaned = process_boundary_mask(img_boundary_mask)
frame = frame * img_boundary_mask_cleaned[..., None]
frame = frame[crop:-crop, crop:-crop]
frames.append(frame)
video_out_file = os.path.join(video_out_folder, '{}_{}-{}-{}.mp4'.format(
video_paths[j], scene_id, frame_id1, frame_id2))
imageio.mimwrite(video_out_file, frames, fps=25, quality=8)
print('space-time videos have been saved in {}.'.format(video_out_folder))
if __name__ == '__main__':
args = config.config_parser()
render(args)
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from third_party.RAFT.core.utils.utils import InputPadder
from core.utils import *
class SceneFlowEstimator():
def __init__(self, args, model):
device = "cuda:{}".format(args.local_rank)
self.device = device
self.raft_model = model
self.train_raft = args.train_raft
def compute_optical_flow(self, img1, img2, return_np_array=False):
'''
:param img1: [B, 3, H, W]
:param img2: [B, 3, H, W]
:return: optical_flow, [B, H, W, 2]
'''
if not self.train_raft:
with torch.no_grad():
assert img1.max() <= 1 and img2.max() <= 1
image1 = img1 * 255.
image2 = img2 * 255.
padder = InputPadder(image1.shape)
image1, image2 = padder.pad(image1, image2)
flow_low, flow_up = self.raft_model.module(image1, image2, iters=20, test_mode=True, padder=padder)
if return_np_array:
return flow_up.cpu().numpy().transpose(0, 2, 3, 1)
return flow_up.permute(0, 2, 3, 1).detach() # [B, h, w, 2]
else:
assert img1.max() <= 1 and img2.max() <= 1
image1 = img1 * 255.
image2 = img2 * 255.
padder = InputPadder(image1.shape)
image1, image2 = padder.pad(image1, image2)
flow_predictions = self.raft_model.module(image1, image2, iters=20, padder=padder)
return flow_predictions[-1].permute(0, 2, 3, 1) # [B, h, w, 2]
def get_mutual_matches(self, flow_f, flow_b, th=2., return_mask=False):
assert flow_f.shape == flow_b.shape
batch_size = flow_f.shape[0]
assert flow_f.shape[1:3] == flow_b.shape[1:3]
h, w = flow_f.shape[1:3]
grid = get_coord_grids_pt(h, w, self.device)[None].float().repeat(batch_size, 1, 1, 1) # [B, h, w, 2]
grid2 = grid + flow_f
mask_boundary = (grid2[..., 0] >= 0) * (grid2[..., 0] <= w - 1) * \
(grid2[..., 1] >= 0) * (grid2[..., 1] <= h - 1)
grid2_normed = normalize_for_grid_sample(grid2, h, w)
flow_b_sampled = F.grid_sample(flow_b.permute(0, 3, 1, 2), grid2_normed,
align_corners=True).permute(0, 2, 3, 1)
grid1 = grid2 + flow_b_sampled
mask_boundary *= (grid1[..., 0] >= 0) * (grid1[..., 0] <= w - 1) * \
(grid1[..., 1] >= 0) * (grid1[..., 1] <= h - 1)
fb_map = flow_f + flow_b_sampled
mask_valid = mask_boundary * (torch.norm(fb_map, dim=-1) < th)
if return_mask:
return mask_valid
coords1 = grid[mask_valid] # [n, 2]
coords2 = grid2[mask_valid] # [n, 2]
return coords1, coords2 |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import imageio
import torch.utils.data.distributed
from pytorch3d.structures import Pointclouds
from core.utils import *
from core.depth_layering import get_depth_bins
from core.pcd import linear_interpolation, create_pcd_renderer
class ImgRenderer():
def __init__(self, args, model, scene_flow_estimator, inpainter, device):
self.args = args
self.model = model
self.scene_flow_estimator = scene_flow_estimator
self.inpainter = inpainter
self.device = device
def process_data(self, data):
self.src_img1 = data['src_img1'].to(self.device)
self.src_img2 = data['src_img2'].to(self.device)
assert self.src_img1.shape == self.src_img2.shape
self.h, self.w = self.src_img1.shape[-2:]
self.src_depth1 = data['src_depth1'].to(self.device)
self.src_depth2 = data['src_depth2'].to(self.device)
self.intrinsic1 = data['intrinsic1'].to(self.device)
self.intrinsic2 = data['intrinsic2'].to(self.device)
self.pose = data['pose'].to(self.device)
self.scale_shift1 = data['scale_shift1'][0]
self.scale_shift2 = data['scale_shift2'][0]
self.is_multi_view = data['multi_view'][0]
self.src_rgb_file1 = data['src_rgb_file1'][0]
self.src_rgb_file2 = data['src_rgb_file2'][0]
if 'tgt_img' in data.keys():
self.tgt_img = data['tgt_img'].to(self.device)
if 'tgt_intrinsic' in data.keys():
self.tgt_intrinsic = data['tgt_intrinsic'].to(self.device)
if 'tgt_pose' in data.keys():
self.tgt_pose = data['tgt_pose'].to(self.device)
if 'time' in data.keys():
self.time = data['time'].item()
if 'src_mask1' in data.keys():
self.src_mask1 = data['src_mask1'].to(self.device)
else:
self.src_mask1 = torch.ones_like(self.src_depth1)
if 'src_mask2' in data.keys():
self.src_mask2 = data['src_mask2'].to(self.device)
else:
self.src_mask2 = torch.ones_like(self.src_depth2)
def feature_extraction(self, rgba_layers, mask_layers, depth_layers):
rgba_layers_in = rgba_layers.squeeze(1)
if self.args.use_inpainting_mask_for_feature:
rgba_layers_in = torch.cat([rgba_layers_in, mask_layers.squeeze(1)], dim=1)
if self.args.use_depth_for_feature:
rgba_layers_in = torch.cat([rgba_layers_in, 1. / torch.clamp(depth_layers.squeeze(1), min=1.)], dim=1)
featmaps = self.model.feature_net(rgba_layers_in)
return featmaps
def apply_scale_shift(self, depth, scale, shift):
disp = 1. / torch.clamp(depth, min=1e-3)
disp = scale * disp + shift
return 1 / torch.clamp(disp, min=1e-3*scale)
def masked_diffuse(self, x, mask, iter=10, kernel_size=35, median_blur=False):
if median_blur:
x = masked_median_blur(x, mask.repeat(1, x.shape[1], 1, 1), kernel_size=5)
for _ in range(iter):
x, mask = masked_smooth_filter(x, mask, kernel_size=kernel_size)
return x, mask
def compute_weight_for_two_frame_blending(self, time, disp1, disp2, alpha1, alpha2):
alpha = 4
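        # Illustrative note: exp(alpha * disp) biases the blend toward the
        # frame whose surface is closer to the camera at each pixel, while the
        # (1 - time) / time factors fade linearly between the two source
        # frames (e.g. at time=0 the weight comes entirely from frame 1
        # wherever alpha1 > 0).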
weight1 = (1 - time) * torch.exp(alpha*disp1) * alpha1
weight2 = time * torch.exp(alpha*disp2) * alpha2
sum_weight = torch.clamp(weight1 + weight2, min=1e-6)
out_weight1 = weight1 / sum_weight
out_weight2 = weight2 / sum_weight
return out_weight1, out_weight2
def transform_all_pts(self, all_pts, pose):
all_pts_out = []
for pts in all_pts:
pts_out = transform_pts_in_3D(pts, pose)
all_pts_out.append(pts_out)
return all_pts_out
def render_pcd(self, pts1, pts2, rgbs1, rgbs2, feats1, feats2, mask, side_ids, R=None, t=None, time=0):
pts = linear_interpolation(pts1, pts2, time)
rgbs = linear_interpolation(rgbs1, rgbs2, time)
feats = linear_interpolation(feats1, feats2, time)
rgb_feat = torch.cat([rgbs, feats], dim=-1)
num_sides = side_ids.max() + 1
assert num_sides == 1 or num_sides == 2
if R is None:
R = torch.eye(3, device=self.device)
if t is None:
t = torch.zeros(3, device=self.device)
pts_ = (R.mm(pts.T) + t.unsqueeze(-1)).T
if self.args.adaptive_pts_radius:
radius = self.args.point_radius / min(self.h, self.w) * 2.0 * pts[..., -1][None] / \
torch.clamp(pts_[..., -1][None], min=1e-6)
else:
radius = self.args.point_radius / min(self.h, self.w) * 2.0
if self.args.vary_pts_radius and np.random.choice([0, 1], p=[0.6, 0.4]):
if type(radius) == torch.Tensor:
factor = 1 + (0.2 * (torch.rand_like(radius) - 0.5))
else:
factor = 1 + (0.2 * (np.random.rand() - 0.5))
radius *= factor
if self.args.use_mask_for_decoding:
rgb_feat = torch.cat([rgb_feat, mask], dim=-1)
if self.args.use_depth_for_decoding:
disp = normalize_0_1(1. / torch.clamp(pts_[..., [-1]], min=1e-6))
rgb_feat = torch.cat([rgb_feat, disp], dim=-1)
global_out_list = []
direct_color_out_list = []
meta = {}
for j in range(num_sides):
mask_side = side_ids == j
renderer = create_pcd_renderer(self.h, self.w, self.tgt_intrinsic.squeeze()[:3, :3],
radius=radius[:, mask_side] if type(radius) == torch.Tensor else radius)
all_pcd_j = Pointclouds(points=[pts_[mask_side]], features=[rgb_feat[mask_side]])
global_out_j = renderer(all_pcd_j)
all_colored_pcd_j = Pointclouds(points=[pts_[mask_side]], features=[rgbs[mask_side]])
direct_rgb_out_j = renderer(all_colored_pcd_j)
global_out_list.append(global_out_j)
direct_color_out_list.append(direct_rgb_out_j)
w1, w2 = self.compute_weight_for_two_frame_blending(time,
global_out_list[0][..., [-1]],
global_out_list[-1][..., [-1]],
global_out_list[0][..., [3]],
global_out_list[-1][..., [3]]
)
direct_rgb_out = w1 * direct_color_out_list[0] + w2 * direct_color_out_list[-1]
pred_rgb = self.model.img_decoder(global_out_list[0].permute(0, 3, 1, 2),
global_out_list[-1].permute(0, 3, 1, 2),
time)
direct_rgb = direct_rgb_out[..., :3].permute(0, 3, 1, 2)
        acc = 0.5 * (global_out_list[0][..., [3]] + global_out_list[-1][..., [3]]).permute(0, 3, 1, 2)
meta['acc'] = acc
return pred_rgb, direct_rgb, meta
def get_reprojection_mask(self, pts, R, t):
pts1_ = (R.mm(pts.T) + t.unsqueeze(-1)).T
mask1 = torch.ones_like(self.src_img1[:, :1].reshape(-1, 1))
mask_renderer = create_pcd_renderer(self.h, self.w, self.tgt_intrinsic.squeeze()[:3, :3],
radius=1.0 / min(self.h, self.w) * 4.)
mask_pcd = Pointclouds(points=[pts1_], features=[mask1])
mask = mask_renderer(mask_pcd).permute(0, 3, 1, 2)
mask = F.max_pool2d(mask, kernel_size=7, stride=1, padding=3)
return mask
def get_cropping_ids(self, mask):
assert mask.shape[:2] == (1, 1)
mask = mask.squeeze()
h, w = mask.shape
mask_mean_x_axis = mask.mean(dim=0)
x_valid = torch.nonzero(mask_mean_x_axis > 0.5)
bad = False
if len(x_valid) < 0.75 * w:
left, right = 0, w - 1 # invalid
bad = True
else:
left, right = x_valid[0][0], x_valid[-1][0]
mask_mean_y_axis = mask.mean(dim=1)
y_valid = torch.nonzero(mask_mean_y_axis > 0.5)
if len(y_valid) < 0.75 * h:
top, bottom = 0, h - 1 # invalid
bad = True
else:
top, bottom = y_valid[0][0], y_valid[-1][0]
assert 0 <= top <= h - 1 and 0 <= bottom <= h - 1 and 0 <= left <= w - 1 and 0 <= right <= w - 1
return top, bottom, left, right, bad
def render_depth_from_mdi(self, depth_layers, alpha_layers):
'''
:param depth_layers: [n_layers, 1, h, w]
:param alpha_layers: [n_layers, 1, h, w]
:return: rendered depth [1, 1, h, w]
'''
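        # Front-to-back alpha compositing: T below is the transmittance of the
        # layers in front of each layer, so the per-layer weights sum to at
        # most 1 at every pixel.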
num_layers = len(depth_layers)
h, w = depth_layers.shape[-2:]
layer_id = torch.arange(num_layers, device=self.device).float()
layer_id_maps = layer_id[..., None, None, None, None].repeat(1, 1, 1, h, w)
T = torch.cumprod(1. - alpha_layers, dim=0)[:-1]
T = torch.cat([torch.ones_like(T[:1]), T], dim=0)
weights = alpha_layers * T
depth_map = torch.sum(weights * depth_layers, dim=0)
depth_map = torch.clamp(depth_map, min=1.)
layer_id_map = torch.sum(weights * layer_id_maps, dim=0)
return depth_map, layer_id_map
def render_rgbda_layers_from_one_view(self):
depth_bins = get_depth_bins(depth=self.src_depth1)
rgba_layers, depth_layers, mask_layers = \
self.inpainter.sequential_inpainting(self.src_img1, self.src_depth1, depth_bins)
coord1 = get_coord_grids_pt(self.h, self.w, device=self.device).float()
src_depth1 = self.apply_scale_shift(self.src_depth1, self.scale_shift1[0], self.scale_shift1[1])
pts1 = unproject_pts_pt(self.intrinsic1, coord1.reshape(-1, 2), src_depth1.flatten())
featmaps = self.feature_extraction(rgba_layers, mask_layers, depth_layers)
depth_layers = self.apply_scale_shift(depth_layers, self.scale_shift1[0], self.scale_shift1[1])
num_layers = len(rgba_layers)
all_pts = []
all_rgbas = []
all_feats = []
all_masks = []
for i in range(num_layers):
alpha_i = rgba_layers[i][:, -1] > 0.5
rgba_i = rgba_layers[i]
mask_i = mask_layers[i]
featmap = featmaps[i][None]
featmap = F.interpolate(featmap, size=(self.h, self.w), mode='bilinear', align_corners=True)
pts1_i = unproject_pts_pt(self.intrinsic1, coord1.reshape(-1, 2), depth_layers[i].flatten())
pts1_i = pts1_i.reshape(1, self.h, self.w, 3)
all_pts.append(pts1_i[alpha_i])
all_rgbas.append(rgba_i.permute(0, 2, 3, 1)[alpha_i])
all_feats.append(featmap.permute(0, 2, 3, 1)[alpha_i])
all_masks.append(mask_i.permute(0, 2, 3, 1)[alpha_i])
all_pts = torch.cat(all_pts)
all_rgbas = torch.cat(all_rgbas)
all_feats = torch.cat(all_feats)
all_masks = torch.cat(all_masks)
all_side_ids = torch.zeros_like(all_masks.squeeze(), dtype=torch.long)
R = self.tgt_pose[0, :3, :3]
t = self.tgt_pose[0, :3, 3]
pred_img, direct_rgb_out, meta = self.render_pcd(all_pts, all_pts,
all_rgbas, all_rgbas,
all_feats, all_feats,
all_masks, all_side_ids,
R, t, 0)
mask = self.get_reprojection_mask(pts1, R, t)
t, b, l, r, bad = self.get_cropping_ids(mask)
gt_img = self.src_img2
skip = False
if not skip and not self.args.eval_mode:
pred_img = pred_img[:, :, t:b, l:r]
mask = mask[:, :, t:b, l:r]
direct_rgb_out = direct_rgb_out[:, :, t:b, l:r]
gt_img = gt_img[:, :, t:b, l:r]
else:
skip = True
res_dict = {
'src_img1': self.src_img1,
'src_img2': self.src_img2,
'pred_img': pred_img,
'gt_img': gt_img,
'mask': mask,
'direct_rgb_out': direct_rgb_out,
'skip': skip
}
return res_dict
def compute_scene_flow_one_side(self, coord, pose,
rgb1, rgb2,
rgba_layers1, rgba_layers2,
featmaps1, featmaps2,
pts1, pts2,
depth_layers1, depth_layers2,
mask_layers1, mask_layers2,
flow_f, flow_b, kernel,
with_inpainted=False):
num_layers1 = len(rgba_layers1)
pts2 = transform_pts_in_3D(pts2, pose).T.reshape(1, 3, self.h, self.w)
mask_mutual_flow = self.scene_flow_estimator.get_mutual_matches(flow_f, flow_b, th=5, return_mask=True).float()
mask_mutual_flow = mask_mutual_flow.unsqueeze(1)
coord1_corsp = coord + flow_f
coord1_corsp_normed = normalize_for_grid_sample(coord1_corsp, self.h, self.w)
pts2_sampled = F.grid_sample(pts2, coord1_corsp_normed, align_corners=True,
mode='nearest', padding_mode="border")
depth2_sampled = pts2_sampled[:, -1:]
rgb2_sampled = F.grid_sample(rgb2, coord1_corsp_normed, align_corners=True, padding_mode="border")
mask_layers2_ds = F.interpolate(mask_layers2.squeeze(1), size=featmaps2.shape[-2:], mode='area')
featmap2 = torch.sum(featmaps2 * mask_layers2_ds, dim=0, keepdim=True)
context2 = torch.sum(mask_layers2_ds, dim=0, keepdim=True)
featmap2_sampled = F.grid_sample(featmap2, coord1_corsp_normed, align_corners=True, padding_mode="border")
context2_sampled = F.grid_sample(context2, coord1_corsp_normed, align_corners=True, padding_mode="border")
mask2_sampled = F.grid_sample(self.src_mask2, coord1_corsp_normed, align_corners=True, padding_mode="border")
featmap2_sampled = featmap2_sampled / torch.clamp(context2_sampled, min=1e-6)
context2_sampled = (context2_sampled > 0.5).float()
last_pts2_i = torch.zeros_like(pts2.permute(0, 2, 3, 1))
last_alpha_i = torch.zeros_like(rgba_layers1[0][:, -1], dtype=torch.bool)
all_pts = []
all_rgbas = []
all_feats = []
all_rgbas_end = []
all_feats_end = []
all_masks = []
all_pts_end = []
all_optical_flows = []
for i in range(num_layers1):
alpha_i = (rgba_layers1[i][:, -1]*self.src_mask1.squeeze(1)*mask2_sampled.squeeze(1)) > 0.5
rgba_i = rgba_layers1[i]
mask_i = mask_layers1[i]
mask_no_mutual_flow = mask_i * context2_sampled
mask_gau_i = mask_no_mutual_flow * mask_mutual_flow
mask_no_mutual_flow = erosion(mask_no_mutual_flow, kernel)
mask_gau_i = erosion(mask_gau_i, kernel)
featmap1 = featmaps1[i][None]
featmap1 = F.interpolate(featmap1, size=(self.h, self.w), mode='bilinear', align_corners=True)
pts1_i = unproject_pts_pt(self.intrinsic1, coord.reshape(-1, 2), depth_layers1[i].flatten())
pts1_i = pts1_i.reshape(1, self.h, self.w, 3)
flow_inpainted, mask_no_mutual_flow_ = self.masked_diffuse(flow_f.permute(0, 3, 1, 2),
mask_no_mutual_flow,
kernel_size=15, iter=7)
coord_inpainted = coord.clone()
coord_inpainted_ = coord + flow_inpainted.permute(0, 2, 3, 1)
mask_no_mutual_flow_bool = (mask_no_mutual_flow_ > 1e-6).squeeze(1)
coord_inpainted[mask_no_mutual_flow_bool] = coord_inpainted_[mask_no_mutual_flow_bool]
depth_inpainted = depth_layers1[i].clone()
depth_inpainted_, mask_gau_i_ = self.masked_diffuse(depth2_sampled, mask_gau_i,
kernel_size=15, iter=7)
mask_gau_i_bool = (mask_gau_i_ > 1e-6).squeeze(1)
depth_inpainted.squeeze(1)[mask_gau_i_bool] = depth_inpainted_.squeeze(1)[mask_gau_i_bool]
pts2_i = unproject_pts_pt(self.intrinsic2, coord_inpainted.contiguous().reshape(-1, 2),
depth_inpainted.flatten()).reshape(1, self.h, self.w, 3)
if i > 0:
mask_wrong_ordering = (pts2_i[..., -1] <= last_pts2_i[..., -1]) * last_alpha_i
pts2_i[mask_wrong_ordering] = last_pts2_i[mask_wrong_ordering] * 1.01
rgba_end = mask_gau_i * torch.cat([rgb2_sampled, mask_gau_i], dim=1) + (1 - mask_gau_i) * rgba_i
feat_end = mask_gau_i * featmap2_sampled + (1 - mask_gau_i) * featmap1
last_alpha_i[alpha_i] = True
last_pts2_i[alpha_i] = pts2_i[alpha_i]
if with_inpainted:
mask_keep = alpha_i
else:
mask_keep = mask_i.squeeze(1).bool()
all_pts.append(pts1_i[mask_keep])
all_rgbas.append(rgba_i.permute(0, 2, 3, 1)[mask_keep])
all_feats.append(featmap1.permute(0, 2, 3, 1)[mask_keep])
all_masks.append(mask_i.permute(0, 2, 3, 1)[mask_keep])
all_pts_end.append(pts2_i[mask_keep])
all_rgbas_end.append(rgba_end.permute(0, 2, 3, 1)[mask_keep])
all_feats_end.append(feat_end.permute(0, 2, 3, 1)[mask_keep])
all_optical_flows.append(flow_inpainted.permute(0, 2, 3, 1)[mask_keep])
return all_pts, all_pts_end, all_rgbas, all_rgbas_end, all_feats, all_feats_end, all_masks, all_optical_flows
def render_rgbda_layers_with_scene_flow(self, return_pts=False):
kernel = torch.ones(5, 5, device=self.device)
flow_f = self.scene_flow_estimator.compute_optical_flow(self.src_img1, self.src_img2)
flow_b = self.scene_flow_estimator.compute_optical_flow(self.src_img2, self.src_img1)
depth_bins1 = get_depth_bins(depth=self.src_depth1)
depth_bins2 = get_depth_bins(depth=self.src_depth2)
rgba_layers1, depth_layers1, mask_layers1 = \
self.inpainter.sequential_inpainting(self.src_img1, self.src_depth1, depth_bins1)
rgba_layers2, depth_layers2, mask_layers2 = \
self.inpainter.sequential_inpainting(self.src_img2, self.src_depth2, depth_bins2)
if self.args.visualize_rgbda_layers:
self.save_rgbda_layers(self.src_rgb_file1, rgba_layers1, depth_layers1, mask_layers1)
self.save_rgbda_layers(self.src_rgb_file2, rgba_layers2, depth_layers2, mask_layers2)
featmaps1 = self.feature_extraction(rgba_layers1, mask_layers1, depth_layers1)
featmaps2 = self.feature_extraction(rgba_layers2, mask_layers2, depth_layers2)
depth_layers1 = self.apply_scale_shift(depth_layers1, self.scale_shift1[0], self.scale_shift1[1])
depth_layers2 = self.apply_scale_shift(depth_layers2, self.scale_shift2[0], self.scale_shift2[1])
processed_depth1, layer_id_map1 = self.render_depth_from_mdi(depth_layers1, rgba_layers1[:, :, -1:])
processed_depth2, layer_id_map2 = self.render_depth_from_mdi(depth_layers2, rgba_layers2[:, :, -1:])
assert self.src_img1.shape[-2:] == self.src_img2.shape[-2:]
h, w = self.src_img1.shape[-2:]
coord = get_coord_grids_pt(h, w, device=self.device).float()[None]
pts1 = unproject_pts_pt(self.intrinsic1, coord.reshape(-1, 2), processed_depth1.flatten())
pts2 = unproject_pts_pt(self.intrinsic2, coord.reshape(-1, 2), processed_depth2.flatten())
all_pts_11, all_pts_12, all_rgbas_11, all_rgbas_12, all_feats_11, all_feats_12,\
all_masks_1, all_optical_flow_1 = \
self.compute_scene_flow_one_side(coord, torch.inverse(self.pose), self.src_img1, self.src_img2,
rgba_layers1, rgba_layers2, featmaps1, featmaps2,
pts1, pts2, depth_layers1, depth_layers2, mask_layers1, mask_layers2,
flow_f, flow_b, kernel, with_inpainted=True)
all_pts_22, all_pts_21, all_rgbas_22, all_rgbas_21, all_feats_22, all_feats_21,\
all_masks_2, all_optical_flow_2 = \
self.compute_scene_flow_one_side(coord, self.pose, self.src_img2, self.src_img1,
rgba_layers2, rgba_layers1, featmaps2, featmaps1,
pts2, pts1, depth_layers2, depth_layers1, mask_layers2, mask_layers1,
flow_b, flow_f, kernel, with_inpainted=True)
if not torch.allclose(self.pose, torch.eye(4, device=self.device)):
all_pts_21 = self.transform_all_pts(all_pts_21, torch.inverse(self.pose))
all_pts_22 = self.transform_all_pts(all_pts_22, torch.inverse(self.pose))
all_pts = torch.cat(all_pts_11+all_pts_21)
all_rgbas = torch.cat(all_rgbas_11+all_rgbas_21)
all_feats = torch.cat(all_feats_11+all_feats_21)
all_masks = torch.cat(all_masks_1+all_masks_2)
all_pts_end = torch.cat(all_pts_12+all_pts_22)
all_rgbas_end = torch.cat(all_rgbas_12+all_rgbas_22)
all_feats_end = torch.cat(all_feats_12+all_feats_22)
all_side_ids = torch.zeros_like(all_masks.squeeze(), dtype=torch.long)
num_pts_2 = sum([len(x) for x in all_pts_21])
all_side_ids[-num_pts_2:] = 1
all_optical_flow = torch.cat(all_optical_flow_1+all_optical_flow_2)
if return_pts:
return all_pts, all_pts_end, all_rgbas, all_rgbas_end, all_feats, all_feats_end, \
all_masks, all_side_ids, all_optical_flow
R = self.tgt_pose[0, :3, :3]
t = self.tgt_pose[0, :3, 3]
pred_img, direct_rgb_out, meta = self.render_pcd(all_pts, all_pts_end,
all_rgbas, all_rgbas_end,
all_feats, all_feats_end,
all_masks, all_side_ids,
R, t, self.time)
mask1 = self.get_reprojection_mask(pts1, R, t)
pose2_to_tgt = self.tgt_pose.bmm(torch.inverse(self.pose))
mask2 = self.get_reprojection_mask(pts2, pose2_to_tgt[0, :3, :3], pose2_to_tgt[0, :3, 3])
mask = (mask1+mask2) * 0.5
gt_img = self.tgt_img
t, b, l, r, bad = self.get_cropping_ids(mask)
skip = False
if not skip and not self.args.eval_mode:
pred_img = pred_img[:, :, t:b, l:r]
mask = mask[:, :, t:b, l:r]
direct_rgb_out = direct_rgb_out[:, :, t:b, l:r]
gt_img = gt_img[:, :, t:b, l:r]
else:
skip = True
res_dict = {
'src_img1': self.src_img1,
'src_img2': self.src_img2,
'pred_img': pred_img,
'gt_img': gt_img,
'mask': mask,
'direct_rgb_out': direct_rgb_out,
'alpha_layers1': rgba_layers1[:, :, [-1]],
'alpha_layers2': rgba_layers2[:, :, [-1]],
'mask_layers1': mask_layers1,
'mask_layers2': mask_layers2,
'skip': skip
}
return res_dict
def dynamic_view_synthesis_with_inpainting(self):
if self.is_multi_view:
return self.render_rgbda_layers_from_one_view()
else:
return self.render_rgbda_layers_with_scene_flow()
def get_prediction(self, data):
# process data first
self.process_data(data)
return self.dynamic_view_synthesis_with_inpainting()
def save_rgbda_layers(self, src_rgb_file, rgba_layers, depth_layers, mask_layers):
frame_id = os.path.basename(src_rgb_file).split('.')[0]
scene_id = src_rgb_file.split('/')[-3]
out_dir = os.path.join(self.args.rootdir, 'out', self.args.expname, 'vis',
'{}-{}'.format(scene_id, frame_id))
os.makedirs(out_dir, exist_ok=True)
alpha_layers = rgba_layers[:, :, [-1]]
for i, rgba_layer in enumerate(rgba_layers):
save_filename = os.path.join(out_dir, 'rgb_original_{}.png'.format(i))
rgba_layer_ = rgba_layer * mask_layers[i]
rgba_np = rgba_layer_.detach().squeeze().permute(1, 2, 0).cpu().numpy()
imageio.imwrite(save_filename, float2uint8(rgba_np))
for i, rgba_layer in enumerate(rgba_layers):
save_filename = os.path.join(out_dir, 'rgb_{}.png'.format(i))
rgba_np = rgba_layer.detach().squeeze().permute(1, 2, 0).cpu().numpy()
imageio.imwrite(save_filename, float2uint8(rgba_np))
for i, depth_layer in enumerate(depth_layers):
save_filename = os.path.join(out_dir, 'disparity_original_{}.png'.format(i))
disparity = (1. / torch.clamp(depth_layer, min=1e-6)) * alpha_layers[i]
disparity = torch.cat([disparity, disparity, disparity, alpha_layers[i]*mask_layers[i]], dim=1)
disparity_np = disparity.detach().squeeze().cpu().numpy().transpose(1, 2, 0)
imageio.imwrite(save_filename, float2uint8(disparity_np))
for i, depth_layer in enumerate(depth_layers):
save_filename = os.path.join(out_dir, 'disparity_{}.png'.format(i))
disparity = (1. / torch.clamp(depth_layer, min=1e-6)) * alpha_layers[i]
disparity = torch.cat([disparity, disparity, disparity, alpha_layers[i]], dim=1)
disparity_np = disparity.detach().squeeze().cpu().numpy().transpose(1, 2, 0)
imageio.imwrite(save_filename, float2uint8(disparity_np))
for i, mask_layer in enumerate(mask_layers):
save_filename = os.path.join(out_dir, 'mask_{}.png'.format(i))
tri_mask = 0.5 * alpha_layers[i] + 0.5 * mask_layer
tri_mask_np = tri_mask.detach().squeeze().cpu().numpy()
imageio.imwrite(save_filename, float2uint8(tri_mask_np))
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from sklearn.cluster import AgglomerativeClustering
def get_depth_bins(depth=None, disparity=None, num_bins=None):
"""
:param depth: [1, 1, H, W]
:param disparity: [1, 1, H, W]
:return: depth_bins
"""
assert (disparity is not None) or (depth is not None)
if disparity is None:
assert depth.min() > 1e-2
disparity = 1. / depth
if depth is None:
depth = 1. / torch.clamp(disparity, min=1e-2)
assert depth.shape[:2] == (1, 1) and disparity.shape[:2] == (1, 1)
disparity_max = disparity.max().item()
disparity_min = disparity.min().item()
disparity_feat = disparity[:, :, ::10, ::10].reshape(-1, 1).cpu().numpy()
disparity_feat = (disparity_feat - disparity_min) / (disparity_max - disparity_min)
if num_bins is None:
n_clusters = None
distance_threshold = 5
else:
n_clusters = num_bins
distance_threshold = None
result = AgglomerativeClustering(n_clusters=n_clusters, distance_threshold=distance_threshold).fit(disparity_feat)
num_bins = result.n_clusters_ if n_clusters is None else n_clusters
depth_bins = [depth.min().item()]
for i in range(num_bins):
th = (disparity_feat[result.labels_ == i]).min()
th = th * (disparity_max - disparity_min) + disparity_min
depth_bins.append(1. / th)
depth_bins = sorted(depth_bins)
depth_bins[0] = depth.min() - 1e-6
depth_bins[-1] = depth.max() + 1e-6
return depth_bins
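# Minimal smoke-test sketch (illustrative only; the toy depth map below is synthetic
# and simply splits the image into two planes at depths 1 and 4).
if __name__ == '__main__':
    toy_depth = torch.ones(1, 1, 120, 160)
    toy_depth[..., 80:] = 4.0  # right half of the image lies on a far plane
    print(get_depth_bins(depth=toy_depth))  # expect bin edges clustered near depths 1 and 4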
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from kornia.morphology import opening, erosion
from kornia.filters import gaussian_blur2d
from networks.inpainting_nets import Inpaint_Depth_Net, Inpaint_Color_Net
from core.utils import masked_median_blur
def refine_near_depth_discontinuity(depth, alpha, kernel_size=11):
'''
median filtering the depth discontinuity boundary
'''
depth = depth * alpha
depth_median_blurred = masked_median_blur(depth, alpha, kernel_size=kernel_size) * alpha
alpha_eroded = erosion(alpha, kernel=torch.ones(kernel_size, kernel_size).to(alpha.device))
depth[alpha_eroded == 0] = depth_median_blurred[alpha_eroded == 0]
return depth
def define_inpainting_bbox(alpha, border=40):
'''
define the bounding box for inpainting
:param alpha: alpha map [1, 1, h, w]
:param border: the minimum distance from a valid pixel to the border of the bbox
:return: [1, 1, h, w], a 0/1 map that indicates the inpainting region
'''
assert alpha.ndim == 4 and alpha.shape[:2] == (1, 1)
x, y = torch.nonzero(alpha)[:, -2:].T
h, w = alpha.shape[-2:]
row_min, row_max = x.min(), x.max()
col_min, col_max = y.min(), y.max()
out = torch.zeros_like(alpha)
x0, x1 = max(row_min - border, 0), min(row_max + border, h - 1)
y0, y1 = max(col_min - border, 0), min(col_max + border, w - 1)
out[:, :, x0:x1, y0:y1] = 1
return out
class Inpainter():
def __init__(self, args, device='cuda'):
self.args = args
self.device = device
print("Loading depth model...")
depth_feat_model = Inpaint_Depth_Net()
depth_feat_weight = torch.load('inpainting_ckpts/depth-model.pth', map_location=torch.device(device))
depth_feat_model.load_state_dict(depth_feat_weight)
depth_feat_model = depth_feat_model.to(device)
depth_feat_model.eval()
self.depth_feat_model = depth_feat_model.to(device)
print("Loading rgb model...")
rgb_model = Inpaint_Color_Net()
rgb_feat_weight = torch.load('inpainting_ckpts/color-model.pth', map_location=torch.device(device))
rgb_model.load_state_dict(rgb_feat_weight)
rgb_model.eval()
self.rgb_model = rgb_model.to(device)
# kernels
self.context_erosion_kernel = torch.ones(10, 10).to(self.device)
self.alpha_kernel = torch.ones(3, 3).to(self.device)
@staticmethod
def process_depth_for_network(depth, context, log_depth=True):
if log_depth:
log_depth = torch.log(depth + 1e-8) * context
mean_depth = torch.mean(log_depth[context > 0])
zero_mean_depth = (log_depth - mean_depth) * context
else:
zero_mean_depth = depth
mean_depth = 0
return zero_mean_depth, mean_depth
@staticmethod
def deprocess_depth(zero_mean_depth, mean_depth, log_depth=True):
if log_depth:
depth = torch.exp(zero_mean_depth + mean_depth)
else:
depth = zero_mean_depth
return depth
def inpaint_rgb(self, holes, context, context_rgb, edge):
# inpaint rgb
with torch.no_grad():
inpainted_rgb = self.rgb_model.forward_3P(holes, context, context_rgb, edge,
unit_length=128, cuda=self.device)
inpainted_rgb = inpainted_rgb.detach() * holes + context_rgb
inpainted_a = holes + context
inpainted_a = opening(inpainted_a, self.alpha_kernel)
inpainted_rgba = torch.cat([inpainted_rgb, inpainted_a], dim=1)
return inpainted_rgba
def inpaint_depth(self, depth, holes, context, edge, depth_range):
zero_mean_depth, mean_depth = self.process_depth_for_network(depth, context)
with torch.no_grad():
inpainted_depth = self.depth_feat_model.forward_3P(holes, context, zero_mean_depth, edge,
unit_length=128, cuda=self.device)
inpainted_depth = self.deprocess_depth(inpainted_depth.detach(), mean_depth)
inpainted_depth[context > 0.5] = depth[context > 0.5]
inpainted_depth = gaussian_blur2d(inpainted_depth, (3, 3), (1.5, 1.5))
inpainted_depth[context > 0.5] = depth[context > 0.5]
# if the inpainted depth in the background is smaller than the foreground depth,
# the inpainted content would mistakenly occlude the foreground,
# so clip the inpainted depth in this situation.
mask_wrong_depth_ordering = inpainted_depth < depth
inpainted_depth[mask_wrong_depth_ordering] = depth[mask_wrong_depth_ordering] * 1.01
inpainted_depth = torch.clamp(inpainted_depth, min=min(depth_range)*0.9)
return inpainted_depth
def sequential_inpainting(self, rgb, depth, depth_bins):
'''
:param rgb: [1, 3, H, W]
:param depth: [1, 1, H, W]
:param depth_bins: list of depth bin edges used to split the scene into layers
:return: rgba_layers: [N, 1, 4, H, W]: the inpainted RGBA layers
depth_layers: [N, 1, 1, H, W]: the inpainted depth layers
mask_layers: [N, 1, 1, H, W]: the original alpha layers (before inpainting)
'''
num_bins = len(depth_bins) - 1
rgba_layers = []
depth_layers = []
mask_layers = []
for i in range(num_bins):
alpha_i = (depth >= depth_bins[i]) * (depth < depth_bins[i+1])
alpha_i = alpha_i.float()
if i == 0:
rgba_i = torch.cat([rgb*alpha_i, alpha_i], dim=1)
rgba_layers.append(rgba_i)
depth_i = refine_near_depth_discontinuity(depth, alpha_i)
depth_layers.append(depth_i)
mask_layers.append(alpha_i)
pre_alpha = alpha_i.bool()
pre_inpainted_depth = depth * alpha_i
else:
alpha_i_eroded = erosion(alpha_i, self.context_erosion_kernel)
if alpha_i_eroded.sum() < 10:
continue
context = erosion((depth >= depth_bins[i]).float(), self.context_erosion_kernel)
holes = 1. - context
bbox = define_inpainting_bbox(context, border=40)
holes *= bbox
edge = torch.zeros_like(holes)
context_rgb = rgb * context
# inpaint depth
inpainted_depth_i = self.inpaint_depth(depth, holes, context, edge, (depth_bins[i], depth_bins[i+1]))
depth_near_mask = (inpainted_depth_i < depth_bins[i+1]).float()
# inpaint rgb
inpainted_rgba_i = self.inpaint_rgb(holes, context, context_rgb, edge)
if i < num_bins - 1:
# only keep the content whose depth is smaller than the upper limit of the current layer
# otherwise the inpainted content on the far-depth edge will falsely occlude the next layer.
inpainted_rgba_i *= depth_near_mask
inpainted_depth_i = refine_near_depth_discontinuity(inpainted_depth_i, inpainted_rgba_i[:, [-1]])
inpainted_alpha_i = inpainted_rgba_i[:, [-1]].bool()
mask_wrong_ordering = (inpainted_depth_i <= pre_inpainted_depth) * inpainted_alpha_i
inpainted_depth_i[mask_wrong_ordering] = pre_inpainted_depth[mask_wrong_ordering] * 1.05
rgba_layers.append(inpainted_rgba_i)
depth_layers.append(inpainted_depth_i)
mask_layers.append(context * depth_near_mask) # original mask
pre_alpha[inpainted_alpha_i] = True
pre_inpainted_depth[inpainted_alpha_i > 0] = inpainted_depth_i[inpainted_alpha_i > 0]
rgba_layers = torch.stack(rgba_layers)
depth_layers = torch.stack(depth_layers)
mask_layers = torch.stack(mask_layers)
return rgba_layers, depth_layers, mask_layers
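# Minimal usage sketch (illustrative; assumes the two checkpoint files loaded in __init__ exist,
# rgb is [1, 3, H, W] in [0, 1] and depth is a matching [1, 1, H, W] map):
# inpainter = Inpainter(args, device='cuda')
# depth_bins = get_depth_bins(depth=depth)  # helper defined earlier in this dump
# rgba_layers, depth_layers, mask_layers = inpainter.sequential_inpainting(rgb, depth, depth_bins)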
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
import config
import numpy as np
import torch
import torch.nn as nn
from pytorch3d.renderer import (
PerspectiveCameras,
PointsRasterizationSettings,
PointsRasterizer,
AlphaCompositor,
)
args = config.config_parser()
class PointsRenderer(nn.Module):
"""
A class for rendering a batch of points. The class should
be initialized with a rasterizer and compositor class which each have a forward
function.
"""
def __init__(self, rasterizer, compositor) -> None:
super().__init__()
self.rasterizer = rasterizer
self.compositor = compositor
def to(self, device):
# Manually move the rasterizer to the device, as the cameras
# it holds are not of type nn.Module
self.rasterizer = self.rasterizer.to(device)
self.compositor = self.compositor.to(device)
return self
def forward(self, point_clouds, **kwargs) -> torch.Tensor:
fragments = self.rasterizer(point_clouds, **kwargs)
# Construct weights based on the distance of each rasterized point to the true point.
# However, this could be done differently: e.g. predicted by a network as opposed
# to being a fixed function of the distance.
r = self.rasterizer.raster_settings.radius
if type(r) == torch.Tensor:
if r.shape[-1] > 1:
idx = fragments.idx.clone()
idx[idx == -1] = 0
r = r[:, idx.squeeze().long()]
r = r.permute(0, 3, 1, 2)
dists2 = fragments.dists.permute(0, 3, 1, 2)
weights = 1 - dists2 / (r * r)
images = self.compositor(
fragments.idx.long().permute(0, 3, 1, 2),
weights,
point_clouds.features_packed().permute(1, 0),
**kwargs,
)
# permute so image comes at the end
images = images.permute(0, 2, 3, 1)
return images
def linear_interpolation(data0, data1, time):
return (1. - time) * data0 + time * data1
def create_pcd_renderer(h, w, intrinsics, R=None, T=None, radius=None, device="cuda"):
# Initialize a camera.
fx = intrinsics[0, 0]
fy = intrinsics[1, 1]
if R is None:
R = torch.eye(3)[None] # (1, 3, 3)
if T is None:
T = torch.zeros(1, 3) # (1, 3)
cameras = PerspectiveCameras(R=R, T=T,
device=device,
focal_length=((-fx, -fy),),
principal_point=(tuple(intrinsics[:2, -1]),),
image_size=((h, w),),
in_ndc=False,
)
# Define the settings for rasterization. The output image size matches (h, w), each point is
# splatted with the radius computed below, and up to points_per_pixel=8 points are composited
# per pixel. Refer to raster_points.py for explanations of these parameters.
if radius is None:
radius = args.point_radius / min(h, w) * 2.0
if args.vary_pts_radius:
if np.random.choice([0, 1], p=[0.6, 0.4]):
factor = 1 + (0.2 * (np.random.rand() - 0.5))
radius *= factor
raster_settings = PointsRasterizationSettings(
image_size=(h, w),
radius=radius,
points_per_pixel=8,
)
# Create a points renderer by compositing points using an alpha compositor (nearer points
# are weighted more heavily).
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
renderer = PointsRenderer(
rasterizer=rasterizer,
compositor=AlphaCompositor()
)
return renderer
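# Illustrative usage sketch (hypothetical values; not part of the original file). The intrinsics
# are assumed to be a pinhole matrix with fx, fy on the diagonal and the principal point in the
# last column, matching how they are indexed above:
# intrinsics = torch.tensor([[500., 0., 320.],
#                            [0., 500., 240.],
#                            [0., 0., 1.]])
# renderer = create_pcd_renderer(480, 640, intrinsics, device='cuda')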
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from kornia.filters import gaussian_blur2d, box_blur, median_blur
from kornia.filters.kernels import get_binary_kernel2d
from kornia.morphology import erosion
from scipy.interpolate import interp1d
from scipy.ndimage import median_filter
def float2uint8(x):
return (255. * x).astype(np.uint8)
def float2uint16(x):
return (65535 * x).astype(np.uint16)
def normalize_0_1(x):
x_min, x_max = x.min(), x.max()
return (x - x_min) / (x_max - x_min)
def homogenize_np(coord):
"""
append ones in the last dimension
:param coord: [...., 2/3]
:return: homogenous coordinates
"""
return np.concatenate([coord, np.ones_like(coord[..., :1])], axis=-1)
def homogenize_pt(coord):
return torch.cat([coord, torch.ones_like(coord[..., :1])], dim=-1)
def get_coord_grids_pt(h, w, device, homogeneous=False):
"""
create pxiel coordinate grid
:param h: height
:param w: weight
:param device: device
:param homogeneous: if homogeneous coordinate
:return: coordinates [h, w, 2]
"""
y = torch.arange(0, h).to(device)
x = torch.arange(0, w).to(device)
grid_y, grid_x = torch.meshgrid(y, x)
if homogeneous:
return torch.stack([grid_x, grid_y, torch.ones_like(grid_x)], dim=-1)
return torch.stack([grid_x, grid_y], dim=-1) # [h, w, 2]
def normalize_for_grid_sample(coords, h, w):
device = coords.device
coords_normed = coords / torch.tensor([w-1., h-1.]).to(device) * 2. - 1.
return coords_normed
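# Example (illustrative): for w=640, a pixel x-coordinate of 0 maps to -1.0 and 639 maps to +1.0,
# matching the align_corners=True convention used with F.grid_sample elsewhere in this code.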
def unproject_pts_np(intrinsics, coords, depth):
if coords.shape[-1] == 2:
coords = homogenize_np(coords)
intrinsics = intrinsics.squeeze()[:3, :3]
coords = np.linalg.inv(intrinsics).dot(coords.T) * depth.reshape(1, -1)
return coords.T # [n, 3]
def unproject_pts_pt(intrinsics, coords, depth):
if coords.shape[-1] == 2:
coords = homogenize_pt(coords)
intrinsics = intrinsics.squeeze()[:3, :3]
coords = torch.inverse(intrinsics).mm(coords.T) * depth.reshape(1, -1)
return coords.T # [n, 3]
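# Worked example (illustrative): with intrinsics [[100, 0, 160], [0, 100, 120], [0, 0, 1]],
# the pixel (u, v) = (260, 220) at depth 2 unprojects to ((260-160)/100*2, (220-120)/100*2, 2)
# = (2.0, 2.0, 2.0) in camera coordinates.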
def pixel2cam(depth, pixel_coords, intrinsics, is_homogeneous=True):
"""Transforms coordinates in the pixel frame to the camera frame.
Args:
depth: [batch, height, width]
pixel_coords: homogeneous pixel coordinates [batch, 3, height, width]
intrinsics: camera intrinsics [batch, 3, 3]
is_homogeneous: return in homogeneous coordinates
Returns:
Coords in the camera frame [batch, 3 (4 if homogeneous), height, width]
"""
if depth.ndim == 4:
assert depth.shape[1] == 1
depth = depth.squeeze(1)
batch, height, width = depth.shape
depth = depth.reshape(batch, 1, -1)
pixel_coords = pixel_coords.reshape(batch, 3, -1)
cam_coords = torch.inverse(intrinsics).bmm(pixel_coords) * depth
if is_homogeneous:
ones = torch.ones_like(depth)
cam_coords = torch.cat([cam_coords, ones], dim=1)
cam_coords = cam_coords.reshape(batch, -1, height, width)
return cam_coords
def transform_pts_in_3D(pts, pose, return_homogeneous=False):
'''
:param pts: nx3, tensor
:param pose: 4x4, tensor
:return: nx3 or nx4, tensor
'''
pts_h = homogenize_pt(pts)
pose = pose.squeeze()
assert pose.shape == (4, 4)
transformed_pts_h = pose.mm(pts_h.T).T # [n, 4]
if return_homogeneous:
return transformed_pts_h
return transformed_pts_h[..., :3]
def crop_boundary(x, ratio):
h, w = x.shape[-2:]
crop_h = int(h * ratio)
crop_w = int(w * ratio)
return x[:, :, crop_h:h-crop_h, crop_w:w-crop_w]
def masked_smooth_filter(x, mask, kernel_size=9, sigma=1):
'''
:param x: [B, n, h, w]
:param mask: [B, 1, h, w]
:return: [B, n, h, w]
'''
x_ = x * mask
x_ = box_blur(x_, (kernel_size, kernel_size), border_type='constant')
mask_ = box_blur(mask, (kernel_size, kernel_size), border_type='constant')
x_ = x_ / torch.clamp(mask_, min=1e-6)
mask_bool = (mask.repeat(1, x.shape[1], 1, 1) > 1e-6).float()
out = mask_bool * x + (1. - mask_bool) * x_
return out, mask_
def remove_noise_in_dpt_disparity(disparity, kernel_size=5):
return median_filter(disparity, size=kernel_size)
def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]:
r"""Utility function that computes zero padding tuple."""
computed: List[int] = [(k - 1) // 2 for k in kernel_size]
return computed[0], computed[1]
def masked_median_blur(input, mask, kernel_size=9):
assert input.shape == mask.shape
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size))
# prepare kernel
kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(input)
b, c, h, w = input.shape
# map the local window to single vector
features: torch.Tensor = F.conv2d(input.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1)
masks: torch.Tensor = F.conv2d(mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1)
features = features.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w)
min_value, max_value = features.min(), features.max()
masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w)
index_invalid = (1 - masks).nonzero(as_tuple=True)
index_b, index_c, index_h, index_w, index_k = index_invalid
features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = min_value
features[(index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2])] = max_value
# compute the median along the feature axis
median: torch.Tensor = torch.median(features, dim=-1)[0]
return median
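# Note on the trick above (illustrative; not part of the original comments): window entries whose
# mask is 0 are overwritten alternately with the global min and max, so they fall at the two
# extremes of each sorted window and, when roughly balanced, leave the median determined by the
# valid pixels only.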
def define_camera_path(num_frames, x, y, z, path_type='circle', return_t_only=False):
generic_pose = np.eye(4)
tgt_poses = []
if path_type == 'straight-line':
corner_points = np.array([[0, 0, 0], [(0 + x) * 0.5, (0 + y) * 0.5, (0 + z) * 0.5], [x, y, z]])
corner_t = np.linspace(0, 1, len(corner_points))
t = np.linspace(0, 1, num_frames)
cs = interp1d(corner_t, corner_points, axis=0, kind='quadratic')
spline = cs(t)
xs, ys, zs = [xx.squeeze() for xx in np.split(spline, 3, 1)]
elif path_type == 'double-straight-line':
corner_points = np.array([[-x, -y, -z], [0, 0, 0], [x, y, z]])
corner_t = np.linspace(0, 1, len(corner_points))
t = np.linspace(0, 1, num_frames)
cs = interp1d(corner_t, corner_points, axis=0, kind='quadratic')
spline = cs(t)
xs, ys, zs = [xx.squeeze() for xx in np.split(spline, 3, 1)]
elif path_type == 'circle':
xs, ys, zs = [], [], []
for frame_id, bs_shift_val in enumerate(np.arange(-2.0, 2.0, (4./num_frames))):
xs += [np.cos(bs_shift_val * np.pi) * 1 * x]
ys += [np.sin(bs_shift_val * np.pi) * 1 * y]
zs += [np.cos(bs_shift_val * np.pi/2.) * 1 * z]
xs, ys, zs = np.array(xs), np.array(ys), np.array(zs)
elif path_type == 'debug':
xs = np.array([x, 0, -x, 0, 0])
ys = np.array([0, y, 0, -y, 0])
zs = np.array([0, 0, 0, 0, z])
else:
raise NotImplementedError
xs, ys, zs = np.array(xs), np.array(ys), np.array(zs)
if return_t_only:
return np.stack([xs, ys, zs], axis=1) # [n, 3]
for xx, yy, zz in zip(xs, ys, zs):
tgt_poses.append(generic_pose * 1.)
tgt_poses[-1][:3, -1] = np.array([xx, yy, zz])
return tgt_poses
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import importlib
def class_for_name(module_name, class_name):
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
return getattr(m, class_name)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation, padding_mode='reflect')
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False, padding_mode='reflect')
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes, track_running_stats=False, affine=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes, track_running_stats=False, affine=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width, track_running_stats=False, affine=True)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width, track_running_stats=False, affine=True)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion, track_running_stats=False, affine=True)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class conv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, stride):
super(conv, self).__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(num_in_layers,
num_out_layers,
kernel_size=kernel_size,
stride=stride,
padding=(self.kernel_size - 1) // 2,
padding_mode='reflect')
self.bn = nn.BatchNorm2d(num_out_layers, track_running_stats=False, affine=True)
def forward(self, x):
return F.elu(self.bn(self.conv(x)), inplace=True)
class upconv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):
super(upconv, self).__init__()
self.scale = scale
self.conv = conv(num_in_layers, num_out_layers, kernel_size, 1)
def forward(self, x):
x = nn.functional.interpolate(x, scale_factor=self.scale, align_corners=True, mode='bilinear')
return self.conv(x)
class ResUNet(nn.Module):
def __init__(self, args,
encoder='resnet34',
in_ch=8,
out_ch=32,
norm_layer=None,
):
super(ResUNet, self).__init__()
assert encoder in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'], "Incorrect encoder type"
if encoder in ['resnet18', 'resnet34']:
filters = [64, 128, 256, 512]
else:
filters = [256, 512, 1024, 2048]
# resnet = class_for_name("torchvision.models", encoder)(pretrained=True).to("cuda:{}".format(args.local_rank))
# original
layers = [3, 4, 6, 3]
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# norm_layer = nn.InstanceNorm2d
self._norm_layer = norm_layer
self.dilation = 1
block = BasicBlock
replace_stride_with_dilation = [False, False, False]
self.inplanes = 64
self.groups = 1
self.base_width = 64
self.conv1 = nn.Conv2d(in_ch, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False,
padding_mode='reflect')
self.bn1 = norm_layer(self.inplanes, track_running_stats=False, affine=True)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
# if in_ch != 3: # Number of input channels
# self.conv1 = nn.Conv2d(in_ch, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# else:
# self.conv1 = resnet.conv1 # H/2
# self.bn1 = resnet.bn1
# self.relu = resnet.relu
# self.maxpool = resnet.maxpool # H/4
#
# # encoder
# self.layer1 = resnet.layer1 # H/4
# self.layer2 = resnet.layer2 # H/8
# self.layer3 = resnet.layer3 # H/16
# decoder
self.upconv3 = upconv(filters[2], 128, 3, 2)
self.iconv3 = conv(filters[1] + 128, 128, 3, 1)
self.upconv2 = upconv(128, 64, 3, 2)
self.iconv2 = conv(filters[0] + 64, out_ch, 3, 1)
# fine-level conv
self.out_conv = nn.Conv2d(out_ch, out_ch, 1, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion, track_running_stats=False, affine=True),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def skipconnect(self, x1, x2):
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return x
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.maxpool(x)
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x = self.upconv3(x3)
x = self.skipconnect(x2, x)
x = self.iconv3(x)
x = self.upconv2(x)
x = self.skipconnect(x1, x)
x = self.iconv2(x)
x_out = self.out_conv(x)
return x_out
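# Minimal smoke-test sketch (illustrative only; `args` is unused in the active code path,
# so None is passed here purely for convenience).
if __name__ == '__main__':
    net = ResUNet(args=None, in_ch=8, out_ch=32)
    dummy = torch.randn(1, 8, 128, 160)
    print(net(dummy).shape)  # expected: torch.Size([1, 32, 32, 40]), i.e. 1/4 of the input resolution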
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def init_weights(self, init_type='normal', gain=0.02):
'''
initialize network's weights
init_type: normal | xavier | kaiming | orthogonal
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
'''
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=gain)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, gain)
nn.init.constant_(m.bias.data, 0.0)
self.apply(init_func)
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find(
'Linear') == 0) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, "Unsupported initialization: {}".format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
self.slide_winsize = in_channels * kernel_size * kernel_size
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
# mask is not updated
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input, mask):
# http://masc.cs.gmu.edu/wiki/partialconv
# C(X) = W^T * X + b, C(0) = b, D(M) = 1 * M + 0 = sum(M)
# W^T * (M .* X) / sum(M) + b = [C(M .* X) - C(0)] / D(M) + C(0)
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
output_pre = ((output - output_bias) * self.slide_winsize) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes, 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
return output, new_mask
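# Illustrative note (not in the original): for a 3x3 PartialConv with a single input channel,
# slide_winsize = 9; a window containing only 4 valid (mask == 1) pixels has mask_sum = 4, so the
# bias-free response is rescaled by 9/4 before the bias is re-added, while windows with no valid
# input are zeroed and marked invalid in new_mask.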
class PCBActiv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, no_tracking_stats=False, sample='none-3', activ='relu',
conv_bias=False):
super().__init__()
if sample == 'down-5':
self.conv = PartialConv(in_ch, out_ch, 5, 2, 2, bias=conv_bias)
elif sample == 'down-7':
self.conv = PartialConv(in_ch, out_ch, 7, 2, 3, bias=conv_bias)
elif sample == 'down-3':
self.conv = PartialConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias)
else:
self.conv = PartialConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias)
if bn:
if no_tracking_stats:
self.bn = nn.BatchNorm2d(out_ch, track_running_stats=False, affine=True)
else:
self.bn = nn.BatchNorm2d(out_ch)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, input_mask):
h, h_mask = self.conv(input, input_mask)
if hasattr(self, 'bn'):
h = self.bn(h)
if hasattr(self, 'activation'):
h = self.activation(h)
return h, h_mask
class Inpaint_Depth_Net(nn.Module):
def __init__(self, layer_size=7, upsampling_mode='nearest'):
super().__init__()
in_channels = 4
out_channels = 1
self.freeze_enc_bn = False
self.upsampling_mode = upsampling_mode
self.layer_size = layer_size
self.enc_1 = PCBActiv(in_channels, 64, bn=False, sample='down-7', conv_bias=True)
self.enc_2 = PCBActiv(64, 128, sample='down-5', conv_bias=True)
self.enc_3 = PCBActiv(128, 256, sample='down-5')
self.enc_4 = PCBActiv(256, 512, sample='down-3')
for i in range(4, self.layer_size):
name = 'enc_{:d}'.format(i + 1)
setattr(self, name, PCBActiv(512, 512, sample='down-3'))
for i in range(4, self.layer_size):
name = 'dec_{:d}'.format(i + 1)
setattr(self, name, PCBActiv(512 + 512, 512, activ='leaky'))
self.dec_4 = PCBActiv(512 + 256, 256, activ='leaky')
self.dec_3 = PCBActiv(256 + 128, 128, activ='leaky')
self.dec_2 = PCBActiv(128 + 64, 64, activ='leaky')
self.dec_1 = PCBActiv(64 + in_channels, out_channels,
bn=False, activ=None, conv_bias=True)
def add_border(self, input, mask_flag, PCONV=True):
with torch.no_grad():
h = input.shape[-2]
w = input.shape[-1]
require_len_unit = 2 ** self.layer_size
residual_h = int(np.ceil(h / float(require_len_unit)) * require_len_unit - h) # + 2*require_len_unit
residual_w = int(np.ceil(w / float(require_len_unit)) * require_len_unit - w) # + 2*require_len_unit
enlarge_input = torch.zeros((input.shape[0], input.shape[1], h + residual_h, w + residual_w)).to(input.device)
if mask_flag:
if PCONV is False:
enlarge_input += 1.0
enlarge_input = enlarge_input.clamp(0.0, 1.0)
else:
enlarge_input[:, 2, ...] = 0.0
anchor_h = residual_h//2
anchor_w = residual_w//2
enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input
return enlarge_input, [anchor_h, anchor_h+h, anchor_w, anchor_w+w]
def forward_3P(self, mask, context, depth, edge, unit_length=128, cuda=None):
input = torch.cat((depth, edge, context, mask), dim=1)
n, c, h, w = input.shape
residual_h = int(np.ceil(h / float(unit_length)) * unit_length - h)
residual_w = int(np.ceil(w / float(unit_length)) * unit_length - w)
anchor_h = residual_h//2
anchor_w = residual_w//2
enlarge_input = torch.zeros((n, c, h + residual_h, w + residual_w)).to(cuda)
enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input
# enlarge_input[:, 3] = 1. - enlarge_input[:, 3]
depth_output = self.forward(enlarge_input)
depth_output = depth_output[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w]
# import pdb; pdb.set_trace()
return depth_output
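# Note on the padding above (illustrative arithmetic; not part of the original file): with
# unit_length=128 and an input height h=300, residual_h = ceil(300/128)*128 - 300 = 84, so the
# input is centered at anchor_h=42 inside a 384-row canvas and the output is cropped back to
# rows [42, 342), keeping it aligned with the input.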
def forward(self, input_feat, refine_border=False, sample=False, PCONV=True):
input = input_feat
input_mask = (input_feat[:, -2:-1] + input_feat[:, -1:]).clamp(0, 1).repeat(1, input.shape[1], 1, 1)
vis_input = input.cpu().data.numpy()
vis_input_mask = input_mask.cpu().data.numpy()
H, W = input.shape[-2:]
if refine_border is True:
input, anchor = self.add_border(input, mask_flag=False)
input_mask, anchor = self.add_border(input_mask, mask_flag=True, PCONV=PCONV)
h_dict = {} # for the output of enc_N
h_mask_dict = {} # for the output of enc_N
h_dict['h_0'], h_mask_dict['h_0'] = input, input_mask
h_key_prev = 'h_0'
for i in range(1, self.layer_size + 1):
l_key = 'enc_{:d}'.format(i)
h_key = 'h_{:d}'.format(i)
h_dict[h_key], h_mask_dict[h_key] = getattr(self, l_key)(
h_dict[h_key_prev], h_mask_dict[h_key_prev])
h_key_prev = h_key
h_key = 'h_{:d}'.format(self.layer_size)
h, h_mask = h_dict[h_key], h_mask_dict[h_key]
for i in range(self.layer_size, 0, -1):
enc_h_key = 'h_{:d}'.format(i - 1)
dec_l_key = 'dec_{:d}'.format(i)
h = F.interpolate(h, scale_factor=2, mode=self.upsampling_mode)
h_mask = F.interpolate(h_mask, scale_factor=2, mode='nearest')
h = torch.cat([h, h_dict[enc_h_key]], dim=1)
h_mask = torch.cat([h_mask, h_mask_dict[enc_h_key]], dim=1)
h, h_mask = getattr(self, dec_l_key)(h, h_mask)
output = h
if refine_border is True:
h_mask = h_mask[..., anchor[0]:anchor[1], anchor[2]:anchor[3]]
output = output[..., anchor[0]:anchor[1], anchor[2]:anchor[3]]
return output
class Inpaint_Edge_Net(BaseNetwork):
def __init__(self, residual_blocks=8, init_weights=True):
super(Inpaint_Edge_Net, self).__init__()
in_channels = 7
out_channels = 1
self.encoder = []
# 0
self.encoder_0 = nn.Sequential(
nn.ReflectionPad2d(3),
spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=7, padding=0), True),
nn.InstanceNorm2d(64, track_running_stats=False),
nn.ReLU(True))
# 1
self.encoder_1 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1), True),
nn.InstanceNorm2d(128, track_running_stats=False),
nn.ReLU(True))
# 2
self.encoder_2 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1), True),
nn.InstanceNorm2d(256, track_running_stats=False),
nn.ReLU(True))
# 3
blocks = []
for _ in range(residual_blocks):
block = ResnetBlock(256, 2)
blocks.append(block)
self.middle = nn.Sequential(*blocks)
# + 3
self.decoder_0 = nn.Sequential(
spectral_norm(nn.ConvTranspose2d(in_channels=256+256, out_channels=128, kernel_size=4, stride=2, padding=1), True),
nn.InstanceNorm2d(128, track_running_stats=False),
nn.ReLU(True))
# + 2
self.decoder_1 = nn.Sequential(
spectral_norm(nn.ConvTranspose2d(in_channels=128+128, out_channels=64, kernel_size=4, stride=2, padding=1), True),
nn.InstanceNorm2d(64, track_running_stats=False),
nn.ReLU(True))
# + 1
self.decoder_2 = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(in_channels=64+64, out_channels=out_channels, kernel_size=7, padding=0),
)
if init_weights:
self.init_weights()
def add_border(self, input, channel_pad_1=None):
h = input.shape[-2]
w = input.shape[-1]
require_len_unit = 16
residual_h = int(np.ceil(h / float(require_len_unit)) * require_len_unit - h) # + 2*require_len_unit
residual_w = int(np.ceil(w / float(require_len_unit)) * require_len_unit - w) # + 2*require_len_unit
enlarge_input = torch.zeros((input.shape[0], input.shape[1], h + residual_h, w + residual_w)).to(input.device)
if channel_pad_1 is not None:
for channel in channel_pad_1:
enlarge_input[:, channel] = 1
anchor_h = residual_h//2
anchor_w = residual_w//2
enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input
return enlarge_input, [anchor_h, anchor_h+h, anchor_w, anchor_w+w]
def forward_3P(self, mask, context, rgb, disp, edge, unit_length=128, cuda=None):
input = torch.cat((rgb, disp/disp.max(), edge, context, mask), dim=1)
n, c, h, w = input.shape
residual_h = int(np.ceil(h / float(unit_length)) * unit_length - h)
residual_w = int(np.ceil(w / float(unit_length)) * unit_length - w)
anchor_h = residual_h//2
anchor_w = residual_w//2
enlarge_input = torch.zeros((n, c, h + residual_h, w + residual_w)).to(cuda)
enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input
edge_output = self.forward(enlarge_input)
edge_output = edge_output[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w]
return edge_output
def forward(self, x, refine_border=False):
if refine_border:
x, anchor = self.add_border(x, [5])
x1 = self.encoder_0(x)
x2 = self.encoder_1(x1)
x3 = self.encoder_2(x2)
x4 = self.middle(x3)
x5 = self.decoder_0(torch.cat((x4, x3), dim=1))
x6 = self.decoder_1(torch.cat((x5, x2), dim=1))
x7 = self.decoder_2(torch.cat((x6, x1), dim=1))
x = torch.sigmoid(x7)
if refine_border:
x = x[..., anchor[0]:anchor[1], anchor[2]:anchor[3]]
return x
class Inpaint_Color_Net(nn.Module):
def __init__(self, layer_size=7, upsampling_mode='nearest', add_hole_mask=False, add_two_layer=False, add_border=False):
super().__init__()
self.freeze_enc_bn = False
self.upsampling_mode = upsampling_mode
self.layer_size = layer_size
in_channels = 6
self.enc_1 = PCBActiv(in_channels, 64, bn=False, sample='down-7')
self.enc_2 = PCBActiv(64, 128, sample='down-5')
self.enc_3 = PCBActiv(128, 256, sample='down-5')
self.enc_4 = PCBActiv(256, 512, sample='down-3')
self.enc_5 = PCBActiv(512, 512, sample='down-3')
self.enc_6 = PCBActiv(512, 512, sample='down-3')
self.enc_7 = PCBActiv(512, 512, sample='down-3')
self.dec_7 = PCBActiv(512+512, 512, activ='leaky')
self.dec_6 = PCBActiv(512+512, 512, activ='leaky')
self.dec_5A = PCBActiv(512 + 512, 512, activ='leaky')
self.dec_4A = PCBActiv(512 + 256, 256, activ='leaky')
self.dec_3A = PCBActiv(256 + 128, 128, activ='leaky')
self.dec_2A = PCBActiv(128 + 64, 64, activ='leaky')
self.dec_1A = PCBActiv(64 + in_channels, 3, bn=False, activ=None, conv_bias=True)
'''
self.dec_5B = PCBActiv(512 + 512, 512, activ='leaky')
self.dec_4B = PCBActiv(512 + 256, 256, activ='leaky')
self.dec_3B = PCBActiv(256 + 128, 128, activ='leaky')
self.dec_2B = PCBActiv(128 + 64, 64, activ='leaky')
self.dec_1B = PCBActiv(64 + 4, 1, bn=False, activ=None, conv_bias=True)
'''
def cat(self, A, B):
return torch.cat((A, B), dim=1)
def upsample(self, feat, mask):
feat = F.interpolate(feat, scale_factor=2, mode=self.upsampling_mode)
mask = F.interpolate(mask, scale_factor=2, mode='nearest')
return feat, mask
def forward_3P(self, mask, context, rgb, edge, unit_length=128, cuda=None):
input = torch.cat((rgb, edge, context, mask), dim=1)
n, c, h, w = input.shape
residual_h = int(np.ceil(h / float(unit_length)) * unit_length - h) # + 128
residual_w = int(np.ceil(w / float(unit_length)) * unit_length - w) # + 256
anchor_h = residual_h//2
anchor_w = residual_w//2
enlarge_input = torch.zeros((n, c, h + residual_h, w + residual_w)).to(cuda)
enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input
# enlarge_input[:, 3] = 1. - enlarge_input[:, 3]
enlarge_input = enlarge_input.to(cuda)
rgb_output = self.forward(enlarge_input)
rgb_output = rgb_output[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w]
return rgb_output
def forward(self, input, add_border=False):
input_mask = (input[:, -2:-1] + input[:, -1:]).clamp(0, 1)
H, W = input.shape[-2:]
f_0, h_0 = input, input_mask.repeat((1,input.shape[1],1,1))
f_1, h_1 = self.enc_1(f_0, h_0)
f_2, h_2 = self.enc_2(f_1, h_1)
f_3, h_3 = self.enc_3(f_2, h_2)
f_4, h_4 = self.enc_4(f_3, h_3)
f_5, h_5 = self.enc_5(f_4, h_4)
f_6, h_6 = self.enc_6(f_5, h_5)
f_7, h_7 = self.enc_7(f_6, h_6)
o_7, k_7 = self.upsample(f_7, h_7)
o_6, k_6 = self.dec_7(self.cat(o_7, f_6), self.cat(k_7, h_6))
o_6, k_6 = self.upsample(o_6, k_6)
o_5, k_5 = self.dec_6(self.cat(o_6, f_5), self.cat(k_6, h_5))
o_5, k_5 = self.upsample(o_5, k_5)
o_5A, k_5A = o_5, k_5
o_5B, k_5B = o_5, k_5
###############
o_4A, k_4A = self.dec_5A(self.cat(o_5A, f_4), self.cat(k_5A, h_4))
o_4A, k_4A = self.upsample(o_4A, k_4A)
o_3A, k_3A = self.dec_4A(self.cat(o_4A, f_3), self.cat(k_4A, h_3))
o_3A, k_3A = self.upsample(o_3A, k_3A)
o_2A, k_2A = self.dec_3A(self.cat(o_3A, f_2), self.cat(k_3A, h_2))
o_2A, k_2A = self.upsample(o_2A, k_2A)
o_1A, k_1A = self.dec_2A(self.cat(o_2A, f_1), self.cat(k_2A, h_1))
o_1A, k_1A = self.upsample(o_1A, k_1A)
o_0A, k_0A = self.dec_1A(self.cat(o_1A, f_0), self.cat(k_1A, h_0))
return torch.sigmoid(o_0A)
def train(self, mode=True):
"""
Override the default train() to freeze the BN parameters
"""
super().train(mode)
if self.freeze_enc_bn:
for name, module in self.named_modules():
if isinstance(module, nn.BatchNorm2d) and 'enc' in name:
module.eval()
class Discriminator(BaseNetwork):
def __init__(self, use_sigmoid=True, use_spectral_norm=True, init_weights=True, in_channels=None):
super(Discriminator, self).__init__()
self.use_sigmoid = use_sigmoid
self.conv1 = self.features = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv2 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv3 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv4 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2, inplace=True),
)
self.conv5 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm),
)
if init_weights:
self.init_weights()
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
outputs = conv5
if self.use_sigmoid:
outputs = torch.sigmoid(conv5)
return outputs, [conv1, conv2, conv3, conv4, conv5]
class ResnetBlock(nn.Module):
def __init__(self, dim, dilation=1):
super(ResnetBlock, self).__init__()
self.conv_block = nn.Sequential(
nn.ReflectionPad2d(dilation),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation, bias=not True), True),
nn.InstanceNorm2d(dim, track_running_stats=False),
nn.LeakyReLU(negative_slope=0.2),
nn.ReflectionPad2d(1),
            spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1, bias=False), True),
nn.InstanceNorm2d(dim, track_running_stats=False),
)
def forward(self, x):
out = x + self.conv_block(x)
# Remove ReLU at the end of the residual block
# http://torch.ch/blog/2016/02/04/resnets.html
return out
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
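# --- Hedged usage sketch (not part of the original module) ---
# A minimal smoke test for the PatchGAN-style Discriminator defined above. The
# 3-channel input and 256x256 resolution are illustrative assumptions, and it
# relies on BaseNetwork.init_weights() being defined earlier in this module.
if __name__ == "__main__":
    import torch
    disc = Discriminator(use_sigmoid=True, use_spectral_norm=True, in_channels=3)
    dummy = torch.randn(1, 3, 256, 256)  # fake RGB batch
    score_map, feats = disc(dummy)
    # score_map is a per-patch realness map (roughly 1x1x30x30 for a 256x256 input);
    # feats are the intermediate activations, useful for feature-matching losses.
    print(score_map.shape, [f.shape for f in feats])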
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""(convolution => [GN] => PReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None, group=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
#if not group:
# group = out_channels//16
self.double_conv1 = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, padding_mode='reflect'),
nn.GroupNorm(mid_channels//32, mid_channels),
nn.PReLU()
)
self.double_conv2 = nn.Sequential(
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, padding_mode='reflect'),
nn.GroupNorm(out_channels//32, out_channels),
nn.PReLU()
)
def forward(self, x):
x1 = self.double_conv1(x)
x2 = self.double_conv2(x1)
return x1, x2
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels, group=None):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels, group=group)  # pass group by keyword so it is not taken as mid_channels
)
def forward(self, x):
return self.maxpool_conv(x)
class ConcatDoubleConv(nn.Module):
"""(convolution => [GN] => PReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None, group=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
#if not group:
# group = out_channels//16
self.double_conv1 = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, padding_mode='reflect'),
nn.GroupNorm(mid_channels//32, mid_channels),
nn.PReLU()
)
self.double_conv2 = nn.Sequential(
nn.Conv2d(mid_channels*2, out_channels, kernel_size=3, padding=1, padding_mode='reflect'),
nn.GroupNorm(out_channels//32, out_channels),
nn.PReLU()
)
def forward(self, x, xc1):
x1 = self.double_conv1(x)
x2 = self.double_conv2(torch.cat([xc1, x1], dim=1))
return x2
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, mid_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = ConcatDoubleConv(in_channels, out_channels, mid_channels)
else:
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = ConcatDoubleConv(in_channels, out_channels, mid_channels)
def forward(self, x, xc1, xc2):
x1 = self.up(x)
# input is CHW
diffY = xc1.size()[2] - x1.size()[2]
diffX = xc1.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2], mode='reflect')
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([xc1, x1], dim=1)
return self.conv(x, xc2)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, padding_mode='reflect')
#self.gn = nn.GroupNorm(1, out_channels)
self.act = nn.Sigmoid()
def forward(self, x, xc1):
x1 = self.up(x)
x2 = torch.cat([xc1, x1], dim=1)
#return self.act(self.gn(self.conv(x2)))
return self.act(self.conv(x2))
class ImgDecoder(nn.Module):
def __init__(self, args, in_ch, out_ch):
super(ImgDecoder, self).__init__()
self.firstconv = nn.Conv2d(in_ch, 32, kernel_size=7, stride=1, padding=3, bias=False) #256x256
self.firstprelu = nn.PReLU()
self.down1 = Down(32, 64) #128x128
self.down2 = Down(64, 128) #64x64
self.down3 = Down(128, 256) #32x32
self.down4 = Down(256, 512) #16x16
self.conv5 = DoubleConv(512, 512) #16x16
self.up1 = Up(1024, 256, 512)
self.up2 = Up(512, 128, 256)
self.up3 = Up(256, 64, 128)
self.up4 = Up(128, 32, 64)
self.outc = OutConv(64, out_ch)
self.alpha = nn.Parameter(torch.tensor(1.))
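    # The helper below computes per-pixel blending weights for the two warped
    # frames: each frame is weighted by its temporal proximity (1 - time vs. time),
    # a soft visibility term exp(alpha * disparity) that favours nearer content,
    # and its alpha mask; the two weights are then normalised to sum to one.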
def compute_weight_for_two_frame_blending(self, time, disp1, disp2, alpha0, alpha1):
weight1 = (1 - time) * torch.exp(self.alpha*disp1) * alpha0
weight2 = time * torch.exp(self.alpha*disp2) * alpha1
sum_weight = torch.clamp(weight1 + weight2, min=1e-6)
out_weight1 = weight1 / sum_weight
out_weight2 = weight2 / sum_weight
return out_weight1, out_weight2
def forward(self, x0, x1, time):
disp0 = x0[:, [-1]]
disp1 = x1[:, [-1]]
alpha0 = x0[:, [3]]
alpha1 = x1[:, [3]]
w0, w1 = self.compute_weight_for_two_frame_blending(time, disp0, disp1, alpha0, alpha1)
x = w0 * x0 + w1 * x1
x0 = self.firstprelu(self.firstconv(x))
x20, x21 = self.down1(x0)
x30, x31 = self.down2(x21)
x40, x41 = self.down3(x31)
x50, x51 = self.down4(x41)
x60, x61 = self.conv5(x51)
xt1 = self.up1(x61, x51, x50)
xt2 = self.up2(xt1, x41, x40)
xt3 = self.up3(xt2, x31, x30)
xt4 = self.up4(xt3, x21, x20)
target_img = self.outc(xt4, x0)
return target_img |
"""Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import cv2
import argparse
from third_party.DPT.util import io
from torchvision.transforms import Compose
from third_party.DPT.dpt.models import DPTDepthModel
from third_party.DPT.dpt.midas_net import MidasNet_large
from third_party.DPT.dpt.transforms import Resize, NormalizeImage, PrepareForNet
def run_dpt(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True,
absolute_depth=False, kitti_crop=False):
"""Run MonoDepthNN to compute depth maps.
Args:
input_path (str): path to input folder
output_path (str): path to output folder
model_path (str): path to saved model
"""
# select device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load network
if model_type == "dpt_large": # DPT-Large
net_w = net_h = 384
model = DPTDepthModel(
path=model_path,
backbone="vitl16_384",
non_negative=True,
enable_attention_hooks=False,
)
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid": # DPT-Hybrid
net_w = net_h = 384
model = DPTDepthModel(
path=model_path,
backbone="vitb_rn50_384",
non_negative=True,
enable_attention_hooks=False,
)
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid_kitti":
net_w = 1216
net_h = 352
model = DPTDepthModel(
path=model_path,
scale=0.00006016,
shift=0.00579,
invert=True,
backbone="vitb_rn50_384",
non_negative=True,
enable_attention_hooks=False,
)
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid_nyu":
net_w = 640
net_h = 480
model = DPTDepthModel(
path=model_path,
scale=0.000305,
shift=0.1378,
invert=True,
backbone="vitb_rn50_384",
non_negative=True,
enable_attention_hooks=False,
)
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "midas_v21": # Convolutional model
net_w = net_h = 384
model = MidasNet_large(model_path, non_negative=True)
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
else:
assert (
False
), f"model_type '{model_type}' not implemented, use: " \
f"--model_type [dpt_large|dpt_hybrid|dpt_hybrid_kitti|dpt_hybrid_nyu|midas_v21]"
transform = Compose(
[
Resize(
net_w,
net_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method="minimal",
image_interpolation_method=cv2.INTER_CUBIC,
),
normalization,
PrepareForNet(),
]
)
model.eval()
if optimize == True and device == torch.device("cuda"):
model = model.to(memory_format=torch.channels_last)
model = model.half()
model.to(device)
# get input
img_names = glob.glob(os.path.join(input_path, "*.png")) + glob.glob(os.path.join(input_path, "*.jpg"))
num_images = len(img_names)
# create output folder
os.makedirs(output_path, exist_ok=True)
print('=========================computing DPT depth maps...=========================')
for ind, img_name in enumerate(img_names):
if os.path.isdir(img_name):
continue
print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))
# input
img = io.read_image(img_name)
if kitti_crop is True:
height, width, _ = img.shape
top = height - 352
left = (width - 1216) // 2
img = img[top: top + 352, left: left + 1216, :]
img_input = transform({"image": img})["image"]
# compute
with torch.no_grad():
sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
if optimize == True and device == torch.device("cuda"):
sample = sample.to(memory_format=torch.channels_last)
sample = sample.half()
prediction = model.forward(sample)
prediction = (
torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False,
)
.squeeze()
.cpu()
.numpy()
)
if model_type == "dpt_hybrid_kitti":
prediction *= 256
if model_type == "dpt_hybrid_nyu":
prediction *= 1000.0
filename = os.path.join(
output_path, os.path.splitext(os.path.basename(img_name))[0]
)
io.write_depth(filename, prediction, bits=2, absolute_depth=absolute_depth)
print("finished")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input_path", default="input", help="folder with input images"
)
parser.add_argument(
"-o",
"--output_path",
default="output_monodepth",
help="folder for output images",
)
parser.add_argument(
"-m", "--model_weights", default=None, help="path to model weights"
)
parser.add_argument(
"-t",
"--model_type",
default="dpt_hybrid",
help="model type [dpt_large|dpt_hybrid|midas_v21]",
)
parser.add_argument("--kitti_crop", dest="kitti_crop", action="store_true")
parser.add_argument("--absolute_depth", dest="absolute_depth", action="store_true")
parser.add_argument("--optimize", dest="optimize", action="store_true")
parser.add_argument("--no-optimize", dest="optimize", action="store_false")
parser.set_defaults(optimize=True)
parser.set_defaults(kitti_crop=False)
parser.set_defaults(absolute_depth=False)
args = parser.parse_args()
default_models = {
"midas_v21": "weights/midas_v21-f6b98070.pt",
"dpt_large": "weights/dpt_large-midas-2f21e586.pt",
"dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
"dpt_hybrid_kitti": "weights/dpt_hybrid_kitti-cb926ef4.pt",
"dpt_hybrid_nyu": "weights/dpt_hybrid_nyu-2ce69ec7.pt",
}
if args.model_weights is None:
args.model_weights = default_models[args.model_type]
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# compute depth maps
run_dpt(
args.input_path,
args.output_path,
args.model_weights,
args.model_type,
args.optimize,
args.absolute_depth,
args.kitti_crop
)
|
import setuptools
__version__ = '0.0.1dev1'
setuptools.setup(
name='dpt',
version=__version__,
packages=setuptools.find_packages(),
    # Only list dependencies that do not depend on CUDA directly.
install_requires=['timm']
)
|
import matplotlib.pyplot as plt
from dpt.vit import get_mean_attention_map
def visualize_attention(input, model, prediction, model_type):
input = (input + 1.0)/2.0
attn1 = model.pretrained.attention["attn_1"]
attn2 = model.pretrained.attention["attn_2"]
attn3 = model.pretrained.attention["attn_3"]
attn4 = model.pretrained.attention["attn_4"]
plt.subplot(3,4,1), plt.imshow(input.squeeze().permute(1,2,0)), plt.title("Input", fontsize=8), plt.axis("off")
plt.subplot(3,4,2), plt.imshow(prediction), plt.set_cmap("inferno"), plt.title("Prediction", fontsize=8), plt.axis("off")
if model_type == "dpt_hybrid":
h = [3,6,9,12]
else:
h = [6,12,18,24]
# upper left
plt.subplot(345),
ax1 = plt.imshow(get_mean_attention_map(attn1, 1, input.shape))
plt.ylabel("Upper left corner", fontsize=8)
plt.title(f"Layer {h[0]}", fontsize=8)
gc = plt.gca()
gc.axes.xaxis.set_ticklabels([])
gc.axes.yaxis.set_ticklabels([])
gc.axes.xaxis.set_ticks([])
gc.axes.yaxis.set_ticks([])
plt.subplot(346),
plt.imshow(get_mean_attention_map(attn2, 1, input.shape))
plt.title(f"Layer {h[1]}", fontsize=8)
plt.axis("off"),
plt.subplot(347),
plt.imshow(get_mean_attention_map(attn3, 1, input.shape))
plt.title(f"Layer {h[2]}", fontsize=8)
plt.axis("off"),
plt.subplot(348),
plt.imshow(get_mean_attention_map(attn4, 1, input.shape))
plt.title(f"Layer {h[3]}", fontsize=8)
plt.axis("off"),
# lower right
plt.subplot(3,4,9), plt.imshow(get_mean_attention_map(attn1, -1, input.shape))
plt.ylabel("Lower right corner", fontsize=8)
gc = plt.gca()
gc.axes.xaxis.set_ticklabels([])
gc.axes.yaxis.set_ticklabels([])
gc.axes.xaxis.set_ticks([])
gc.axes.yaxis.set_ticks([])
plt.subplot(3,4,10), plt.imshow(get_mean_attention_map(attn2, -1, input.shape)), plt.axis("off")
plt.subplot(3,4,11), plt.imshow(get_mean_attention_map(attn3, -1, input.shape)), plt.axis("off")
plt.subplot(3,4,12), plt.imshow(get_mean_attention_map(attn4, -1, input.shape)), plt.axis("off")
plt.tight_layout()
plt.show()
|
"""Utils for monoDepth.
"""
import sys
import re
import numpy as np
import cv2
import torch
from PIL import Image
from .pallete import get_mask_pallete
def read_pfm(path):
"""Read pfm file.
Args:
path (str): path to file
Returns:
tuple: (data, scale)
"""
with open(path, "rb") as file:
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == "PF":
color = True
elif header.decode("ascii") == "Pf":
color = False
else:
raise Exception("Not a PFM file: " + path)
dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0:
# little-endian
endian = "<"
scale = -scale
else:
# big-endian
endian = ">"
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
        path (str): path to file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n" if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file)
def read_image(path):
"""Read image and output RGB image (0-1).
Args:
path (str): path to file
Returns:
array: RGB image (0-1)
"""
img = cv2.imread(path)
if img.ndim == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
return img
def resize_image(img):
"""Resize image and make it fit for network.
Args:
img (array): image
Returns:
tensor: data ready for network
"""
height_orig = img.shape[0]
width_orig = img.shape[1]
if width_orig > height_orig:
scale = width_orig / 384
else:
scale = height_orig / 384
height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
img_resized = (
torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
)
img_resized = img_resized.unsqueeze(0)
return img_resized
def resize_depth(depth, width, height):
"""Resize depth map and bring to CPU (numpy).
Args:
depth (tensor): depth
width (int): image width
height (int): image height
Returns:
array: processed depth
"""
depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
depth_resized = cv2.resize(
depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
)
return depth_resized
def write_depth(path, depth, bits=1, absolute_depth=False):
"""Write depth map to pfm and png file.
Args:
path (str): filepath without extension
depth (array): depth
"""
write_pfm(path + ".pfm", depth.astype(np.float32))
if absolute_depth:
out = depth
else:
depth_min = depth.min()
depth_max = depth.max()
max_val = (2 ** (8 * bits)) - 1
if depth_max - depth_min > np.finfo("float").eps:
out = max_val * (depth - depth_min) / (depth_max - depth_min)
else:
out = np.zeros(depth.shape, dtype=depth.dtype)
if bits == 1:
cv2.imwrite(path + ".png", out.astype("uint8"), [cv2.IMWRITE_PNG_COMPRESSION, 0])
elif bits == 2:
cv2.imwrite(path + ".png", out.astype("uint16"), [cv2.IMWRITE_PNG_COMPRESSION, 0])
return
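# --- Hedged usage sketch (not part of the original module) ---
# Minimal round trip through the helpers above: read an image, fabricate a depth
# map of the same resolution, and store it as .pfm plus a 16-bit .png. File names
# are illustrative only.
def _depth_io_example():
    img = read_image("example.jpg")  # HxWx3 RGB in [0, 1]
    depth = np.random.rand(img.shape[0], img.shape[1]).astype(np.float32)
    write_depth("example_depth", depth, bits=2)  # writes example_depth.pfm / .png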
def write_segm_img(path, image, labels, palette="detail", alpha=0.5):
"""Write depth map to pfm and png file.
Args:
path (str): filepath without extension
image (array): input image
labels (array): labeling of the image
"""
mask = get_mask_pallete(labels, "ade20k")
img = Image.fromarray(np.uint8(255*image)).convert("RGBA")
seg = mask.convert("RGBA")
out = Image.blend(img, seg, alpha)
out.save(path + ".png")
return
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: [email protected]
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from PIL import Image
def get_mask_pallete(npimg, dataset='detail'):
"""Get image color pallete for visualizing masks"""
    # recover the boundary/void label (pascal_voc class 21 -> 255)
if dataset == 'pascal_voc':
npimg[npimg==21] = 255
# put colormap
out_img = Image.fromarray(npimg.squeeze().astype('uint8'))
if dataset == 'ade20k':
out_img.putpalette(adepallete)
elif dataset == 'citys':
out_img.putpalette(citypallete)
elif dataset in ('detail', 'pascal_voc', 'pascal_aug'):
out_img.putpalette(vocpallete)
return out_img
def _get_voc_pallete(num_cls):
n = num_cls
pallete = [0]*(n*3)
for j in range(0,n):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))
pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))
i = i + 1
lab >>= 3
return pallete
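# The generated palette follows the standard PASCAL VOC colour map, e.g.
# _get_voc_pallete(4)[:12] == [0,0,0, 128,0,0, 0,128,0, 128,128,0]
# (background, then classes 1-3).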
vocpallete = _get_voc_pallete(256)
adepallete = [0,0,0,120,120,120,180,120,120,6,230,230,80,50,50,4,200,3,120,120,80,140,140,140,204,5,255,230,230,230,4,250,7,224,5,255,235,255,7,150,5,61,120,120,70,8,255,51,255,6,82,143,255,140,204,255,4,255,51,7,204,70,3,0,102,200,61,230,250,255,6,51,11,102,255,255,7,71,255,9,224,9,7,230,220,220,220,255,9,92,112,9,255,8,255,214,7,255,224,255,184,6,10,255,71,255,41,10,7,255,255,224,255,8,102,8,255,255,61,6,255,194,7,255,122,8,0,255,20,255,8,41,255,5,153,6,51,255,235,12,255,160,150,20,0,163,255,140,140,140,250,10,15,20,255,0,31,255,0,255,31,0,255,224,0,153,255,0,0,0,255,255,71,0,0,235,255,0,173,255,31,0,255,11,200,200,255,82,0,0,255,245,0,61,255,0,255,112,0,255,133,255,0,0,255,163,0,255,102,0,194,255,0,0,143,255,51,255,0,0,82,255,0,255,41,0,255,173,10,0,255,173,255,0,0,255,153,255,92,0,255,0,255,255,0,245,255,0,102,255,173,0,255,0,20,255,184,184,0,31,255,0,255,61,0,71,255,255,0,204,0,255,194,0,255,82,0,10,255,0,112,255,51,0,255,0,194,255,0,122,255,0,255,163,255,153,0,0,255,10,255,112,0,143,255,0,82,0,255,163,255,0,255,235,0,8,184,170,133,0,255,0,255,92,184,0,255,255,0,31,0,184,255,0,214,255,255,0,112,92,255,0,0,224,255,112,224,255,70,184,160,163,0,255,153,0,255,71,255,0,255,0,163,255,204,0,255,0,143,0,255,235,133,255,0,255,0,235,245,0,255,255,0,122,255,245,0,10,190,212,214,255,0,0,204,255,20,0,255,255,255,0,0,153,255,0,41,255,0,255,204,41,0,255,41,255,0,173,0,255,0,245,255,71,0,255,122,0,255,0,255,184,0,92,255,184,255,0,0,133,255,255,214,0,25,194,194,102,255,0,92,0,255]
citypallete = [
128,64,128,244,35,232,70,70,70,102,102,156,190,153,153,153,153,153,250,170,30,220,220,0,107,142,35,152,251,152,70,130,180,220,20,60,255,0,0,0,0,142,0,0,70,0,60,100,0,80,100,0,0,230,119,11,32,128,192,0,0,64,128,128,64,128,0,192,128,128,192,128,64,64,0,192,64,0,64,192,0,192,192,0,64,64,128,192,64,128,64,192,128,192,192,128,0,0,64,128,0,64,0,128,64,128,128,64,0,0,192,128,0,192,0,128,192,128,128,192,64,0,64,192,0,64,64,128,64,192,128,64,64,0,192,192,0,192,64,128,192,192,128,192,0,64,64,128,64,64,0,192,64,128,192,64,0,64,192,128,64,192,0,192,192,128,192,192,64,64,64,192,64,64,64,192,64,192,192,64,64,64,192,192,64,192,64,192,192,192,192,192,32,0,0,160,0,0,32,128,0,160,128,0,32,0,128,160,0,128,32,128,128,160,128,128,96,0,0,224,0,0,96,128,0,224,128,0,96,0,128,224,0,128,96,128,128,224,128,128,32,64,0,160,64,0,32,192,0,160,192,0,32,64,128,160,64,128,32,192,128,160,192,128,96,64,0,224,64,0,96,192,0,224,192,0,96,64,128,224,64,128,96,192,128,224,192,128,32,0,64,160,0,64,32,128,64,160,128,64,32,0,192,160,0,192,32,128,192,160,128,192,96,0,64,224,0,64,96,128,64,224,128,64,96,0,192,224,0,192,96,128,192,224,128,192,32,64,64,160,64,64,32,192,64,160,192,64,32,64,192,160,64,192,32,192,192,160,192,192,96,64,64,224,64,64,96,192,64,224,192,64,96,64,192,224,64,192,96,192,192,224,192,192,0,32,0,128,32,0,0,160,0,128,160,0,0,32,128,128,32,128,0,160,128,128,160,128,64,32,0,192,32,0,64,160,0,192,160,0,64,32,128,192,32,128,64,160,128,192,160,128,0,96,0,128,96,0,0,224,0,128,224,0,0,96,128,128,96,128,0,224,128,128,224,128,64,96,0,192,96,0,64,224,0,192,224,0,64,96,128,192,96,128,64,224,128,192,224,128,0,32,64,128,32,64,0,160,64,128,160,64,0,32,192,128,32,192,0,160,192,128,160,192,64,32,64,192,32,64,64,160,64,192,160,64,64,32,192,192,32,192,64,160,192,192,160,192,0,96,64,128,96,64,0,224,64,128,224,64,0,96,192,128,96,192,0,224,192,128,224,192,64,96,64,192,96,64,64,224,64,192,224,64,64,96,192,192,96,192,64,224,192,192,224,192,32,32,0,160,32,0,32,160,0,160,160,0,32,32,128,160,32,128,32,160,128,160,160,128,96,32,0,224,32,0,96,160,0,224,160,0,96,32,128,224,32,128,96,160,128,224,160,128,32,96,0,160,96,0,32,224,0,160,224,0,32,96,128,160,96,128,32,224,128,160,224,128,96,96,0,224,96,0,96,224,0,224,224,0,96,96,128,224,96,128,96,224,128,224,224,128,32,32,64,160,32,64,32,160,64,160,160,64,32,32,192,160,32,192,32,160,192,160,160,192,96,32,64,224,32,64,96,160,64,224,160,64,96,32,192,224,32,192,96,160,192,224,160,192,32,96,64,160,96,64,32,224,64,160,224,64,32,96,192,160,96,192,32,224,192,160,224,192,96,96,64,224,96,64,96,224,64,224,224,64,96,96,192,224,96,192,96,224,192,0,0,0]
|
import numpy as np
import cv2
import math
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size
"""
shape = list(sample["disparity"].shape)
if shape[0] >= size[0] and shape[1] >= size[1]:
return sample
scale = [0, 0]
scale[0] = size[0] / shape[0]
scale[1] = size[1] / shape[1]
scale = max(scale)
shape[0] = math.ceil(scale * shape[0])
shape[1] = math.ceil(scale * shape[1])
# resize
sample["image"] = cv2.resize(
sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
)
sample["disparity"] = cv2.resize(
sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
)
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
tuple(shape[::-1]),
interpolation=cv2.INTER_NEAREST,
)
sample["mask"] = sample["mask"].astype(bool)
return tuple(shape)
class Resize(object):
"""Resize sample to given size (width, height)."""
def __init__(
self,
width,
height,
resize_target=True,
keep_aspect_ratio=False,
ensure_multiple_of=1,
resize_method="lower_bound",
image_interpolation_method=cv2.INTER_AREA,
):
"""Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height is constrained to be multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
"""
self.__width = width
self.__height = height
self.__resize_target = resize_target
self.__keep_aspect_ratio = keep_aspect_ratio
self.__multiple_of = ensure_multiple_of
self.__resize_method = resize_method
self.__image_interpolation_method = image_interpolation_method
def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
if max_val is not None and y > max_val:
y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
if y < min_val:
y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
return y
def get_size(self, width, height):
# determine new height and width
scale_height = self.__height / height
scale_width = self.__width / width
if self.__keep_aspect_ratio:
if self.__resize_method == "lower_bound":
# scale such that output size is lower bound
if scale_width > scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
elif self.__resize_method == "upper_bound":
# scale such that output size is upper bound
if scale_width < scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
elif self.__resize_method == "minimal":
                # scale as little as possible
if abs(1 - scale_width) < abs(1 - scale_height):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
else:
raise ValueError(
f"resize_method {self.__resize_method} not implemented"
)
if self.__resize_method == "lower_bound":
new_height = self.constrain_to_multiple_of(
scale_height * height, min_val=self.__height
)
new_width = self.constrain_to_multiple_of(
scale_width * width, min_val=self.__width
)
elif self.__resize_method == "upper_bound":
new_height = self.constrain_to_multiple_of(
scale_height * height, max_val=self.__height
)
new_width = self.constrain_to_multiple_of(
scale_width * width, max_val=self.__width
)
elif self.__resize_method == "minimal":
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
else:
raise ValueError(f"resize_method {self.__resize_method} not implemented")
return (new_width, new_height)
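    # Worked example (illustrative): Resize(384, 384, keep_aspect_ratio=True,
    # ensure_multiple_of=32, resize_method="minimal") maps a 1920x1080 image to
    # (width, height) = (672, 384): the height is scaled down to 384 and the
    # scaled width (~683) is rounded to the nearest multiple of 32.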
def __call__(self, sample):
width, height = self.get_size(
sample["image"].shape[1], sample["image"].shape[0]
)
# resize sample
sample["image"] = cv2.resize(
sample["image"],
(width, height),
interpolation=self.__image_interpolation_method,
)
if self.__resize_target:
if "disparity" in sample:
sample["disparity"] = cv2.resize(
sample["disparity"],
(width, height),
interpolation=cv2.INTER_NEAREST,
)
if "depth" in sample:
sample["depth"] = cv2.resize(
sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
)
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
(width, height),
interpolation=cv2.INTER_NEAREST,
)
sample["mask"] = sample["mask"].astype(bool)
return sample
class NormalizeImage(object):
"""Normlize image by given mean and std."""
def __init__(self, mean, std):
self.__mean = mean
self.__std = std
def __call__(self, sample):
sample["image"] = (sample["image"] - self.__mean) / self.__std
return sample
class PrepareForNet(object):
"""Prepare sample for usage as network input."""
def __init__(self):
pass
def __call__(self, sample):
image = np.transpose(sample["image"], (2, 0, 1))
sample["image"] = np.ascontiguousarray(image).astype(np.float32)
if "mask" in sample:
sample["mask"] = sample["mask"].astype(np.float32)
sample["mask"] = np.ascontiguousarray(sample["mask"])
if "disparity" in sample:
disparity = sample["disparity"].astype(np.float32)
sample["disparity"] = np.ascontiguousarray(disparity)
if "depth" in sample:
depth = sample["depth"].astype(np.float32)
sample["depth"] = np.ascontiguousarray(depth)
return sample
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from .blocks import (
FeatureFusionBlock,
FeatureFusionBlock_custom,
Interpolate,
_make_encoder,
forward_vit,
)
def _make_fusion_block(features, use_bn):
return FeatureFusionBlock_custom(
features,
nn.ReLU(False),
deconv=False,
bn=use_bn,
expand=False,
align_corners=True,
)
class DPT(BaseModel):
def __init__(
self,
head,
features=256,
backbone="vitb_rn50_384",
readout="project",
channels_last=False,
use_bn=False,
enable_attention_hooks=False,
):
super(DPT, self).__init__()
self.channels_last = channels_last
hooks = {
"vitb_rn50_384": [0, 1, 8, 11],
"vitb16_384": [2, 5, 8, 11],
"vitl16_384": [5, 11, 17, 23],
}
# Instantiate backbone and reassemble blocks
self.pretrained, self.scratch = _make_encoder(
backbone,
features,
            False,  # use_pretrained: set to True to initialise the backbone from ImageNet weights (e.g. when training from scratch)
groups=1,
expand=False,
exportable=False,
hooks=hooks[backbone],
use_readout=readout,
enable_attention_hooks=enable_attention_hooks,
)
self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
self.scratch.output_conv = head
def forward(self, x):
if self.channels_last == True:
x.contiguous(memory_format=torch.channels_last)
layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
layer_1_rn = self.scratch.layer1_rn(layer_1)
layer_2_rn = self.scratch.layer2_rn(layer_2)
layer_3_rn = self.scratch.layer3_rn(layer_3)
layer_4_rn = self.scratch.layer4_rn(layer_4)
path_4 = self.scratch.refinenet4(layer_4_rn)
path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
out = self.scratch.output_conv(path_1)
return out
class DPTDepthModel(DPT):
def __init__(
self, path=None, non_negative=True, scale=1.0, shift=0.0, invert=False, **kwargs
):
features = kwargs["features"] if "features" in kwargs else 256
self.scale = scale
self.shift = shift
self.invert = invert
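        # When invert=True (as for the KITTI/NYU fine-tuned weights), forward()
        # converts the network's relative inverse-depth output to depth via
        # depth = 1 / max(scale * inv_depth + shift, 1e-8); otherwise the raw
        # inverse depth is returned unchanged.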
head = nn.Sequential(
nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(True),
nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
nn.ReLU(True) if non_negative else nn.Identity(),
nn.Identity(),
)
super().__init__(head, **kwargs)
if path is not None:
self.load(path)
def forward(self, x):
inv_depth = super().forward(x).squeeze(dim=1)
if self.invert:
depth = self.scale * inv_depth + self.shift
depth[depth < 1e-8] = 1e-8
depth = 1.0 / depth
return depth
else:
return inv_depth
class DPTSegmentationModel(DPT):
def __init__(self, num_classes, path=None, **kwargs):
features = kwargs["features"] if "features" in kwargs else 256
kwargs["use_bn"] = True
head = nn.Sequential(
nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(features),
nn.ReLU(True),
nn.Dropout(0.1, False),
nn.Conv2d(features, num_classes, kernel_size=1),
Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
)
super().__init__(head, **kwargs)
self.auxlayer = nn.Sequential(
nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(features),
nn.ReLU(True),
nn.Dropout(0.1, False),
nn.Conv2d(features, num_classes, kernel_size=1),
)
if path is not None:
self.load(path)
|
"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn
from .base_model import BaseModel
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
class MidasNet_large(BaseModel):
"""Network for monocular depth estimation."""
def __init__(self, path=None, features=256, non_negative=True):
"""Init.
Args:
path (str, optional): Path to saved model. Defaults to None.
features (int, optional): Number of features. Defaults to 256.
            Note: the encoder backbone is fixed to resnext101_wsl.
"""
print("Loading weights: ", path)
super(MidasNet_large, self).__init__()
use_pretrained = False if path is None else True
self.pretrained, self.scratch = _make_encoder(
backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained
)
self.scratch.refinenet4 = FeatureFusionBlock(features)
self.scratch.refinenet3 = FeatureFusionBlock(features)
self.scratch.refinenet2 = FeatureFusionBlock(features)
self.scratch.refinenet1 = FeatureFusionBlock(features)
self.scratch.output_conv = nn.Sequential(
nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
Interpolate(scale_factor=2, mode="bilinear"),
nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(True),
nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
nn.ReLU(True) if non_negative else nn.Identity(),
)
if path:
self.load(path)
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input data (image)
Returns:
tensor: depth
"""
layer_1 = self.pretrained.layer1(x)
layer_2 = self.pretrained.layer2(layer_1)
layer_3 = self.pretrained.layer3(layer_2)
layer_4 = self.pretrained.layer4(layer_3)
layer_1_rn = self.scratch.layer1_rn(layer_1)
layer_2_rn = self.scratch.layer2_rn(layer_2)
layer_3_rn = self.scratch.layer3_rn(layer_3)
layer_4_rn = self.scratch.layer4_rn(layer_4)
path_4 = self.scratch.refinenet4(layer_4_rn)
path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
out = self.scratch.output_conv(path_1)
return torch.squeeze(out, dim=1)
|
import torch
class BaseModel(torch.nn.Module):
def load(self, path):
"""Load model from file.
Args:
path (str): file path
"""
parameters = torch.load(path, map_location=torch.device("cpu"))
if "optimizer" in parameters:
parameters = parameters["model"]
self.load_state_dict(parameters)
|
import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F
activations = {}
def get_activation(name):
def hook(model, input, output):
activations[name] = output
return hook
attention = {}
def get_attention(name):
def hook(module, input, output):
x = input[0]
B, N, C = x.shape
qkv = (
module.qkv(x)
.reshape(B, N, 3, module.num_heads, C // module.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * module.scale
attn = attn.softmax(dim=-1) # [:,:,1,1:]
attention[name] = attn
return hook
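# The hook above re-derives the attention matrix from the block's qkv projection,
# since timm's attention modules do not expose the softmax-normalised attention
# weights directly; results are cached in the module-level `attention` dict.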
def get_mean_attention_map(attn, token, shape):
attn = attn[:, :, token, 1:]
attn = attn.unflatten(2, torch.Size([shape[2] // 16, shape[3] // 16])).float()
attn = torch.nn.functional.interpolate(
attn, size=shape[2:], mode="bicubic", align_corners=False
).squeeze(0)
all_attn = torch.mean(attn, 0)
return all_attn
class Slice(nn.Module):
def __init__(self, start_index=1):
super(Slice, self).__init__()
self.start_index = start_index
def forward(self, x):
return x[:, self.start_index :]
class AddReadout(nn.Module):
def __init__(self, start_index=1):
super(AddReadout, self).__init__()
self.start_index = start_index
def forward(self, x):
if self.start_index == 2:
readout = (x[:, 0] + x[:, 1]) / 2
else:
readout = x[:, 0]
return x[:, self.start_index :] + readout.unsqueeze(1)
class ProjectReadout(nn.Module):
def __init__(self, in_features, start_index=1):
super(ProjectReadout, self).__init__()
self.start_index = start_index
self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
def forward(self, x):
readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
features = torch.cat((x[:, self.start_index :], readout), -1)
return self.project(features)
class Transpose(nn.Module):
def __init__(self, dim0, dim1):
super(Transpose, self).__init__()
self.dim0 = dim0
self.dim1 = dim1
def forward(self, x):
x = x.transpose(self.dim0, self.dim1)
return x
def forward_vit(pretrained, x):
b, c, h, w = x.shape
glob = pretrained.model.forward_flex(x)
layer_1 = pretrained.activations["1"]
layer_2 = pretrained.activations["2"]
layer_3 = pretrained.activations["3"]
layer_4 = pretrained.activations["4"]
layer_1 = pretrained.act_postprocess1[0:2](layer_1)
layer_2 = pretrained.act_postprocess2[0:2](layer_2)
layer_3 = pretrained.act_postprocess3[0:2](layer_3)
layer_4 = pretrained.act_postprocess4[0:2](layer_4)
unflatten = nn.Sequential(
nn.Unflatten(
2,
torch.Size(
[
h // pretrained.model.patch_size[1],
w // pretrained.model.patch_size[0],
]
),
)
)
if layer_1.ndim == 3:
layer_1 = unflatten(layer_1)
if layer_2.ndim == 3:
layer_2 = unflatten(layer_2)
if layer_3.ndim == 3:
layer_3 = unflatten(layer_3)
if layer_4.ndim == 3:
layer_4 = unflatten(layer_4)
layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
return layer_1, layer_2, layer_3, layer_4
def _resize_pos_embed(self, posemb, gs_h, gs_w):
posemb_tok, posemb_grid = (
posemb[:, : self.start_index],
posemb[0, self.start_index :],
)
gs_old = int(math.sqrt(len(posemb_grid)))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
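# Illustrative example: for a 480x640 input with 16x16 patches, forward_flex
# (below) calls _resize_pos_embed with a 30x40 token grid, so the 24x24 position
# embedding grid learned at 384x384 is bilinearly resized to 30x40 before use.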
def forward_flex(self, x):
b, c, h, w = x.shape
pos_embed = self._resize_pos_embed(
self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
)
B = x.shape[0]
if hasattr(self.patch_embed, "backbone"):
x = self.patch_embed.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
if getattr(self, "dist_token", None) is not None:
cls_tokens = self.cls_token.expand(
B, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
else:
cls_tokens = self.cls_token.expand(
B, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def get_readout_oper(vit_features, features, use_readout, start_index=1):
if use_readout == "ignore":
readout_oper = [Slice(start_index)] * len(features)
elif use_readout == "add":
readout_oper = [AddReadout(start_index)] * len(features)
elif use_readout == "project":
readout_oper = [
ProjectReadout(vit_features, start_index) for out_feat in features
]
else:
assert (
False
), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
return readout_oper
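# Readout handling: "ignore" drops the class token, "add" adds it to every patch
# token, and "project" concatenates it with each patch token and projects back to
# the original feature width (see Slice / AddReadout / ProjectReadout above).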
def _make_vit_b16_backbone(
model,
features=[96, 192, 384, 768],
size=[384, 384],
hooks=[2, 5, 8, 11],
vit_features=768,
use_readout="ignore",
start_index=1,
enable_attention_hooks=False,
):
pretrained = nn.Module()
pretrained.model = model
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
pretrained.activations = activations
if enable_attention_hooks:
pretrained.model.blocks[hooks[0]].attn.register_forward_hook(
get_attention("attn_1")
)
pretrained.model.blocks[hooks[1]].attn.register_forward_hook(
get_attention("attn_2")
)
pretrained.model.blocks[hooks[2]].attn.register_forward_hook(
get_attention("attn_3")
)
pretrained.model.blocks[hooks[3]].attn.register_forward_hook(
get_attention("attn_4")
)
pretrained.attention = attention
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
# 32, 48, 136, 384
pretrained.act_postprocess1 = nn.Sequential(
readout_oper[0],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[0],
kernel_size=1,
stride=1,
padding=0,
),
nn.ConvTranspose2d(
in_channels=features[0],
out_channels=features[0],
kernel_size=4,
stride=4,
padding=0,
bias=True,
dilation=1,
groups=1,
),
)
pretrained.act_postprocess2 = nn.Sequential(
readout_oper[1],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[1],
kernel_size=1,
stride=1,
padding=0,
),
nn.ConvTranspose2d(
in_channels=features[1],
out_channels=features[1],
kernel_size=2,
stride=2,
padding=0,
bias=True,
dilation=1,
groups=1,
),
)
pretrained.act_postprocess3 = nn.Sequential(
readout_oper[2],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[2],
kernel_size=1,
stride=1,
padding=0,
),
)
pretrained.act_postprocess4 = nn.Sequential(
readout_oper[3],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[3],
kernel_size=1,
stride=1,
padding=0,
),
nn.Conv2d(
in_channels=features[3],
out_channels=features[3],
kernel_size=3,
stride=2,
padding=1,
),
)
pretrained.model.start_index = start_index
pretrained.model.patch_size = [16, 16]
# We inject this function into the VisionTransformer instances so that
# we can use it with interpolated position embeddings without modifying the library source.
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
pretrained.model._resize_pos_embed = types.MethodType(
_resize_pos_embed, pretrained.model
)
return pretrained
def _make_vit_b_rn50_backbone(
model,
features=[256, 512, 768, 768],
size=[384, 384],
hooks=[0, 1, 8, 11],
vit_features=768,
use_vit_only=False,
use_readout="ignore",
start_index=1,
enable_attention_hooks=False,
):
pretrained = nn.Module()
pretrained.model = model
if use_vit_only == True:
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
else:
pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
get_activation("1")
)
pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
get_activation("2")
)
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
if enable_attention_hooks:
pretrained.model.blocks[2].attn.register_forward_hook(get_attention("attn_1"))
pretrained.model.blocks[5].attn.register_forward_hook(get_attention("attn_2"))
pretrained.model.blocks[8].attn.register_forward_hook(get_attention("attn_3"))
pretrained.model.blocks[11].attn.register_forward_hook(get_attention("attn_4"))
pretrained.attention = attention
pretrained.activations = activations
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
if use_vit_only == True:
pretrained.act_postprocess1 = nn.Sequential(
readout_oper[0],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[0],
kernel_size=1,
stride=1,
padding=0,
),
nn.ConvTranspose2d(
in_channels=features[0],
out_channels=features[0],
kernel_size=4,
stride=4,
padding=0,
bias=True,
dilation=1,
groups=1,
),
)
pretrained.act_postprocess2 = nn.Sequential(
readout_oper[1],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[1],
kernel_size=1,
stride=1,
padding=0,
),
nn.ConvTranspose2d(
in_channels=features[1],
out_channels=features[1],
kernel_size=2,
stride=2,
padding=0,
bias=True,
dilation=1,
groups=1,
),
)
else:
pretrained.act_postprocess1 = nn.Sequential(
nn.Identity(), nn.Identity(), nn.Identity()
)
pretrained.act_postprocess2 = nn.Sequential(
nn.Identity(), nn.Identity(), nn.Identity()
)
pretrained.act_postprocess3 = nn.Sequential(
readout_oper[2],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[2],
kernel_size=1,
stride=1,
padding=0,
),
)
pretrained.act_postprocess4 = nn.Sequential(
readout_oper[3],
Transpose(1, 2),
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
nn.Conv2d(
in_channels=vit_features,
out_channels=features[3],
kernel_size=1,
stride=1,
padding=0,
),
nn.Conv2d(
in_channels=features[3],
out_channels=features[3],
kernel_size=3,
stride=2,
padding=1,
),
)
pretrained.model.start_index = start_index
pretrained.model.patch_size = [16, 16]
# We inject this function into the VisionTransformer instances so that
# we can use it with interpolated position embeddings without modifying the library source.
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
# We inject this function into the VisionTransformer instances so that
# we can use it with interpolated position embeddings without modifying the library source.
pretrained.model._resize_pos_embed = types.MethodType(
_resize_pos_embed, pretrained.model
)
return pretrained
def _make_pretrained_vitb_rn50_384(
pretrained,
use_readout="ignore",
hooks=None,
use_vit_only=False,
enable_attention_hooks=False,
):
model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
hooks = [0, 1, 8, 11] if hooks == None else hooks
return _make_vit_b_rn50_backbone(
model,
features=[256, 512, 768, 768],
size=[384, 384],
hooks=hooks,
use_vit_only=use_vit_only,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
def _make_pretrained_vitl16_384(
pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
hooks = [5, 11, 17, 23] if hooks == None else hooks
return _make_vit_b16_backbone(
model,
features=[256, 512, 1024, 1024],
hooks=hooks,
vit_features=1024,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
def _make_pretrained_vitb16_384(
pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
hooks = [2, 5, 8, 11] if hooks == None else hooks
return _make_vit_b16_backbone(
model,
features=[96, 192, 384, 768],
hooks=hooks,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
def _make_pretrained_deitb16_384(
pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
hooks = [2, 5, 8, 11] if hooks == None else hooks
return _make_vit_b16_backbone(
model,
features=[96, 192, 384, 768],
hooks=hooks,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
def _make_pretrained_deitb16_distil_384(
pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
model = timm.create_model(
"vit_deit_base_distilled_patch16_384", pretrained=pretrained
)
hooks = [2, 5, 8, 11] if hooks == None else hooks
return _make_vit_b16_backbone(
model,
features=[96, 192, 384, 768],
hooks=hooks,
use_readout=use_readout,
start_index=2,
enable_attention_hooks=enable_attention_hooks,
)
|
import torch
import torch.nn as nn
from .vit import (
_make_pretrained_vitb_rn50_384,
_make_pretrained_vitl16_384,
_make_pretrained_vitb16_384,
forward_vit,
)
def _make_encoder(
backbone,
features,
use_pretrained,
groups=1,
expand=False,
exportable=True,
hooks=None,
use_vit_only=False,
use_readout="ignore",
enable_attention_hooks=False,
):
if backbone == "vitl16_384":
pretrained = _make_pretrained_vitl16_384(
use_pretrained,
hooks=hooks,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
scratch = _make_scratch(
[256, 512, 1024, 1024], features, groups=groups, expand=expand
) # ViT-L/16 - 85.0% Top1 (backbone)
elif backbone == "vitb_rn50_384":
pretrained = _make_pretrained_vitb_rn50_384(
use_pretrained,
hooks=hooks,
use_vit_only=use_vit_only,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
scratch = _make_scratch(
[256, 512, 768, 768], features, groups=groups, expand=expand
        )  # ViT-Hybrid (R50 + ViT-B/16) backbone
elif backbone == "vitb16_384":
pretrained = _make_pretrained_vitb16_384(
use_pretrained,
hooks=hooks,
use_readout=use_readout,
enable_attention_hooks=enable_attention_hooks,
)
scratch = _make_scratch(
[96, 192, 384, 768], features, groups=groups, expand=expand
) # ViT-B/16 - 84.6% Top1 (backbone)
elif backbone == "resnext101_wsl":
pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
scratch = _make_scratch(
[256, 512, 1024, 2048], features, groups=groups, expand=expand
        )  # resnext101_wsl
else:
print(f"Backbone '{backbone}' not implemented")
assert False
return pretrained, scratch
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
scratch = nn.Module()
out_shape1 = out_shape
out_shape2 = out_shape
out_shape3 = out_shape
out_shape4 = out_shape
if expand == True:
out_shape1 = out_shape
out_shape2 = out_shape * 2
out_shape3 = out_shape * 4
out_shape4 = out_shape * 8
scratch.layer1_rn = nn.Conv2d(
in_shape[0],
out_shape1,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
scratch.layer2_rn = nn.Conv2d(
in_shape[1],
out_shape2,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
scratch.layer3_rn = nn.Conv2d(
in_shape[2],
out_shape3,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
scratch.layer4_rn = nn.Conv2d(
in_shape[3],
out_shape4,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
return scratch
def _make_resnet_backbone(resnet):
pretrained = nn.Module()
pretrained.layer1 = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
)
pretrained.layer2 = resnet.layer2
pretrained.layer3 = resnet.layer3
pretrained.layer4 = resnet.layer4
return pretrained
def _make_pretrained_resnext101_wsl(use_pretrained):
resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
return _make_resnet_backbone(resnet)
class Interpolate(nn.Module):
"""Interpolation module."""
def __init__(self, scale_factor, mode, align_corners=False):
"""Init.
Args:
scale_factor (float): scaling
mode (str): interpolation mode
"""
super(Interpolate, self).__init__()
self.interp = nn.functional.interpolate
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: interpolated data
"""
x = self.interp(
x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners,
)
return x
class ResidualConvUnit(nn.Module):
"""Residual convolution module."""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.conv1 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True
)
self.conv2 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.relu(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
return out + x
class FeatureFusionBlock(nn.Module):
"""Feature fusion block."""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super(FeatureFusionBlock, self).__init__()
self.resConfUnit1 = ResidualConvUnit(features)
self.resConfUnit2 = ResidualConvUnit(features)
def forward(self, *xs):
"""Forward pass.
Returns:
tensor: output
"""
output = xs[0]
if len(xs) == 2:
output += self.resConfUnit1(xs[1])
output = self.resConfUnit2(output)
output = nn.functional.interpolate(
output, scale_factor=2, mode="bilinear", align_corners=True
)
return output
class ResidualConvUnit_custom(nn.Module):
"""Residual convolution module."""
def __init__(self, features, activation, bn):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.bn = bn
self.groups = 1
self.conv1 = nn.Conv2d(
features,
features,
kernel_size=3,
stride=1,
padding=1,
bias=not self.bn,
groups=self.groups,
)
self.conv2 = nn.Conv2d(
features,
features,
kernel_size=3,
stride=1,
padding=1,
bias=not self.bn,
groups=self.groups,
)
if self.bn == True:
self.bn1 = nn.BatchNorm2d(features)
self.bn2 = nn.BatchNorm2d(features)
self.activation = activation
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.activation(x)
out = self.conv1(out)
if self.bn == True:
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
if self.bn == True:
out = self.bn2(out)
if self.groups > 1:
out = self.conv_merge(out)
return self.skip_add.add(out, x)
class FeatureFusionBlock_custom(nn.Module):
"""Feature fusion block."""
def __init__(
self,
features,
activation,
deconv=False,
bn=False,
expand=False,
align_corners=True,
):
"""Init.
Args:
features (int): number of features
"""
super(FeatureFusionBlock_custom, self).__init__()
self.deconv = deconv
self.align_corners = align_corners
self.groups = 1
self.expand = expand
out_features = features
if self.expand == True:
out_features = features // 2
self.out_conv = nn.Conv2d(
features,
out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True,
groups=1,
)
self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, *xs):
"""Forward pass.
Returns:
tensor: output
"""
output = xs[0]
if len(xs) == 2:
res = self.resConfUnit1(xs[1])
output = self.skip_add.add(output, res)
# output += res
output = self.resConfUnit2(output)
output = nn.functional.interpolate(
output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
)
output = self.out_conv(output)
return output
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FlowHead(nn.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128):
super(ConvGRU, self).__init__()
self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
def forward(self, h, x):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
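# ConvGRU above implements the standard gated update with convolutions:
#   z = sigmoid(Conv([h, x])), r = sigmoid(Conv([h, x])),
#   q = tanh(Conv([r * h, x])), h' = (1 - z) * h + z * q
# SepConvGRU below applies the same update twice with separable 1x5 / 5x1 kernels.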
class SepConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128):
super(SepConvGRU, self).__init__()
self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
def forward(self, h, x):
# horizontal
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz1(hx))
r = torch.sigmoid(self.convr1(hx))
q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
# vertical
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz2(hx))
r = torch.sigmoid(self.convr2(hx))
q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
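# --- Illustrative sketch (not part of the original module) ------------------
# Both GRU variants expect a hidden state with `hidden_dim` channels and an
# input with `input_dim` channels, concatenated along the channel axis; the
# spatial size (8x8 here, chosen arbitrarily) is preserved.
def _example_gru_shapes():
    h = torch.randn(2, 128, 8, 8)        # hidden state
    x = torch.randn(2, 192 + 128, 8, 8)  # motion features + context features
    assert ConvGRU(hidden_dim=128, input_dim=192 + 128)(h, x).shape == h.shape
    assert SepConvGRU(hidden_dim=128, input_dim=192 + 128)(h, x).shape == h.shape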
class SmallMotionEncoder(nn.Module):
def __init__(self, args):
super(SmallMotionEncoder, self).__init__()
cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
self.conv = nn.Conv2d(128, 80, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
class BasicMotionEncoder(nn.Module):
def __init__(self, args):
super(BasicMotionEncoder, self).__init__()
cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
class SmallUpdateBlock(nn.Module):
def __init__(self, args, hidden_dim=96):
super(SmallUpdateBlock, self).__init__()
self.encoder = SmallMotionEncoder(args)
self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
self.flow_head = FlowHead(hidden_dim, hidden_dim=128)
def forward(self, net, inp, corr, flow):
motion_features = self.encoder(flow, corr)
inp = torch.cat([inp, motion_features], dim=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
return net, None, delta_flow
class BasicUpdateBlock(nn.Module):
def __init__(self, args, hidden_dim=128, input_dim=128):
super(BasicUpdateBlock, self).__init__()
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64*9, 1, padding=0))
def forward(self, net, inp, corr, flow, upsample=True):
motion_features = self.encoder(flow, corr)
inp = torch.cat([inp, motion_features], dim=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
        # scale mask to balance gradients
mask = .25 * self.mask(net)
return net, mask, delta_flow
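# --- Illustrative sketch (not part of the original module) ------------------
# The 64*9 mask channels produced by BasicUpdateBlock are the logits of 9
# neighbour weights for each of the 8x8 sub-pixel positions used by convex
# upsampling (see RAFT.upsample_flow). The args namespace below is
# hypothetical; only corr_levels and corr_radius are read here.
def _example_update_block_shapes():
    import argparse
    args = argparse.Namespace(corr_levels=4, corr_radius=4)
    block = BasicUpdateBlock(args, hidden_dim=128)
    n, h, w = 1, 8, 8
    net = torch.randn(n, 128, h, w)   # hidden state
    inp = torch.randn(n, 128, h, w)   # context features
    corr = torch.randn(n, args.corr_levels * (2 * args.corr_radius + 1) ** 2, h, w)
    flow = torch.randn(n, 2, h, w)
    net, mask, delta_flow = block(net, inp, corr, flow)
    assert mask.shape == (n, 64 * 9, h, w)
    assert delta_flow.shape == (n, 2, h, w)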
|
import torch
import torch.nn.functional as F
from .utils.utils import bilinear_sampler, coords_grid
try:
import alt_cuda_corr
except ImportError:
# alt_cuda_corr is not compiled
pass
class CorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
self.num_levels = num_levels
self.radius = radius
self.corr_pyramid = []
# all pairs correlation
corr = CorrBlock.corr(fmap1, fmap2)
batch, h1, w1, dim, h2, w2 = corr.shape
corr = corr.reshape(batch*h1*w1, dim, h2, w2)
self.corr_pyramid.append(corr)
for i in range(self.num_levels-1):
corr = F.avg_pool2d(corr, 2, stride=2)
self.corr_pyramid.append(corr)
def __call__(self, coords):
r = self.radius
coords = coords.permute(0, 2, 3, 1)
batch, h1, w1, _ = coords.shape
out_pyramid = []
for i in range(self.num_levels):
corr = self.corr_pyramid[i]
dx = torch.linspace(-r, r, 2*r+1)
dy = torch.linspace(-r, r, 2*r+1)
delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device)
centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
coords_lvl = centroid_lvl + delta_lvl
corr = bilinear_sampler(corr, coords_lvl)
corr = corr.view(batch, h1, w1, -1)
out_pyramid.append(corr)
out = torch.cat(out_pyramid, dim=-1)
return out.permute(0, 3, 1, 2).contiguous().float()
@staticmethod
def corr(fmap1, fmap2):
batch, dim, ht, wd = fmap1.shape
fmap1 = fmap1.view(batch, dim, ht*wd)
fmap2 = fmap2.view(batch, dim, ht*wd)
corr = torch.matmul(fmap1.transpose(1,2), fmap2)
corr = corr.view(batch, ht, wd, 1, ht, wd)
return corr / torch.sqrt(torch.tensor(dim).float())
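# --- Illustrative sketch (not part of the original module) ------------------
# A shape walk-through of CorrBlock on hypothetical 1/8-resolution features:
# the all-pairs volume is stored as (B*H*W, 1, H, W) at the finest level and
# average-pooled num_levels-1 times; indexing it at `coords` yields
# num_levels * (2*radius+1)**2 correlation features per pixel.
def _example_corr_block_shapes():
    b, c, h, w = 1, 256, 32, 32
    fmap1 = torch.randn(b, c, h, w)
    fmap2 = torch.randn(b, c, h, w)
    corr_fn = CorrBlock(fmap1, fmap2, num_levels=4, radius=4)
    coords = coords_grid(b, h, w, fmap1.device)  # (B, 2, H, W), pixel coordinates
    out = corr_fn(coords)
    assert out.shape == (b, 4 * (2 * 4 + 1) ** 2, h, w)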
class AlternateCorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
self.num_levels = num_levels
self.radius = radius
self.pyramid = [(fmap1, fmap2)]
for i in range(self.num_levels):
fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
self.pyramid.append((fmap1, fmap2))
def __call__(self, coords):
coords = coords.permute(0, 2, 3, 1)
B, H, W, _ = coords.shape
dim = self.pyramid[0][0].shape[1]
corr_list = []
for i in range(self.num_levels):
r = self.radius
fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
corr_list.append(corr.squeeze(1))
corr = torch.stack(corr_list, dim=1)
corr = corr.reshape(B, -1, H, W)
return corr / torch.sqrt(torch.tensor(dim).float())
|
# Data loading based on https://github.com/NVIDIA/flownet2-pytorch
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import os
import math
import random
from glob import glob
import os.path as osp
from utils import frame_utils
from utils.augmentor import FlowAugmentor, SparseFlowAugmentor
class FlowDataset(data.Dataset):
def __init__(self, aug_params=None, sparse=False):
self.augmentor = None
self.sparse = sparse
if aug_params is not None:
if sparse:
self.augmentor = SparseFlowAugmentor(**aug_params)
else:
self.augmentor = FlowAugmentor(**aug_params)
self.is_test = False
self.init_seed = False
self.flow_list = []
self.image_list = []
self.extra_info = []
def __getitem__(self, index):
if self.is_test:
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
img1 = np.array(img1).astype(np.uint8)[..., :3]
img2 = np.array(img2).astype(np.uint8)[..., :3]
img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
return img1, img2, self.extra_info[index]
if not self.init_seed:
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
torch.manual_seed(worker_info.id)
np.random.seed(worker_info.id)
random.seed(worker_info.id)
self.init_seed = True
index = index % len(self.image_list)
valid = None
if self.sparse:
flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
else:
flow = frame_utils.read_gen(self.flow_list[index])
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = np.array(flow).astype(np.float32)
img1 = np.array(img1).astype(np.uint8)
img2 = np.array(img2).astype(np.uint8)
# grayscale images
if len(img1.shape) == 2:
img1 = np.tile(img1[...,None], (1, 1, 3))
img2 = np.tile(img2[...,None], (1, 1, 3))
else:
img1 = img1[..., :3]
img2 = img2[..., :3]
if self.augmentor is not None:
if self.sparse:
img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
else:
img1, img2, flow = self.augmentor(img1, img2, flow)
img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
flow = torch.from_numpy(flow).permute(2, 0, 1).float()
if valid is not None:
valid = torch.from_numpy(valid)
else:
valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)
return img1, img2, flow, valid.float()
def __rmul__(self, v):
self.flow_list = v * self.flow_list
self.image_list = v * self.image_list
return self
def __len__(self):
return len(self.image_list)
class MpiSintel(FlowDataset):
def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'):
super(MpiSintel, self).__init__(aug_params)
flow_root = osp.join(root, split, 'flow')
image_root = osp.join(root, split, dstype)
if split == 'test':
self.is_test = True
for scene in os.listdir(image_root):
image_list = sorted(glob(osp.join(image_root, scene, '*.png')))
for i in range(len(image_list)-1):
self.image_list += [ [image_list[i], image_list[i+1]] ]
self.extra_info += [ (scene, i) ] # scene and frame_id
if split != 'test':
self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))
class FlyingChairs(FlowDataset):
def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'):
super(FlyingChairs, self).__init__(aug_params)
images = sorted(glob(osp.join(root, '*.ppm')))
flows = sorted(glob(osp.join(root, '*.flo')))
assert (len(images)//2 == len(flows))
split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)
for i in range(len(flows)):
xid = split_list[i]
if (split=='training' and xid==1) or (split=='validation' and xid==2):
self.flow_list += [ flows[i] ]
self.image_list += [ [images[2*i], images[2*i+1]] ]
class FlyingThings3D(FlowDataset):
def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):
super(FlyingThings3D, self).__init__(aug_params)
for cam in ['left']:
for direction in ['into_future', 'into_past']:
image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
for idir, fdir in zip(image_dirs, flow_dirs):
images = sorted(glob(osp.join(idir, '*.png')) )
flows = sorted(glob(osp.join(fdir, '*.pfm')) )
for i in range(len(flows)-1):
if direction == 'into_future':
self.image_list += [ [images[i], images[i+1]] ]
self.flow_list += [ flows[i] ]
elif direction == 'into_past':
self.image_list += [ [images[i+1], images[i]] ]
self.flow_list += [ flows[i+1] ]
class KITTI(FlowDataset):
def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):
super(KITTI, self).__init__(aug_params, sparse=True)
if split == 'testing':
self.is_test = True
root = osp.join(root, split)
images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))
images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))
for img1, img2 in zip(images1, images2):
frame_id = img1.split('/')[-1]
self.extra_info += [ [frame_id] ]
self.image_list += [ [img1, img2] ]
if split == 'training':
self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
class HD1K(FlowDataset):
def __init__(self, aug_params=None, root='datasets/HD1k'):
super(HD1K, self).__init__(aug_params, sparse=True)
seq_ix = 0
        while True:
flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))
images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))
if len(flows) == 0:
break
for i in range(len(flows)-1):
self.flow_list += [flows[i]]
self.image_list += [ [images[i], images[i+1]] ]
seq_ix += 1
def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):
""" Create the data loader for the corresponding trainign set """
if args.stage == 'chairs':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
train_dataset = FlyingChairs(aug_params, split='training')
elif args.stage == 'things':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}
clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')
final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')
train_dataset = clean_dataset + final_dataset
elif args.stage == 'sintel':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
things = FlyingThings3D(aug_params, dstype='frames_cleanpass')
sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
sintel_final = MpiSintel(aug_params, split='training', dstype='final')
if TRAIN_DS == 'C+T+K+S+H':
kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})
hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})
train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things
elif TRAIN_DS == 'C+T+K/S':
train_dataset = 100*sintel_clean + 100*sintel_final + things
elif args.stage == 'kitti':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}
train_dataset = KITTI(aug_params, split='training')
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
pin_memory=False, shuffle=True, num_workers=4, drop_last=True)
print('Training with %d image pairs' % len(train_dataset))
return train_loader
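# --- Illustrative sketch (not part of the original module) ------------------
# fetch_dataloader reads only a few attributes from `args`. The namespace
# below is hypothetical, and actually building the loader assumes the
# FlyingChairs data has been extracted to the default path used above.
def _example_fetch_dataloader():
    import argparse
    args = argparse.Namespace(stage='chairs', image_size=[368, 496], batch_size=8)
    return fetch_dataloader(args)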
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm3 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm3 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
if not stride == 1:
self.norm3 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(BottleneckBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes//4)
self.norm2 = nn.BatchNorm2d(planes//4)
self.norm3 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm4 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes//4)
self.norm2 = nn.InstanceNorm2d(planes//4)
self.norm3 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm4 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
self.norm3 = nn.Sequential()
if not stride == 1:
self.norm4 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
y = self.relu(self.norm3(self.conv3(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class BasicEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
super(BasicEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
self.layer1 = self._make_layer(64, stride=1)
self.layer2 = self._make_layer(96, stride=2)
self.layer3 = self._make_layer(128, stride=2)
# output convolution
self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
class SmallEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
super(SmallEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(32)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(32)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 32
self.layer1 = self._make_layer(32, stride=1)
self.layer2 = self._make_layer(64, stride=2)
self.layer3 = self._make_layer(96, stride=2)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
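# --- Illustrative sketch (not part of the original module) ------------------
# Both encoders downsample by a factor of 8 (a stride-2 stem followed by two
# stride-2 stages), so a hypothetical 64x64 input yields 8x8 feature maps.
def _example_encoder_shapes():
    x = torch.randn(1, 3, 64, 64)
    assert BasicEncoder(output_dim=256, norm_fn='instance')(x).shape == (1, 256, 8, 8)
    assert SmallEncoder(output_dim=128, norm_fn='none')(x).shape == (1, 128, 8, 8)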
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .update import BasicUpdateBlock, SmallUpdateBlock
from .extractor import BasicEncoder, SmallEncoder
from .corr import CorrBlock, AlternateCorrBlock
from .utils.utils import bilinear_sampler, coords_grid, upflow8, InputPadder
try:
autocast = torch.cuda.amp.autocast
except:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class RAFT(nn.Module):
def __init__(self, args):
super(RAFT, self).__init__()
self.args = args
if args.small:
self.hidden_dim = hdim = 96
self.context_dim = cdim = 64
args.corr_levels = 4
args.corr_radius = 3
else:
self.hidden_dim = hdim = 128
self.context_dim = cdim = 128
args.corr_levels = 4
args.corr_radius = 4
if 'dropout' not in self.args:
self.args.dropout = 0
if 'alternate_corr' not in self.args:
self.args.alternate_corr = False
# feature network, context network, and update block
if args.small:
self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
else:
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def initialize_flow(self, img):
""" Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
N, C, H, W = img.shape
coords0 = coords_grid(N, H//8, W//8, img.device)
coords1 = coords_grid(N, H//8, W//8, img.device)
# optical flow computed as difference: flow = coords1 - coords0
return coords0, coords1
def upsample_flow(self, flow, mask):
""" Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
N, _, H, W = flow.shape
mask = mask.view(N, 1, 9, 8, 8, H, W)
mask = torch.softmax(mask, dim=2)
up_flow = F.unfold(8 * flow, [3,3], padding=1)
up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2)
up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
return up_flow.reshape(N, 2, 8*H, 8*W)
def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False, padder=None):
""" Estimate optical flow between pair of frames """
image1 = 2 * (image1 / 255.0) - 1.0
image2 = 2 * (image2 / 255.0) - 1.0
image1 = image1.contiguous()
image2 = image2.contiguous()
hdim = self.hidden_dim
cdim = self.context_dim
# run the feature network
with autocast(enabled=self.args.mixed_precision):
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.float()
fmap2 = fmap2.float()
if self.args.alternate_corr:
corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
else:
corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
# run the context network
with autocast(enabled=self.args.mixed_precision):
cnet = self.cnet(image1)
net, inp = torch.split(cnet, [hdim, cdim], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
coords0, coords1 = self.initialize_flow(image1)
if flow_init is not None:
coords1 = coords1 + flow_init
flow_predictions = []
for itr in range(iters):
coords1 = coords1.detach()
corr = corr_fn(coords1) # index correlation volume
flow = coords1 - coords0
with autocast(enabled=self.args.mixed_precision):
net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
# F(t+1) = F(t) + \Delta(t)
coords1 = coords1 + delta_flow
if up_mask is None:
flow_up = upflow8(coords1 - coords0)
else:
flow_up = self.upsample_flow(coords1 - coords0, up_mask)
if padder is not None:
flow_up = padder.unpad(flow_up)
flow_predictions.append(flow_up)
if test_mode:
return coords1 - coords0, flow_up
return flow_predictions
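# --- Illustrative sketch (not part of the original module) ------------------
# Convex upsampling in upsample_flow: every full-resolution pixel is a convex
# combination (softmax over 9 logits) of the 3x3 coarse-flow neighbourhood of
# its parent cell, and the coarse flow is scaled by 8 because one coarse pixel
# spans 8 fine pixels. Putting (almost) all weight on the centre tap therefore
# reduces to nearest upsampling of 8x the coarse flow, which this hypothetical
# check verifies on a constant flow field.
def _example_convex_upsampling():
    import argparse
    model = RAFT(argparse.Namespace(small=False))
    n, h, w = 1, 4, 4
    flow = torch.ones(n, 2, h, w)        # constant coarse flow of 1 px
    mask = torch.zeros(n, 64 * 9, h, w)
    mask[:, 4 * 64:5 * 64] = 100.0       # centre-tap channels (k=4) get all the softmax weight
    up = model.upsample_flow(flow, mask)
    assert up.shape == (n, 2, 8 * h, 8 * w)
    assert torch.allclose(up, torch.full_like(up, 8.0), atol=1e-3)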
|
import numpy as np
import random
import math
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import torch
from torchvision.transforms import ColorJitter
import torch.nn.functional as F
class FlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
""" Photometric augmentation """
# asymmetric
if np.random.rand() < self.asymmetric_color_aug_prob:
img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
# symmetric
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2, bounds=[50, 100]):
""" Occlusion augmentation """
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(bounds[0], bounds[1])
dy = np.random.randint(bounds[0], bounds[1])
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def spatial_transform(self, img1, img2, flow):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 8) / float(ht),
(self.crop_size[1] + 8) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = scale
scale_y = scale
if np.random.rand() < self.stretch_prob:
scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_x = np.clip(scale_x, min_scale, None)
scale_y = np.clip(scale_y, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = flow * [scale_x, scale_y]
if self.do_flip:
if np.random.rand() < self.h_flip_prob: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.v_flip_prob: # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow
def __call__(self, img1, img2, flow):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow = self.spatial_transform(img1, img2, flow)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
return img1, img2, flow
class SparseFlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2):
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(50, 100)
dy = np.random.randint(50, 100)
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
ht, wd = flow.shape[:2]
coords = np.meshgrid(np.arange(wd), np.arange(ht))
coords = np.stack(coords, axis=-1)
coords = coords.reshape(-1, 2).astype(np.float32)
flow = flow.reshape(-1, 2).astype(np.float32)
valid = valid.reshape(-1).astype(np.float32)
coords0 = coords[valid>=1]
flow0 = flow[valid>=1]
ht1 = int(round(ht * fy))
wd1 = int(round(wd * fx))
coords1 = coords0 * [fx, fy]
flow1 = flow0 * [fx, fy]
xx = np.round(coords1[:,0]).astype(np.int32)
yy = np.round(coords1[:,1]).astype(np.int32)
v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
xx = xx[v]
yy = yy[v]
flow1 = flow1[v]
flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
valid_img = np.zeros([ht1, wd1], dtype=np.int32)
flow_img[yy, xx] = flow1
valid_img[yy, xx] = 1
return flow_img, valid_img
def spatial_transform(self, img1, img2, flow, valid):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = np.clip(scale, min_scale, None)
scale_y = np.clip(scale, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
if self.do_flip:
if np.random.rand() < 0.5: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
valid = valid[:, ::-1]
margin_y = 20
margin_x = 50
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow, valid
def __call__(self, img1, img2, flow, valid):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
valid = np.ascontiguousarray(valid)
return img1, img2, flow, valid
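# --- Illustrative sketch (not part of the original module) ------------------
# A minimal run of SparseFlowAugmentor on synthetic data, assuming a
# hypothetical 128x160 input and a 64x96 crop. Images must be uint8 (they are
# passed through PIL for colour jitter); flow and valid are per-pixel arrays.
def _example_sparse_augmentor():
    aug = SparseFlowAugmentor(crop_size=[64, 96])
    img1 = np.random.randint(0, 255, (128, 160, 3), dtype=np.uint8)
    img2 = np.random.randint(0, 255, (128, 160, 3), dtype=np.uint8)
    flow = np.random.randn(128, 160, 2).astype(np.float32)
    valid = np.ones((128, 160), dtype=np.float32)
    img1, img2, flow, valid = aug(img1, img2, flow, valid)
    assert img1.shape == (64, 96, 3) and flow.shape == (64, 96, 2) and valid.shape == (64, 96)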
|
import torch
import torch.nn.functional as F
import numpy as np
from scipy import interpolate
class InputPadder:
""" Pads images such that dimensions are divisible by 8 """
def __init__(self, dims, mode='sintel'):
self.ht, self.wd = dims[-2:]
pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
if mode == 'sintel':
self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
else:
self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht]
def pad(self, *inputs):
return [F.pad(x, self._pad, mode='replicate') for x in inputs]
def unpad(self,x):
ht, wd = x.shape[-2:]
c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]]
return x[..., c[0]:c[1], c[2]:c[3]]
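# --- Illustrative sketch (not part of the original module) ------------------
# InputPadder pads to the next multiple of 8 and unpad removes exactly the
# same border, so a pad/unpad round trip is the identity. The 436x1024 Sintel
# resolution is used here only as an example.
def _example_input_padder():
    img = torch.randn(1, 3, 436, 1024)
    padder = InputPadder(img.shape)
    padded, = padder.pad(img)
    assert padded.shape[-2] % 8 == 0 and padded.shape[-1] % 8 == 0
    assert torch.equal(padder.unpad(padded), img)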
def forward_interpolate(flow):
flow = flow.detach().cpu().numpy()
dx, dy = flow[0], flow[1]
ht, wd = dx.shape
x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
x1 = x0 + dx
y1 = y0 + dy
x1 = x1.reshape(-1)
y1 = y1.reshape(-1)
dx = dx.reshape(-1)
dy = dy.reshape(-1)
valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
x1 = x1[valid]
y1 = y1[valid]
dx = dx[valid]
dy = dy[valid]
flow_x = interpolate.griddata(
(x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
flow_y = interpolate.griddata(
(x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
flow = np.stack([flow_x, flow_y], axis=0)
return torch.from_numpy(flow).float()
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
""" Wrapper for grid_sample, uses pixel coordinates """
H, W = img.shape[-2:]
xgrid, ygrid = coords.split([1,1], dim=-1)
xgrid = 2*xgrid/(W-1) - 1
ygrid = 2*ygrid/(H-1) - 1
grid = torch.cat([xgrid, ygrid], dim=-1)
img = F.grid_sample(img, grid, align_corners=True)
if mask:
mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
return img, mask.float()
return img
def coords_grid(batch, ht, wd, device):
coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
coords = torch.stack(coords[::-1], dim=0).float()
return coords[None].repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
new_size = (8 * flow.shape[2], 8 * flow.shape[3])
return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
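# --- Illustrative sketch (not part of the original module) ------------------
# bilinear_sampler takes coordinates in pixel units rather than the [-1, 1]
# range used by grid_sample, so sampling an image at its own coordinate grid
# returns the image (up to float error).
def _example_bilinear_sampler_identity():
    img = torch.randn(1, 3, 5, 7)
    coords = coords_grid(1, 5, 7, img.device)   # (1, 2, 5, 7) with (x, y) channel order
    out = bilinear_sampler(img, coords.permute(0, 2, 3, 1))
    assert torch.allclose(out, img, atol=1e-5)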
|
# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
# MIT License
#
# Copyright (c) 2018 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2018-08-03
import numpy as np
def make_colorwheel():
"""
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
Code follows the original C++ source code of Daniel Scharstein.
    Code follows the Matlab source code of Deqing Sun.
Returns:
np.ndarray: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
"""
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
Args:
u (np.ndarray): Input horizontal flow of shape [H,W]
v (np.ndarray): Input vertical flow of shape [H,W]
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
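# --- Illustrative sketch (not part of the original module) ------------------
# flow_uv_to_colors expects u and v normalised so the largest flow magnitude
# is about 1 (larger radii are dimmed by the 0.75 factor above). The example
# below colours a small synthetic rotational flow field.
def _example_flow_colors():
    h = w = 16
    y, x = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
    u = -(y - h / 2.0)
    v = x - w / 2.0
    rad_max = np.sqrt(np.square(u) + np.square(v)).max()
    img = flow_uv_to_colors(u / rad_max, v / rad_max)
    assert img.shape == (h, w, 3) and img.dtype == np.uint8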
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
"""
    Expects a two dimensional flow image of shape [H,W,2].
Args:
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
rad_max = np.max(rad)
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr) |
import numpy as np
from PIL import Image
from os.path import *
import re
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
""" Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b'PF':
color = True
elif header == b'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def writeFlow(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
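# --- Illustrative sketch (not part of the original module) ------------------
# writeFlow and readFlow are inverses on little-endian machines: the .flo file
# stores the magic tag, width, height, then interleaved u/v float32 values.
# The temporary path below exists only for this round-trip check.
def _example_flo_roundtrip():
    import os, tempfile
    flow = np.random.rand(4, 6, 2).astype(np.float32)
    path = os.path.join(tempfile.mkdtemp(), 'example.flo')
    writeFlow(path, flow)
    recovered = readFlow(path)
    assert recovered.shape == (4, 6, 2) and np.allclose(recovered, flow)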
def readFlowKITTI(filename):
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
flow = flow[:,:,::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def readDispKITTI(filename):
disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0
valid = disp > 0.0
flow = np.stack([-disp, np.zeros_like(disp)], -1)
return flow, valid
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
def read_gen(file_name, pil=False):
ext = splitext(file_name)[-1]
if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
return Image.open(file_name)
elif ext == '.bin' or ext == '.raw':
return np.load(file_name)
elif ext == '.flo':
return readFlow(file_name).astype(np.float32)
elif ext == '.pfm':
flow = readPFM(file_name).astype(np.float32)
if len(flow.shape) == 2:
return flow
else:
return flow[:, :, :-1]
return [] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import imageio
import numpy as np
import torch
import glob
from torch.utils.data import Dataset
from data_loaders.data_utils import get_src_tgt_ids, resize_img
import torchvision
from core.utils import remove_noise_in_dpt_disparity
def get_black_boundary_size(img):
h, w = img.shape[:2]
mean_img = np.mean(img, axis=-1)
mask_mean_x_axis = mean_img.mean(axis=0)
x_valid = np.nonzero(mask_mean_x_axis > 1e-3)
if len(x_valid[0]) == 0:
left, right = 0, w
else:
left, right = x_valid[0][0], x_valid[0][-1]+1
mask_mean_y_axis = mean_img.mean(axis=1)
y_valid = np.nonzero(mask_mean_y_axis > 1e-3)
if len(y_valid[0]) == 0:
top, bottom = 0, h
else:
top, bottom = y_valid[0][0], y_valid[0][-1]+1
assert 0 <= top <= h and 0 <= bottom <= h and 0 <= left <= w and 0 <= right <= w
top = top + (16 - top % 16) if top % 16 != 0 else top
left = left + (16 - left % 16) if left % 16 != 0 else left
bottom = bottom - bottom % 16 if bottom % 16 != 0 else bottom
right = right - right % 16 if right % 16 != 0 else right
if bottom - top < 128:
top = 0
bottom = h
if right - left < 128:
left = 0
right = w
return top, bottom, left, right
class VimeoDataset(Dataset):
def __init__(self, args, subset, **kwargs):
base_dir = 'data/vimeo/sequences/'
scene_dirs = sorted(glob.glob(os.path.join(base_dir, '*/*')))
if subset == 'train':
self.scene_dirs = scene_dirs[:-100]
else:
self.scene_dirs = scene_dirs[-100:]
self.to_tensor = torchvision.transforms.ToTensor()
self.ds_factor = 1
def __len__(self):
return len(self.scene_dirs)
def __getitem__(self, idx):
scene_dir = self.scene_dirs[idx]
img_dir = scene_dir
depth_dir = os.path.join(scene_dir, 'dpt_depth')
img_files = sorted(glob.glob(os.path.join(img_dir, '*.png')))
dpt_files = sorted(glob.glob(os.path.join(depth_dir, '*.png')))
        assert len(img_files) == len(dpt_files), scene_dir
num_frames = len(img_files)
src_id1, src_id2, tgt_id = get_src_tgt_ids(num_frames, max_interval=3)
if np.random.choice([0, 1], p=[0.996, 0.004]):
src_id = np.random.choice([src_id1, src_id2])
tgt_id = src_id
time = np.clip((tgt_id - src_id1 + np.random.rand() - 0.5) / (src_id2 - src_id1), a_min=0., a_max=1.)
src_img1 = imageio.imread(img_files[src_id1]) / 255.
src_img2 = imageio.imread(img_files[src_id2]) / 255.
tgt_img = imageio.imread(img_files[tgt_id]) / 255.
t, b, l, r = get_black_boundary_size(src_img1)
src_img1 = resize_img(src_img1, self.ds_factor)
src_img2 = resize_img(src_img2, self.ds_factor)
tgt_img = resize_img(tgt_img, self.ds_factor)
src_disp1 = imageio.imread(dpt_files[src_id1]) / 65535.
src_disp2 = imageio.imread(dpt_files[src_id2]) / 65535.
src_disp1 = remove_noise_in_dpt_disparity(src_disp1)
src_disp2 = remove_noise_in_dpt_disparity(src_disp2)
src_depth1 = 1. / np.maximum(src_disp1, 1e-2)
src_depth2 = 1. / np.maximum(src_disp2, 1e-2)
# src_depth1 = sparse_bilateral_filtering(src_depth1, num_iter=1) # fixme
# src_depth2 = sparse_bilateral_filtering(src_depth2, num_iter=1)
src_depth1 = resize_img(src_depth1, self.ds_factor)
src_depth2 = resize_img(src_depth2, self.ds_factor)
src_img1 = src_img1[t:b, l:r]
src_img2 = src_img2[t:b, l:r]
tgt_img = tgt_img[t:b, l:r]
src_depth1 = src_depth1[t:b, l:r]
src_depth2 = src_depth2[t:b, l:r]
h1, w1 = src_img1.shape[:2]
h2, w2 = src_img2.shape[:2]
ht, wt = tgt_img.shape[:2]
intrinsic1 = np.array([[max(h1, w1), 0, w1 // 2],
[0, max(h1, w1), h1 // 2],
[0, 0, 1]])
intrinsic2 = np.array([[max(h2, w2), 0, w2 // 2],
[0, max(h2, w2), h2 // 2],
[0, 0, 1]])
tgt_intrinsic = np.array([[max(ht, wt), 0, wt // 2],
[0, max(ht, wt), ht // 2],
[0, 0, 1]])
relative_pose = np.eye(4)
tgt_pose = np.eye(4)
return {
'src_img1': self.to_tensor(src_img1).float(),
'src_img2': self.to_tensor(src_img2).float(),
'src_depth1': self.to_tensor(src_depth1).float(),
'src_depth2': self.to_tensor(src_depth2).float(),
'tgt_img': self.to_tensor(tgt_img).float(),
'intrinsic1': torch.from_numpy(intrinsic1).float(),
'intrinsic2': torch.from_numpy(intrinsic2).float(),
'tgt_intrinsic': torch.from_numpy(tgt_intrinsic).float(),
'pose': torch.from_numpy(relative_pose).float(),
'tgt_pose': torch.from_numpy(tgt_pose).float(),
'scale_shift1': torch.tensor([1., 0.]).float(),
'scale_shift2': torch.tensor([1., 0.]).float(),
'time': time,
'src_rgb_file1': img_files[src_id1],
'src_rgb_file2': img_files[src_id2],
'tgt_rgb_file': img_files[tgt_id],
'scene_dir': scene_dir,
'multi_view': False
}
if __name__ == '__main__':
    dataset = VimeoDataset(args=None, subset='train')
for data in dataset:
continue
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .vimeo import VimeoDataset
dataset_dict = {
'vimeo': VimeoDataset,
}
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
def resize_img_intrinsic(img, intrinsic, w_out, h_out):
h, w = img.shape[:2]
if w_out > w:
interpolation_method = cv2.INTER_LINEAR
else:
interpolation_method = cv2.INTER_AREA
img = cv2.resize(img, (int(w_out), int(h_out)), interpolation=interpolation_method)
intrinsic[0] *= 1. * w_out / w
intrinsic[1] *= 1. * h_out / h
return img, intrinsic
def resize_img(img, ds_factor, w_out=None, h_out=None):
h, w = img.shape[:2]
if w_out is None and h_out is None:
if ds_factor == 1:
return img
if ds_factor > 1:
interpolation_method = cv2.INTER_LINEAR
else:
interpolation_method = cv2.INTER_AREA
img = cv2.resize(img, (int(w*ds_factor), int(h*ds_factor)), interpolation=interpolation_method)
else:
if w_out > w:
interpolation_method = cv2.INTER_LINEAR
else:
interpolation_method = cv2.INTER_AREA
img = cv2.resize(img, (int(w_out), int(h_out)), interpolation=interpolation_method)
return img
def get_src_tgt_ids(num_frames, max_interval=1):
assert num_frames > max_interval + 1
src_id1 = np.random.choice(num_frames-max_interval-1)
interval = np.random.randint(low=0, high=max_interval) + 1
src_id2 = src_id1 + interval + 1
tgt_id = np.random.randint(src_id1 + 1, src_id2)
return src_id1, src_id2, tgt_id
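# --- Illustrative sketch (not part of the original module) ------------------
# get_src_tgt_ids samples two source frames at most max_interval + 1 apart and
# a target frame strictly between them, so the returned indices always satisfy
# src_id1 < tgt_id < src_id2.
def _example_src_tgt_ids():
    src_id1, src_id2, tgt_id = get_src_tgt_ids(num_frames=7, max_interval=3)
    assert 0 <= src_id1 < tgt_id < src_id2 < 7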
def skew(x):
return np.array([[0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
class Camera(object):
def __init__(self, entry):
fx, fy, cx, cy = entry[1:5]
self.intrinsics = np.array([[fx, 0, cx, 0],
[0, fy, cy, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
w2c_mat = np.array(entry[7:]).reshape(3, 4)
w2c_mat_4x4 = np.eye(4)
w2c_mat_4x4[:3, :] = w2c_mat
self.w2c_mat = w2c_mat_4x4
self.c2w_mat = np.linalg.inv(w2c_mat_4x4)
def unnormalize_intrinsics(intrinsics, h, w):
intrinsics[0] *= w
intrinsics[1] *= h
return intrinsics
def parse_pose_file(file):
f = open(file, 'r')
cam_params = {}
for i, line in enumerate(f):
if i == 0:
video_id = line.replace('https://www.youtube.com/watch?v=', '')[:-1]
continue
entry = [float(x) for x in line.split()]
id = int(entry[0])
cam_params[id] = Camera(entry)
return video_id, cam_params
def crop_img(img, factor=16):
h, w = img.shape[:2]
ho = h // factor * factor
wo = w // factor * factor
img = img[:ho, :wo]
return img
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from . import dataset_dict
from torch.utils.data import Dataset, Sampler
from torch.utils.data import DistributedSampler, WeightedRandomSampler
from typing import Optional
from operator import itemgetter
import torch
class DatasetFromSampler(Dataset):
"""Dataset to create indexes from `Sampler`.
Args:
sampler: PyTorch sampler
"""
def __init__(self, sampler: Sampler):
"""Initialisation for DatasetFromSampler."""
self.sampler = sampler
self.sampler_list = None
def __getitem__(self, index: int):
"""Gets element of the dataset.
Args:
index: index of the element in the dataset
Returns:
Single element by index
"""
if self.sampler_list is None:
self.sampler_list = list(self.sampler)
return self.sampler_list[index]
def __len__(self) -> int:
"""
Returns:
int: length of the dataset
"""
return len(self.sampler)
class DistributedSamplerWrapper(DistributedSampler):
"""
Wrapper over `Sampler` for distributed training.
Allows you to use any sampler in distributed mode.
It is especially useful in conjunction with
    `torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a DistributedSamplerWrapper instance as a DataLoader
sampler, and load a subset of subsampled data of the original dataset
that is exclusive to it.
.. note::
Sampler is assumed to be of constant size.
"""
def __init__(
self,
sampler,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
):
"""
Args:
sampler: Sampler used for subsampling
num_replicas (int, optional): Number of processes participating in
distributed training
rank (int, optional): Rank of the current process
within ``num_replicas``
shuffle (bool, optional): If true (default),
sampler will shuffle the indices
"""
super(DistributedSamplerWrapper, self).__init__(
DatasetFromSampler(sampler),
num_replicas=num_replicas,
rank=rank,
shuffle=shuffle,
)
self.sampler = sampler
def __iter__(self):
self.dataset = DatasetFromSampler(self.sampler)
indexes_of_indexes = super().__iter__()
subsampler_indexes = self.dataset
return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes))
def create_training_dataset(args):
# parse args.train_dataset, "+" indicates that multiple datasets are used, for example "vimeo+mannequin"
# otherwise only one dataset is used
# args.dataset_weights should be a list representing the resampling rate for each dataset, and should sum up to 1
print('training dataset: {}'.format(args.train_dataset))
mode = 'train'
if '+' not in args.train_dataset:
train_dataset = dataset_dict[args.train_dataset](args, mode)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
else:
train_dataset_names = args.train_dataset.split('+')
weights = args.dataset_weights
assert len(train_dataset_names) == len(weights)
assert np.abs(np.sum(weights) - 1.) < 1e-6
print('weights:{}'.format(weights))
train_datasets = []
train_weights_samples = []
for training_dataset_name, weight in zip(train_dataset_names, weights):
train_dataset = dataset_dict[training_dataset_name](args, mode)
train_datasets.append(train_dataset)
num_samples = len(train_dataset)
weight_each_sample = weight / num_samples
train_weights_samples.extend([weight_each_sample]*num_samples)
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
train_weights = torch.from_numpy(np.array(train_weights_samples))
sampler = WeightedRandomSampler(train_weights, len(train_weights))
train_sampler = DistributedSamplerWrapper(sampler) if args.distributed else sampler
return train_dataset, train_sampler |
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for exporting SavedModels and TFLite models."""
import os
from typing import Sequence
from absl import logging
from chirp.taxonomy import namespace
from jax.experimental import jax2tf
import tensorflow as tf
class Jax2TfModelWrapper(tf.Module):
"""Wrapper for Jax models for exporting with variable input shape."""
def __init__(
self,
infer_fn,
jax_params,
input_shape: Sequence[int | None],
enable_xla: bool = False,
coord_ids: str = 'bt',
name=None,
):
"""Initialize the wrapper.
Args:
infer_fn: The inference function for the Jax model.
jax_params: Parameters (ie, model weights) for the Jax model.
input_shape: Input shape, with 'None' for any axes which will be variable.
enable_xla: Whether to use XLA ops in the exported model. Defaults to
False, which is necessary for subsequent TFLite conversion.
coord_ids: String with length matching the length of the input_shape, used
for identifying polymorphic shape parameters.
name: Model name.
"""
super(Jax2TfModelWrapper, self).__init__(name=name)
# The automatically generated variable names in the checkpoint end up being
# very uninformative. There may be a good way to map in better names.
self._structured_variables = tf.nest.map_structure(tf.Variable, jax_params)
self.input_shape = input_shape
# Construct the jax polymorphic shape.
jp_shape = []
for i, s in enumerate(input_shape):
if s is None:
jp_shape.append(coord_ids[i])
else:
jp_shape.append('_')
jp_shape = '(' + ','.join(jp_shape) + ')'
# The variables structure needs to be flattened for the saved_model.
self._variables = tf.nest.flatten(self._structured_variables)
logging.info('Running jax2tf conversion...')
converted_infer_fn = jax2tf.convert(
infer_fn,
enable_xla=enable_xla,
with_gradient=False,
polymorphic_shapes=[jp_shape, None],
)
infer_partial = lambda inputs: converted_infer_fn( # pylint:disable=g-long-lambda
inputs, self._structured_variables
)
self.infer_tf = tf.function(
infer_partial,
jit_compile=True,
input_signature=[tf.TensorSpec(input_shape)],
)
logging.info('Jax2TfModelWrapper initialized.')
def __call__(self, inputs):
return self.infer_tf(inputs)
def get_tf_zero_inputs(self):
"""Construct some dummy inputs with self.input_shape."""
fake_shape = []
for s in self.input_shape:
if s is None:
fake_shape.append(1)
else:
fake_shape.append(s)
return tf.zeros(fake_shape)
def export_converted_model(
self,
workdir: str,
train_step: int,
class_lists: dict[str, namespace.ClassList] | None = None,
export_tf_lite: bool = True,
tf_lite_dtype: str = 'float16',
tf_lite_select_ops: bool = True,
):
"""Export converted TF models."""
fake_inputs = self.get_tf_zero_inputs()
logging.info('Creating concrete function...')
concrete_fn = self.infer_tf.get_concrete_function(fake_inputs)
logging.info('Saving TF SavedModel...')
tf.saved_model.save(
self, os.path.join(workdir, 'savedmodel'), signatures=concrete_fn
)
with tf.io.gfile.GFile(
os.path.join(workdir, 'savedmodel', 'ckpt.txt'), 'w'
) as f:
f.write(f'train_state.step: {train_step}\n')
logging.info('Writing class lists...')
if class_lists is not None:
for key, class_list in class_lists.items():
with tf.io.gfile.GFile(os.path.join(workdir, f'{key}.csv'), 'w') as f:
# NOTE: Although the namespace is written to the file, there is no
# guarantee that the class list will still be compatible with the
# namespace if the latter gets updated.
f.write(class_list.to_csv())
if not export_tf_lite:
logging.info('Skipping TFLite export.')
logging.info('Export complete.')
return
# Export TFLite model.
logging.info('Converting to TFLite...')
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[concrete_fn], self
)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
if tf_lite_dtype == 'float16':
converter.target_spec.supported_types = [tf.float16]
elif tf_lite_dtype == 'float32':
converter.target_spec.supported_types = [tf.float32]
elif tf_lite_dtype == 'auto':
# Note that the default with optimizations is int8, which requires further
# tuning.
pass
else:
raise ValueError(f'Unsupported dtype: {tf_lite_dtype}')
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
]
if tf_lite_select_ops:
converter.target_spec.supported_ops += [tf.lite.OpsSet.SELECT_TF_OPS]
tflite_float_model = converter.convert()
if not tf.io.gfile.exists(workdir):
tf.io.gfile.makedirs(workdir)
with tf.io.gfile.GFile(os.path.join(workdir, 'model.tflite'), 'wb') as f:
f.write(tflite_float_model)
with tf.io.gfile.GFile(os.path.join(workdir, 'tflite_ckpt.txt'), 'w') as f:
f.write(f'train_state.step: {train_step}\n')
logging.info('Export complete.')
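# A minimal usage sketch (illustrative only): the toy `infer_fn`, its parameters,
# and the '/tmp/chirp_export' workdir below are assumptions for demonstration,
# not part of this module. It wraps a trivial JAX function whose batch and time
# axes are polymorphic, then exports a SavedModel while skipping the TFLite
# conversion step.
def _example_export():
  from jax import numpy as jnp  # pylint: disable=g-import-not-at-top

  def infer_fn(audio, params):
    # Toy "model": scale the input audio by a single learned scalar.
    return audio * params['scale']

  params = {'scale': jnp.ones([])}
  wrapper = Jax2TfModelWrapper(infer_fn, params, input_shape=[None, None])
  wrapper.export_converted_model(
      '/tmp/chirp_export', train_step=0, export_tf_lite=False
  )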
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Signal processing operations.
Ports from `tf.signal`.
"""
import functools
import jax
from jax import lax
from jax import numpy as jnp
_MEL_HIGH_FREQUENCY_Q = 1127.0
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
def hertz_to_mel(frequencies_hertz: jnp.ndarray) -> jnp.ndarray:
"""Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.
Args:
frequencies_hertz: An array of frequencies in Hertz.
Returns:
An array of the same shape and type of `frequencies_hertz` containing
frequencies in the mel scale.
"""
return _MEL_HIGH_FREQUENCY_Q * jnp.log1p(
frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ
)
def mel_to_hertz(frequencies_mel: jnp.ndarray) -> jnp.ndarray:
"""Converts frequencies in `frequencies_mel` in the mel scale to Hertz.
Args:
frequencies_mel: An array of frequencies in the mel scale.
Returns:
An array of the same shape and type of `frequencies_mel` containing
frequencies in Hertz.
"""
return _MEL_BREAK_FREQUENCY_HERTZ * (
jnp.expm1(frequencies_mel / _MEL_HIGH_FREQUENCY_Q)
)
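# A small sanity-check sketch (illustrative, not part of the ported API): the
# two conversions above are inverses of each other up to float tolerance.
def _example_mel_roundtrip():
  freqs = jnp.array([125.0, 440.0, 3800.0])
  assert jnp.allclose(mel_to_hertz(hertz_to_mel(freqs)), freqs, rtol=1e-5)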
@functools.partial(jax.jit, static_argnums=(0, 1))
def linear_to_mel_weight_matrix(
num_mel_bins: int = 20,
num_spectrogram_bins: int = 129,
sample_rate: int = 8000,
lower_edge_hertz: float = 125.0,
upper_edge_hertz: float = 3800.0,
) -> jnp.ndarray:
"""Returns a matrix to warp linear scale spectrograms to the mel scale.
A port of tf.signal.linear_to_mel_weight_matrix.
Args:
num_mel_bins: How many bands in the resulting mel spectrum.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be `fft_size // 2 + 1`, i.e. the spectrogram
only contains the nonredundant FFT bins.
sample_rate: Samples per second of the input signal used to create the
spectrogram. Used to figure out the frequencies corresponding to each
spectrogram bin, which dictates how they are mapped into the mel scale.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An array of shape `(num_spectrogram_bins, num_mel_bins)`.
"""
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = jnp.linspace(0.0, nyquist_hertz, num_spectrogram_bins)[
bands_to_zero:
]
spectrogram_bins_mel = hertz_to_mel(linear_frequencies)[:, jnp.newaxis]
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = jnp.linspace(
hertz_to_mel(lower_edge_hertz), # pytype: disable=wrong-arg-types # jax-ndarray
hertz_to_mel(upper_edge_hertz), # pytype: disable=wrong-arg-types # jax-ndarray
num_mel_bins + 2,
)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel = band_edges_mel[jnp.newaxis, :-2]
center_mel = band_edges_mel[jnp.newaxis, 1:-1]
upper_edge_mel = band_edges_mel[jnp.newaxis, 2:]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel
)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel
)
# Intersect the line segments with each other and zero.
mel_weights_matrix = jnp.maximum(0.0, jnp.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
return jnp.pad(mel_weights_matrix, ((bands_to_zero, 0), (0, 0)))
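# Usage sketch (illustrative values): warp a magnitude spectrogram with 129
# frequency bins into 64 mel bands via a single matrix multiplication.
def _example_mel_warp():
  magnitude_spectrogram = jnp.ones([10, 129])  # [num_frames, num_bins]
  mel_matrix = linear_to_mel_weight_matrix(64, 129, 16000)
  mel_spectrogram = magnitude_spectrogram @ mel_matrix
  assert mel_spectrogram.shape == (10, 64)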
def frame(
signal: jnp.ndarray,
frame_length: int,
frame_step: int,
pad_end: bool = False,
pad_value: float = 0.0, # pylint: disable=unused-argument
axis: int = -1,
) -> jnp.ndarray:
"""Split a spectrogram into multiple bands.
JAX version of `tf.signal.frame`.
Args:
signal: A `(..., samples, ...)` array. Rank must be at least 1.
frame_length: The frame length in samples.
frame_step: The frame hop size in samples.
pad_end: Whether to pad the end of `signal` with `pad_value`.
pad_value: A value to use where the input signal does not exist when
`pad_end` is True.
axis: Indicating the axis to frame. Defaults to the last axis. Supports
negative values for indexing from the end.
Returns:
An array of frames, size `(..., num_frames, frame_length, ...)`.
"""
axis = axis % signal.ndim
remainder = (signal.shape[axis] - frame_length) % frame_step
if pad_end and remainder:
no_pad = ((0, 0),)
zero_pad = ((0, remainder),)
pad_width = no_pad * axis + zero_pad + no_pad * (signal.ndim - axis - 1)
signal = jnp.pad(signal, pad_width, constant_values=pad_value)
num_frames = (signal.shape[axis] - frame_length) // frame_step + 1
start_indices = (jnp.arange(num_frames) * frame_step)[:, None]
# The axis not in offset_dims is where the frames will be put
offset_dims = tuple(range(axis)) + tuple(range(axis + 1, signal.ndim + 1))
dimension_numbers = lax.GatherDimensionNumbers(
offset_dims=offset_dims, collapsed_slice_dims=(), start_index_map=(axis,)
)
slice_sizes = signal.shape[:axis] + (frame_length,) + signal.shape[axis + 1 :]
frames = lax.gather(signal, start_indices, dimension_numbers, slice_sizes)
return frames
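# Usage sketch (illustrative values): split one second of 16 kHz audio into
# 25 ms frames with a 10 ms hop along the last axis.
def _example_frame():
  audio = jnp.zeros([16000])
  frames = frame(audio, frame_length=400, frame_step=160)
  # (16000 - 400) // 160 + 1 = 98 frames of 400 samples each.
  assert frames.shape == (98, 400)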
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022 The Chirp Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chirp project."""
__version__ = "0.1.0"
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio utilities.
General utilities for processing audio and spectrograms.
"""
import concurrent
import functools
import logging
import os
import tempfile
from typing import Generator, Sequence
import warnings
from chirp import path_utils
from chirp import signal
from etils import epath
from jax import lax
from jax import numpy as jnp
from jax import random
from jax import scipy as jsp
import librosa
import numpy as np
import requests
from scipy import signal as scipy_signal
import soundfile
import tensorflow as tf
_WINDOW_FNS = {
'hann': tf.signal.hann_window,
'hamming': tf.signal.hamming_window,
}
_BOUNDARY_TO_PADDING_MODE = {'zeros': 'CONSTANT'}
def load_audio(
path: epath.PathLike, target_sample_rate: int, **kwargs
) -> jnp.ndarray:
"""Load a general audio resource."""
path = os.fspath(path)
if path.startswith('xc'):
return load_xc_audio(path, target_sample_rate)
elif path.startswith('http'):
return load_url_audio(path, target_sample_rate)
else:
return load_audio_file(path, target_sample_rate, **kwargs)
def load_audio_file(
filepath: str | epath.Path,
target_sample_rate: int,
resampling_type: str = 'polyphase',
) -> jnp.ndarray:
"""Read an audio file and resample it using librosa."""
filepath = epath.Path(filepath)
if target_sample_rate <= 0:
# Use the native sample rate.
target_sample_rate = None
with tempfile.NamedTemporaryFile(
mode='w+b', suffix=os.path.splitext(filepath)[-1]
) as f:
with filepath.open('rb') as sf:
f.write(sf.read())
# librosa outputs lots of warnings which we can safely ignore when
# processing all Xeno-Canto files and PySoundFile is unavailable.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
audio, _ = librosa.load(
f.name,
sr=target_sample_rate,
res_type=resampling_type,
)
return audio
def load_audio_window_soundfile(
filepath: str, offset_s: float, sample_rate: int, window_size_s: float
) -> jnp.ndarray:
"""Load an audio window using Soundfile.
Args:
filepath: Path to audio file.
offset_s: Read offset within the file.
sample_rate: Sample rate for returned audio.
window_size_s: Length of audio to read. Reads all if <0.
Returns:
Numpy array of loaded audio.
"""
with epath.Path(filepath).open('rb') as f:
sf = soundfile.SoundFile(f)
if offset_s > 0:
offset = int(offset_s * sf.samplerate)
sf.seek(offset)
if window_size_s < 0:
a = sf.read()
else:
window_size = int(window_size_s * sf.samplerate)
a = sf.read(window_size)
if len(a.shape) == 2:
# Downstream ops expect mono audio, so reduce to mono.
a = a[:, 0]
if sample_rate > 0:
a = librosa.resample(
y=a, orig_sr=sf.samplerate, target_sr=sample_rate, res_type='polyphase'
)
return a
def load_audio_window(
filepath: str, offset_s: float, sample_rate: int, window_size_s: float
) -> jnp.ndarray:
"""Load a slice of audio from a file, hopefully efficiently."""
  # TODO(tomdenton): Find a reliable way to load a flac audio window.
# If a flac file has the incorrect length in its header, seeking past the
# end of the file causes the system to hang. This is a bad enough outcome
# that we don't risk it.
try:
return load_audio_window_soundfile(
filepath, offset_s, sample_rate, window_size_s
)
except soundfile.LibsndfileError:
logging.warning('Failed to load audio with libsndfile: %s', filepath)
# This fail-over is much slower but more reliable; the entire audio file
    # is loaded (and possibly resampled) and then we extract the target audio.
audio = load_audio(filepath, sample_rate)
offset = int(offset_s * sample_rate)
window_size = int(window_size_s * sample_rate)
return audio[offset : offset + window_size]
def multi_load_audio_window(
filepaths: Sequence[str],
offsets: Sequence[int] | None,
sample_rate: int,
window_size_s: float,
max_workers: int = 5,
) -> Generator[np.ndarray, None, None]:
"""Generator for loading audio windows in parallel.
Note that audio is returned in the same order as the filepaths.
Also, this ultimately relies on soundfile, which can be buggy in some cases.
Args:
filepaths: Paths to audio to load.
offsets: Read offset in seconds for each file, or None if no offsets are
needed.
sample_rate: Sample rate for returned audio.
window_size_s: Window length to read from each file. Set <0 to read all.
max_workers: Number of threads to allocate.
Yields:
Loaded audio windows.
"""
loader = functools.partial(
load_audio_window, sample_rate=sample_rate, window_size_s=window_size_s
)
if offsets is None:
offsets = [0.0 for _ in filepaths]
  # ThreadPoolExecutor works well here despite the GIL, since audio loading is
  # dominated by file I/O and decoding in C extensions.
with concurrent.futures.ThreadPoolExecutor(
max_workers=max_workers
) as executor:
futures = []
for fp, offset in zip(filepaths, offsets):
future = executor.submit(loader, offset_s=offset, filepath=fp)
futures.append(future)
while futures:
yield futures.pop(0).result()
def load_xc_audio(xc_id: str, sample_rate: int) -> jnp.ndarray:
"""Load audio from Xeno-Canto given an ID like 'xc12345'."""
if not xc_id.startswith('xc'):
raise ValueError(f'XenoCanto id {xc_id} does not start with "xc".')
xc_id = xc_id[2:]
try:
int(xc_id)
except ValueError as exc:
raise ValueError(f'XenoCanto id xc{xc_id} is not an integer.') from exc
session = requests.Session()
session.mount(
'https://',
requests.adapters.HTTPAdapter(
max_retries=requests.adapters.Retry(total=5, backoff_factor=0.1)
),
)
url = f'https://xeno-canto.org/{xc_id}/download'
try:
data = session.get(url=url).content
except requests.exceptions.RequestException as e:
raise requests.exceptions.RequestException(
f'Failed to load audio from Xeno-Canto {xc_id}'
) from e
with tempfile.NamedTemporaryFile(suffix='.mp3', mode='wb') as f:
f.write(data)
f.flush()
audio = load_audio_file(f.name, target_sample_rate=sample_rate)
return audio
def load_url_audio(url: str, sample_rate: int) -> jnp.ndarray:
"""Load audio from a URL."""
data = requests.get(url).content
with tempfile.NamedTemporaryFile(mode='wb') as f:
f.write(data)
f.flush()
audio = load_audio_file(f.name, target_sample_rate=sample_rate)
return audio
# pylint: disable=g-doc-return-or-yield,g-doc-args,unused-argument
def stft_tf(
x,
fs=1.0,
window='hann',
nperseg=256,
noverlap=None,
nfft=None,
detrend=False,
return_onesided=True,
boundary='zeros',
padded=True,
) -> tf.Tensor:
"""Computes the Short Time Fourier Transform (STFT).
This is a port of `scipy.signal.stft` to TensorFlow. This allows us to exactly
reproduce the frontend in the data preprocessing pipeline.
"""
# Use SciPy's original variable names
# pylint: disable=invalid-name
nfft = nperseg if nfft is None else nfft
noverlap = nperseg // 2 if noverlap is None else noverlap
nstep = nperseg - noverlap
if x.dtype.is_complex:
raise ValueError('tf.signal.stft only supports real signals')
  if window not in _WINDOW_FNS:
    raise ValueError(
        f'tf.signal.stft does not support window {window}; '
        f'supported windows are {", ".join(_WINDOW_FNS)}'
    )
  if boundary is not None and boundary not in _BOUNDARY_TO_PADDING_MODE:
    raise ValueError(
        'tf.signal.stft only supports boundary modes None and '
        + ', '.join(_BOUNDARY_TO_PADDING_MODE)
    )
if detrend:
raise ValueError('tf.signal.stft only supports detrend = False')
if not return_onesided:
raise ValueError('tf.signal.stft only supports return_onesided = True')
input_length = tf.shape(x)[-1]
# Put the time axis at the end and then put it back
if boundary in _BOUNDARY_TO_PADDING_MODE:
mode = _BOUNDARY_TO_PADDING_MODE[boundary]
paddings = tf.concat(
[
tf.repeat([[0, 0]], tf.rank(x) - 1, axis=0),
[[nperseg // 2, nperseg // 2]],
],
axis=0,
)
x = tf.pad(x, paddings, mode)
input_length += nperseg
Zxx = tf.signal.stft(
x,
frame_length=nperseg,
frame_step=nstep,
fft_length=nfft,
window_fn=_WINDOW_FNS[window],
pad_end=padded,
)
Zxx = tf.linalg.matrix_transpose(Zxx)
# TODO(bartvm): tf.signal.frame seems to have a bug which sometimes adds
# too many frames, so we strip those if necessary
nadd = (-(input_length - nperseg) % nstep) % nperseg if padded else 0
length = -((input_length + nadd - nperseg + 1) // (noverlap - nperseg))
Zxx = Zxx[..., :length]
# Scaling
Zxx *= 2 / nperseg
return Zxx
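# Usage sketch (illustrative values): compute the STFT of one second of noise.
# The output layout matches scipy.signal.stft (frequency bins first, frames
# last), with nfft // 2 + 1 = 129 bins for the default nfft = nperseg = 256.
def _example_stft_tf():
  audio = tf.random.normal([32000])
  spectrogram = stft_tf(audio, nperseg=256, noverlap=128)
  assert spectrogram.shape[0] == 129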
def ema(
xs: jnp.ndarray,
gamma: float | jnp.ndarray,
initial_state: jnp.ndarray | None = None,
axis: int = 0,
) -> jnp.ndarray:
"""Computes the exponential moving average along one axis."""
# Bring target axis to front.
xs = jnp.swapaxes(xs, 0, axis)
if initial_state is None:
initial_state = xs[0]
def ema_fn(state, x):
new_state = gamma * x + (1.0 - gamma) * state
return new_state, new_state
# NOTE: For small batches this is potentially an expensive and inefficient
# computation, as it requires a loop over potentially long sequences with
# minimal computation each step. This could be addressed by partially
# unrolling the loop or by a truncated EMA using convolutions.
final_state, ys = lax.scan(ema_fn, init=initial_state, xs=xs)
ys = jnp.swapaxes(ys, 0, axis)
return ys, final_state # pytype: disable=bad-return-type # jax-ndarray
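# Usage sketch (illustrative values): smooth a [time, features] sequence; with
# no explicit initial state, the first element seeds the filter.
def _example_ema():
  xs = jnp.ones([100, 8])
  ys, final_state = ema(xs, gamma=0.1)
  assert ys.shape == (100, 8) and final_state.shape == (8,)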
def ema_conv1d(
xs: jnp.ndarray, gamma: float | jnp.ndarray, conv_width: int
) -> jnp.ndarray:
"""Uses a depth-wise conv1d to approximate the EMA operation."""
if conv_width == -1:
conv_width = xs.shape[1]
left_pad = jnp.repeat(xs[:, 0:1], conv_width - 1, axis=1)
padded_inp = jnp.concatenate([left_pad, xs], axis=1)
kernel = jnp.array(
[(1.0 - gamma) ** k for k in range(conv_width - 1)] + [gamma]
).astype(xs.dtype)
if isinstance(gamma, float) or gamma.ndim == 0:
kernel = kernel[jnp.newaxis, jnp.newaxis, :]
kernel = jnp.repeat(kernel, xs.shape[-1], axis=1)
else:
kernel = jnp.swapaxes(kernel, 0, 1)
kernel = kernel[jnp.newaxis, :, :]
outp = lax.conv_general_dilated(
padded_inp,
kernel,
(1,),
padding='VALID',
feature_group_count=xs.shape[-1],
dimension_numbers=('NTC', 'IOT', 'NTC'),
)
return outp
def pcen(
filterbank_energy: jnp.ndarray,
smoothing_coef: float = 0.05638943879134889,
gain: float = 0.98,
bias: float = 2.0,
root: float = 2.0,
eps: float = 1e-6,
state: jnp.ndarray | None = None,
conv_width: int = 0,
) -> tuple[jnp.ndarray, jnp.ndarray | None]:
"""Per-Channel Energy Normalization (PCEN).
See https://arxiv.org/abs/1607.05666 for details.
Args:
filterbank_energy: A [..., num_frames, num_frequency_bins] array of
power-domain filterbank energies. If a scalar, we return 0.0 as the
spectral floor value (for padding purposes).
smoothing_coef: The coefficient of the IIR smoothing filter (scalar or for
each bin). Referred to as s in the paper.
gain: The normalization coefficient (scalar or for each bin). Alpha in the
paper.
bias: Constant stabilizer offset for the root compression (scalar or for
each bin). Delta in the paper.
root: Root compression coefficient (scalar or for each bin). The reciprocal
of r in the paper.
eps: Epsilon floor value to prevent division by zero.
state: Optional state produced by a previous call to fixed_pcen. Used in
streaming mode.
conv_width: If non-zero, use a convolutional approximation of the EMA, with
kernel size indicated here. If set to -1, the sequence length will be used
as the kernel size.
Returns:
Filterbank energies with PCEN compression applied (type and shape are
unchanged). Also returns a state tensor to be used in the next call to
fixed_pcen.
"""
if filterbank_energy.ndim < 2:
raise ValueError('Filterbank energy must have rank >= 2.')
for name, arr, max_rank in (
('gain', gain, 1),
('bias', bias, 1),
('root', root, 1),
('smoothing_coef', smoothing_coef, 1),
('eps', eps, 0),
):
if jnp.ndim(arr) > max_rank:
raise ValueError(f'{name} must have rank at most {max_rank}')
if conv_width == 0:
smoothed_energy, filter_state = ema(
filterbank_energy, smoothing_coef, initial_state=state, axis=-2
)
elif len(filterbank_energy.shape) == 3:
smoothed_energy = ema_conv1d(filterbank_energy, smoothing_coef, conv_width)
filter_state = None
else:
raise ValueError(
'Can only apply convolutional EMA to inputs with shape [B, T, D].'
)
inv_root = 1.0 / root
pcen_output = (
filterbank_energy / (eps + smoothed_energy) ** gain + bias
) ** inv_root - bias**inv_root
return pcen_output, filter_state
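# Usage sketch (illustrative values): apply PCEN with the default parameters to
# a toy batch of [batch, frames, mel bins] filterbank energies, using the
# sequential (scan-based) EMA.
def _example_pcen():
  energies = jnp.ones([2, 100, 64])
  compressed, _ = pcen(energies)
  assert compressed.shape == energies.shape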
def log_scale(
x: jnp.ndarray, floor: float, offset: float, scalar: float
) -> jnp.ndarray:
"""Apply log-scaling.
Args:
x: The data to scale.
floor: Clip input values below this value. This avoids taking the logarithm
of negative or very small numbers.
offset: Shift all values by this amount, after clipping. This too avoids
taking the logarithm of negative or very small numbers.
scalar: Scale the output by this value.
Returns:
The log-scaled data.
"""
x = jnp.log(jnp.maximum(x, floor) + offset)
return scalar * x
def random_low_pass_filter(
key: jnp.ndarray,
melspec: jnp.ndarray,
time_axis: int = -2,
channel_axis: int = -1,
min_slope: float = 2.0,
max_slope: float = 8.0,
min_offset: float = 0.0,
max_offset: float = 5.0,
) -> jnp.ndarray:
"""Applies a random low-pass rolloff frequency envelope.
Args:
key: A random key used to sample a random slope and offset.
melspec: A (batch) of mel-spectrograms, assumed to have frequencies on the
last axis.
time_axis: The axis representing time.
channel_axis: The axis representing the different frequencies.
min_slope: The minimum slope of the low-pass filter.
max_slope: The maximum slope of the low-pass filter.
min_offset: The minimum offset of the low-pass filter.
    max_offset: The maximum offset of the low-pass filter.
Returns:
The mel-spectrogram with a random low-pass filter applied, same size as the
input.
"""
shape = list(melspec.shape)
shape[time_axis] = shape[channel_axis] = 1
slope_key, offset_key = random.split(key)
slope = random.uniform(slope_key, shape, minval=min_slope, maxval=max_slope)
offset = random.uniform(
offset_key, shape, minval=min_offset, maxval=max_offset
)
shape = [1] * melspec.ndim
shape[channel_axis] = melspec.shape[channel_axis]
xspace = jnp.linspace(0.0, 1.0, melspec.shape[channel_axis])
xspace = jnp.reshape(xspace, shape)
envelope = 1 - 0.5 * (jnp.tanh(slope * (xspace - 0.5) - offset) + 1)
return melspec * envelope
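# Usage sketch (illustrative values): attenuate the high mel bands of a toy
# [batch, time, mel bins] spectrogram with a randomly sampled roll-off.
def _example_random_low_pass():
  key = random.PRNGKey(0)
  melspec = jnp.ones([2, 500, 160])
  filtered = random_low_pass_filter(key, melspec)
  assert filtered.shape == melspec.shape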
def apply_mixture_denoising(
melspec: jnp.ndarray, threshold: float
) -> jnp.ndarray:
"""Denoises the melspectrogram using an estimated Gaussian noise distribution.
Forms a noise estimate by a) estimating mean+std, b) removing extreme
values, c) re-estimating mean+std for the noise, and then d) classifying
values in the spectrogram as 'signal' or 'noise' based on likelihood under
the revised estimate. We then apply a mask to return the signal values.
Args:
melspec: input melspectrogram of rank 2 (time, frequency).
    threshold: z-score threshold for separating signal from noise. On the first
pass, we use 2 * threshold, and on the second pass we use threshold
directly.
Returns:
The denoised melspectrogram.
"""
x = melspec
feature_mean = jnp.mean(x, axis=0, keepdims=True)
feature_std = jnp.std(x, axis=0, keepdims=True)
is_noise = (x - feature_mean) < 2 * threshold * feature_std
noise_counts = jnp.sum(is_noise.astype(x.dtype), axis=0, keepdims=True)
noise_mean = jnp.sum(x * is_noise, axis=0, keepdims=True) / (noise_counts + 1)
noise_var = jnp.sum(
is_noise * jnp.square(x - noise_mean), axis=0, keepdims=True
)
noise_std = jnp.sqrt(noise_var / (noise_counts + 1))
# Recompute signal/noise separation.
demeaned = x - noise_mean
is_signal = demeaned >= threshold * noise_std
is_signal = is_signal.astype(x.dtype)
is_noise = 1.0 - is_signal
signal_part = is_signal * x
noise_part = is_noise * noise_mean
reconstructed = signal_part + noise_part - noise_mean
return reconstructed
def pad_to_length_if_shorter(audio: jnp.ndarray, target_length: int):
"""Wraps the audio sequence if it's shorter than the target length.
Args:
audio: input audio sequence of shape [num_samples].
target_length: target sequence length.
Returns:
The audio sequence, padded through wrapping (if it's shorter than the target
length).
"""
if audio.shape[0] < target_length:
missing = target_length - audio.shape[0]
pad_left = missing // 2
pad_right = missing - pad_left
audio = jnp.pad(audio, [[pad_left, pad_right]], mode='wrap')
return audio
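# Usage sketch (illustrative values): wrap-pad a short clip to one second of
# audio at 16 kHz.
def _example_wrap_pad():
  audio = jnp.ones([12000])
  padded = pad_to_length_if_shorter(audio, 16000)
  assert padded.shape == (16000,)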
def slice_peaked_audio(
audio: jnp.ndarray,
sample_rate_hz: int,
interval_length_s: float = 6.0,
max_intervals: int = 5,
) -> jnp.ndarray:
"""Extracts audio intervals from melspec peaks.
Args:
audio: input audio sequence of shape [num_samples].
sample_rate_hz: sample rate of the audio sequence (Hz).
interval_length_s: length each extracted audio interval.
max_intervals: upper-bound on the number of audio intervals to extract.
Returns:
Sequence of extracted audio intervals, each of shape
[sample_rate_hz * interval_length_s].
"""
target_length = int(sample_rate_hz * interval_length_s)
# Wrap audio to the target length if it's shorter than that.
audio = pad_to_length_if_shorter(audio, target_length)
peaks = find_peaks_from_audio(audio, sample_rate_hz, max_intervals)
left_shift = target_length // 2
right_shift = target_length - left_shift
# Ensure that the peak locations are such that
# `audio[peak - left_shift: peak + right_shift]` is a non-truncated slice.
peaks = jnp.clip(peaks, left_shift, audio.shape[0] - right_shift)
# As a result, it's possible that some (start, stop) pairs become identical;
# eliminate duplicates.
start_stop = jnp.unique(
jnp.stack([peaks - left_shift, peaks + right_shift], axis=-1), axis=0
)
return start_stop
def find_peaks_from_audio(
audio: jnp.ndarray,
sample_rate_hz: int,
max_peaks: int,
num_mel_bins: int = 160,
) -> jnp.ndarray:
"""Construct melspec and find peaks.
Args:
audio: input audio sequence of shape [num_samples].
sample_rate_hz: sample rate of the audio sequence (Hz).
max_peaks: upper-bound on the number of peaks to return.
num_mel_bins: The number of mel-spectrogram bins to use.
Returns:
Sequence of scalar indices for the peaks found in the audio sequence.
"""
melspec_rate_hz = 100
frame_length_s = 0.08
nperseg = int(frame_length_s * sample_rate_hz)
nstep = sample_rate_hz // melspec_rate_hz
_, _, spectrogram = jsp.signal.stft(
audio, nperseg=nperseg, noverlap=nperseg - nstep
)
# apply_mixture_denoising/find_peaks_from_melspec expect frequency axis last
spectrogram = jnp.swapaxes(spectrogram, -1, -2)
magnitude_spectrogram = jnp.abs(spectrogram)
# For backwards compatibility, we scale the spectrogram here the same way
# that the TF spectrogram is scaled. If we don't, the values are too small and
# end up being clipped by the default configuration of the logarithmic scaling
magnitude_spectrogram *= nperseg / 2
# Construct mel-spectrogram
num_spectrogram_bins = magnitude_spectrogram.shape[-1]
mel_matrix = signal.linear_to_mel_weight_matrix(
num_mel_bins,
num_spectrogram_bins,
sample_rate_hz,
lower_edge_hertz=60,
upper_edge_hertz=10_000,
)
mel_spectrograms = magnitude_spectrogram @ mel_matrix
melspec = log_scale(mel_spectrograms, floor=1e-2, offset=0.0, scalar=0.1)
melspec = apply_mixture_denoising(melspec, 0.75)
peaks = find_peaks_from_melspec(melspec, melspec_rate_hz)
peak_energies = jnp.sum(melspec, axis=1)[peaks]
t_mel_to_t_au = lambda tm: 1.0 * tm * sample_rate_hz / melspec_rate_hz
peaks = [t_mel_to_t_au(p) for p in peaks]
peak_set = sorted(zip(peak_energies, peaks), reverse=True)
if max_peaks > 0 and len(peaks) > max_peaks:
peak_set = peak_set[:max_peaks]
return jnp.asarray([p[1] for p in peak_set], dtype=jnp.int32)
def find_peaks_from_melspec(melspec: jnp.ndarray, stft_fps: int) -> jnp.ndarray:
"""Locate peaks inside signal of summed spectral magnitudes.
Args:
melspec: input melspectrogram of rank 2 (time, frequency).
stft_fps: Number of summed magnitude bins per second. Calculated from the
original sample of the waveform.
Returns:
A list of filtered peak indices.
"""
summed_spectral_magnitudes = jnp.sum(melspec, axis=1)
threshold = jnp.mean(summed_spectral_magnitudes) * 1.5
min_width = int(round(0.5 * stft_fps))
max_width = int(round(2 * stft_fps))
width_step_size = int(round((max_width - min_width) / 10))
peaks = scipy_signal.find_peaks_cwt(
summed_spectral_magnitudes,
jnp.arange(min_width, max_width, width_step_size),
)
margin_frames = int(round(0.3 * stft_fps))
start_stop = jnp.clip(
jnp.stack([peaks - margin_frames, peaks + margin_frames], axis=-1),
0,
summed_spectral_magnitudes.shape[0],
)
peaks = [
p
for p, (a, b) in zip(peaks, start_stop)
if summed_spectral_magnitudes[a:b].max() >= threshold
]
return jnp.asarray(peaks, dtype=jnp.int32)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to be able to construct Python objects from configurations.
First use `callable_config` to construct a `ConfigDict` as follows:
config.foo = callable_config("my_module.Foo", bar=4)
This will construct a `ConfigDict` that looks as follows:
ConfigDict({
'foo': ConfigDict({
"__constructor": "my_module.Foo",
"__config": ConfigDict({
"bar": 4
})
})
})
This configuration dictionary can be serialized and the keys can even be
overridden on the command line. The objects can be constructed by calling
`parse_config(config, {"my_module": my_module})` which returns the following:
ConfigDict({
'foo': my_module.Foo(bar=4)
})
"""
from typing import Any
from ml_collections import config_dict
_CALLABLE = "__constructor"
_KWARGS = "__config"
_OBJECT = "__object"
def callable_config(
callable_: str, *args: config_dict.ConfigDict, **kwargs: Any
) -> config_dict.ConfigDict:
"""Create a configuration for constructing a Python object.
Args:
callable_: A string that resolves to a Python object to call in order to
construct the configuration value.
*args: Configuration dictionaries containing keyword arguments to pass to
the callable.
**kwargs: The keyword arguments to pass to the callable.
Returns:
A ConfigDict object containing the callable and its arguments. This
dictionary can later be parsed by the `parse_config` function.
"""
kwargs = config_dict.ConfigDict(kwargs)
for arg in args:
kwargs.update(arg)
return config_dict.ConfigDict({_CALLABLE: callable_, _KWARGS: kwargs})
def object_config(object_: str) -> config_dict.ConfigDict:
"""Create a configuration for a Python object.
Args:
    object_: A string that resolves to a Python object.
Returns:
A ConfigDict object containing the name of the object. This dictionary can
later be parsed by the `parse_config` function.
"""
return config_dict.ConfigDict({_OBJECT: object_})
def either(object_a: Any, object_b: Any, return_a: bool) -> Any | None:
"""Returns returns object_a if `predicate` is True and object_b otherwise.
While trivial in appearance, this function can be used in conjunction with
`callable_config` to implement control flow with a boolean
`config_dict.FieldReference`:
config.some_attribute = callable_config(
'either',
object_a=callable_config(...),
object_b=callable_config(...),
return_a=config.get_ref('some_boolean_field_reference')
)
Args:
object_a: The first object to (maybe) return.
object_b: The second object to (maybe) return.
return_a: Whether to return object_a (True) or object_b (False).
Returns:
object_a or object_b, depending on `return_a`.
"""
return object_a if return_a else object_b
def get_melspec_defaults(config: config_dict.ConfigDict) -> tuple[Any, Any]:
"""Determines the default melspectrogram kernel size and nftt values.
Args:
config: The base ConfigDict. Expected to contain 'sample_rate_hz' and
'frame_rate_hz' attributes.
Returns:
    The default kernel size and nfft values.
Raises:
ValueError, if the default kernel size is determined to be larger than 4096.
    If this is the case, the config is expected to define a default nfft value
directly.
"""
melspec_stride = config.get_ref("sample_rate_hz") // config.get_ref(
"frame_rate_hz"
)
# This gives 50% overlap, which is optimal for the Hanning window.
# See Heinz, et al: "Spectrum and spectral density estimation by the
# Discrete Fourier transform (DFT), including a comprehensive list of window
# functions and some new flat-top windows", Section 10.
# https://holometer.fnal.gov/GH_FFT.pdf
# In brief, 50% overlap gives no amplitude distortion, and minimizes the
# overlap correlation. Longer windows average over longer time periods,
# losing signal locality, and are also more expensive to compute.
melspec_kernel_size = 2 * melspec_stride
# nfft is preferably the smallest power of two containing the kernel.
# This yields a no-nonsense FFT, implemented everywhere.
# Note that we can"t use fancy math like ceil(log2(ks)) on field references...
if melspec_kernel_size <= 256:
melspec_nfft = 256
elif 256 < melspec_kernel_size <= 512:
melspec_nfft = 512
elif 512 < melspec_kernel_size <= 1024:
melspec_nfft = 1024
elif 1024 < melspec_kernel_size <= 2048:
melspec_nfft = 2048
elif 2048 < melspec_kernel_size <= 4096:
melspec_nfft = 4096
else:
raise ValueError("Large kernel {kernel_size}; please define nfft.")
return melspec_kernel_size, melspec_nfft
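# Usage sketch (illustrative values): with a 32 kHz sample rate and a 100 Hz
# frame rate, the stride is 320 samples, so the kernel covers 640 samples and
# the smallest power of two containing it is 1024.
def _example_melspec_defaults():
  config = config_dict.ConfigDict()
  config.sample_rate_hz = 32000
  config.frame_rate_hz = 100
  kernel_size, nfft = get_melspec_defaults(config)
  # kernel_size resolves to 640 (a field reference tied to config); nfft is
  # the plain integer 1024.
  return kernel_size, nfft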
def parse_config(
config: config_dict.ConfigDict, globals_: dict[str, Any]
) -> config_dict.ConfigDict:
"""Parse a configuration.
This handles nested configurations, as long as the values are callables
created using `callable_config`, or if the values are lists or tuples
containing elements created the same way.
Args:
config: A configuration object, potentially containing callables which were
created using `callable_config`.
globals_: The dictionary of globals to use to resolve the callable.
Returns:
The parsed configuration dictionary.
"""
def _parse_value(value: config_dict.ConfigDict) -> Any:
if isinstance(value, dict):
value = config_dict.ConfigDict(value)
if isinstance(value, config_dict.ConfigDict):
if set(value.keys()) == {_CALLABLE, _KWARGS}:
return _parse_value(
eval(value[_CALLABLE], globals_)( # pylint: disable=eval-used
**parse_config(value[_KWARGS], globals_)
)
)
elif set(value.keys()) == {_OBJECT}:
return _parse_value(eval(value[_OBJECT], globals_)) # pylint: disable=eval-used
else:
return parse_config(value, globals_)
elif isinstance(value, config_dict.FieldReference):
return value.get()
else:
return value
with config.ignore_type():
for key, value in config.items():
# We purposefully only attempt to parse values inside list and tuple
# instances (and not e.g. namedtuple instances, since optax defines
# GradientTransformation as a namedtuple and we don't want to parse its
# values), which precludes using isinstance(value, (list, tuple)).
if type(value) in (list, tuple):
config[key] = type(value)(_parse_value(v) for v in value)
else:
config[key] = _parse_value(value)
return config
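# Runnable sketch (illustrative; `fractions.Fraction` stands in for a real
# model or pipeline constructor): build a config entry with `callable_config`
# and materialize it with `parse_config`.
def _example_parse_config():
  import fractions  # pylint: disable=g-import-not-at-top

  config = config_dict.ConfigDict()
  config.ratio = callable_config(
      "fractions.Fraction", numerator=1, denominator=3
  )
  parsed = parse_config(config, {"fractions": fractions})
  assert parsed.ratio == fractions.Fraction(1, 3)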
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Path utilities.
General utilities to help with handling paths.
"""
import os
from typing import BinaryIO, TextIO
from etils import epath
def get_absolute_path(relative_path: os.PathLike[str] | str) -> epath.Path:
"""Returns the absolute epath.Path associated with the relative_path.
Args:
relative_path: The relative path (w.r.t. root) to the resource.
Returns:
The absolute path to the resource.
"""
file_path = epath.Path(__file__).parent / relative_path
return file_path
def open_file(relative_path: os.PathLike[str] | str, mode) -> TextIO | BinaryIO:
return open(get_absolute_path(relative_path), mode)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Entry point for project scripts.
This binary provides a common entry point for all project scripts. In order to
be compatible, a project must provide a `run` callable which accepts the
arguments `mode` (e.g., `train`, `eval`, `finetune`), a `config` in the form of
a `ConfigDict`, and a `workdir` where temporary files can be stored. Finally,
the `tf_data_service_address` argument is a string which is empty or contains
the address of the tf.data service dispatcher.
"""
from typing import Protocol, Sequence
from absl import app
from absl import flags
from absl import logging
from chirp import config_utils
from chirp.configs import config_globals
from chirp.train import classifier
from chirp.train import hubert
from chirp.train import mae
from chirp.train import separator
from ml_collections import config_dict
from ml_collections.config_flags import config_flags
import tensorflow as tf
from xmanager import xm # pylint: disable=unused-import
class Run(Protocol):
"""Protocol for entry points of project scripts.
These scripts should aim to include project-specific arguments into the config
argument as much as possible, since updating this interface would require
changing every project that uses this entry point.
"""
def __call__(
self,
mode: str,
config: config_dict.ConfigDict,
workdir: str,
tf_data_service_address: str,
):
...
TARGETS: dict[str, Run] = {
"classifier": classifier.run,
"mae": mae.run,
"hubert": hubert.run,
"separator": separator.run,
}
_CONFIG = config_flags.DEFINE_config_file("config")
_WORKDIR = flags.DEFINE_string(
"workdir", None, "Work unit checkpointing directory."
)
_TARGET = flags.DEFINE_enum(
"target", None, TARGETS.keys(), "The module to run."
)
_MODE = flags.DEFINE_string("mode", None, "The mode to run.")
_TF_DATA_SERVICE_ADDRESS = flags.DEFINE_string(
"tf_data_service_address",
"",
"The dispatcher's address.",
allow_override_cpp=True,
)
flags.mark_flags_as_required(["config", "workdir", "target", "mode"])
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
logging.info(_CONFIG.value)
# We assume that scripts use JAX, so here we prevent TensorFlow from reserving
# all the GPU memory (which leaves nothing for JAX to use).
tf.config.experimental.set_visible_devices([], "GPU")
config = config_utils.parse_config(
_CONFIG.value, config_globals.get_globals()
)
TARGETS[_TARGET.value](
_MODE.value,
config,
_WORKDIR.value,
_TF_DATA_SERVICE_ADDRESS.value,
)
if __name__ == "__main__":
app.run(main)
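# Example invocation (illustrative: the config path and workdir below are
# placeholders to adapt to your setup), assuming this file is importable as
# `chirp.main`:
#   python -m chirp.main \
#     --config=path/to/config.py \
#     --workdir=/tmp/chirp_workdir \
#     --target=classifier \
#     --mode=train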
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and init library for Search Bootstrap projects."""
import dataclasses
from typing import Sequence
from chirp.inference import embed_lib
from chirp.inference import interface
from chirp.inference import models
from chirp.inference import tf_examples
from etils import epath
from ml_collections import config_dict
import tensorflow as tf
@dataclasses.dataclass
class BootstrapState:
"""Union of data and models useful to go from a few examples to a detector."""
config: 'BootstrapConfig'
embedding_model: interface.EmbeddingModel | None = None
embeddings_dataset: tf.data.Dataset | None = None
source_map: dict[str, embed_lib.SourceInfo] | None = None
def __post_init__(self):
if self.embedding_model is None:
self.embedding_model = models.model_class_map()[
self.config.model_key
].from_config(self.config.model_config)
self.create_embeddings_dataset()
self.create_source_map()
def create_embeddings_dataset(self):
"""Create a TF Dataset of the embeddings."""
if self.embeddings_dataset:
return self.embeddings_dataset
ds = tf_examples.create_embeddings_dataset(
self.config.embeddings_path, 'embeddings-*'
)
self.embeddings_dataset = ds
return ds
def create_source_map(self):
"""Map filenames to full filepaths."""
if self.config.audio_globs is None:
raise ValueError('Cannot create source map with no audio globs.')
source_infos = embed_lib.create_source_infos(self.config.audio_globs, 1, -1)
self.source_map = {}
for s in source_infos:
file_id = epath.Path(
*epath.Path(s.filepath).parts[-(self.config.file_id_depth + 1) :]
).as_posix()
dupe = self.source_map.get(file_id)
if dupe:
raise ValueError(
'All base filenames must be unique. '
f'Filename {file_id} appears in both {s.filepath} and {dupe}.'
)
self.source_map[file_id] = s.filepath
@dataclasses.dataclass
class BootstrapConfig:
"""Configuration for Search Bootstrap project."""
# Embeddings dataset info.
embeddings_path: str
# Annotations info.
annotated_path: str
# The following are populated automatically from the embedding config.
embedding_hop_size_s: float | None = None
file_id_depth: int | None = None
audio_globs: Sequence[str] | None = None
model_key: str | None = None
model_config: config_dict.ConfigDict | None = None
@classmethod
def load_from_embedding_config(
cls, embeddings_path: str, annotated_path: str
):
"""Instantiate from a configuration written alongside embeddings."""
embedding_config = embed_lib.load_embedding_config(embeddings_path)
embed_fn_config = embedding_config.embed_fn_config
# Extract the embedding model config from the embedding_config.
if embed_fn_config.model_key == 'separate_embed_model':
# If a separation model was applied, get the embedding model config only.
model_key = 'taxonomy_model_tf'
model_config = embed_fn_config.model_config.taxonomy_model_tf_config
else:
model_key = embed_fn_config.model_key
model_config = embed_fn_config.model_config
return BootstrapConfig(
embeddings_path=embeddings_path,
annotated_path=annotated_path,
model_key=model_key,
model_config=model_config,
embedding_hop_size_s=model_config.hop_size_s,
file_id_depth=embed_fn_config.file_id_depth,
audio_globs=embedding_config.source_file_patterns,
)
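# Usage sketch (illustrative helper; the paths are supplied by the caller):
# build a BootstrapState directly from a directory of precomputed embeddings
# and a directory for labeled annotations.
def _example_bootstrap(
    embeddings_path: str, annotated_path: str
) -> BootstrapState:
  config = BootstrapConfig.load_from_embedding_config(
      embeddings_path=embeddings_path, annotated_path=annotated_path
  )
  return BootstrapState(config)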
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for displaying audio and results in Colab/Jupyter."""
import functools
from typing import Sequence
from chirp import audio_utils
from chirp.models import frontend
from chirp.projects.bootstrap import search
import IPython
from IPython.display import display as ipy_display
import ipywidgets
from librosa import display as librosa_display
import matplotlib.pyplot as plt
import numpy as np
@functools.cache
def get_melspec_layer(sample_rate: int, root=4.0):
"""Creates a melspec layer for easy visualization."""
# Usage: melspec_layer.apply({}, audio)
stride = sample_rate // 100
melspec_layer = frontend.MelSpectrogram(
96,
stride,
4 * stride,
sample_rate,
(60.0, sample_rate / 2.0),
scaling_config=frontend.PCENScalingConfig(root=root, bias=0.0),
)
return melspec_layer
def plot_melspec(
melspec: np.ndarray,
newfig: bool = False,
sample_rate: int = 32000,
frame_rate: int = 100,
**specshow_kwargs,
):
"""Plot a melspectrogram."""
if newfig:
plt.figure(figsize=(12, 5))
librosa_display.specshow(
melspec.T,
sr=sample_rate,
y_axis='mel',
x_axis='time',
hop_length=sample_rate // frame_rate,
cmap='Greys',
**specshow_kwargs,
)
def plot_audio_melspec(
audio: np.ndarray,
sample_rate: int,
newfig: bool = False,
display_audio=True,
):
"""Plot a melspectrogram from audio."""
melspec_layer = get_melspec_layer(sample_rate)
melspec = melspec_layer.apply({}, audio[np.newaxis, :])[0]
plot_melspec(melspec, newfig=newfig, sample_rate=sample_rate, frame_rate=100)
plt.show()
if display_audio:
ipy_display(IPython.display.Audio(audio, rate=sample_rate))
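# Usage sketch (illustrative values): visualize one second of a synthetic
# 1 kHz tone sampled at 32 kHz.
def _example_plot():
  sample_rate = 32000
  t = np.linspace(0.0, 1.0, sample_rate, endpoint=False)
  audio = np.sin(2 * np.pi * 1000.0 * t).astype(np.float32)
  plot_audio_melspec(audio, sample_rate)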
def display_search_results(
results: search.TopKSearchResults,
embedding_sample_rate: int,
source_map: dict[str, str],
window_s: float = 5.0,
checkbox_labels: Sequence[str] = (),
max_workers=5,
):
"""Display search results, and add audio and annotation info to results."""
# Parallel load the audio windows.
filepaths = [source_map[r.filename] for r in results]
offsets = [r.timestamp_offset for r in results]
for rank, (r, result_audio_window) in enumerate(
zip(
results,
audio_utils.multi_load_audio_window(
filepaths, offsets, embedding_sample_rate, window_s, max_workers
),
)
):
plot_audio_melspec(result_audio_window, embedding_sample_rate)
plt.show()
print(f'rank : {rank}')
print(f'source file : {r.filename}')
offset_s = r.timestamp_offset
print(f'offset_s : {offset_s:.2f}')
print(f'score : {(r.score):.2f}')
label_widgets = []
def button_callback(x):
x.value = not x.value
if x.value:
x.button_style = 'success'
else:
x.button_style = ''
for lbl in checkbox_labels:
check = ipywidgets.Button(
description=lbl,
disabled=False,
button_style='',
)
check.value = False
check.on_click(button_callback)
label_widgets.append(check)
ipy_display(check)
# Attach audio and widgets to the SearchResult.
r.audio = result_audio_window
r.label_widgets = label_widgets
print('-' * 80)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for searching an embeddings dataset."""
import collections
import dataclasses
import functools
from typing import Any, Callable, List, Sequence
from chirp.inference import tf_examples
from etils import epath
import numpy as np
from scipy.io import wavfile
import tensorflow as tf
import tqdm
@dataclasses.dataclass
class SearchResult:
"""Container for a single search result."""
# Embedding vector.
embedding: np.ndarray
# Raw score for this result.
score: float
# Score used for sorting the result.
sort_score: float
  # Source file containing the corresponding audio.
filename: str
# Time offset for audio.
timestamp_offset: int
# The following are populated as needed.
audio: np.ndarray | None = None
label_widgets: Sequence[Any] = ()
def __hash__(self):
"""Return an identifier for this result."""
return hash((self.filename, self.timestamp_offset))
@dataclasses.dataclass
class TopKSearchResults:
"""Top-K search results."""
search_results: List[SearchResult]
top_k: int
min_score: float = -1.0
_min_score_idx: int = -1
def __iter__(self):
for r in self.search_results:
yield r
def update(self, search_result: SearchResult) -> None:
"""Update Results with the new result."""
if len(self.search_results) < self.top_k:
# Add the result, regardless of score, until we have k results.
pass
elif search_result.sort_score < self.min_score:
# Early return to save compute.
return
elif len(self.search_results) >= self.top_k:
self.search_results.pop(self._min_score_idx)
self.search_results.append(search_result)
self._update_deseridata()
def will_filter(self, score: float) -> bool:
"""Check whether a score is relevant."""
if len(self.search_results) < self.top_k:
# Add the result, regardless of score, until we have k results.
return False
return score < self.min_score
def _update_deseridata(self):
self._min_score_idx = np.argmin([r.sort_score for r in self.search_results])
self.min_score = self.search_results[self._min_score_idx].sort_score
def sort(self):
"""Sort the results."""
scores = np.array([r.sort_score for r in self.search_results])
idxs = np.argsort(-scores)
self.search_results = [self.search_results[idx] for idx in idxs]
self._update_deseridata()
def write_labeled_data(self, labeled_data_path: str, sample_rate: int):
"""Write labeled results to the labeled data collection."""
labeled_data_path = epath.Path(labeled_data_path)
counts = collections.defaultdict(int)
for r in self.search_results:
labels = [ch.description for ch in r.label_widgets if ch.value]
if not labels:
continue
extension = epath.Path(r.filename).suffix
filename = epath.Path(r.filename).name[: -len(extension)]
output_filename = f'{filename}___{r.timestamp_offset}{extension}'
for label in labels:
output_path = labeled_data_path / label
output_path.mkdir(parents=True, exist_ok=True)
output_filepath = epath.Path(output_path / output_filename)
if output_filepath.exists():
counts[f'{label} exists'] += 1
continue
else:
counts[label] += 1
with output_filepath.open('wb') as f:
wavfile.write(f, sample_rate, np.float32(r.audio))
for label, count in counts.items():
print(f'Wrote {count} examples for label {label}')
@dataclasses.dataclass
class DistanceStats:
min_dist: float
max_dist: float
mean_dist: float
std_dist: float
num_windows: int
def _euclidean_score(ex, query_embedding_batch):
"""Update example with Euclidean distance scores."""
# Expand queries from shape [B, D] to shape [B, 1, 1, D]
queries = query_embedding_batch[:, np.newaxis, np.newaxis, :]
# Expand embedding from shape [T, C, D] to [1, T, C, D].
embeddings = ex[tf_examples.EMBEDDING][tf.newaxis, :, :, :]
dists = (embeddings - queries) ** 2
# Take min distance over channels and queries, leaving only time.
dists = tf.reduce_sum(dists, axis=-1) # Reduce over vector depth
dists = tf.math.sqrt(dists)
dists = tf.reduce_min(dists, axis=-1) # Reduce over channels
dists = tf.reduce_min(dists, axis=0) # Reduce over query batch
ex['scores'] = dists
return ex
def _mip_score(ex, query_embedding_batch):
"""Update example with MIP distance scores."""
# embedding shape is [T, C, D].
# queries have shape [B, D]
keys = ex[tf_examples.EMBEDDING]
scores = tf.matmul(keys, query_embedding_batch, transpose_b=True)
# Product has shape [T, C, B]
# Take max score over channels and queries, leaving only time.
scores = tf.reduce_max(scores, axis=-1) # Reduce over query batch
scores = tf.reduce_max(scores, axis=-1) # Reduce over channels
ex['scores'] = scores
return ex
def _cosine_score(ex, query_embedding_batch):
"""Update example with MIP distance scores."""
# embedding shape is [T, C, D].
# queries have shape [B, D]
keys = ex[tf_examples.EMBEDDING]
keys_norm = tf.norm(keys, axis=-1, keepdims=True)
query_norm = tf.norm(query_embedding_batch, axis=-1, keepdims=True)
keys = keys / keys_norm
query = query_embedding_batch / query_norm
scores = tf.matmul(keys, query, transpose_b=True)
# Product has shape [T, C, B]
# Take max score over channels and queries, leaving only time.
scores = tf.reduce_max(scores, axis=-1) # Reduce over query batch
scores = tf.reduce_max(scores, axis=-1) # Reduce over channels
ex['scores'] = scores
return ex
def _update_sort_scores(ex, invert: bool, target_score: float | None):
"""Update example with sort scores."""
if target_score is not None:
# We need large values to be good, so we use the inverse distance to the
# target score as our sorting score.
ex['sort_scores'] = 1.0 / (tf.abs(ex['scores'] - target_score) + 1e-12)
elif invert:
ex['sort_scores'] = -ex['scores']
else:
ex['sort_scores'] = ex['scores']
# Precompute the max score in the example, allowing us to save
# time by skipping irrelevant examples.
ex['max_sort_score'] = tf.reduce_max(ex['sort_scores'])
return ex
def _random_sort_scores(ex):
ex['sort_scores'] = tf.random.uniform(
[tf.shape(ex[tf_examples.EMBEDDING])[0]]
)
ex['max_sort_score'] = tf.reduce_max(ex['sort_scores'])
return ex
def search_embeddings_parallel(
embeddings_dataset: tf.data.Dataset,
query_embedding_batch: np.ndarray | None,
hop_size_s: int,
top_k: int = 10,
target_score: float | None = None,
score_fn: Callable[[Any, np.ndarray], Any] | str = 'euclidean', # pylint: disable=g-bare-generic
random_sample: bool = False,
invert_sort_score: bool = False,
):
"""Run a brute-force search.
Uses tf dataset manipulation to parallelize.
Args:
embeddings_dataset: tf.data.Dataset over embeddings
query_embedding_batch: Batch of query embeddings with shape [Batch, Depth],
or None if the metric does not require queries.
hop_size_s: Embedding hop size in seconds.
top_k: Number of results desired.
target_score: Get results closest to the target_score.
score_fn: Scoring function to use.
random_sample: If True, obtain a uniformly random sample of data.
invert_sort_score: Set to True if low scores are preferable to high scores.
Ignored if a string score_fn is given.
Returns:
TopKSearchResults and distance statistics reduced per-file.
"""
# Convert string to score_fn.
if score_fn == 'euclidean':
score_fn = _euclidean_score
invert_sort_score = True
elif score_fn == 'mip':
score_fn = _mip_score
invert_sort_score = False
elif score_fn == 'cosine':
score_fn = _cosine_score
invert_sort_score = False
elif isinstance(score_fn, str):
raise ValueError(f'Unknown score_fn: {score_fn}')
if query_embedding_batch is None:
pass
elif len(query_embedding_batch.shape) == 1:
query_embedding_batch = query_embedding_batch[np.newaxis, :]
elif len(query_embedding_batch.shape) > 2:
raise ValueError(
'query_embedding_batch should be rank 1 or 2, but has shape '
f'{query_embedding_batch.shape}'
)
score_fn = functools.partial(
score_fn, query_embedding_batch=query_embedding_batch
)
if random_sample:
sort_scores_fn = _random_sort_scores
else:
sort_scores_fn = functools.partial(
_update_sort_scores, target_score=target_score, invert=invert_sort_score
)
ex_map_fn = lambda ex: sort_scores_fn(score_fn(ex))
embeddings_dataset = (
embeddings_dataset.shuffle(1024)
.map(
ex_map_fn,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False,
)
.prefetch(1024)
)
results = TopKSearchResults([], top_k=top_k)
all_distances = []
try:
for ex in tqdm.tqdm(embeddings_dataset.as_numpy_iterator()):
all_distances.append(ex['scores'].reshape([-1]))
if results.will_filter(ex['max_sort_score']):
continue
for t in range(ex[tf_examples.EMBEDDING].shape[0]):
offset_s = t * hop_size_s + ex[tf_examples.TIMESTAMP_S]
result = SearchResult(
ex[tf_examples.EMBEDDING][t, :, :],
ex['scores'][t],
ex['sort_scores'][t],
ex['filename'].decode(),
offset_s,
)
results.update(result)
except KeyboardInterrupt:
pass
all_distances = np.concatenate(all_distances)
results.sort()
return results, all_distances
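# Usage sketch (illustrative data): run a brute-force search over a small
# in-memory dataset carrying the same keys a real embeddings dataset provides;
# real datasets come from tf_examples.create_embeddings_dataset.
def _example_search():
  rng = np.random.default_rng(0)
  ds = tf.data.Dataset.from_tensor_slices({
      tf_examples.EMBEDDING: rng.normal(size=[8, 12, 1, 128]).astype(
          np.float32
      ),
      tf_examples.TIMESTAMP_S: np.zeros([8], dtype=np.float32),
      'filename': [f'file_{i}' for i in range(8)],
  })
  query = rng.normal(size=[1, 128]).astype(np.float32)
  results, _ = search_embeddings_parallel(
      ds, query, hop_size_s=1, top_k=5, score_fn='euclidean'
  )
  assert len(results.search_results) == 5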
def classifer_search_embeddings_parallel(
embeddings_classifier: tf.keras.Model,
target_index: int,
**kwargs,
):
"""Get examples for a target class with logit near the target logit.
Args:
embeddings_classifier: Keras model turning embeddings into logits.
target_index: Choice of class index.
**kwargs: Arguments passed on to search_embeddings_parallel.
Returns:
TopKSearchResults and all logits.
"""
def classify_batch(batch, query_embedding_batch):
del query_embedding_batch
emb = batch[tf_examples.EMBEDDING]
emb_shape = tf.shape(emb)
flat_emb = tf.reshape(emb, [-1, emb_shape[-1]])
logits = embeddings_classifier(flat_emb)
logits = tf.reshape(
logits, [emb_shape[0], emb_shape[1], tf.shape(logits)[-1]]
)
# Restrict to target class.
logits = logits[..., target_index]
# Take the maximum logit over channels.
logits = tf.reduce_max(logits, axis=-1)
batch['scores'] = logits
return batch
return search_embeddings_parallel(
score_fn=classify_batch, query_embedding_batch=None, **kwargs
)
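# Illustrative usage sketch (an assumption for documentation purposes; this
# helper is not part of the library's API and is never called). It shows how a
# euclidean brute-force search could be launched; `embeddings_dataset` and
# `query` are hypothetical placeholders provided by the caller.
def _example_search_usage(
    embeddings_dataset: tf.data.Dataset, query: np.ndarray
):
  # Search for the 25 windows closest to `query` in L2 distance.
  results, all_scores = search_embeddings_parallel(
      embeddings_dataset=embeddings_dataset,
      query_embedding_batch=query,
      hop_size_s=5,
      top_k=25,
      score_fn='euclidean',
  )
  return results, all_scores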
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the bootstrap search component."""
from chirp.projects.bootstrap import search
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
class SearchTest(parameterized.TestCase):
def test_top_k_search_results(self):
np.random.seed(42)
random_embeddings = np.random.normal(size=[100, 5])
query = np.random.normal(size=[1, 5])
dists = np.sum((random_embeddings - query) ** 2, axis=1)
fake_results = []
for i in range(100):
r = search.SearchResult(
random_embeddings[i],
score=dists[i],
# Sort with negative distance so that the high score is best.
sort_score=-dists[i],
filename=f'result_{i:03d}',
timestamp_offset=i,
)
fake_results.append(r)
results = search.TopKSearchResults([], top_k=10)
for i, r in enumerate(fake_results):
results.update(r)
self.assertLen(results.search_results, min([i + 1, 10]))
      # Negative of the largest of the 10 smallest distances seen so far.
true_min_neg_dist = -np.max(sorted(dists[: i + 1])[:10])
arg_min_dist = np.argmin([r.sort_score for r in results])
self.assertEqual(results.min_score, true_min_neg_dist)
self.assertEqual(
results.search_results[arg_min_dist].sort_score, results.min_score
)
self.assertLen(results.search_results, results.top_k)
results.sort()
for i in range(1, 10):
self.assertGreater(
results.search_results[i - 1].sort_score,
results.search_results[i].sort_score,
)
@parameterized.product(
metric_name=('euclidean', 'cosine', 'mip'),
)
def test_metric_apis(self, metric_name):
example = {
'embedding': np.random.normal(size=[12, 5, 128]),
}
query = np.random.normal(size=[3, 128])
if metric_name == 'euclidean':
got = search._euclidean_score(example, query)
elif metric_name == 'cosine':
got = search._cosine_score(example, query)
elif metric_name == 'mip':
got = search._mip_score(example, query)
else:
raise ValueError(f'Unknown metric: {metric_name}')
self.assertIn('scores', got)
self.assertSequenceEqual(got['scores'].shape, (12,))
# Embeddings should be unchanged.
self.assertEqual(np.max(np.abs(got['embedding'] - example['embedding'])), 0)
def test_update_sort_scores(self):
example = {
'embedding': np.random.normal(size=[12, 5, 128]),
'scores': np.random.normal(size=[12]),
}
got = search._update_sort_scores(example, invert=False, target_score=None)
self.assertIn('sort_scores', got)
self.assertSequenceEqual(got['sort_scores'].shape, got['scores'].shape)
self.assertIn('max_sort_score', got)
self.assertEqual(np.max(example['scores']), got['max_sort_score'])
# Embeddings should be unchanged.
self.assertEqual(np.max(np.abs(got['embedding'] - example['embedding'])), 0)
got = search._update_sort_scores(example, invert=True, target_score=None)
self.assertIn('sort_scores', got)
self.assertSequenceEqual(got['sort_scores'].shape, got['scores'].shape)
self.assertIn('max_sort_score', got)
self.assertEqual(-np.min(example['scores']), got['max_sort_score'])
# Embeddings should be unchanged.
self.assertEqual(np.max(np.abs(got['embedding'] - example['embedding'])), 0)
got = search._update_sort_scores(example, invert=False, target_score=1.0)
self.assertIn('sort_scores', got)
self.assertSequenceEqual(got['sort_scores'].shape, got['scores'].shape)
self.assertIn('max_sort_score', got)
expect_max_score = np.max(1.0 / (np.abs(example['scores'] - 1.0) + 1e-12))
self.assertEqual(got['max_sort_score'], expect_max_score)
# Embeddings should be unchanged.
self.assertEqual(np.max(np.abs(got['embedding'] - example['embedding'])), 0)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to prepare models for SFDA methods."""
import enum
from typing import Any, Callable
from absl import logging
import chex
from chirp import config_utils
from chirp.configs import config_globals
from chirp.google import config_utils as google_config_utils
from chirp.models import metrics
from chirp.projects.sfda import data_utils
from chirp.projects.sfda import models
from chirp.projects.sfda.models import image_model
from chirp.projects.sfda.models import taxonomy_model
from chirp.taxonomy import class_utils
from chirp.train import classifier
from clu import metrics as clu_metrics
import flax
from flax import traverse_util
from flax.core import FrozenDict
from flax.core import scope
import flax.linen as nn
import jax
from jax import tree_util
import jax.numpy as jnp
from ml_collections import config_dict
import optax
# Projected gradient descent utilities
def mask_by_name(name, pytree):
"""Create a mask which is only true for leaves with the given name."""
flat_tree = traverse_util.flatten_dict(pytree)
mask = {k: k[-1] == name for k in flat_tree}
return traverse_util.unflatten_dict(mask)
def project(min_value: float, max_value: float) -> optax.GradientTransformation:
"""Optax gradient transformation that projects values within a range."""
def clip_value(updates, params):
return tree_util.tree_map(
lambda p, u: jnp.clip(p + u, min_value, max_value) - p, params, updates
)
return optax.stateless(clip_value)
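# Minimal usage sketch (illustrative only; not called by the library): chaining
# `project` with `optax.masked` so that a parameter stays within [0, 1] after
# every update. The parameter name "smoothing_coef" is a hypothetical example.
def _example_project_usage():
  params = {"smoothing_coef": jnp.array(0.95), "kernel": jnp.zeros((2, 2))}
  tx = optax.chain(
      optax.sgd(learning_rate=0.5),
      optax.masked(project(0.0, 1.0), mask_by_name("smoothing_coef", params)),
  )
  opt_state = tx.init(params)
  grads = {"smoothing_coef": jnp.array(-1.0), "kernel": jnp.ones((2, 2))}
  updates, _ = tx.update(grads, opt_state, params)
  # Plain SGD would move "smoothing_coef" to 1.45; the projection clips the
  # update so that the new value is exactly 1.0.
  return optax.apply_updates(params, updates)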
@flax.struct.dataclass
class CMAP(clu_metrics.Metric):
"""Class Mean Average Precision metric.
Accumulates logits and labels to allow computing MAP per-class across the
full dataset. See the definition here:
https://www.imageclef.org/BirdCLEF2019
Caution: This implementation does not work in jit'ed functions, because
concatenation of scores and labels has non-static shape. Workarounds for this
exist, but are very ugly. Alternatively, a streaming approximation is
possible, similar to the Keras implementation of AUC.
Thus, it is recommended to compute CMAP metrics by accumulating scores
and labels outside of the jit'ed loop.
Attributes:
scores: An array of logits or scores, with shape [batch, num_classes].
labels: Array of ground-truth labels with shape [batch, num_classes].
"""
scores: jnp.ndarray | None = None
labels: jnp.ndarray | None = None
@classmethod
def empty(cls) -> "CMAP":
return cls(scores=None, labels=None)
@classmethod
def from_model_output(
cls, values: tuple[jnp.ndarray, jnp.ndarray], **_
) -> clu_metrics.Metric:
scores, labels = values
return cls(scores=scores, labels=labels)
def merge(self, other):
if self.scores is None:
return other
if other.scores is None:
return self
if other.scores.ndim not in [2, 3]:
raise ValueError(
"Expecting the scores to be in one of the following"
"formats: [n_devices, batch_size, n_classes] or"
"[batch_size, n_classes]. Current shape is"
f"{self.scores.shape}"
)
return type(self)(
scores=jnp.concatenate((self.scores, other.scores), axis=-2),
labels=jnp.concatenate((self.labels, other.labels), axis=-2),
)
def compute(self, class_wise=False, sample_threshold: int = 0) -> Any:
"""Compute cmap only using classes that have > sample_threshold samples."""
# Compute average precision over the batch axis.
class_av_prec = metrics.average_precision(self.scores.T, self.labels.T)
class_counts = jnp.sum(self.labels, axis=0, keepdims=True)
# Mask classes with no labels to avoid inflating the CMAP.
class_av_prec *= class_counts > sample_threshold
if class_wise:
return jnp.squeeze(class_av_prec)
return jnp.sum(class_av_prec) / jnp.sum(class_counts > sample_threshold)
def make_cmap_metrics_dict(label_names):
"""Create a dict of empty cmap_metrics."""
return {label: CMAP.empty() for label in label_names}
def update_cmap_metrics_dict(cmap_metrics, model_outputs, batch):
"""Update a dict of cmap_metrics from model_outputs and a batch."""
for label_name in cmap_metrics:
cmap_metrics[label_name] = cmap_metrics[label_name].merge(
CMAP(getattr(model_outputs, label_name), batch[label_name])
)
return cmap_metrics
class TrainableParams(enum.Enum):
"""Defines which set of trainable parameters will be adapted.
Attributes:
ALL: All parameters will be adapted.
BN: Only BatchNorm scale and bias trainable parameters will be adapted.
Whether the population statistics will be adapted or not is orthogonal,
and controlled by the 'update_bn_statistics' option, in each method's
config file.
"""
ALL = "all"
BN = "batch_norm"
class LearningRateDecay(enum.Enum):
"""Used to specify the learning rate decay schedule."""
COSINE = "cosine"
NRC = "nrc"
NONE = "none"
def mask_parameters(
params: flax.core.scope.VariableDict,
strategy: TrainableParams,
model: image_model.ImageModel | taxonomy_model.TaxonomyModel,
):
"""Creates the mask of parameters to which zero_grad() will be applied.
Args:
params: The Pytree representing all of the model's parameters.
strategy: The strategy to use for masking.
model: The model considered. Used to determine whether some parameter
      belongs to a BatchNorm layer or not.
Returns:
unflattened_mask: A mask with the same Tree structure as Pytree, but
with boolean leaves indicating whether some parameter should be masked or
not.
"""
flat_tree = flax.traverse_util.flatten_dict(params)
if strategy == TrainableParams.BN:
mask = {k: not model.is_bn_parameter(k) for k in flat_tree}
elif strategy == TrainableParams.ALL:
mask = {k: False for k in flat_tree}
else:
raise NotImplementedError(f"Strategy {strategy} is not supported yet.")
frozen_parameters = [p for p, masked in mask.items() if masked]
trainable_parameters = [p for p, masked in mask.items() if not masked]
logging.info(
"The following parameters will be kept frozen during adaptation: %s",
frozen_parameters,
)
logging.info(
"The following parameters will be trained during adaptation: %s",
trainable_parameters,
)
return flax.traverse_util.unflatten_dict(mask)
@flax.struct.dataclass
class ModelBundle:
"""Model and optimizer definition.
Attributes:
model: The model used for adaptation.
optimizer: The optimizer used for adaptation.
"""
model: nn.Module
optimizer: optax.GradientTransformation | None
def identity_rename(params, *unused_args):
del unused_args
return params
def prepare_learning_rates(optimizer_config, total_steps):
"""Prepare the learning rate(s)."""
mult_lr_base = optimizer_config.mult_learning_rate_resnet_base
learning_rate, learning_rate_base_resnet, learning_rate_top = None, None, None
if optimizer_config.learning_rate_decay == LearningRateDecay.COSINE:
if mult_lr_base != 1:
learning_rate_base_resnet = optax.cosine_decay_schedule(
init_value=optimizer_config.learning_rate * mult_lr_base,
decay_steps=total_steps,
)
learning_rate_top = optax.cosine_decay_schedule(
init_value=optimizer_config.learning_rate, decay_steps=total_steps
)
else:
learning_rate = optax.cosine_decay_schedule(
optimizer_config.learning_rate, decay_steps=total_steps
)
elif optimizer_config.learning_rate_decay == LearningRateDecay.NRC:
if mult_lr_base != 1:
learning_rate_base_resnet = nrc_schedule(
init_value=optimizer_config.learning_rate * mult_lr_base,
power=0.75,
transition_steps=total_steps,
)
learning_rate_top = nrc_schedule(
init_value=optimizer_config.learning_rate,
power=0.75,
transition_steps=total_steps,
)
else:
learning_rate = nrc_schedule(
init_value=optimizer_config.learning_rate,
power=0.75,
transition_steps=total_steps,
)
elif optimizer_config.learning_rate_decay == LearningRateDecay.NONE:
if mult_lr_base != 1:
learning_rate_base_resnet = optimizer_config.learning_rate * mult_lr_base
learning_rate_top = optimizer_config.learning_rate
else:
learning_rate = optimizer_config.learning_rate
else:
raise NotImplementedError(
f"Decay schedule {optimizer_config.learning_rate_decay} is not "
"supported yet."
)
return learning_rate, learning_rate_base_resnet, learning_rate_top
def prepare_optimizer(optimizer_config, total_steps, params):
"""Prepare the optimizer."""
mult_lr_base = optimizer_config.mult_learning_rate_resnet_base
(learning_rate, learning_rate_base_resnet, learning_rate_top) = (
prepare_learning_rates(optimizer_config, total_steps)
)
opt = getattr(optax, optimizer_config.optimizer)
def get_opt_kwarg(key, default):
if key in optimizer_config.opt_kwargs:
return optimizer_config.opt_kwargs[key]
else:
return default
opt_kwargs = {}
if optimizer_config.optimizer == "adam":
opt_kwargs["b1"] = get_opt_kwarg("b1", 0.9)
opt_kwargs["b2"] = get_opt_kwarg("b2", 0.999)
opt_kwargs["eps"] = get_opt_kwarg("eps", 1e-08)
opt_kwargs["eps_root"] = get_opt_kwarg("eps_root", 0.0)
opt_kwargs["mu_dtype"] = get_opt_kwarg("mu_dtype", None)
elif optimizer_config.optimizer == "sgd":
opt_kwargs["momentum"] = get_opt_kwarg("momentum", None)
opt_kwargs["nesterov"] = get_opt_kwarg("nesterov", False)
opt_kwargs["accumulator_dtype"] = get_opt_kwarg("accumulator_dtype", None)
else:
raise NotImplementedError(
f"Optimizer {optimizer_config.optimizer} is not supported."
)
logging.info(
"Using optimizer %s with the following arguments: %s", opt, opt_kwargs
)
if mult_lr_base == 1:
# This is the simple case: use only one optimizer for all parameters.
rename_params = identity_rename
inverse_rename_params = identity_rename
optimizer = opt(learning_rate=learning_rate, **opt_kwargs)
else:
# Use different optimizers for base resnet than for bottleneck/classifier
# in the nrc_resnet architecture.
optimizer_base_resnet = opt(
learning_rate=learning_rate_base_resnet, **opt_kwargs
)
optimizer_top = opt(learning_rate=learning_rate_top, **opt_kwargs)
label_fn = map_nested_fn(lambda k, _: k)
def rename_params(params, renamed_params, prefix):
"""Rename the keys of the `params` dictionary."""
renamed_params = {}
for k, v in params.items():
if not isinstance(v, dict) and not isinstance(v, FrozenDict):
renamed_params[prefix + k] = v
else:
renamed_params[prefix + k] = rename_params(
v, renamed_params, prefix + "{}/".format(k)
)
return renamed_params
def inverse_rename_params(renamed_params):
"""Reverse the renaming of the parameter keys."""
params = {}
for k, v in renamed_params.items():
if not isinstance(v, dict) and not isinstance(v, FrozenDict):
# Remove prefix
if k.rfind("/") == -1:
params[k] = v
else:
k_base = k[k.rfind("/") + 1 :]
params[k_base] = v
else:
if k.rfind("/") == -1:
k_base = k
else:
k_base = k[k.rfind("/") + 1 :]
params[k_base] = inverse_rename_params(v)
return params
renamed_params = rename_params(params, {}, "")
def get_all_leaves(params):
leaves = []
if not isinstance(params, dict):
leaves.append(params)
else:
for v in params.values():
leaves.extend(get_all_leaves(v))
return leaves
leaves = get_all_leaves(label_fn(renamed_params))
params_to_opt = {}
for leaf in leaves:
if (
"BottleneckResNetBlock" in leaf
or "conv_init" in leaf
or "bn_init" in leaf
):
params_to_opt[leaf] = optimizer_base_resnet
else:
params_to_opt[leaf] = optimizer_top
optimizer = optax.multi_transform(params_to_opt, label_fn(renamed_params))
return optimizer, rename_params, inverse_rename_params
def nrc_schedule(
init_value: chex.Scalar, power: chex.Scalar, transition_steps: chex.Scalar
) -> optax.Schedule:
"""Constructs a schedule identical to that of NRC.
Args:
init_value: initial value for the scalar to be annealed.
power: the power of the polynomial used to transition from init to end.
transition_steps: number of steps over which annealing takes place.
Returns:
schedule: A function that maps step counts to values.
"""
def schedule(count):
count = jnp.clip(count, 0, transition_steps)
frac = count / transition_steps
return init_value / ((1 + 10 * frac) ** power)
return schedule
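# Minimal sketch (illustrative only; not called by the library) of the values
# produced by the NRC schedule: a polynomial decay from `init_value` down to
# init_value / 11**power after `transition_steps` steps.
def _example_nrc_schedule_values():
  schedule = nrc_schedule(init_value=1e-3, power=0.75, transition_steps=1000)
  # Approximately 1e-3, 1e-3 / 6**0.75 and 1e-3 / 11**0.75, respectively.
  return schedule(0), schedule(500), schedule(1000)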
def map_nested_fn(fn):
"""Recursively apply `fn` to the key-value pairs of a nested dict."""
def map_fn(nested_dict):
return {
k: (
map_fn(v)
if isinstance(v, dict) or isinstance(v, FrozenDict)
else fn(k, v)
)
for k, v in nested_dict.items()
}
return map_fn
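# Minimal sketch (illustrative only; not called by the library): labelling each
# leaf of a nested parameter dict by its own key, which is the label format
# `prepare_optimizer` passes to `optax.multi_transform`. The parameter names
# below are hypothetical.
def _example_map_nested_fn():
  params = {"encoder": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros((2,))}}
  label_fn = map_nested_fn(lambda k, _: k)
  # Returns {"encoder": {"kernel": "kernel", "bias": "bias"}}.
  return label_fn(params)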
def prepare_audio_model(
model_config: config_dict.ConfigDict,
optimizer_config: config_dict.ConfigDict | None,
pretrained: bool,
total_steps: int,
rng_seed: int,
input_shape: tuple[int, ...],
target_class_list: str,
) -> tuple[
ModelBundle,
scope.VariableDict,
scope.FrozenVariableDict,
scope.FrozenVariableDict | None,
Callable[[Any, Any, str], Any],
Callable[[Any], Any],
]:
"""Loads the taxonomic classifier's and optimizer's params and states.
Args:
model_config: The model configuration, including the definitions of the
different parts of the architecture.
optimizer_config: The optimizer configuration, including the name of the
optimizer, the learning rate etc. If set to None, the returned ModelBundle
will contain None in place of the optimizer, and the returned opt_state
will be None.
pretrained: Whether to load the pretrained model. If set to True,
model_config.pretrained_ckpt_dir will be used to load the model.
total_steps: The total number of steps used for adaptation. Used to
adequately define learning rate scheduling.
rng_seed: The random seed used to initialize the model.
    input_shape: The shape of the input (for audio, equal to [sample_rate_hz *
audio_length_s]).
target_class_list: The classlist in which labels are expressed. Used to
define the size of the classifier's head.
Returns:
    model_bundle: The ModelBundle, including the taxonomic model and its
      optimizer.
    params: The model's params after loading.
    model_state: The model's state.
    opt_state: The optimizer's state.
    rename_params: A function that renames parameter keys into the format the
      optimizer expects.
    inverse_rename_params: A function that reverses the renaming performed by
      rename_params.
"""
# Define the main model
class_lists = class_utils.get_class_lists(
target_class_list, add_taxonomic_labels=False
)
num_classes = {k: len(v.classes) for (k, v) in class_lists.items()}
model = taxonomy_model.TaxonomyModel(
num_classes=num_classes,
encoder=model_config.encoder,
frontend=model_config.frontend,
taxonomy_loss_weight=0.0,
)
if pretrained:
# Load main classification model from pretrained checkpoint
ckpt_dir = model_config.pretrained_ckpt_dir
# 'pretrained_ckpt_dir' interferes with train.initialize_model, as the
# creation of a TaxonomyModel does not expect this argument. Therefore,
# we delete it here to ensure compatibility.
delattr(model_config, "pretrained_ckpt_dir")
kwargs = {
"model_config": model_config,
"rng_seed": rng_seed,
"input_shape": input_shape,
"learning_rate": 0.0,
"optimizer": optax.adam(learning_rate=0.0),
}
model_bundle, train_state = classifier.initialize_model(
workdir=ckpt_dir,
target_class_list=target_class_list,
**kwargs,
)
train_state = model_bundle.ckpt.restore(train_state)
params = train_state.params
model_state = train_state.model_state
else:
variables = model.init(
jax.random.PRNGKey(rng_seed), jnp.zeros((1,) + input_shape), train=False
)
model_state, params = flax.core.pop(variables, "params")
params = flax.core.unfreeze(params)
# Define the optimizer
if optimizer_config is None:
optimizer = None
opt_state = None
rename_params = identity_rename
inverse_rename_params = identity_rename
else:
std_to_fwhm = jnp.sqrt(2 * jnp.log(2)) / jnp.pi
optimizer, rename_params, inverse_rename_params = prepare_optimizer(
optimizer_config, total_steps, params
)
optimizer = optax.chain(
optimizer,
optax.masked(
project(0.0, 1.0),
mask_by_name("spcen_smoothing_coef", params),
),
optax.masked(
project(0.0, jnp.pi),
mask_by_name("gabor_mean", params),
),
optax.masked(
project(4 * std_to_fwhm, model.frontend.kernel_size * std_to_fwhm), # pytype: disable=wrong-arg-types # jax-types
mask_by_name("gabor_std", params),
),
optax.masked(
zero_grads(),
mask_parameters(
params, optimizer_config.trainable_params_strategy, model
),
),
)
opt_state = optimizer.init(params)
model_bundle = ModelBundle(model, optimizer)
return (
model_bundle,
params,
model_state,
opt_state,
rename_params,
inverse_rename_params,
)
def prepare_image_model(
model_config: config_dict.ConfigDict,
optimizer_config: config_dict.ConfigDict | None,
total_steps: int,
rng_seed: int,
pretrained: bool,
target_class_list: str,
**_,
) -> tuple[
ModelBundle,
scope.VariableDict,
scope.FrozenVariableDict,
scope.FrozenVariableDict | None,
Callable[[Any, Any, str], Any],
Callable[[Any], Any],
]:
"""Prepare an image model for source-free domain adaptation.
Args:
model_config: The model configuration, including the specification of the
encoder's architecture.
optimizer_config: The optimizer configuration, including the name of the
optimizer, the learning rate etc.
total_steps: The total number of steps used for adaptation. Used to
adequately define learning rate scheduling.
rng_seed: The seed to initialize the model, in case no pretrained checkpoint
is provided.
pretrained: Whether to load the model from a pretrained checkpoint or not.
If set to True, the model will use the 'load_ckpt' method from the
corresponding model.
target_class_list: The name of the dataset used for adaptation. This is used
to grab the correct checkpoint for each model.
Returns:
model_bundle: The ModelBundle, including the image model and its
optimizer.
params: The model's params after loading.
    model_state: The model's state.
    opt_state: The optimizer's state.
    rename_params: A function that renames parameter keys into the format the
      optimizer expects.
    inverse_rename_params: A function that reverses the renaming performed by
      rename_params.
"""
data_info = data_utils.get_metadata(target_class_list)
model = models.MODEL_REGISTRY[model_config.encoder](
num_classes=data_info["num_classes"]
)
if (
optimizer_config is not None
and optimizer_config.mult_learning_rate_resnet_base != 1
and model_config.encoder
not in (
models.ImageModelName.NRC_RESNET,
models.ImageModelName.NRC_RESNET_OFFICE_HOME,
)
):
raise ValueError(
"Setting `mult_learning_rate_resnet_base` in "
"`optimizer_config` to be != 1 is only supported for "
"the `nrc_resnet` encoder but the current "
"encoder is {}.".format(model_config.encoder)
)
if pretrained:
variables = model.load_ckpt(target_class_list)
else:
input_shape = (data_info["resolution"], data_info["resolution"], 3)
variables = model.init(
jax.random.PRNGKey(rng_seed),
jnp.zeros((1,) + input_shape),
False,
False,
)
model_state, params = flax.core.pop(variables, "params")
params = flax.core.unfreeze(params)
# Define the optimizer
if optimizer_config is None:
optimizer = None
opt_state = None
rename_params = identity_rename
inverse_rename_params = identity_rename
else:
optimizer, rename_params, inverse_rename_params = prepare_optimizer(
optimizer_config, total_steps, params
)
renamed_params = rename_params(params, {}, "")
optimizer = optax.chain(
optimizer,
optax.masked(
zero_grads(),
mask_parameters(
renamed_params,
optimizer_config.trainable_params_strategy,
model,
),
),
)
opt_state = optimizer.init(renamed_params)
model_bundle = ModelBundle(model, optimizer)
return (
model_bundle,
params,
model_state,
opt_state,
rename_params,
inverse_rename_params,
)
def zero_grads() -> optax.GradientTransformation:
"""Creates a GradientTransformation that zeros out gradients."""
def init_fn(_):
return ()
def update_fn(updates, state, params=None): # pylint: disable=unused-argument
return jax.tree_map(jnp.zeros_like, updates), ()
return optax.GradientTransformation(init_fn, update_fn)
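# Minimal sketch (illustrative only; not called by the library): freezing a
# subset of parameters by zeroing their gradients with `optax.masked`. The mask
# here is written by hand; in the library it is produced by `mask_parameters`.
def _example_zero_grads_usage():
  params = {"frozen": jnp.array(1.0), "trainable": jnp.array(1.0)}
  mask = {"frozen": True, "trainable": False}
  tx = optax.chain(optax.sgd(0.1), optax.masked(zero_grads(), mask))
  opt_state = tx.init(params)
  grads = {"frozen": jnp.array(1.0), "trainable": jnp.array(1.0)}
  updates, _ = tx.update(grads, opt_state, params)
  # Only "trainable" moves (1.0 -> 0.9); "frozen" stays at 1.0.
  return optax.apply_updates(params, updates)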
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom metrics for SFDA."""
from chirp.projects.sfda import losses
from clu import metrics as clu_metrics
import flax
import jax.numpy as jnp
@flax.struct.dataclass
class Accuracy(clu_metrics.Average):
"""Computes the accuracy from model outputs `probabilities` and `labels`.
  `label` is expected to be a one-hot encoding of the ground-truth class with
  the same shape as `probabilities`; predictions and labels are compared by
  taking the argmax over the last axis.
See also documentation of `Metric`.
"""
@classmethod
def from_model_output(
cls, probabilities: jnp.ndarray, label: jnp.ndarray, **kwargs
) -> clu_metrics.Metric:
return super().from_model_output(
values=(probabilities.argmax(axis=-1) == label.argmax(axis=-1)).astype(
jnp.float32
),
**kwargs
)
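# Minimal sketch (illustrative only; not called by the library): accuracy from
# one batch of probabilities and one-hot labels.
def _example_accuracy_usage():
  probabilities = jnp.array([[0.9, 0.1], [0.2, 0.8]])
  label = jnp.array([[1, 0], [1, 0]])
  metric = Accuracy.from_model_output(probabilities=probabilities, label=label)
  # One of the two argmax predictions matches its one-hot label, so this is 0.5.
  return metric.compute()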
@flax.struct.dataclass
class MarginalEntropy(clu_metrics.Metric):
"""Computes the marginal entropy of a model's output distribution.
Marginal entropy is a useful metric to track. To compute it, one requires
computing the marginal label distribution of the model, which requires access
to all of the model's outputs on batch of interest. That makes it a
non-`averageable` metric, which is why we dedicate a separate metric for it.
"""
probability_sum: jnp.ndarray
n_samples: int
multi_label: bool
label_mask: jnp.ndarray | None
@classmethod
def from_model_output(
cls,
probabilities: jnp.ndarray,
multi_label: bool,
label_mask: jnp.ndarray,
**_
) -> "MarginalEntropy":
return cls(
probability_sum=probabilities.sum(axis=0),
n_samples=probabilities.shape[0],
multi_label=multi_label,
label_mask=label_mask,
)
def merge(self, other: "MarginalEntropy") -> "MarginalEntropy":
return type(self)(
probability_sum=self.probability_sum + other.probability_sum,
n_samples=self.n_samples + other.n_samples,
multi_label=other.multi_label,
label_mask=other.label_mask,
)
def compute(self):
proba_marginal = self.probability_sum * (1 / self.n_samples)
reference_mask = None if self.label_mask is None else self.label_mask[0]
return losses.label_ent(proba_marginal, reference_mask)
@classmethod
def empty(cls) -> "MarginalEntropy":
return cls( # pytype: disable=wrong-arg-types # jnp-array
probability_sum=0.0, n_samples=0, multi_label=False, label_mask=None
)
@flax.struct.dataclass
class MarginalBinaryEntropy(clu_metrics.Metric):
"""A version of MarginalEntropy, for binary entropies.
TODO(mboudiaf) Merge this metric with MarginalEntropy using jax.lax.cond on
multi_label.
"""
probability_sum: jnp.ndarray
n_samples: int
label_mask: jnp.ndarray
multi_label: bool
@classmethod
def from_model_output(
cls,
label_mask: jnp.ndarray,
probabilities: jnp.ndarray,
multi_label: bool,
**_
) -> "MarginalBinaryEntropy":
    # TODO(mboudiaf): Verify here that label_mask is the same across samples.
    # The difficulty is that asserts cannot run inside a jitted function. Right
    # now, this check is done before each iteration, but ideally it would live
    # here.
return cls(
probability_sum=probabilities.sum(axis=0),
label_mask=label_mask[0],
n_samples=probabilities.shape[0],
multi_label=multi_label,
)
def merge(self, other: "MarginalBinaryEntropy") -> "MarginalBinaryEntropy":
return type(self)(
probability_sum=self.probability_sum + other.probability_sum,
n_samples=self.n_samples + other.n_samples,
label_mask=other.label_mask,
multi_label=other.multi_label,
)
def compute(self):
proba_marginal = self.probability_sum * (1 / self.n_samples)
return losses.label_binary_ent(
probabilities=proba_marginal, label_mask=self.label_mask
)
@classmethod
def empty(cls) -> "MarginalBinaryEntropy":
return cls( # pytype: disable=wrong-arg-types # jnp-array
probability_sum=0.0, n_samples=0, label_mask=0.0, multi_label=False
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric for Mean Class Accuracy (MCA)."""
from typing import Any
from clu import metrics as clu_metrics
import flax
from jax import numpy as jnp
import numpy as np
@flax.struct.dataclass
class MCA(clu_metrics.Metric):
"""Mean Class Accuracy metric.
Accumulates information from individual batches to compute the mean class
accuracy over the dataset. Specifically, this involves computing the per-class
accuracy for each class in the dataset, and then averaging those.
Attributes:
    counts_correct: Per-class counts of correctly classified samples, with
      shape [num_classes].
    counts_total: Per-class counts of samples, with shape [num_classes].
"""
counts_correct: jnp.ndarray | None = None
counts_total: jnp.ndarray | None = None
@classmethod
def empty(cls) -> "MCA":
return cls(counts_correct=None, counts_total=None)
@classmethod
def from_model_output(
cls, scores: jnp.ndarray, label: jnp.ndarray, **_
) -> clu_metrics.Metric:
num_classes = label.shape[-1]
if scores.shape[-1] != num_classes:
raise ValueError(
"Expected the last dims of `scores` and `label` to be the same."
)
if scores.ndim not in [2, 3]:
raise ValueError(
"Expecting the scores to be in one of the following"
"formats: [n_devices, batch_size, n_classes] or"
"[batch_size, n_classes]. Current shape is"
f"{scores.shape}"
)
if label.ndim not in [2, 3]:
raise ValueError(
"Expecting the label to be in one of the following"
"formats: [n_devices, batch_size, n_classes] or"
"[batch_size, n_classes]. Current shape is"
f"{label.shape}"
)
is_correct = scores.argmax(axis=-1) == label.argmax(
axis=-1
) # [*, batch_size].
is_correct_flat = jnp.reshape(is_correct, [1, -1])
label_flat = jnp.reshape(label, [-1, num_classes])
# [1, *] x [*, num_classes] where * = batch_size or n_devices * batch_size.
per_class_correct_counts = jnp.squeeze(
jnp.matmul(is_correct_flat, label_flat)
) # [num_classes].
counts = jnp.sum(label_flat, axis=0).astype(jnp.float32) # [num classes].
return cls(counts_correct=per_class_correct_counts, counts_total=counts)
def merge(self, other):
if self.counts_correct is None:
assert self.counts_total is None
counts_correct = other.counts_correct
counts_total = other.counts_total
elif other.counts_correct is None:
assert other.counts_total is None
counts_correct = self.counts_correct
counts_total = self.counts_total
else:
num_classes = self.counts_correct.shape[-1]
# [num_devices, new bs, num_classes] or [new bs, num_classes] where new bs
# is the combined batch size.
counts_correct = jnp.concatenate(
(
jnp.reshape(self.counts_correct, [-1, num_classes]),
jnp.reshape(other.counts_correct, [-1, num_classes]),
),
axis=-2,
)
counts_total = jnp.concatenate(
(
jnp.reshape(self.counts_total, [-1, num_classes]),
jnp.reshape(other.counts_total, [-1, num_classes]),
),
axis=-2,
)
# [num_devices, num_classes] or [num_classes].
counts_correct = jnp.sum(counts_correct, axis=-2)
counts_total = jnp.sum(counts_total, axis=-2)
return type(self)(
counts_correct=counts_correct,
counts_total=counts_total,
)
def compute(self) -> Any:
"""Compute cmap only using classes that have > sample_threshold samples."""
per_class_acc = self.counts_correct / self.counts_total
mca = jnp.mean(per_class_acc)
return mca, per_class_acc
def make_mca_metric():
"""Create an empty MAC metric."""
return MCA.empty()
def update_mca_metric(mca_metric, model_outputs, batch):
"""Update a mac_metric from model_outputs and a batch."""
label = batch["label"].astype(np.int32)
new_metric = MCA.from_model_output(model_outputs.label, label)
mca_metric = mca_metric.merge(new_metric)
return mca_metric
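# Minimal sketch (illustrative only; not called by the library): mean class
# accuracy over a single batch of scores and one-hot labels.
def _example_mca_usage():
  scores = jnp.array([[2.0, 0.1], [1.5, 0.3]])
  label = jnp.array([[1, 0], [0, 1]])
  metric = MCA.empty().merge(MCA.from_model_output(scores, label))
  # Per-class accuracies are [1.0, 0.0], so the mean class accuracy is 0.5.
  mca_value, per_class_acc = metric.compute()
  return mca_value, per_class_acc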
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to load data for Source-free Domain Adaptation."""
import ast
from typing import Any
from absl import logging
from chirp.data import utils as data_utils
from chirp.projects.sfda import models
import jax
from ml_collections import config_dict
import tensorflow as tf
import tensorflow_datasets as tfds
def to_tf_compatible_split(split_tuple: list[tuple[int, int]]) -> str:
"""Converts the splits specified in the config file into tf-readable format.
TF dataset expects a split in the form 'train[x%:y%]'. The % sign
can cause trouble when trying to iterate over splits (in a shell script for
instance). As a quick workaround, at the config file level, we specify the
splits using [(a, b), (x, y), ...], which will be converted to
'train[a%:b%]+train[x%:y%]' in this function.
Args:
split_tuple: The train split, in the form [(a, b), (x, y), ...]
Returns:
    The TF-readable version of split_tuple, i.e. 'train[a%:b%]+train[x%:y%]+...'
"""
splits = []
for st, end in split_tuple:
splits.append(f'train[{st}%:{end}%]')
return '+'.join(splits)
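# Minimal sketch (illustrative only; not called by the library): the split
# conversion performed by `to_tf_compatible_split`.
def _example_split_conversion() -> str:
  # Returns 'train[0%:25%]+train[50%:75%]'.
  return to_tf_compatible_split([(0, 25), (50, 75)])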
def get_audio_datasets(
adaptation_data_config: config_dict.ConfigDict,
eval_data_config: config_dict.ConfigDict,
sample_rate_hz: float,
cache_data: bool = True,
) -> tuple[tf.data.Dataset, tf.data.Dataset]:
"""Get audio datasets used for adaptation and evaluation.
Args:
adaptation_data_config: The configuration containing relevant information
(e.g. transformation pipeline) to build the adaptation dataset.
eval_data_config: The configuration containing relevant information to build
the evaluation dataset.
sample_rate_hz: The sample rate used by the current model. Used to
double-check that this sample rate matches the one the data was created
with.
cache_data: Whether to cache the dataset for fast subsequent access.
Returns:
The datasets used for adaptation and evaluation.
Raises:
ValueError: If the model's sample_rate and data's sample_rate do not match.
"""
adaptation_split = to_tf_compatible_split(
ast.literal_eval(adaptation_data_config.split)
)
eval_split = to_tf_compatible_split(ast.literal_eval(eval_data_config.split))
# is_train only affects how data is processed by tensorflow internally,
# in the case of a distributed setting. For now, SFDA is only supported in a
  # non-distributed setting. Therefore, the is_train argument has no effect.
adaptation_dataset, adaptation_dataset_info = data_utils.get_dataset(
split=adaptation_split,
is_train=False,
dataset_directory=adaptation_data_config.dataset_directory,
tfds_data_dir=adaptation_data_config.tfds_data_dir,
pipeline=adaptation_data_config.pipeline,
)
if adaptation_dataset_info.features['audio'].sample_rate != sample_rate_hz:
raise ValueError(
        'Dataset sample rate must match config sample rate. To address this, '
        'set the sample rate in the config to {}.'.format(
adaptation_dataset_info.features['audio'].sample_rate
)
)
# Grab the data used for evaluation
val_dataset, val_dataset_info = data_utils.get_dataset(
split=eval_split,
is_train=False,
dataset_directory=eval_data_config.dataset_directory,
tfds_data_dir=eval_data_config.tfds_data_dir,
pipeline=eval_data_config.pipeline,
)
if val_dataset_info.features['audio'].sample_rate != sample_rate_hz:
raise ValueError(
        'Dataset sample rate must match config sample rate. To address this, '
        'set the sample rate in the config to {}.'.format(
val_dataset_info.features['audio'].sample_rate
)
)
if cache_data:
adaptation_dataset = adaptation_dataset.cache()
val_dataset = val_dataset.cache()
return adaptation_dataset, val_dataset
def get_image_datasets(
image_model: models.ImageModelName,
dataset_name: str,
batch_size_train: int,
batch_size_eval: int,
data_seed: int,
builder_kwargs: dict[str, Any],
cache_data: bool = True,
) -> tuple[tf.data.Dataset, tf.data.Dataset]:
"""Get image dataset used for adaptation and evaluation.
Args:
image_model: The image model used for adaptation. This dictates the input
pipeline to use.
dataset_name: The name of the dataset used for adaptation and evaluation.
batch_size_train: The batch size used for adaptation.
batch_size_eval: The batch size used for evaluation.
data_seed: Used to seed data shuffling.
builder_kwargs: Kwargs to pass when creating the data builder.
cache_data: Whether to cache the dataset for fast subsequent access.
Returns:
The adaptation and evaluation datasets.
"""
input_pipeline = models.MODEL_REGISTRY[image_model](
num_classes=0
).get_input_pipeline
dataset_metadata = get_metadata(dataset_name)
num_devices = jax.local_device_count()
def build_image_dataset(split: str, batch_size: int):
data_builder = tfds.builder(dataset_name, **builder_kwargs)
tfds_split = dataset_metadata['splits'][split]
logging.info('Using split %s for dataset %s', tfds_split, dataset_name)
dataset = input_pipeline(
data_builder=data_builder,
split=tfds_split,
image_size=dataset_metadata['resolution'],
)
if split == 'train':
dataset = dataset.shuffle(512, seed=data_seed)
if num_devices is not None:
dataset = dataset.batch(batch_size // num_devices, drop_remainder=False)
dataset = dataset.batch(num_devices, drop_remainder=False)
else:
dataset = dataset.batch(batch_size, drop_remainder=False)
return dataset.prefetch(10)
adaptation_dataset = build_image_dataset('train', batch_size_train)
val_dataset = build_image_dataset('eval', batch_size_eval)
if cache_data:
adaptation_dataset = adaptation_dataset.cache()
val_dataset = val_dataset.cache()
return adaptation_dataset, val_dataset
def get_metadata(dataset_name: str) -> dict[str, Any]:
"""Maps image dataset names to metadata.
Args:
dataset_name: The raw dataset_name.
Returns:
A dictionary of metadata for this dataset, including:
- num_classes: The number of classes.
- resolution: The image resolution.
Raises:
NotImplementedError: If the dataset is unknown.
"""
if 'imagenet' in dataset_name:
if 'corrupted' in dataset_name:
split = {'train': 'validation[:75%]', 'eval': 'validation[75%:]'}
else:
split = {'train': 'test[:75%]', 'eval': 'test[75%:]'}
return {'num_classes': 1000, 'resolution': 224, 'splits': split}
elif 'cifar' in dataset_name:
split = {'train': 'test[:75%]', 'eval': 'test[75%:]'}
return {'num_classes': 10, 'resolution': 32, 'splits': split}
elif dataset_name == 'fake_image_dataset':
split = {'train': 'train[:1]', 'eval': 'train[1:2]'}
return {'num_classes': 2, 'resolution': 12, 'splits': split}
elif dataset_name == 'vis_da_c':
# In line with NRC's results, we do both the adaptation and the evaluation
# on the validation set of VisDA-C.
split = {'train': 'validation[:75%]', 'eval': 'validation[75%:]'}
return {'num_classes': 12, 'resolution': 224, 'splits': split}
elif 'office_home' in dataset_name:
# In line with NRC's results, we do both the adaptation and the evaluation
# on all of Office-Home.
split = {'train': 'train', 'eval': 'train'}
return {'num_classes': 65, 'resolution': 224, 'splits': split}
else:
raise NotImplementedError(
f'Unknown number of classes for dataset {dataset_name}.'
)
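# Minimal sketch (illustrative only; not called by the library): fetching the
# metadata used to build an image input pipeline.
def _example_get_metadata_usage():
  metadata = get_metadata('vis_da_c')
  # num_classes == 12 and resolution == 224 for VisDA-C.
  return metadata['num_classes'], metadata['resolution']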
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptation and evaluation utilities for source-free domain adaptation."""
import abc
import enum
import functools
import time
from typing import Any, Callable
from absl import logging
from chirp.models import metrics as chirp_metrics
from chirp.models import output
from chirp.projects.sfda import losses
from chirp.projects.sfda import mca
from chirp.projects.sfda import metrics
from chirp.projects.sfda import model_utils
from clu import metric_writers
from clu import metrics as clu_metrics
from clu import periodic_actions
import flax
from flax import linen as nn
import flax.jax_utils as flax_utils
import jax
from jax import numpy as jnp
from ml_collections import config_dict
import numpy as np
import optax
import tensorflow as tf
import tqdm
ForwardStepType = Callable[
[
dict[str, jnp.ndarray],
flax.core.scope.FrozenVariableDict,
flax.core.scope.VariableDict,
jax.random.PRNGKeyArray | None,
],
output.ClassifierOutput,
]
@flax.struct.dataclass
class AdaptationState:
"""All useful states and parameters kept during adaptation.
Unlike in train.py where TrainState contains a single model, adaptation
methods may use several methods (e.g. a teacher and a student, or an
auxiliary GAN). Therefore, model_params, model_state and opts_states are
stored in Dict in which the keys refer to the name of the model. The key
'main' will be used for evaluation.
Attributes:
step: The batch iteration of adaptation (no reset after each epoch).
epoch: The epoch of adaptation.
model_params: The parameters of the model.
model_state: The state of the model.
method_state: All values a method needs to keep track of during adaptation.
opt_state: The optimizer's state.
restrict_classes: Whether to restrict to classes present in the target
dataset.
"""
step: int
epoch: int
model_params: flax.core.scope.VariableDict
model_state: flax.core.scope.FrozenVariableDict
method_state: dict[str, Any]
opt_state: optax.OptState
restrict_classes: bool
class Modality(enum.Enum):
"""Used to specify which modality we're using for adaptation."""
IMAGE = "image"
AUDIO = "audio"
def keep_jax_types(batch: dict[str, np.ndarray]) -> dict[str, np.ndarray]:
"""Remove non-numeric arrays from batch, to make it jit-compliant.
Args:
batch: The batch of data.
Returns:
The filtered batch of data, where any array containing non-numeric values
has been filtered out.
"""
return {k: v for k, v in batch.items() if v.dtype != np.dtype("O")}
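# Minimal sketch (illustrative only; not called by the library): string-valued
# entries such as filenames are dropped so that the remaining batch can safely
# be passed to a jitted function.
def _example_keep_jax_types():
  batch = {
      "audio": np.zeros((2, 16000), dtype=np.float32),
      "filename": np.array(["a.wav", "b.wav"], dtype=object),
  }
  # Only the "audio" entry survives the filtering.
  return keep_jax_types(batch)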
class SFDAMethod(metaclass=abc.ABCMeta):
"""A template for a Source-Free Domain Adaptation (SFDA) method."""
def initialize(
self,
model_config: config_dict.ConfigDict,
rng_seed: int,
modality: Modality,
input_shape: tuple[int, ...],
target_class_list: str,
adaptation_iterations: int,
optimizer_config: config_dict.ConfigDict,
pretrained: bool,
) -> tuple[
model_utils.ModelBundle,
AdaptationState,
jax.random.PRNGKeyArray,
Callable[[Any, Any, str], Any],
Callable[[Any], Any],
]:
"""Loads model's params and state, and instantiates the adaptation state.
Args:
model_config: The model configuration, including the definitions of the
different parts of the architecture.
rng_seed: The random seed used to define the jax random key and seed other
non-jax random operations.
modality: The modality currently used between 'image' and 'audio'.
input_shape: The shape of the input.
target_class_list: The classlist in which labels are expressed. Used to
define the size of the classifier's head.
adaptation_iterations: The total number of steps used for adaptation. Used
to adequately define learning rate scheduling.
optimizer_config: The optimizer configuration, including the name of the
optimizer, the learning rate etc.
pretrained: Whether to use a pretrained model or not.
Returns:
      The model_bundle to use, the initial adaptation_state, the jax random key
      to use for adaptation, and the functions for renaming and reversing the
      renaming of parameters (needed when different learning rates are used for
      different subsets of parameters).
Raises:
ValueError: In case the chosen modality is neither Modality.AUDIO
nor Modality.IMAGE.
"""
# Generate a random key
key = jax.random.PRNGKey(rng_seed)
if modality == Modality.AUDIO:
prepare_fn = model_utils.prepare_audio_model
restrict_classes = True
elif modality == Modality.IMAGE:
prepare_fn = model_utils.prepare_image_model
restrict_classes = False
else:
raise ValueError(f"Modality {modality} not supported.")
(
model_bundle,
params,
model_state,
opt_state,
rename_fn,
inverse_rename_fn,
) = prepare_fn(
model_config=model_config,
optimizer_config=optimizer_config,
pretrained=pretrained,
rng_seed=rng_seed,
input_shape=input_shape,
target_class_list=target_class_list,
total_steps=adaptation_iterations,
)
# Package model, parameters and states in structures.
# TODO(mboudiaf): Add support for restoring previous adaptation state.
adaptation_state = AdaptationState(
step=0,
epoch=0,
model_params=params,
opt_state=opt_state,
method_state={},
model_state=model_state,
restrict_classes=restrict_classes,
)
return model_bundle, adaptation_state, key, rename_fn, inverse_rename_fn
@abc.abstractmethod
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **_
) -> type[clu_metrics.Collection]:
"""Define metrics tracked during adaptation.
On top of common metrics (accuracy/mAP ...), SFDA methods should
specify the field 'main_loss', corresponding to the loss minimized during
adaptation.
Args:
supervised: Whether the adaptation dataset is supervised. Used to
determine if supervised metrics (e.g. accuracy) can be tracked or not.
multi_label: Whether the current classification dataset is single-label or
multi-label. Used to define appropriate metrics.
"""
pass
def before_run(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: Modality,
multi_label: bool,
**method_kwargs,
) -> AdaptationState:
"""Any operation that a method needs to do before a run.
An example of application is to initialize memories.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
A potentially updated adaptation_state.
"""
del (
key,
model_bundle,
modality,
multi_label,
method_kwargs,
adaptation_dataset,
)
return adaptation_state
def before_epoch(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: Modality,
multi_label: bool,
**method_kwargs,
) -> AdaptationState:
"""Any operation that a method needs to do before an epoch.
An example of application is to compute all pseudo-labels for the next
epoch.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
The adaptation state, with a potentially updated 'method_state' attribute.
"""
del (
key,
model_bundle,
adaptation_dataset,
modality,
multi_label,
method_kwargs,
)
return adaptation_state
# Prevent the forward_step from recompiling every step.
def cache_get_forward_step(
self,
model: nn.Module,
modality: Modality,
use_batch_statistics: bool,
train: bool = False,
) -> ForwardStepType:
"""Cache the forward step."""
if hasattr(self, "_forward_step") and self._forward_step is not None:
return self._forward_step
self._forward_step = batch_forward(
model, modality, use_batch_statistics, train
)
return self._forward_step
# Prevent the update_step from recompiling every time do_epoch is called.
def cache_get_update_step(self, update_step):
"""Cache the update step."""
if hasattr(self, "_update_step") and self._update_step is not None:
return self._update_step
self._update_step = update_step
return self._update_step
# Prevent the update_step from recompiling every time do_epoch is called.
def cache_get_update_metrics(self, update_metrics):
"""Cache the update step."""
if hasattr(self, "_update_metrics") and self._update_metrics is not None:
return self._update_metrics
self._update_metrics = update_metrics
return self._update_metrics
def before_iter(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: AdaptationState,
batch: dict[str, np.ndarray],
modality: Modality,
multi_label: bool,
**method_kwargs,
) -> tuple[AdaptationState, dict[str, jnp.ndarray]]:
"""Any operation that a method needs to do before an adaptation iteration.
An example of application is to compute the pseudo-labels needed for the
current iteration.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
batch: The iteration's batch of data.
modality: The modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
      A potentially updated version of the adaptation state.
      A dictionary containing any variable the method may need during the
      upcoming adaptation iteration. All values in this dictionary must be
      numeric-typed (they will be passed to a jitted function).
"""
del (key, model_bundle, batch, modality, multi_label, method_kwargs)
return adaptation_state, {}
def do_epoch(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: AdaptationState,
rename_fn: Callable[[Any, Any, str], Any],
inverse_rename_fn: Callable[[Any], Any],
adaptation_dataset: tf.data.Dataset,
modality: Modality,
multi_label: bool,
batchwise_metrics: type[clu_metrics.Collection],
writer: metric_writers.MetricWriter,
reporter: periodic_actions.ReportProgress,
use_supervised_metrics: bool,
**method_kwargs,
) -> AdaptationState:
"""Perform an epoch of adaptation.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The model_utils.ModelBundle to use for adaptation.
      adaptation_state: The current AdaptationState. Once the epoch is over, an
        updated version of it is returned.
rename_fn: The function to use to rename the parameter keys. This is
needed if using `optax.multi_transform`, which can be used to define
different learning rates for different parameters.
inverse_rename_fn: A callable that reverses the changes of `rename_fn`.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether the current classification dataset is single-label or
multi-label. Important to choose the adequate metrics and losses.
batchwise_metrics: The collection of metrics to keep track of during
adaptation.
writer: A MetricWriter that logs all metrics.
reporter: A ReportProgress that helps keep track of speed of adaptation.
use_supervised_metrics: Whether the current dataset is supervised or not.
**method_kwargs: Additional method-specific kwargs.
Returns:
An updated version of the adaptation state.
Raises:
ValueError: If model_bundle's optimizer is not None and batchwise_metrics
does not contain a 'main_loss' metric.
"""
def forward(params, key, batch, model_state, **method_gather_args):
"""Forwards the batch through the current model."""
dropout_key, low_pass_key = jax.random.split(key)
variables = {"params": params, **model_state}
      # Forward pass through the model.
if method_kwargs["update_bn_statistics"]:
model_outputs, model_state = model_bundle.model.apply(
variables,
batch[modality.value],
train=method_kwargs["use_dropout"],
mutable=list(model_state.keys()),
use_running_average=False,
rngs={"dropout": dropout_key, "low_pass": low_pass_key},
)
else:
model_outputs = model_bundle.model.apply(
variables,
batch[modality.value],
use_running_average=True,
train=method_kwargs["use_dropout"],
rngs={"dropout": dropout_key, "low_pass": low_pass_key},
)
# Compute metrics and loss
label_mask = batch.get("label_mask", None)
gather_args = {
"multi_label": multi_label,
"outputs": model_outputs,
"probabilities": logit2proba(
model_outputs.label, label_mask, multi_label
),
"label_mask": label_mask,
}
gather_args.update(method_gather_args)
if use_supervised_metrics:
gather_args.update({"label": batch["label"].astype(np.int32)})
# Compute the current metrics
batch_metrics = batchwise_metrics.gather_from_model_output(
**gather_args, **method_kwargs
).compute()
# Extract the loss to optimize, and add weight decay.
if "main_loss" not in batch_metrics:
if model_bundle.optimizer is not None:
raise ValueError(
"Any SFDA method that defines an optimizer should also specify "
"the key 'main_loss' by overriding 'get_adaptation_metrics'."
)
main_loss = None
else:
main_loss = batch_metrics["main_loss"]
if method_kwargs["optimizer_config"].weight_decay > 0.0:
main_loss += method_kwargs[
"optimizer_config"
].weight_decay * losses.l2_loss(params)
return main_loss, (batch_metrics, model_state)
@functools.partial(jax.pmap, axis_name="batch")
def update_step(
batch: dict[str, jnp.ndarray],
adaptation_state: AdaptationState,
key: jax.random.PRNGKeyArray,
**method_gather_args,
) -> tuple[dict[str, jnp.ndarray], AdaptationState]:
"""Updates the model's state and params using the given batch."""
params = adaptation_state.model_params
model_state = adaptation_state.model_state
opt_state = adaptation_state.opt_state
if model_bundle.optimizer is not None:
# If an optimizer is defined, compute gradient transformations.
# Doing so, get the new model state.
grads, (batch_metrics, model_state) = jax.grad(forward, has_aux=True)(
params,
key=key,
batch=batch,
model_state=model_state,
**method_gather_args,
)
grads = jax.lax.pmean(grads, axis_name="batch")
# Update model's parameters from gradient transformations.
# Rename params and gradients as the optimizer expects.
grads = rename_fn(grads, {}, "")
renamed_params = rename_fn(params, {}, "")
updates, opt_state = model_bundle.optimizer.update(
grads, opt_state, renamed_params
)
params = optax.apply_updates(renamed_params, updates)
# Undo the renaming, else the next forward pass will fail.
params = inverse_rename_fn(params)
else:
# Otherwise, we simply forward the data through the model.
_, (batch_metrics, model_state) = forward(
params,
key=key,
batch=batch,
model_state=model_state,
**method_gather_args,
)
# Update adaptation state
adaptation_state = adaptation_state.replace(
step=adaptation_state.step + 1,
model_params=params,
opt_state=opt_state,
model_state=model_state,
)
return batch_metrics, adaptation_state
# Iterate over batches.
adaptation_state = flax_utils.replicate(adaptation_state)
for batch in tqdm.tqdm(adaptation_dataset.as_numpy_iterator()):
batch = jax.tree_map(np.asarray, batch)
verify_batch(batch)
current_step = int(flax_utils.unreplicate(adaptation_state.step))
step_key, key = jax.random.split(key)
step_key = jax.random.split(step_key, num=jax.local_device_count())
st = time.time()
adaptation_state, method_gather_args = self.before_iter(
key=step_key,
model_bundle=model_bundle,
adaptation_state=adaptation_state,
batch=batch,
modality=modality,
multi_label=multi_label,
**method_kwargs,
)
elapsed = time.time() - st
logging.info("sfda_method.before_iter completed in %5.3f", elapsed)
# Perform the update
st = time.time()
update_step = self.cache_get_update_step(update_step)
batch_metrics, adaptation_state = update_step(
batch=keep_jax_types(batch),
adaptation_state=adaptation_state,
key=step_key,
**method_gather_args,
)
elapsed = time.time() - st
logging.info("sfda_method update_step completed in %5.3f", elapsed)
if current_step % 100 == 0:
st = time.time()
reporter(current_step)
writer.write_scalars(
current_step, flax_utils.unreplicate(batch_metrics)
)
elapsed = time.time() - st
logging.info("sfda_method reporting completed in %5.3f", elapsed)
return flax_utils.unreplicate(adaptation_state)
def evaluate(
self,
model_bundle: model_utils.ModelBundle,
      writer: metric_writers.MetricWriter | None,
adaptation_state: AdaptationState,
eval_dataset: tf.data.Dataset,
multi_label: bool,
modality: Modality,
sample_threshold: int = 5,
compute_mca: bool = False,
) -> None:
"""Evaluate the current adaptation state.
The writer is in charge of logging all results.
Args:
model_bundle: The model_utils.ModelBundle to use for evaluation.
writer: The evaluation writer.
adaptation_state: The current AdaptationState to evaluate.
eval_dataset: The dataset to perform evaluation on.
multi_label: Whether the problem is multi-label or not. Used to determine
adequate metrics.
modality: Which modality are we using.
      sample_threshold: Classes that have fewer samples than this threshold are
        discarded when computing the cmAP metric, in order to reduce the noise
        caused by small sample sizes.
compute_mca: Whether to compute the mean class accuracy metric.
"""
# Define validation metrics.
valid_metrics = get_common_metrics(supervised=True, multi_label=multi_label)
valid_metrics = flax_utils.replicate(valid_metrics.empty())
cmap_metrics = (
model_utils.make_cmap_metrics_dict(("label",)) if multi_label else {}
)
mca_metric = mca.make_mca_metric() if not multi_label else None
@functools.partial(jax.pmap, axis_name="batch")
def update_metrics(
metric_collection: clu_metrics.Collection,
batch: dict[str, jnp.ndarray],
adaptation_state: AdaptationState,
):
variables = {
"params": adaptation_state.model_params,
**adaptation_state.model_state,
}
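      # Run the model in inference mode: dropout disabled (train=False) and
      # BatchNorm using its running statistics (use_running_average=True).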
model_outputs = model_bundle.model.apply(
variables,
batch[modality.value],
train=False,
use_running_average=True,
)
label_mask = batch.get("label_mask", None)
return model_outputs, metric_collection.merge(
metric_collection.gather_from_model_output(
multi_label=multi_label,
outputs=model_outputs,
probabilities=logit2proba(
model_outputs.label, label_mask, multi_label
),
label_mask=label_mask,
label=batch["label"].astype(np.int32),
)
)
update_metrics = self.cache_get_update_metrics(update_metrics)
current_epoch = int(adaptation_state.epoch)
# Loop over validation dataset
for batch in tqdm.tqdm(eval_dataset.as_numpy_iterator()):
batch = jax.tree_map(np.asarray, batch)
verify_batch(batch)
model_outputs, valid_metrics = update_metrics(
metric_collection=valid_metrics,
batch=keep_jax_types(batch),
adaptation_state=flax_utils.replicate(adaptation_state),
)
cmap_metrics = model_utils.update_cmap_metrics_dict(
cmap_metrics, model_outputs, batch
)
if compute_mca:
mca_metric = mca.update_mca_metric(mca_metric, model_outputs, batch)
# Metrics computations and logging
valid_metrics = flax_utils.unreplicate(valid_metrics).compute()
if compute_mca and mca_metric is not None:
mca_metric_value, per_class_accs_value = mca_metric.compute()
valid_metrics["mean_class_accuracy"] = mca_metric_value
for i in range(len(per_class_accs_value)):
valid_metrics[f"{i}_class_accuracy"] = per_class_accs_value[i]
valid_metrics = {k.replace("___", "/"): v for k, v in valid_metrics.items()}
cmap_metrics = flax_utils.unreplicate(cmap_metrics)
for key in cmap_metrics:
cmap_value = cmap_metrics[key].compute(sample_threshold=sample_threshold)
valid_metrics[f"{key}_cmap"] = cmap_value
if writer is not None:
writer.write_scalars(current_epoch, valid_metrics) # pytype: disable=attribute-error # jax-ndarray
def perform_adaptation(
key: jax.random.PRNGKeyArray,
sfda_method: SFDAMethod,
adaptation_state: AdaptationState,
rename_fn: Callable[[Any, Any, str], Any],
inverse_rename_fn: Callable[[Any], Any],
adaptation_dataset: tf.data.Dataset,
use_supervised_metrics: bool,
validation_dataset: tf.data.Dataset,
model_bundle: model_utils.ModelBundle,
logdir: str,
multi_label: bool,
modality: Modality,
eval_every: int,
eval_mca_every: int,
**method_kwargs,
) -> AdaptationState:
"""Given the adaptation method and dataset, perform the full adaptation.
Args:
key: The initial jax random key to use for random operations.
sfda_method: The Source-Free Domain Adaptation method to use.
adaptation_state: The initial AdaptationState to adapt. Once adaptation is
over, its updated version is returned.
rename_fn: The function to use to rename the parameter keys. This is needed
if using `optax.multi_transform`, which can be used to define different
learning rates for different parameters.
inverse_rename_fn: A callable that reverses the changes of `rename_fn`.
adaptation_dataset: The dataset used for adaptation.
use_supervised_metrics: Whether the current adaptation dataset is supervised
or not.
validation_dataset: The dataset used for evaluation.
model_bundle: The model_utils.ModelBundle to use for adaptation.
logdir: Where to write logs.
multi_label: Whether the current problem is multi-label or single-label.
modality: The current modality used.
eval_every: Frequency (in epochs) to trigger evaluation.
eval_mca_every: The frequency (in epochs) to trigger computation of the mean
class accuracy (mca) metric during evaluation, as long as `eval_every` is
set to a multiple of this frequency. That is, each time evaluation is
triggered (based on the value of `eval_every`), the computation of mca may
also be triggered, if the epoch is also a multiple of `eval_mca_every`.
      Note also that if `eval_mca_every` is a negative number, mca will never be
computed.
    **method_kwargs: The method's additional keyword arguments.
Returns:
An updated version of the Adaptation state.
"""
# Initialize metrics
batchwise_metrics = sfda_method.get_adaptation_metrics(
supervised=use_supervised_metrics,
multi_label=multi_label,
**method_kwargs,
)
# Logging
adaptation_writer = metric_writers.create_default_writer(
logdir, asynchronous=False, collection="adaptation"
)
reporter = periodic_actions.ReportProgress(writer=adaptation_writer)
validation_writer = metric_writers.create_default_writer(
logdir, asynchronous=False, collection="validation"
)
# Before run.
st = time.time()
adaptation_state = sfda_method.before_run(
key=key,
model_bundle=model_bundle,
adaptation_state=adaptation_state,
multi_label=multi_label,
modality=modality,
adaptation_dataset=adaptation_dataset,
**method_kwargs,
)
elapsed = time.time() - st
logging.info("sfda.before_run completed in %5.3f", elapsed)
for epoch in range(method_kwargs["num_epochs"]):
# Before every epoch, perform a round of evaluation on the validation set.
if epoch % eval_every == 0:
compute_mca = (epoch % eval_mca_every == 0) and eval_mca_every >= 0
st = time.time()
sfda_method.evaluate( # pytype: disable=wrong-arg-types # jax-ndarray
model_bundle=model_bundle,
adaptation_state=adaptation_state,
eval_dataset=validation_dataset,
writer=validation_writer,
modality=modality,
multi_label=multi_label,
compute_mca=compute_mca,
)
validation_writer.flush()
elapsed = time.time() - st
logging.info("sfda_method.evaluate completed in %5.3f", elapsed)
st = time.time()
adaptation_state = sfda_method.before_epoch(
key=key,
model_bundle=model_bundle,
adaptation_state=adaptation_state,
multi_label=multi_label,
modality=modality,
adaptation_dataset=adaptation_dataset,
writer=adaptation_writer,
**method_kwargs,
)
elapsed = time.time() - st
logging.info("sfda_method.before_epoch completed in %5.3f", elapsed)
st = time.time()
adaptation_state = sfda_method.do_epoch(
key=key,
model_bundle=model_bundle,
adaptation_state=adaptation_state,
rename_fn=rename_fn,
inverse_rename_fn=inverse_rename_fn,
multi_label=multi_label,
modality=modality,
adaptation_dataset=adaptation_dataset,
batchwise_metrics=batchwise_metrics,
writer=adaptation_writer,
reporter=reporter,
workdir=logdir,
use_supervised_metrics=use_supervised_metrics,
**method_kwargs,
)
elapsed = time.time() - st
logging.info("sfda_method.do_epoch completed in %5.3f", elapsed)
adaptation_state = adaptation_state.replace(
epoch=adaptation_state.epoch + 1
)
adaptation_writer.flush()
# When adaptation is finished, we perform a final round of evaluation on the
# validation set.
sfda_method.evaluate( # pytype: disable=wrong-arg-types # jax-ndarray
model_bundle=model_bundle,
adaptation_state=adaptation_state,
eval_dataset=validation_dataset,
writer=validation_writer,
modality=modality,
multi_label=multi_label,
compute_mca=eval_mca_every >= 0,
)
adaptation_writer.close()
validation_writer.close()
return adaptation_state
def logit2proba(
logits: jnp.ndarray, label_mask: jnp.ndarray | None, multi_label: bool
) -> jnp.ndarray:
"""Converts model logits to valid probabilities.
When multi_label=False, uses label_mask to select classes that will be used
  in the softmax normalization term. The output probabilities should
  satisfy jnp.all(probabilities[..., label_mask].sum(-1) == 1.0).
Args:
logits: The logits to be transformed, shape [*, num_classes]
label_mask: The mask conveying the classes to be used. Shape [*,
num_classes]
multi_label: Whether we're in the multi-label setting or not.
Returns:
The resulting probabilities, shape [*, num_classes]
"""
fn = (
nn.sigmoid
if multi_label
else functools.partial(nn.softmax, where=label_mask, initial=0)
)
return fn(logits)
def keyed_map(
key: str, outputs: output.AnyOutput, **kwargs
) -> jnp.ndarray | None:
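  """Computes the average precision for the output and labels named by `key`."""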
label_mask = kwargs.get(key + "_mask", None)
return chirp_metrics.average_precision(
scores=getattr(outputs, key), labels=kwargs[key], label_mask=label_mask
)
def get_common_metrics(
supervised: bool, multi_label: bool
) -> type[clu_metrics.Collection]:
"""Obtain a common set of metrics and losses.
Args:
supervised: Whether the dataset over which those metrics will be tracked has
labels or not.
multi_label: Whether the current problem is multi-label or single-label.
Returns:
A collection of metrics.
"""
metrics_dict = {}
if supervised:
if multi_label:
metrics_dict["label_map"] = clu_metrics.Average.from_fun(
functools.partial(keyed_map, key="label")
)
metrics_dict["supervised_loss"] = clu_metrics.Average.from_fun(
losses.label_binary_xent
)
metrics_dict["entropy_loss"] = clu_metrics.Average.from_fun(
losses.label_binary_ent
)
metrics_dict["marginal_entropy"] = metrics.MarginalBinaryEntropy
else:
metrics_dict["supervised_loss"] = clu_metrics.Average.from_fun(
losses.label_xent
)
metrics_dict["entropy_loss"] = clu_metrics.Average.from_fun(
losses.label_ent
)
metrics_dict["accuracy"] = metrics.Accuracy
metrics_dict["marginal_entropy"] = metrics.MarginalEntropy
return clu_metrics.Collection.create(**metrics_dict)
def verify_batch(batch: dict[str, jnp.ndarray]) -> None:
"""Performs non-jittable verifications on a batch of data.
Args:
batch: The current batch of data.
Raises:
ValueError: If the batch has a label_mask, and label_mask differs across
samples.
"""
if "label_mask" in batch:
label_mask = flax_utils.unreplicate(batch["label_mask"])
if not (
jnp.tile(label_mask[0], (label_mask.shape[0], 1)) == label_mask
).all():
raise ValueError(
"Some metrics (e.g. marginal entropy) can only be computed if each "
"sample's probability distribution is defined over the same set of"
"classes. Therefore, we verify that `label_mask` is the same across "
"samples."
)
def batch_forward(
model: nn.Module,
modality: Modality,
use_batch_statistics: bool,
train: bool = False,
) -> ForwardStepType:
"""Collects the model's output on the current batch of data.
Args:
model: The model.
modality: The modality used.
    use_batch_statistics: Whether to use the current batch's statistics (True)
      or BatchNorm's running statistics (False).
    train: Whether to use the model in training mode. Defaults to False, as
      this function is nominally used to compute pseudo-labels (e.g. the
      teacher step of NOTELA), which usually removes any source of noise
      (including dropout).
Returns:
Batch forward step callable.
"""
@jax.pmap
def forward(batch, model_state, params, rngs):
if use_batch_statistics:
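      # Use the current batch's statistics for normalization layers; the
      # updated statistics are returned as mutable state but discarded here.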
outputs, _ = model.apply(
{"params": params, **model_state},
batch[modality.value],
train=train,
mutable=list(model_state.keys()),
use_running_average=False,
rngs=rngs,
)
else:
outputs = model.apply(
{"params": params, **model_state},
batch[modality.value],
use_running_average=True,
train=train,
rngs=rngs,
)
return outputs
return forward
def get_dataset_length(dataset):
  """Returns the dataset's length, iterating over it once if it is unknown."""
  try:
    return len(dataset)
  except TypeError:
    pass
  # Dataset length is unknown, so we iterate over it once to count batches.
  k = 0
  for _ in dataset.as_numpy_iterator():
    k += 1
  return k
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some utils functions shared across methods."""
from typing import Callable
from absl import logging
from chirp.models import output
from chirp.projects.sfda import adapt
from chirp.projects.sfda import model_utils
import flax
import flax.jax_utils as flax_utils
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tqdm
ForwardStepType = Callable[
[
dict[str, jnp.ndarray],
flax.core.scope.FrozenVariableDict,
flax.core.scope.VariableDict,
jax.random.PRNGKeyArray | None,
],
output.ClassifierOutput,
]
@jax.jit
def jax_cdist(features_a: jnp.ndarray, features_b: jnp.ndarray) -> jnp.ndarray:
"""A jax equivalent of scipy.spatial.distance.cdist.
Computes the pairwise squared euclidean distance between each pair of features
from features_a and features_b.
Args:
features_a: The first batch of features, expected shape [*, batch_size_a,
feature_dim]
features_b: The second batch of features, expected shape [*, batch_size_b,
feature_dim]
Returns:
The pairwise squared euclidean distance between each pair of features from
features_a and features_b. Shape [*, batch_size_a, batch_size_b]
Raises:
ValueError: If the shape of features_a's last dimension does not match the
shape of feature_b's last dimension.
"""
if features_a.shape[-1] != features_b.shape[-1]:
raise ValueError(
"The feature dimension should be the same. Currently features_a: "
f"{features_a.shape} and features_b: {features_b.shape}"
)
feature_dim = features_a.shape[-1]
flat_features_a = jnp.reshape(features_a, [-1, feature_dim])
flat_features_b = jnp.reshape(features_b, [-1, feature_dim])
flat_transpose_b = flat_features_b.T
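  # Expand the squared distance as ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2,
  # which only requires a single matrix multiplication between the two batches.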
distances = (
jnp.sum(jnp.square(flat_features_a), 1, keepdims=True)
- 2 * jnp.matmul(flat_features_a, flat_transpose_b)
+ jnp.sum(jnp.square(flat_transpose_b), 0, keepdims=True)
)
return distances
def forward_dataset(
dataset: tf.data.Dataset,
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
use_batch_statistics: bool = False,
train: bool = False,
key: jax.random.PRNGKeyArray | None = None,
) -> dict[str, jnp.ndarray | np.ndarray]:
"""Fowards a dataset through a given model.
Args:
dataset: The dataset to extract from.
adaptation_state: The current adaptation state, including the model's state
and parameters used for extraction.
model_bundle: The current ModelBundle.
modality: The current data modality.
multi_label: Whether this is a multi-label problem. This affects how model's
probabilities are packaged.
use_batch_statistics: Whether to use batch's statistics for BatchNorm layers
during feature extraction.
train: Whether to use dropout or not during the forward pass.
key: The random key to use if train is set to True.
Returns:
    A dictionary with the following keys:
      -embedding: The extracted embeddings of shape [dataset_size,
        embedding_dimension].
      -proba: The output classwise probabilities of shape [dataset_size,
        num_classes].
      -id: The ids of the examples extracted, consistent with embedding and
        proba, of shape [dataset_size,]. id[i] corresponds to embedding[i] and
        proba[i].
Raises:
ValueError: In case the ids do not uniquely identify each sample, or if
the samples don't have the same label_mask.
"""
logging.info("Starting feature extraction...")
  all_outputs = []
all_ids = [] # used to identify all samples
model_state = flax_utils.replicate(adaptation_state.model_state)
params = flax_utils.replicate(adaptation_state.model_params)
model = model_bundle.model
only_keep_unmasked_classes = adaptation_state.restrict_classes
forward_step = adapt.batch_forward(
model, modality, use_batch_statistics, train
)
# Forward the whole dataset. Store embeddings, samples' ids, labels, and
# model's probabilities.
for index, batch in tqdm.tqdm(enumerate(dataset.as_numpy_iterator())):
batch = jax.tree_map(np.asarray, batch)
if key is not None:
batch_key, key = jax.random.split(key)
batch_key = jax.random.split(batch_key, num=jax.local_device_count())
batch_key = {"dropout": batch_key}
else:
batch_key = None
if "label_mask" in batch and only_keep_unmasked_classes and index == 0:
# We will use the first sample's label_mask as a reference, and ensure
# all label_masks are the same.
reference_mask = flax_utils.unreplicate(batch["label_mask"])[0]
model_outputs = forward_step( # pytype: disable=wrong-arg-types # jax-ndarray
adapt.keep_jax_types(batch), model_state, params, batch_key
)
if "label_mask" in batch and only_keep_unmasked_classes:
# We make sure that the label_mask is the same for all samples in the
# dataset.
if not (
jnp.tile(reference_mask, (batch["label_mask"].shape[0], 1))
== batch["label_mask"]
).all():
raise ValueError(
"All samples should have the same label_mask for the"
"'only_keep_unmasked_classes' option to work"
"adequately."
)
# We only keep unmasked classes.
model_outputs = model_outputs.replace(
label=model_outputs.label[..., reference_mask.astype(bool)]
)
    all_outputs.append(flax_utils.unreplicate(model_outputs))
all_ids += list(batch["tfds_id"].reshape(-1))
# Concatenate every list to obtain a single array for each field. Store these
# arrays in the result dictionary.
logits2proba = nn.sigmoid if multi_label else nn.softmax
result = {}
result["embedding"] = jnp.concatenate(
      [x.embedding for x in all_outputs], axis=0
) # [dataset_size, n_dimensions]
result["proba"] = jnp.concatenate(
      [logits2proba(x.label) for x in all_outputs], axis=0
) # [dataset_size, num_classes]
ids = np.array(all_ids) # [dataset_size,]
# Make some verifications.
if np.unique(ids).shape[0] != ids.shape[0]:
raise ValueError("Ids should uniquely define each sample.")
result["id"] = ids
if "label_mask" in batch:
result["label_mask"] = reference_mask
return result
def maybe_restrict_labels(
model_outputs, reference_label_mask, adaptation_state
):
"""Restrict model_outputs to target classes, if appropriate."""
if not adaptation_state.restrict_classes:
return model_outputs
if reference_label_mask is None:
raise ValueError("Asked to restrict classes, but no label mask provided.")
# We restrict the model's logits to the classes that appear in the
# current dataset to ensure compatibility with
# method_state["dataset_proba"].
model_outputs = model_outputs.replace(
label=model_outputs.label[..., reference_label_mask.astype(bool)]
)
return model_outputs
def get_label_mask(batch) -> jnp.ndarray | None:
if "label_mask" in batch:
label_mask = flax_utils.unreplicate(batch["label_mask"])
reference_label_mask = label_mask[0] # [num_classes]
# Ensure that the label_mask is the same for all samples.
assert (
jnp.tile(reference_label_mask, (label_mask.shape[0], 1)) == label_mask
).all()
else:
reference_label_mask = None
return reference_label_mask
def pad_pseudo_label(
reference_label_mask: jnp.ndarray | None,
pseudo_label: jnp.ndarray,
adaptation_state: adapt.AdaptationState,
) -> jnp.ndarray:
"""Pads pseudo-labels back to the global probability space.
Args:
reference_label_mask: The mask indicating which 'global' classes are used
for the adaptation, shape [num_classes].
pseudo_label: Pseudo-label, expressed in a potentially reduced probability
space, shape [batch_size, label_mask.sum()].
adaptation_state: The adaptation state.
Returns:
The zero-padded pseudo-labels, of shape [batch_size, num_classes]
Raises:
ValueError: If pseudo_label's last dimension does not match the number of
classes used for adaptation, as indicated by label_mask
"""
if not adaptation_state.restrict_classes:
return pseudo_label
if reference_label_mask is None:
raise ValueError("Asked to pad pseudolabels, but no label mask provided.")
if reference_label_mask.ndim != 1:
raise ValueError(
"Expecting a vector for label_mask. Current shape is"
f" {reference_label_mask.shape}"
)
batch_size = pseudo_label.shape[0]
num_classes_used = reference_label_mask.sum()
num_classes_total = reference_label_mask.shape[0]
if pseudo_label.shape[-1] != num_classes_used:
raise ValueError(
"Pseudo-labels should be expressed in the same"
"restricted set of classes provided by the label_mask."
"Currently, label_mask indicates that "
f"{num_classes_used} should be used, but pseudo_label "
f"is defined over {pseudo_label.shape[-1]} classes."
)
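  # Scatter each row of `pseudo_label` into the columns selected by
  # `reference_label_mask`, leaving the masked-out classes at zero.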
padded_pseudo_label = jnp.zeros((batch_size, num_classes_total))
col_index = jnp.tile(jnp.where(reference_label_mask)[0], batch_size)
row_index = jnp.repeat(jnp.arange(batch_size), num_classes_used)
return padded_pseudo_label.at[(row_index, col_index)].set(
pseudo_label.flatten()
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of common losses used by several SFDA methods."""
import enum
import flax
import jax
import jax.numpy as jnp
class ReduceStrategy(enum.Enum):
"""Strategies to reduce an axis.
Attributes:
NONE: No reduction
AVERAGE: Average reduction
"""
NONE = "none"
AVERAGE = "average"
def label_kl(
probabilities: jnp.ndarray,
label: jnp.ndarray,
label_mask: jnp.ndarray | None,
eps: float = 1e-10,
**_,
) -> jnp.ndarray:
"""Kulback-Leibler divergence for single-class classification settings.
Args:
probabilities: Model's probabilities, expected shape [*, num_classes].
label: One-hot labels, expected shape [*, num_classes].
label_mask: Array representing classes to be kept, shape [*, num_classes].
eps: For numerical stability
Returns:
    Single-label Kullback-Leibler divergence between probabilities and label.
Shape [*,].
"""
return label_xent(probabilities, label, label_mask, eps) - label_ent(
probabilities, label_mask, eps
) # pytype: disable=wrong-arg-types # jax-ndarray
def label_binary_kl(
probabilities: jnp.ndarray, label: jnp.ndarray, **_
) -> jnp.ndarray:
"""Kulback-Leibler divergence for multi-class classification settings.
Args:
probabilities: Model's probabilities, expected shape [*, num_classes].
label: One-hot labels, expected shape [*, num_classes].
Returns:
    Multi-label Kullback-Leibler divergence between probabilities and label.
Shape [*, num_classes].
"""
return label_binary_xent(
probabilities, label, class_reduce=ReduceStrategy.NONE
) - label_binary_ent(probabilities, class_reduce=ReduceStrategy.NONE)
def label_xent(
probabilities: jnp.ndarray,
label: jnp.ndarray,
label_mask: jnp.ndarray | None,
sample_mask: jnp.ndarray | None = None,
eps: float = 1e-10,
**_,
) -> jnp.ndarray:
"""Cross entropy for single-label classification settings.
Args:
probabilities: Model's probabilities, expected shape [*, num_classes].
label: One-hot labels, expected shape [*, num_classes].
    label_mask: Array representing classes to be kept, shape [*,
num_classes].
sample_mask: A way to mask out some samples when computing the loss. Useful
for instance to only keep high-confidence samples in pseudo-labelling.
eps: For numerical stability
Returns:
Multi-class xent. Shape [*,].
"""
if sample_mask is None:
sample_mask = jnp.ones(probabilities.shape[:-1])
if label_mask is not None and (
label_mask.shape[-1] == probabilities.shape[-1]
):
xent = -((label * jnp.log(probabilities + eps)) * label_mask).sum(-1)
else:
# TODO(mboudiaf) If label_mask is not None, check that probabilities are
# already masked. In other words, ensure
# probabilities.shape[-1] == label_mask.sum(-1)
xent = -(label * jnp.log(probabilities + eps)).sum(-1)
return sample_mask * xent
def label_ent(
probabilities: jnp.ndarray,
label_mask: jnp.ndarray | None,
eps: float = 1e-10,
**_,
) -> jnp.ndarray:
"""Standard entropy used for single-label classification settings.
Args:
probabilities: Model's probabilities, expected shape [*, num_classes]
    label_mask: Array representing classes to be kept, shape [*,
num_classes].
eps: For numerical stability.
Returns:
The entropy of probabilities, shape [*,]
"""
if label_mask is not None and label_mask.shape[-1] == probabilities.shape[-1]:
ent = -((probabilities * jnp.log(probabilities + eps)) * label_mask).sum(-1)
else:
# TODO(mboudiaf) If label_mask is not None, check that probabilities are
# already masked. In other words, ensure
# probabilities.shape[-1] == label_mask.sum(-1)
ent = -((probabilities * jnp.log(probabilities + eps))).sum(-1)
return ent
def label_binary_ent(
probabilities: jnp.ndarray,
label_mask: jnp.ndarray | None = None,
eps: float = 1e-10,
class_reduce: ReduceStrategy = ReduceStrategy.AVERAGE,
**_,
) -> jnp.ndarray:
"""Computes averaged classwise binary entropy.
Args:
probabilities: Probabilities used to compute the binary entropies. Expected
shape [*, num_classes].
label_mask: Used to mask classes before averaging across classes. Expected
shape [*, num_classes].
eps: For numerical stability.
class_reduce: Class reduction strategy.
Returns:
    The binary entropies, averaged across classes. Shape [*,]
Raises:
ValueError: In case class_reduce is not a known ReduceStrategy.
"""
if label_mask is None:
label_mask = jnp.ones_like(probabilities)
assert probabilities.shape == label_mask.shape, (
probabilities.shape,
label_mask.shape,
)
binary_entropies = -(
probabilities * jnp.log(probabilities + eps)
+ (1 - probabilities) * jnp.log((1 - probabilities) + eps)
) # [..., num_classes]
if class_reduce == ReduceStrategy.AVERAGE:
return (label_mask * binary_entropies).sum(axis=-1) / (
label_mask.sum(axis=-1) + eps
)
elif class_reduce == ReduceStrategy.NONE:
return label_mask * binary_entropies
else:
raise ValueError(f"Unknown reduce strategy {class_reduce} used.")
def label_binary_xent(
probabilities: jnp.ndarray,
label: jnp.ndarray,
label_mask: jnp.ndarray | None = None,
eps: float = 1e-10,
class_reduce: ReduceStrategy = ReduceStrategy.AVERAGE,
**_,
) -> jnp.ndarray:
"""Computes averaged classwise binary cross-entropy.
Args:
probabilities: Shape [*, num_classes]
label: Shape [*, num_classes]
label_mask: Shape [*, num_classes]
eps: For numerical stability.
class_reduce: Class reduction strategy.
Returns:
Average of per-class binary xent. Shape [*]
Raises:
ValueError: In case class_reduce is not a known ReduceStrategy.
"""
if label_mask is None:
label_mask = jnp.ones_like(probabilities)
  assert probabilities.shape == label.shape == label_mask.shape, (
      probabilities.shape,
      label.shape,
      label_mask.shape,
  )
binary_entropies = -(
label * jnp.log(probabilities + eps)
+ (1 - label) * jnp.log((1 - probabilities) + eps)
) # [..., num_classes]
if class_reduce == ReduceStrategy.AVERAGE:
return (label_mask * binary_entropies).sum(axis=-1) / (
label_mask.sum(axis=-1) + eps
)
elif class_reduce == ReduceStrategy.NONE:
return label_mask * binary_entropies
else:
raise ValueError(f"Unknown reduce strategy {class_reduce} used.")
def l2_loss(params: flax.core.scope.VariableDict):
"""Used to simulate weight decay."""
loss = 0.0
for p in jax.tree_util.tree_leaves(params):
loss += (p**2).sum()
return loss
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry script for source-free domain adaptation."""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from chirp import config_utils
from chirp.projects.sfda import adapt
from chirp.projects.sfda import data_utils
from chirp.projects.sfda.configs import config_globals
import jax
from ml_collections.config_flags import config_flags
import tensorflow as tf
_CONFIG = config_flags.DEFINE_config_file("config")
_METHOD_CONFIG = config_flags.DEFINE_config_file(
"method_config",
    help_string="Configuration file for method-specific hyperparameters.",
)
_LOGDIR = flags.DEFINE_string("logdir", None, "Work unit logging directory.")
_VISDA_DIR = flags.DEFINE_string("visda_dir", None,
"Data directory for VisDa dataset.")
flags.mark_flags_as_required(["config", "logdir"])
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
logging.info(_CONFIG.value)
logging.info(_METHOD_CONFIG.value)
# Preventing tensorflow from taking any GPU memory and starving Jax.
tf.config.experimental.set_visible_devices([], "GPU")
config = config_utils.parse_config(
_CONFIG.value, config_globals.get_globals()
)
method_config = config_utils.parse_config(
_METHOD_CONFIG.value, config_globals.get_globals()
)
if jax.local_device_count() > 1:
raise NotImplementedError(
"Only supporting non-distributed setting for now."
)
  # Recover the SFDA method
sfda_method = method_config.sfda_method
# Convert the splits
config = config.unlock()
method_config = getattr(method_config, config.modality.value)
# Target class list to use for initialization. If None, defaults to
# config.init_config.target_class_list.
init_target_class_list = None
if config.modality == adapt.Modality.AUDIO:
adaptation_dataset, val_dataset = data_utils.get_audio_datasets(
adaptation_data_config=config.adaptation_data_config,
eval_data_config=config.eval_data_config,
sample_rate_hz=config.sample_rate_hz,
)
else:
if "corrupted" in config.init_config.target_class_list:
builder_kwargs = {
"config": "{}_{}".format(
config.init_config.corruption_name,
config.init_config.corruption_severity,
)
}
elif config.init_config.target_class_list == "vis_da_c":
builder_kwargs = {
"data_dir": _VISDA_DIR.value,
}
elif config.init_config.target_class_list == "office_home":
# We use the source domain name in the target class list for
# initialization, which indicates to the model which checkpoint to load.
init_target_class_list = f"office_home/{config.init_config.source_domain}"
builder_kwargs = {
"data_dir": _VISDA_DIR.value,
"config": config.init_config.target_domain,
}
else:
builder_kwargs = {}
adaptation_dataset, val_dataset = data_utils.get_image_datasets(
image_model=config.model_config.encoder,
dataset_name=config.init_config.target_class_list,
batch_size_train=config.batch_size_adaptation,
batch_size_eval=config.batch_size_eval,
data_seed=config.init_config.rng_seed,
builder_kwargs=builder_kwargs,
)
# Initialize state and bundles
(model_bundle, adaptation_state, key, rename_fn, inverse_rename_fn) = (
sfda_method.initialize(
model_config=config.model_config,
pretrained=config.init_config.pretrained_model,
rng_seed=config.init_config.rng_seed,
input_shape=None
if config.modality == adapt.Modality.IMAGE
else config.init_config.input_shape,
target_class_list=(
init_target_class_list or config.init_config.target_class_list
),
adaptation_iterations=adapt.get_dataset_length(adaptation_dataset)
* method_config.num_epochs,
modality=config.modality,
optimizer_config=method_config.optimizer_config,
)
)
# Perform adaptation
adaptation_state = adapt.perform_adaptation(
key=key,
adaptation_state=adaptation_state,
rename_fn=rename_fn,
inverse_rename_fn=inverse_rename_fn,
adaptation_dataset=adaptation_dataset,
validation_dataset=val_dataset,
model_bundle=model_bundle,
logdir=_LOGDIR.value,
use_supervised_metrics=True,
target_class_list=config.init_config.target_class_list,
multi_label=config.multi_label,
modality=config.modality,
eval_every=config.eval_every,
eval_mca_every=config.eval_mca_every,
sfda_method=sfda_method,
**method_config,
)
if __name__ == "__main__":
app.run(main)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Our adaptation of the Noisy Student method for SFDA."""
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from chirp.projects.sfda import method_utils
from chirp.projects.sfda import model_utils
from clu import metrics as clu_metrics
import flax.jax_utils as flax_utils
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
class DropoutStudent(adapt.SFDAMethod):
"""Our adaptation of the Noisy Student method for SFDA.
  As opposed to the original method, this adaptation only uses a single network.
  The teacher produces predictions from clean images, which the student tries to
  match using Dropout as the sole source of noise. In the end, Dropout Student
is equivalent to NOTELA when setting the weight controlling the Laplacian
regularization to 0.
"""
_CITATION = (
"Xie, Qizhe, et al. 'Self-training with noisy student improves imagenet "
"classification.' Proceedings of the IEEE/CVF conference on computer "
"vision and pattern recognition. 2020."
)
def compute_pseudo_label(
self,
probabilities: jnp.ndarray,
label_mask: jnp.ndarray,
multi_label: bool,
alpha: float,
normalize_pseudo_labels: bool = True,
) -> jnp.ndarray:
"""Compute the pseudo-labels from the model's probabilities.
Args:
probabilities: Model's output probabilities. Shape [*, num_classes]
      label_mask: Array representing classes to be kept, shape [*, num_classes].
multi_label: Whether this is a multi-label problem.
alpha: Weight controlling the 'softness' of pseudo-labels.
      normalize_pseudo_labels: Whether to normalize pseudo-labels to turn them
        into valid probability distributions. This option should be kept to
        True; setting it to False is only meant for experimental purposes.
Returns:
The pseudo-labels.
"""
pseudo_labels = jax.lax.stop_gradient(probabilities)
if multi_label:
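      # Treat each class as an independent binary distribution [1 - p, p] so
      # that temperature sharpening and renormalization happen per class.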
pseudo_labels = jnp.stack([1 - pseudo_labels, pseudo_labels], axis=-1)
pseudo_labels = pseudo_labels ** (1 / alpha)
if normalize_pseudo_labels:
pseudo_labels /= pseudo_labels.sum(-1, keepdims=True)
pseudo_labels = pseudo_labels[
..., -1
] # we only keep the 'positive' probability
else:
pseudo_labels = pseudo_labels ** (1 / alpha)
if normalize_pseudo_labels:
if label_mask is not None and (
label_mask.shape[-1] == pseudo_labels.shape[-1]
):
normalization = (pseudo_labels * label_mask).sum(-1, keepdims=True)
else:
# Then pseudo_labels are already properly masked.
# TODO(mboudiaf): If label_mask is not None, check that
# label_mask.sum(-1) == pseudo_labels.shape(-1) in a jittable
# fashion.
normalization = pseudo_labels.sum(-1, keepdims=True)
pseudo_labels /= normalization
return pseudo_labels
def before_epoch(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: adapt.AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> adapt.AdaptationState:
"""Compute the pseudo-labels when used in 'offline mode'.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
The adaptation state, with a potentially updated 'method_state' attribute.
"""
if not method_kwargs["online_pl_updates"]:
# Compute pseudo-labels that will be used during the next epoch of
# adaptation.
forward_result = method_utils.forward_dataset(
dataset=adaptation_dataset,
adaptation_state=adaptation_state,
model_bundle=model_bundle,
modality=modality,
multi_label=multi_label,
use_batch_statistics=method_kwargs["update_bn_statistics"],
)
sample_ids = forward_result["id"]
method_state = {
"pseudo_label": self.compute_pseudo_label(
forward_result["proba"],
multi_label=multi_label,
alpha=method_kwargs["alpha"],
label_mask=forward_result["label_mask"],
normalize_pseudo_labels=method_kwargs["normalize_pseudo_labels"],
),
"id2index": {sample_ids[i]: i for i in range(len(sample_ids))},
}
adaptation_state = adaptation_state.replace(method_state=method_state)
return adaptation_state
def before_iter(
self,
key: jax.random.PRNGKeyArray,
batch: dict[str, np.ndarray],
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> tuple[adapt.AdaptationState, dict[str, jnp.ndarray]]:
"""Grab or compute the pseudo-labels for the current batch.
In the offline mode, we only grab pre-computed pseudo-labels from the
pseudo_label memory. In the online mode, we compute the pseudo-labels
using the current batch of data.
Args:
key: The jax random key used for random operations.
batch: The current batch of data.
adaptation_state: The current state of adaptation.
model_bundle: The ModelBundle used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
      **method_kwargs: Additional method-specific kwargs.
Returns:
      If using offline mode, the untouched adaptation_state. Otherwise, an
        updated version in which the method_state's memories have been
        updated.
      A dictionary containing the pseudo-labels to use for the iteration.
"""
reference_label_mask = method_utils.get_label_mask(batch)
if method_kwargs["online_pl_updates"]:
# In the online version, we compute the pseudo-labels on-the-go.
forward_step = self.cache_get_forward_step(
model_bundle.model, modality, method_kwargs["update_bn_statistics"]
)
model_outputs = forward_step( # pytype: disable=wrong-arg-types # jax-ndarray
adapt.keep_jax_types(batch),
adaptation_state.model_state,
adaptation_state.model_params,
None,
)
model_outputs = flax_utils.unreplicate(model_outputs)
pseudo_label = self.compute_pseudo_label(
probabilities=adapt.logit2proba(
model_outputs.label, reference_label_mask, multi_label
),
label_mask=reference_label_mask,
multi_label=multi_label,
alpha=method_kwargs["alpha"],
normalize_pseudo_labels=method_kwargs["normalize_pseudo_labels"],
)
else:
# In the offline version, we simply grab the pseudo-labels that were
# computed before the epoch.
method_state = flax_utils.unreplicate(adaptation_state.method_state)
batch_indexes = np.array(
[
method_state["id2index"][x]
for x in flax_utils.unreplicate(batch["tfds_id"])
]
)
pseudo_label = method_state["pseudo_label"][batch_indexes]
if reference_label_mask is not None:
pseudo_label = method_utils.pad_pseudo_label(
reference_label_mask, pseudo_label, adaptation_state
)
return adaptation_state, {
"pseudo_label": flax_utils.replicate(pseudo_label)
}
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation."""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
def single_label_loss_fn(probabilities, pseudo_label, label_mask, **_):
pl_xent = losses.label_xent(
probabilities=probabilities, label=pseudo_label, label_mask=label_mask
)
return pl_xent
def multi_label_loss_fn(
probabilities: jnp.ndarray,
pseudo_label: jnp.ndarray,
label_mask: jnp.ndarray,
**_
):
pl_xent = losses.label_binary_xent(
probabilities=probabilities,
label=pseudo_label,
label_mask=label_mask,
)
return pl_xent
loss_fn = multi_label_loss_fn if multi_label else single_label_loss_fn
metrics_dict["main_loss"] = clu_metrics.Average.from_fun(loss_fn)
return clu_metrics.Collection.create(**metrics_dict)
|